am ba6f2595: Merge "Remove superfluous include path from Makefile"

* commit 'ba6f2595884bb433cdf436b7d6fcde099c042189':
  Remove superfluous include path from Makefile
diff --git a/.gitignore b/.gitignore
index af1b0d7..b61faef 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,17 +1,18 @@
 *.a
 *.exe
+*.idb
 *.lib
 *.log
 *.map
 *.mk
 *.ncb
+*.pdb
 *.pyc
 *.scons*
+*.so
 *.suo
 *.user
 *.xcodeproj
-*.idb
-*.pdb
 #*#
 *~
 .cpplint-cache
@@ -21,6 +22,7 @@
 shell_g
 /build/gyp
 /obj/
+/out/
 /test/es5conform/data/
 /test/mozilla/data/
 /test/sputnik/sputniktests/
@@ -31,3 +33,4 @@
 /tools/visual_studio/Release
 /xcodebuild/
 TAGS
+*.Makefile
diff --git a/AUTHORS b/AUTHORS
index 843d1d2..fcb5c20 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -7,7 +7,9 @@
 Sigma Designs Inc.
 ARM Ltd.
 Hewlett-Packard Development Company, LP
+Igalia, S.L.
 
+Akinori MUSHA <knu@FreeBSD.org>
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexander Karpinsky <homm86@gmail.com>
 Alexandre Vassalotti <avassalotti@gmail.com>
@@ -19,6 +21,7 @@
 Daniel James <dnljms@gmail.com>
 Dineel D Sule <dsule@codeaurora.org>
 Erich Ocean <erich.ocean@me.com>
+Fedor Indutny <fedor@indutny.com>
 Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 Joel Stanley <joel.stan@gmail.com>
@@ -34,9 +37,11 @@
 Peter Varga <pvarga@inf.u-szeged.hu>
 Rafal Krypa <rafal@krypa.net>
 Rene Rebe <rene@exactcode.de>
+Robert Mustacchi <rm@fingolfin.org>
 Rodolph Perfetta <rodolph.perfetta@arm.com>
 Ryan Dahl <coldredlemur@gmail.com>
 Sanjoy Das <sanjoy@playingwithpointers.com>
 Subrato K De <subratokde@codeaurora.org>
 Vlad Burlik <vladbph@gmail.com>
+Yuqiang Xian <yuqiang.xian@intel.com>
 Zaheer Ahmad <zahmad@codeaurora.org>
diff --git a/Android.libv8.mk b/Android.libv8.mk
index f2a13c8..14c4e19 100644
--- a/Android.libv8.mk
+++ b/Android.libv8.mk
@@ -16,6 +16,7 @@
 # and V8_LOCAL_JS_LIBRARY_FILES
 V8_LOCAL_SRC_FILES :=
 V8_LOCAL_JS_LIBRARY_FILES :=
+V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES :=
 include $(LOCAL_PATH)/Android.v8common.mk
 
 # Target can only be linux
@@ -26,6 +27,7 @@
 LOCAL_SRC_FILES := $(V8_LOCAL_SRC_FILES)
 
 LOCAL_JS_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_LIBRARY_FILES))
+LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES))
 
 # Copy js2c.py to intermediates directory and invoke there to avoid generating
 # jsmin.pyc in the source directory
@@ -35,14 +37,23 @@
 	$(copy-file-to-target)
 
 # Generate libraries.cc
-GEN1 := $(intermediates)/libraries.cc $(intermediates)/libraries-empty.cc
+GEN1 := $(intermediates)/libraries.cc
 $(GEN1): SCRIPT := $(intermediates)/js2c.py
 $(GEN1): $(LOCAL_JS_LIBRARY_FILES) $(JS2C_PY)
 	@echo "Generating libraries.cc"
 	@mkdir -p $(dir $@)
-	python $(SCRIPT) $(GEN1) CORE $(LOCAL_JS_LIBRARY_FILES)
+	python $(SCRIPT) $(GEN1) CORE off $(LOCAL_JS_LIBRARY_FILES)
 V8_GENERATED_LIBRARIES := $(intermediates)/libraries.cc
 
+# Generate experimental-libraries.cc
+GEN2 := $(intermediates)/experimental-libraries.cc
+$(GEN2): SCRIPT := $(intermediates)/js2c.py
+$(GEN2): $(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES) $(JS2C_PY)
+	@echo "Generating experimental-libraries.cc"
+	@mkdir -p $(dir $@)
+	python $(SCRIPT) $(GEN2) EXPERIMENTAL off $(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES)
+V8_GENERATED_LIBRARIES += $(intermediates)/experimental-libraries.cc
+
 LOCAL_GENERATED_SOURCES += $(V8_GENERATED_LIBRARIES)
 
 # Generate snapshot.cc
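
The libraries.cc / experimental-libraries.cc rules above now pass an extra compression argument ("off") between the library type and the source list, and a second js2c.py invocation generates experimental-libraries.cc from the Harmony sources. Expanded by hand, the two generator commands would look roughly like the sketch below (angle-bracket paths are placeholders; the real values come from $(intermediates) and the V8_LOCAL_JS_* lists in Android.v8common.mk):

    # Core libraries: target file, CORE type, compression off, then the core JS sources.
    python js2c.py <intermediates>/libraries.cc CORE off <core JS sources> src/macros.py
    # Experimental (Harmony) libraries: same script, EXPERIMENTAL type, proxy/weakmap sources.
    python js2c.py <intermediates>/experimental-libraries.cc EXPERIMENTAL off src/proxy.js src/weakmap.js
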
diff --git a/Android.mksnapshot.mk b/Android.mksnapshot.mk
index b8462ac..c1ffcea 100644
--- a/Android.mksnapshot.mk
+++ b/Android.mksnapshot.mk
@@ -12,6 +12,7 @@
 
 V8_LOCAL_SRC_FILES :=
 V8_LOCAL_JS_LIBRARY_FILES :=
+V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES :=
 include $(LOCAL_PATH)/Android.v8common.mk
 
 V8_LOCAL_SRC_FILES += \
@@ -33,6 +34,7 @@
 LOCAL_SRC_FILES := $(V8_LOCAL_SRC_FILES)
 
 LOCAL_JS_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_LIBRARY_FILES))
+LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES))
 
 # Copy js2c.py to intermediates directory and invoke there to avoid generating
 # jsmin.pyc in the source directory
@@ -42,14 +44,23 @@
 	$(copy-file-to-target)
 
 # Generate libraries.cc
-GEN2 := $(intermediates)/libraries.cc $(intermediates)/libraries-empty.cc
-$(GEN2): SCRIPT := $(intermediates)/js2c.py
-$(GEN2): $(LOCAL_JS_LIBRARY_FILES) $(JS2C_PY)
+GEN3 := $(intermediates)/libraries.cc
+$(GEN3): SCRIPT := $(intermediates)/js2c.py
+$(GEN3): $(LOCAL_JS_LIBRARY_FILES) $(JS2C_PY)
 	@echo "Generating libraries.cc"
 	@mkdir -p $(dir $@)
-	python $(SCRIPT) $(GEN2) CORE $(LOCAL_JS_LIBRARY_FILES)
+	python $(SCRIPT) $(GEN3) CORE off $(LOCAL_JS_LIBRARY_FILES)
 LOCAL_GENERATED_SOURCES := $(intermediates)/libraries.cc
 
+# Generate experimental-libraries.cc
+GEN4 := $(intermediates)/experimental-libraries.cc
+$(GEN4): SCRIPT := $(intermediates)/js2c.py
+$(GEN4): $(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES) $(JS2C_PY)
+	@echo "Generating experimental-libraries.cc"
+	@mkdir -p $(dir $@)
+	python $(SCRIPT) $(GEN4) EXPERIMENTAL off $(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES)
+LOCAL_GENERATED_SOURCES += $(intermediates)/experimental-libraries.cc
+
 LOCAL_CFLAGS := \
 	-Wno-endif-labels \
 	-Wno-import \
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 92501d3..2012573 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -29,6 +29,7 @@
 	src/disassembler.cc \
 	src/diy-fp.cc \
 	src/dtoa.cc \
+	src/elements.cc \
 	src/execution.cc \
 	src/extensions/externalize-string-extension.cc \
 	src/extensions/gc-extension.cc \
@@ -36,7 +37,6 @@
 	src/fast-dtoa.cc \
 	src/fixed-dtoa.cc \
 	src/flags.cc \
-	src/frame-element.cc \
 	src/frames.cc \
 	src/full-codegen.cc \
 	src/func-name-inferrer.cc \
@@ -73,7 +73,7 @@
 	src/runtime-profiler.cc \
 	src/safepoint-table.cc \
 	src/scanner.cc \
-	src/scanner-base.cc \
+	src/scanner-character-streams.cc \
 	src/scopeinfo.cc \
 	src/scopes.cc \
 	src/serialize.cc \
@@ -84,12 +84,13 @@
 	src/strtod.cc \
 	src/stub-cache.cc \
 	src/token.cc \
-	src/top.cc \
 	src/type-info.cc \
 	src/unicode.cc \
 	src/utils.cc \
 	src/v8-counters.cc \
 	src/v8.cc \
+	src/v8conversions.cc \
+	src/v8utils.cc \
 	src/v8threads.cc \
 	src/variables.cc \
 	src/version.cc \
@@ -136,7 +137,7 @@
 	src/uri.js \
 	src/math.js \
 	src/messages.js \
-	src/apinatives.js
+	src/apinatives.js 
 
 # These JS library sources must follow the above sources but their order is not
 # important.
@@ -150,3 +151,8 @@
 
 V8_LOCAL_JS_LIBRARY_FILES += \
 	src/macros.py
+
+V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := \
+	src/proxy.js \
+	src/weakmap.js
+
diff --git a/ChangeLog b/ChangeLog
index 781f0ca..99495dd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,604 @@
+2011-09-15: Version 3.6.4
+
+        Fixed d8's broken readline history.
+
+        Removed the need for code delete events in CPU profiler (Issue 1466).
+
+        Fixed debugger stepping next with try/catch recursion (Issue 1639).
+
+        Fixed parallel execution in d8 (with -p) and some memory leaks.
+
+        Support for precise stepping in functions compiled before debugging was
+        started (step 1).
+
+
+2011-09-13: Version 3.6.3
+
+        Implemented better support of typed arrays in the d8 shell.
+
+        Bug fixes and performance improvements on all platforms.
+
+
+2011-09-08: Version 3.6.2
+
+        Added "dependencies" target to top-level Makefile.
+
+        Added ability to turn profiler on/off in d8.
+
+        Added "soname_version" parameter to common.gypi, v8.gyp, and Makefile.
+
+        Fixed several crash bugs.
+
+
+2011-09-07: Version 3.6.1
+
+        Fixed a bug in abrupt exit from with or catch inside finally.
+
+        Fixed possible crash in FixedDoubleArray::Initialize() (Chromium
+        issue 95113).
+
+        Fixed a bug in Page::GetRegionMaskForSpan (Chromium issue 94425).
+
+        Fixed a few clang warnings (which -Werror treated as errors).
+
+        Performance improvements on all platforms.
+
+
+2011-09-05: Version 3.6.0
+
+        Fixed a bug when optimizing named function expression (issue 1647).
+
+        Fixed a bug when optimizing f.call.apply (issue 1650).
+
+        Made arguments and caller always be null on native functions
+        (issues 1548 and 1643).
+
+        Fixed issue 1648 (cross-compiling x64 targeting ia32).
+
+        Fixed issue 371 (d8 printing of strings containing \0).
+
+        Fixed order of evaluation in arguments to parseInt (issue 1649).
+
+        Fixed a problem with large heap snapshots in Chrome DevTools
+        (issue 1658, chromium issue 89268).
+
+        Upped default maximum heap size from 512M to 700M.
+
+
+2011-08-31: Version 3.5.10
+
+        Added dependency of v8_base on WinSocket2 Windows library in
+        the GYP-build.
+
+        Various bugfixes.
+
+
+2011-08-29: Version 3.5.9
+
+        Made FromPropertyDescriptor not trigger inherited setters.
+
+        Fixed .gyp files to work on the ARM simulator.
+
+        Fixed shared library build warnings for MSVS.
+
+
+2011-08-24: Version 3.5.8
+
+        Added V8EXPORT attributes for v8::Array::CheckCast and
+        v8::Number::CheckCast.
+
+        Made a slight API change enabling opting out from null termination
+        in String::Write*().
+
+        Fixed arm build for gcc-4.6.
+
+
+2011-08-22: Version 3.5.7
+
+        Make scanner handle invalid unicode escapes in identifiers correctly.
+
+        Make regexp flag parsing stricter.
+
+        Fix several memory leaks.
+
+
+2011-08-17: Version 3.5.6
+
+        Fixed issue that could cause crashes when running with --heap-stats.
+
+        Fixed compilation on Linux 2.6.9 and older.
+
+        Fixed live-object-list to work with isolates.
+
+        Fixed memory leaks in zones and isolates.
+
+        Fixed a performance regression for TypedArrays on x64.
+
+        Stability improvements on all platforms.
+
+
+2011-08-15: Version 3.5.5
+
+        Fixed bugs involving negative zero and the optimizing compiler.
+
+        Fixed optimized version of Function.apply(x, arguments). (issue 1592)
+
+        Eliminated uses of deprecated ARM instructions.
+
+        Sped up Math.floor by using SSE 4.1 roundsd instruction on ia32.
+
+        Removed restriction on the size of disassembled code that is printed.
+
+
+2011-08-10: Version 3.5.4
+
+        Added a preliminary implementation of ES Harmony weak maps.  Weak
+        maps can be enabled by the flag --harmony-weakmaps.
+
+        Introduced a toplevel Makefile to support GYP-based building.  GYP
+        can be obtained from http://gyp.googlecode.com.
+
+        Fixed a bug in the length property of functions created by
+        Function.prototype.bind.
+
+        Reduced malloc heap allocation on process startup.
+
+        Several important code generation bug fixes.
+
+        Performance improvements on all platforms.
+
+
+2011-08-03: Version 3.5.3
+
+        MIPS: Port of fix to ClassOf check from ARM.
+        Patch from Paul Lind <plind44@gmail.com>.
+
+        Stopped using mprotect on Cygwin.
+        Avoided uninitialized member warning on gcc 4.3.4.
+        Both patches by Bert Belder.
+
+        Bug fixes and performance improvements on all platforms.
+
+
+2011-08-01: Version 3.5.2
+
+        Performance improvements on all platforms.
+
+
+2011-07-28: Version 3.5.1
+
+        Fixed setting the readonly flag on the prototype property using the
+        API call FunctionTemplate::SetPrototypeAttributes (issue 1539).
+
+        Changed the tools/test.py script to use d8 instead of shell for
+        testing.
+
+        Fixed crash in ToBooleanStub when GC happens during invocation.
+
+        Enabled automatic unboxing of double arrays.
+
+        Performance improvements on all platforms.
+
+
+2011-07-25: Version 3.5.0
+
+        Implemented Object.prototype.{hasOwnProperty, propertyIsEnumerable} for
+        proxies.
+
+        Removed logging to memory support.
+
+        Bugfixes and performance work.
+
+
+2011-07-20: Version 3.4.14
+
+        Fix the debugger for strict-mode functions. (Chromium issue 89236)
+
+        Add GetPropertyAttribute method for Object in the API. (Patch by 
+        Peter Varga)
+
+        Fix -Wunused-but-set-variable for gcc-4.6 on x64. (Issue 1291)
+
+
+2011-07-18: Version 3.4.13
+
+        Improved debugger support to allow inspection of optimized frames (issue
+        1140).
+
+        Fixed a bug in prototype transitions cache clearing introduced by r8165.
+
+        Fixed shortcutting bug in HInferRepresentation. Patch by Andy Wingo.
+
+        Fixed a memory leak in sample/shell.cc (dispose semaphores).
+
+        Simplified HClampToUint8. Patch by Andy Wingo.
+
+        Exposed APIs for detecting boxed primitives, native errors. Patch by
+        Luke Zarko.
+
+        Added map check for COW elements to crankshaft array handling code
+        (issue 1560).
+
+        Sample shell and (a light version of) D8 links against a shared library
+        now.
+
+        Fixed bug in array filter and reduce functions (issue 1559).
+
+        Avoid TLS load in AstNode constructor.
+
+        Introduced a random entropy source which can optionally be provided at
+        initialization. (Chromium issue 89462).
+
+
+2011-07-13: Version 3.4.12
+
+        Added --prof profiling option to d8 shell.
+
+        Fixed a bug where reading a directory in d8 shell hangs (issue 1533).
+
+        Fixed a potential assertion failure in const declarations.
+
+        Fixed an assertion failure in descriptor arrays (issue 1526).
+
+        Enabled fast thread-local storage by default on supported platforms.
+
+        Improved reporting of source position for global variable loads
+        (issue 1527).
+
+
+2011-07-11: Version 3.4.11
+
+        Fixed MinGW32 build.
+
+        Fixed a GC bug with RegExp code flushing.
+
+        Implemented Object.defineProperty for proxies.
+
+        Fixed a bug in for/in iteration of arguments objects (issue 1531).
+
+        Added debugger support for inspecting optimized frames (issue 1140).
+
+        Allowed JSObject::PreventExtensions to work for arguments objects.
+
+        Bugfixes and performance work.
+
+
+2011-07-06: Version 3.4.10
+
+        Fixed debugger not breaking on certain "if" statements (issue 1523).
+
+        Fixed assertion failure in runtime profiler when running on IA32
+        without snapshot (issue 1522).
+
+        Fixed ABI for API calls on IA32 (for clang compatibility).
+
+        Introduced code flushing of RegExp code to free memory used by
+        RegExps sooner.
+
+        Fixed linux-tick-processor building the wrong version of v8 (issue 1532).
+
+        Fixed assertion failure in v8::TryCatch::StackTrace (issue 1529).
+
+        Performance improvements on all platforms.
+
+
+2011-07-04: Version 3.4.9
+
+        Added support for debugger inspection of locals in optimized frames
+        (issue 1140).
+
+        Fixed SConstruct to pass correct defines to samples/preparser when
+        building with library=shared.
+
+        Made date parser handle ES5 Date Time Strings correctly (issue 1498).
+
+        Fixed a bug in Object.defineProperty on the arguments object.
+
+        Performance improvements on all platforms.
+
+
+2011-06-29: Version 3.4.8
+
+        Ensure 16-byte stack alignment on Solaris (issue 1505).
+
+        Fix "illegal access" when calling parseInt with a radix
+        that is not a smi. (issue 1246).
+
+
+2011-06-27: Version 3.4.7
+
+        Fixed 64-bit build on FreeBSD.
+
+        Added API to set the property attributes for the prototype
+        property on functions created from FunctionTemplates.
+
+        Bugfixes and performance work.
+
+
+2011-06-22: Version 3.4.6
+
+        Lowered limit on code space for systems with low memory supply.
+
+        Allowed compiling v8_shell with the 'host' toolset (issue 82437).
+
+        Extended setBreakpoint API to accept partial script name (issue 1418).
+
+        Made multi-line comments not count when deciding whether the '-->'
+        comment starter is first on a line. This matches Safari.
+
+        Made handling of non-array receivers in Array length setter correct
+        (issue 1491).
+
+        Added ability to the heap profiler to iterate over a snapshot's nodes
+        (issue 1481).
+
+
+2011-06-20: Version 3.4.5
+
+        Fixed issues 794, 1097, 1215 (partial), 1417, 1435, 1472, 1473,
+        1476, and 1477.
+
+        Improved code generation for !0 and !1.
+
+        Reduced memory usage for regular expressions with nested quantifiers.
+        (issue 1472)
+
+        Fixed V8 to count line terminators in multi-line comments.
+        (Chromium issue 86431)
+
+        Fixed disassembler=on option for release-mode builds. (issue 1473)
+
+        Performance improvements on all platforms.
+
+
+2011-06-15: Version 3.4.4
+
+        Added snapshot compression support and --stress-opt flag to d8.
+
+        Improved performance of try/catch.
+
+        Several GYP-related changes: Added support for building Xcode project
+        files. Make the ARM simulator build with GYP again. Generate Makefiles
+        for all architectures on Linux.
+
+        Fixed Array.prototype.{reduce,reduceRight} to pass undefined as the
+        receiver for strict mode callbacks. (issue 1436)
+
+        Fixed a bug where an array load was incorrectly hoisted by GVN.
+
+        Handle 'undefined' correctly when === has been specialized for doubles.
+        (issue 1434)
+
+        Corrected the limit of local variables in an optimized function from 64
+        to 63.
+
+        Correctly set ReadOnly flag on indexed properties when using the API Set
+        method. (issue 1470)
+
+        Give the correct error message when Object.isExtensible is called on a
+        non-object. (issue 1452)
+
+        Added GetOwnPropertyNames method for Object in the API. Patch by Peter
+        Varga.
+
+        Do not redefine properties unnecessarily in seal and freeze. (issue
+        1447)
+
+        IsExecutionTerminating has an Isolate parameter now.
+
+        Distinguish keyed loads with a symbol key from fast elements loads,
+        avoiding some useless deoptimizations. (issue 1471)
+
+
+2011-06-08: Version 3.4.3
+
+        Clear the global thread table when an isolate is disposed
+        (issue 1433).
+
+        Converted time zone name to UTF8 on Windows (issue 1290).
+
+        Limited the number of arguments in a function call to 32766
+        (issue 1413).
+
+        Compress sources of JS libraries in addition to the snapshot.
+
+        Fixed a bug in Lithium environment iteration.
+
+        Performance improvements on all platforms.
+
+
+2011-06-06: Version 3.4.2
+
+        More work on ES-Harmony proxies.  Still hidden behind a flag.
+
+        Fixed some crash bugs and improved performance.
+
+        Fixed building with gdb debugging support.
+
+        Do not install SIGPROF handler until it is needed.
+
+        Added DateTimeFormat to i18n API.
+
+        Fixed compilation on OpenBSD.
+
+        Take the ulimit into account when sizing the heap.  OpenBSD users
+        may still have to increase the default ulimit to run heavy pages in
+        the browser.
+
+
+2011-06-01: Version 3.4.1
+
+        Fixed JSON stringify issue with arrays.
+
+        Changed calls to JS builtins to be passed undefined when called with
+        implicit receiver.
+
+        Implemented the set trap for Harmony proxies. Proxies still need to
+        be enabled with the --harmony-proxies flag.
+
+
+2011-05-30: Version 3.4.0
+
+        Changed calls to undefined property setters to not throw (issue 1355).
+
+        Made RegExp objects not callable.
+
+        Fixed issues on special case large JSON strings in new json parser
+        (issues http://crbug.com/83877 and http://crbug.com/84186).
+
+        Performance improvements on all platforms.
+
+
+2011-05-25: Version 3.3.10
+
+        Fixed calls of strict mode functions with an implicit receiver.
+
+        Fixed fast handling of arrays to properly deal with changes to the
+        Object prototype (issue 1403).
+
+        Changed strict mode poison pill to be the same type error function
+        (issue 1387).
+
+        Fixed a debug crash in arguments object handling (issue 1227).
+
+        Fixed a bug in deoptimization on x64 (issue 1404).
+
+        Performance improvements and bug fixes on all platforms.
+
+
+2011-05-23: Version 3.3.9
+
+        Added DateTimeFormat class to experimental i18n API.
+
+        Extended preparser to give early errors for some strict mode
+        restrictions.
+
+        Removed legacy execScript function from V8.
+
+        Extended isolate API with the ability to add embedder-specific
+        data to an isolate.
+
+        Added basic support for polymorphic loads from JS and external
+        arrays.
+
+        Fixed bug in handling of switch statements in the optimizing
+        compiler.
+
+
+2011-05-18: Version 3.3.8
+
+        Added MarkIndependent to the persistent handle API.  Independent
+        handles are independent of all other persistent handles and can be
+        garbage collected more frequently.
+
+        Implemented the get trap for Harmony proxies.  Proxies are enabled
+        with the --harmony-proxies flag.
+
+        Performance improvements and bug fixes on all platforms.
+
+
+2011-05-16: Version 3.3.7
+
+        Updated MIPS infrastructure files.
+
+        Performance improvements and bug fixes on all platforms.
+
+
+2011-05-11: Version 3.3.6
+
+        Updated MIPS infrastructure files.
+
+        Added method IsCallable for Object to the API.
+        Patch by Peter Varga.
+
+
+2011-05-09: Version 3.3.5
+
+        Fixed build on FreeBSD. Patch by Akinori MUSHA.
+
+        Added check that receiver is JSObject on API calls.
+
+        Implemented CallAsConstructor method for Object in the API (Issue 1348).
+        Patch by Peter Varga.
+
+        Added CallAsFunction method to the Object class in the API (Issue 1336).
+        Patch by Peter Varga.
+
+        Added per-isolate locking and unlocking.
+
+        Fixed bug in x64 >>> operator (Issue 1359).
+
+
+2011-05-04: Version 3.3.4
+
+        Implemented API to disallow code generation from strings for a context
+        (issue 1258).
+
+        Fixed bug with whitespaces in parseInt (issue 955).
+
+        Fixed bug with == comparison of Date objects (issue 1356).
+
+        Added GYP variables for ARM code generation:
+        v8_can_use_vfp_instructions, v8_can_use_unaligned_accesses
+        and v8_use_arm_eabi_hardfloat.
+
+
+2011-05-02: Version 3.3.3
+
+        Added support for generating Visual Studio solution and project files
+        using GYP.
+
+        Implemented support for ARM EABI calling convention variation where
+        floating-point arguments are passed in registers (hardfloat).
+
+        Added Object::HasOwnProperty() to the API.
+
+        Added support for compressing startup data to reduce binary size. This
+        includes build time support and an API for the embedder to decompress
+        the startup data before initializing V8.
+
+        Reduced the profiling hooks overhead from >400% to 25% when using
+        ll_prof.
+
+        Performance improvements and bug fixes on all platforms.
+
+
+2011-04-27: Version 3.3.2
+
+        Fixed crash bug on ARM with no VFP3 hardware.
+
+        Fixed compilation of V8 without debugger support.
+
+        Improved performance on JSLint.
+
+        Added support for Float64 WebGL arrays.
+
+        Fixed crash bug in regexp replace.
+
+
+2011-04-20: Version 3.3.1
+
+        Reduced V8 binary size by removing virtual functions from hydrogen.
+
+        Fixed crash bug on x64.
+
+        Performance improvements on ARM and IA32.
+
+
+2011-04-18: Version 3.3.0
+
+        Fixed bug in floating point rounding in Crankshaft on ARM
+        (issue 958).
+
+        Fixed a number of issues with running without VFPv3 support on ARM
+        (issue 1315).
+
+        Introduced v8Locale.Collator, a partial implementation of Collator
+        per last ECMAScript meeting + mailing list.
+
+        Minor performance improvements and bug fixes.
+
+
 2011-04-13: Version 3.2.10
 
         Fixed bug in external float arrays on ARM (issue 1323).
@@ -2687,3 +3288,6 @@
 
         Initial export.
 
+# Local Variables:
+# mode:text
+# End:
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..a7b2731
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,182 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Variable default definitions. Override them by exporting them in your shell.
+CXX ?= "g++"  # For distcc: export CXX="distcc g++"
+LINK ?= "g++"
+OUTDIR ?= out
+TESTJOBS ?= -j16
+GYPFLAGS ?=
+
+# Special build flags. Use them like this: "make library=shared"
+
+# library=shared || component=shared_library
+ifeq ($(library), shared)
+  GYPFLAGS += -Dcomponent=shared_library
+endif
+ifdef component
+  GYPFLAGS += -Dcomponent=$(component)
+endif
+# console=readline
+ifdef console
+  GYPFLAGS += -Dconsole=$(console)
+endif
+# disassembler=on
+ifeq ($(disassembler), on)
+  GYPFLAGS += -Dv8_enable_disassembler=1
+endif
+# snapshot=off
+ifeq ($(snapshot), off)
+  GYPFLAGS += -Dv8_use_snapshot='false'
+endif
+# gdbjit=on
+ifeq ($(gdbjit), on)
+  GYPFLAGS += -Dv8_enable_gdbjit=1
+endif
+# liveobjectlist=on
+ifeq ($(liveobjectlist), on)
+  GYPFLAGS += -Dv8_use_liveobjectlist=true
+endif
+# vfp3=off
+ifeq ($(vfp3), off)
+  GYPFLAGS += -Dv8_can_use_vfp_instructions=false
+else
+  GYPFLAGS += -Dv8_can_use_vfp_instructions=true
+endif
+# soname_version=1.2.3
+ifdef soname_version
+  GYPFLAGS += -Dsoname_version=$(soname_version)
+endif
+
+# ----------------- available targets: --------------------
+# - "dependencies": pulls in external dependencies (currently: GYP)
+# - any arch listed in ARCHES (see below)
+# - any mode listed in MODES
+# - every combination <arch>.<mode>, e.g. "ia32.release"
+# - any of the above with .check appended, e.g. "ia32.release.check"
+# - default (no target specified): build all ARCHES and MODES
+# - "check": build all targets and run all tests
+# - "<arch>.clean" for any <arch> in ARCHES
+# - "clean": clean all ARCHES
+
+# ----------------- internal stuff ------------------------
+
+# Architectures and modes to be compiled. Consider these to be internal
+# variables, don't override them (use the targets instead).
+ARCHES = ia32 x64 arm
+MODES = release debug
+
+# List of files that trigger Makefile regeneration:
+GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
+           preparser/preparser.gyp samples/samples.gyp src/d8.gyp \
+           test/cctest/cctest.gyp tools/gyp/v8.gyp
+
+# Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
+BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
+# Generates corresponding test targets, e.g. "ia32.release.check".
+CHECKS = $(addsuffix .check,$(BUILDS))
+# File where previously used GYPFLAGS are stored.
+ENVFILE = $(OUTDIR)/environment
+
+.PHONY: all check clean dependencies $(ENVFILE).new \
+        $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
+        $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
+
+# Target definitions. "all" is the default.
+all: $(MODES)
+
+# Compile targets. MODES and ARCHES are convenience targets.
+.SECONDEXPANSION:
+$(MODES): $(addsuffix .$$@,$(ARCHES))
+
+$(ARCHES): $(addprefix $$@.,$(MODES))
+
+# Defines how to build a particular target (e.g. ia32.release).
+$(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile-$(basename $@) \
+	         CXX="$(CXX)" LINK="$(LINK)" \
+	         BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
+	                     python -c "print raw_input().capitalize()") \
+	         builddir="$(shell pwd)/$(OUTDIR)/$@"
+
+# Test targets.
+check: all
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
+
+$(addsuffix .check,$(MODES)): $$(basename $$@)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --mode=$(basename $@)
+
+$(addsuffix .check,$(ARCHES)): $$(basename $$@)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch=$(basename $@)
+
+$(CHECKS): $$(basename $$@)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch-and-mode=$(basename $@)
+
+# Clean targets. You can clean each architecture individually, or everything.
+$(addsuffix .clean,$(ARCHES)):
+	rm -f $(OUTDIR)/Makefile-$(basename $@)
+	rm -rf $(OUTDIR)/$(basename $@).release
+	rm -rf $(OUTDIR)/$(basename $@).debug
+	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
+
+clean: $(addsuffix .clean,$(ARCHES))
+
+# GYP file generation targets.
+$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=ia32 \
+	              -S-ia32 $(GYPFLAGS)
+
+$(OUTDIR)/Makefile-x64: $(GYPFILES) $(ENVFILE)
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
+	              -S-x64 $(GYPFLAGS)
+
+$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
+	              -S-arm $(GYPFLAGS)
+
+# Replaces the old with the new environment file if they're different, which
+# will trigger GYP to regenerate Makefiles.
+$(ENVFILE): $(ENVFILE).new
+	@if test -r $(ENVFILE) && cmp $(ENVFILE).new $(ENVFILE) >/dev/null; \
+	    then rm $(ENVFILE).new; \
+	    else mv $(ENVFILE).new $(ENVFILE); fi
+
+# Stores current GYPFLAGS in a file.
+$(ENVFILE).new:
+	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new;
+
+# Dependencies.
+dependencies:
+	svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
+	    --revision 1026
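
Going by the comments in the new top-level Makefile, the intended workflow is to name an architecture, a mode, or an <arch>.<mode> pair as the target, with special build flags passed as make variables. A usage sketch based on the targets documented above:

    # Pull in GYP, then build a single configuration and run its tests.
    make dependencies
    make ia32.release
    make ia32.release.check
    # Default target: build every combination of ARCHES and MODES.
    make
    # Special build flags are passed as variables, for example:
    make library=shared console=readline disassembler=on
    # Clean a single architecture, or everything.
    make arm.clean
    make clean
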
diff --git a/SConstruct b/SConstruct
index d92dd02..f9c33ca 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -36,13 +36,6 @@
 sys.path.insert(0, join(root_dir, 'tools'))
 import js2c, utils
 
-# ANDROID_TOP is the top of the Android checkout, fetched from the environment
-# variable 'TOP'.   You will also need to set the CXX, CC, AR and RANLIB
-# environment variables to the cross-compiling tools.
-ANDROID_TOP = os.environ.get('TOP')
-if ANDROID_TOP is None:
-  ANDROID_TOP=""
-
 # ARM_TARGET_LIB is the path to the dynamic library to use on the target
 # machine if cross-compiling to an arm machine. You will also need to set
 # the additional cross-compiling environment variables to the cross compiler.
@@ -58,50 +51,6 @@
 GCC_EXTRA_CCFLAGS = []
 GCC_DTOA_EXTRA_CCFLAGS = []
 
-ANDROID_FLAGS = ['-march=armv7-a',
-                 '-mtune=cortex-a8',
-                 '-mfloat-abi=softfp',
-                 '-mfpu=vfp',
-                 '-fpic',
-                 '-mthumb-interwork',
-                 '-funwind-tables',
-                 '-fstack-protector',
-                 '-fno-short-enums',
-                 '-fmessage-length=0',
-                 '-finline-functions',
-                 '-fno-inline-functions-called-once',
-                 '-fgcse-after-reload',
-                 '-frerun-cse-after-loop',
-                 '-frename-registers',
-                 '-fomit-frame-pointer',
-                 '-finline-limit=64',
-                 '-DCAN_USE_VFP_INSTRUCTIONS=1',
-                 '-DCAN_USE_ARMV7_INSTRUCTIONS=1',
-                 '-DCAN_USE_UNALIGNED_ACCESSES=1',
-                 '-MD']
-
-ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
-                    ANDROID_TOP + '/bionic/libc/include',
-                    ANDROID_TOP + '/bionic/libstdc++/include',
-                    ANDROID_TOP + '/bionic/libc/kernel/common',
-                    ANDROID_TOP + '/bionic/libc/kernel/arch-arm',
-                    ANDROID_TOP + '/bionic/libm/include',
-                    ANDROID_TOP + '/bionic/libm/include/arch/arm',
-                    ANDROID_TOP + '/bionic/libthread_db/include',
-                    ANDROID_TOP + '/frameworks/base/include',
-                    ANDROID_TOP + '/system/core/include']
-
-ANDROID_LINKFLAGS = ['-nostdlib',
-                     '-Bdynamic',
-                     '-Wl,-T,' + ANDROID_TOP + '/build/core/armelf.x',
-                     '-Wl,-dynamic-linker,/system/bin/linker',
-                     '-Wl,--gc-sections',
-                     '-Wl,-z,nocopyreloc',
-                     '-Wl,-rpath-link=' + ANDROID_TOP + '/out/target/product/generic/obj/lib',
-                     ANDROID_TOP + '/out/target/product/generic/obj/lib/crtbegin_dynamic.o',
-                     ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork/libgcc.a',
-                     ANDROID_TOP + '/out/target/product/generic/obj/lib/crtend_android.o'];
-
 LIBRARY_FLAGS = {
   'all': {
     'CPPPATH': [join(root_dir, 'src')],
@@ -111,26 +60,17 @@
     'mode:debug': {
       'CPPDEFINES': ['V8_ENABLE_CHECKS', 'OBJECT_PRINT']
     },
-    'vmstate:on': {
-      'CPPDEFINES':   ['ENABLE_VMSTATE_TRACKING'],
-    },
     'objectprint:on': {
       'CPPDEFINES':   ['OBJECT_PRINT'],
     },
-    'protectheap:on': {
-      'CPPDEFINES':   ['ENABLE_VMSTATE_TRACKING', 'ENABLE_HEAP_PROTECTION'],
-    },
-    'profilingsupport:on': {
-      'CPPDEFINES':   ['ENABLE_VMSTATE_TRACKING', 'ENABLE_LOGGING_AND_PROFILING'],
-    },
     'debuggersupport:on': {
       'CPPDEFINES':   ['ENABLE_DEBUGGER_SUPPORT'],
     },
     'inspector:on': {
       'CPPDEFINES':   ['INSPECTOR'],
     },
-    'fasttls:on': {
-      'CPPDEFINES':   ['V8_FAST_TLS'],
+    'fasttls:off': {
+      'CPPDEFINES':   ['V8_NO_FAST_TLS'],
     },
     'liveobjectlist:on': {
       'CPPDEFINES':   ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
@@ -140,7 +80,7 @@
   'gcc': {
     'all': {
       'CCFLAGS':      ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-      'CXXFLAGS':     ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
+      'CXXFLAGS':     ['-fno-rtti', '-fno-exceptions'],
     },
     'visibility:hidden': {
       # Use visibility=default to disable this.
@@ -152,17 +92,10 @@
     'mode:debug': {
       'CCFLAGS':      ['-g', '-O0'],
       'CPPDEFINES':   ['ENABLE_DISASSEMBLER', 'DEBUG'],
-      'os:android': {
-        'CCFLAGS':    ['-mthumb']
-      }
     },
     'mode:release': {
       'CCFLAGS':      ['-O3', '-fomit-frame-pointer', '-fdata-sections',
                        '-ffunction-sections'],
-      'os:android': {
-        'CCFLAGS':    ['-mthumb', '-Os'],
-        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
-      }
     },
     'os:linux': {
       'CCFLAGS':      ['-ansi'] + GCC_EXTRA_CCFLAGS,
@@ -200,14 +133,6 @@
       'CCFLAGS':      ['-DWIN32'],
       'CXXFLAGS':     ['-DWIN32'],
     },
-    'os:android': {
-      'CPPDEFINES':   ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
-                       '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
-      'CCFLAGS':      ANDROID_FLAGS,
-      'WARNINGFLAGS': ['-Wall', '-Wno-unused', '-Werror=return-type',
-                       '-Wstrict-aliasing=2'],
-      'CPPPATH':      ANDROID_INCLUDES,
-    },
     'arch:ia32': {
       'CPPDEFINES':   ['V8_TARGET_ARCH_IA32'],
       'CCFLAGS':      ['-m32'],
@@ -220,6 +145,30 @@
       },
       'unalignedaccesses:off' : {
         'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=0']
+      },
+      'armeabi:soft' : {
+        'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=soft'],
+        }
+      },
+      'armeabi:softfp' : {
+        'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
+        'vfp3:on': {
+          'CPPDEFINES' : ['CAN_USE_VFP_INSTRUCTIONS']
+        },
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=softfp'],
+        }
+      },
+      'armeabi:hard' : {
+        'CPPDEFINES' : ['USE_EABI_HARDFLOAT=1'],
+        'vfp3:on': {
+          'CPPDEFINES' : ['CAN_USE_VFP_INSTRUCTIONS']
+        },
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=hard'],
+        }
       }
     },
     'simulator:arm': {
@@ -258,6 +207,9 @@
       'LINKFLAGS':    ['-m32'],
       'mipsabi:softfloat': {
         'CPPDEFINES':    ['__mips_soft_float=1'],
+      },
+      'mipsabi:hardfloat': {
+        'CPPDEFINES':    ['__mips_hard_float=1'],
       }
     },
     'arch:x64': {
@@ -267,12 +219,15 @@
     },
     'gdbjit:on': {
       'CPPDEFINES':   ['ENABLE_GDB_JIT_INTERFACE']
+    },
+    'compress_startup_data:bz2': {
+      'CPPDEFINES':   ['COMPRESS_STARTUP_DATA_BZ2']
     }
   },
   'msvc': {
     'all': {
       'CCFLAGS':      ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-      'CXXFLAGS':     ['$CCFLAGS', '/GR-', '/Gy'],
+      'CXXFLAGS':     ['/GR-', '/Gy'],
       'CPPDEFINES':   ['WIN32'],
       'LINKFLAGS':    ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
       'CCPDBFLAGS':   ['/Zi']
@@ -352,6 +307,11 @@
     'os:macos': {
       'WARNINGFLAGS': ['-pedantic']
     },
+    'arch:arm': {
+      # This is to silence warnings about ABI changes that some versions of the
+      # CodeSourcery G++ tool chain produce for each occurrence of varargs.
+      'WARNINGFLAGS': ['-Wno-abi']
+    },
     'disassembler:on': {
       'CPPDEFINES':   ['ENABLE_DISASSEMBLER']
     }
@@ -404,6 +364,11 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
     },
+    'compress_startup_data:bz2': {
+      'os:linux': {
+        'LIBS': ['bz2']
+      }
+    },
   },
   'msvc': {
     'all': {
@@ -432,13 +397,20 @@
 CCTEST_EXTRA_FLAGS = {
   'all': {
     'CPPPATH': [join(root_dir, 'src')],
+    'library:shared': {
+      'CPPDEFINES': ['USING_V8_SHARED']
+    },
   },
   'gcc': {
     'all': {
-      'LIBPATH': [abspath('.')]
+      'LIBPATH':      [abspath('.')],
+      'CCFLAGS':      ['$DIALECTFLAGS', '$WARNINGFLAGS'],
+      'CXXFLAGS':     ['-fno-rtti', '-fno-exceptions'],
+      'LINKFLAGS':    ['$CCFLAGS'],
     },
     'os:linux': {
       'LIBS':         ['pthread'],
+      'CCFLAGS':      ['-Wno-unused-but-set-variable'],
     },
     'os:macos': {
       'LIBS':         ['pthread'],
@@ -456,19 +428,6 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32']
     },
-    'os:android': {
-      'CPPDEFINES':   ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
-                       '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
-      'CCFLAGS':      ANDROID_FLAGS,
-      'CPPPATH':      ANDROID_INCLUDES,
-      'LIBPATH':     [ANDROID_TOP + '/out/target/product/generic/obj/lib',
-                      ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
-      'LINKFLAGS':    ANDROID_LINKFLAGS,
-      'LIBS':         ['log', 'c', 'stdc++', 'm', 'gcc'],
-      'mode:release': {
-        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
-      }
-    },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS
     },
@@ -478,15 +437,12 @@
       'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
       'LIBS': ['winmm', 'ws2_32']
     },
-    'library:shared': {
-      'CPPDEFINES': ['USING_V8_SHARED']
-    },
     'arch:ia32': {
       'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
     },
     'arch:x64': {
       'CPPDEFINES':   ['V8_TARGET_ARCH_X64'],
-      'LINKFLAGS': ['/STACK:2091752']
+      'LINKFLAGS': ['/STACK:2097152']
     },
   }
 }
@@ -495,11 +451,16 @@
 SAMPLE_FLAGS = {
   'all': {
     'CPPPATH': [join(abspath('.'), 'include')],
+    'library:shared': {
+      'CPPDEFINES': ['USING_V8_SHARED']
+    },
   },
   'gcc': {
     'all': {
-      'LIBPATH': ['.'],
-      'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
+      'LIBPATH':      ['.'],
+      'CCFLAGS':      ['$DIALECTFLAGS', '$WARNINGFLAGS'],
+      'CXXFLAGS':     ['-fno-rtti', '-fno-exceptions'],
+      'LINKFLAGS':    ['$CCFLAGS'],
     },
     'os:linux': {
       'LIBS':         ['pthread'],
@@ -512,6 +473,9 @@
       'LIBS':         ['execinfo', 'pthread']
     },
     'os:solaris': {
+      # On Solaris, to get isinf, INFINITY, fpclassify and other macros one
+      # needs to define __C99FEATURES__.
+      'CPPDEFINES': ['__C99FEATURES__'],
       'LIBPATH' :     ['/usr/local/lib'],
       'LIBS':         ['m', 'pthread', 'socket', 'nsl', 'rt'],
       'LINKFLAGS':    ['-mt']
@@ -523,21 +487,29 @@
     'os:win32': {
       'LIBS':         ['winmm', 'ws2_32']
     },
-    'os:android': {
-      'CPPDEFINES':   ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
-                       '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
-      'CCFLAGS':      ANDROID_FLAGS,
-      'CPPPATH':      ANDROID_INCLUDES,
-      'LIBPATH':     [ANDROID_TOP + '/out/target/product/generic/obj/lib',
-                      ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
-      'LINKFLAGS':    ANDROID_LINKFLAGS,
-      'LIBS':         ['log', 'c', 'stdc++', 'm', 'gcc'],
-      'mode:release': {
-        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
-      }
-    },
     'arch:arm': {
-      'LINKFLAGS':   ARM_LINK_FLAGS
+      'LINKFLAGS':   ARM_LINK_FLAGS,
+      'armeabi:soft' : {
+        'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=soft'],
+        }
+      },
+      'armeabi:softfp' : {
+        'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=softfp'],
+        }
+      },
+      'armeabi:hard' : {
+        'CPPDEFINES' : ['USE_EABI_HARDFLOAT=1'],
+        'vfp3:on': {
+          'CPPDEFINES' : ['CAN_USE_VFP_INSTRUCTIONS']
+        },
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=hard'],
+        }
+      }
     },
     'arch:ia32': {
       'CCFLAGS':      ['-m32'],
@@ -549,10 +521,29 @@
     },
     'arch:mips': {
       'CPPDEFINES':   ['V8_TARGET_ARCH_MIPS'],
+      'mips_arch_variant:mips32r2': {
+        'CPPDEFINES':    ['_MIPS_ARCH_MIPS32R2']
+      },
       'simulator:none': {
-        'CCFLAGS':      ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+        'CCFLAGS':      ['-EL'],
         'LINKFLAGS':    ['-EL'],
-        'LDFLAGS':      ['-EL']
+        'mips_arch_variant:mips32r2': {
+          'CCFLAGS':      ['-mips32r2', '-Wa,-mips32r2']
+        },
+        'mips_arch_variant:mips32r1': {
+          'CCFLAGS':      ['-mips32', '-Wa,-mips32']
+        },
+        'library:static': {
+          'LINKFLAGS':    ['-static', '-static-libgcc']
+        },
+        'mipsabi:softfloat': {
+          'CCFLAGS':      ['-msoft-float'],
+          'LINKFLAGS':    ['-msoft-float']
+        },
+        'mipsabi:hardfloat': {
+          'CCFLAGS':      ['-mhard-float'],
+          'LINKFLAGS':    ['-mhard-float']
+        }
       }
     },
     'simulator:arm': {
@@ -570,6 +561,12 @@
       'CCFLAGS':      ['-g', '-O0'],
       'CPPDEFINES':   ['DEBUG']
     },
+    'compress_startup_data:bz2': {
+      'CPPDEFINES':   ['COMPRESS_STARTUP_DATA_BZ2'],
+      'os:linux': {
+        'LIBS':       ['bz2']
+      }
+    },
   },
   'msvc': {
     'all': {
@@ -582,9 +579,6 @@
     'verbose:on': {
       'LINKFLAGS': ['/VERBOSE']
     },
-    'library:shared': {
-      'CPPDEFINES': ['USING_V8_SHARED']
-    },
     'prof:on': {
       'LINKFLAGS': ['/MAP']
     },
@@ -616,7 +610,7 @@
     },
     'arch:x64': {
       'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'WIN32'],
-      'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
+      'LINKFLAGS': ['/MACHINE:X64', '/STACK:2097152']
     },
     'mode:debug': {
       'CCFLAGS':    ['/Od'],
@@ -635,31 +629,39 @@
 
 PREPARSER_FLAGS = {
   'all': {
-    'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')]
+    'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')],
+    'library:shared': {
+      'CPPDEFINES': ['USING_V8_SHARED']
+    },
   },
   'gcc': {
     'all': {
-      'LIBPATH': ['.'],
-      'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
+      'LIBPATH':      ['.'],
+      'CCFLAGS':      ['$DIALECTFLAGS', '$WARNINGFLAGS'],
+      'CXXFLAGS':     ['-fno-rtti', '-fno-exceptions'],
+      'LINKFLAGS':    ['$CCFLAGS'],
     },
     'os:win32': {
       'LIBS':         ['winmm', 'ws2_32']
     },
-    'os:android': {
-      'CPPDEFINES':   ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
-                       '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
-      'CCFLAGS':      ANDROID_FLAGS,
-      'CPPPATH':      ANDROID_INCLUDES,
-      'LIBPATH':     [ANDROID_TOP + '/out/target/product/generic/obj/lib',
-                      ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
-      'LINKFLAGS':    ANDROID_LINKFLAGS,
-      'LIBS':         ['log', 'c', 'stdc++', 'm', 'gcc'],
-      'mode:release': {
-        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
-      }
-    },
     'arch:arm': {
-      'LINKFLAGS':   ARM_LINK_FLAGS
+      'LINKFLAGS':   ARM_LINK_FLAGS,
+      'armeabi:soft' : {
+        'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=soft'],
+        }
+      },
+      'armeabi:softfp' : {
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=softfp'],
+        }
+      },
+      'armeabi:hard' : {
+        'simulator:none': {
+          'CCFLAGS':     ['-mfloat-abi=hard'],
+        }
+      }
     },
     'arch:ia32': {
       'CCFLAGS':      ['-m32'],
@@ -705,6 +707,9 @@
       'LINKFLAGS':    ['-m32'],
       'mipsabi:softfloat': {
         'CPPDEFINES':    ['__mips_soft_float=1'],
+      },
+      'mipsabi:hardfloat': {
+        'CPPDEFINES':    ['__mips_hard_float=1'],
       }
     },
     'mode:release': {
@@ -714,6 +719,9 @@
       'CCFLAGS':      ['-g', '-O0'],
       'CPPDEFINES':   ['DEBUG']
     },
+    'os:freebsd': {
+      'LIBPATH' : ['/usr/local/lib'],
+    },
   },
   'msvc': {
     'all': {
@@ -726,9 +734,6 @@
     'verbose:on': {
       'LINKFLAGS': ['/VERBOSE']
     },
-    'library:shared': {
-      'CPPDEFINES': ['USING_V8_SHARED']
-    },
     'prof:on': {
       'LINKFLAGS': ['/MAP']
     },
@@ -760,7 +765,7 @@
     },
     'arch:x64': {
       'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'WIN32'],
-      'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
+      'LINKFLAGS': ['/MACHINE:X64', '/STACK:2097152']
     },
     'mode:debug': {
       'CCFLAGS':    ['/Od'],
@@ -778,7 +783,19 @@
 
 
 D8_FLAGS = {
+  'all': {
+    'library:shared': {
+      'CPPDEFINES': ['V8_SHARED'],
+      'LIBS': ['v8'],
+      'LIBPATH': ['.']
+    },
+  },
   'gcc': {
+    'all': {
+      'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
+      'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
+      'LINKFLAGS': ['$CCFLAGS'],
+    },
     'console:readline': {
       'LIBS': ['readline']
     },
@@ -798,22 +815,73 @@
     'os:openbsd': {
       'LIBS': ['pthread'],
     },
-    'os:android': {
-      'LIBPATH':     [ANDROID_TOP + '/out/target/product/generic/obj/lib',
-                      ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
-      'LINKFLAGS':    ANDROID_LINKFLAGS,
-      'LIBS':         ['log', 'c', 'stdc++', 'm', 'gcc'],
-    },
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
     },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS
     },
+    'compress_startup_data:bz2': {
+      'CPPDEFINES':   ['COMPRESS_STARTUP_DATA_BZ2'],
+      'os:linux': {
+        'LIBS': ['bz2']
+      }
+    }
   },
   'msvc': {
     'all': {
       'LIBS': ['winmm', 'ws2_32']
+    },
+    'verbose:off': {
+      'CCFLAGS': ['/nologo'],
+      'LINKFLAGS': ['/NOLOGO']
+    },
+    'verbose:on': {
+      'LINKFLAGS': ['/VERBOSE']
+    },
+    'prof:on': {
+      'LINKFLAGS': ['/MAP']
+    },
+    'mode:release': {
+      'CCFLAGS':   ['/O2'],
+      'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
+      'msvcrt:static': {
+        'CCFLAGS': ['/MT']
+      },
+      'msvcrt:shared': {
+        'CCFLAGS': ['/MD']
+      },
+      'msvcltcg:on': {
+        'CCFLAGS':      ['/GL'],
+        'pgo:off': {
+          'LINKFLAGS':    ['/LTCG'],
+        },
+      },
+      'pgo:instrument': {
+        'LINKFLAGS':    ['/LTCG:PGI']
+      },
+      'pgo:optimize': {
+        'LINKFLAGS':    ['/LTCG:PGO']
+      }
+    },
+    'arch:ia32': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_IA32', 'WIN32'],
+      'LINKFLAGS': ['/MACHINE:X86']
+    },
+    'arch:x64': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'WIN32'],
+      'LINKFLAGS': ['/MACHINE:X64', '/STACK:2097152']
+    },
+    'mode:debug': {
+      'CCFLAGS':    ['/Od'],
+      'LINKFLAGS':  ['/DEBUG'],
+      'CPPDEFINES': ['DEBUG'],
+      'msvcrt:static': {
+        'CCFLAGS':  ['/MTd']
+      },
+      'msvcrt:shared': {
+        'CCFLAGS':  ['/MDd']
+      }
     }
   }
 }
@@ -883,7 +951,7 @@
     'help': 'the architecture to build for'
   },
   'os': {
-    'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
+    'values': ['freebsd', 'linux', 'macos', 'win32', 'openbsd', 'solaris', 'cygwin'],
     'guess': GuessOS,
     'help': 'the os to build for'
   },
@@ -920,21 +988,11 @@
     'default': 'static',
     'help': 'the type of library to produce'
   },
-  'vmstate': {
-    'values': ['on', 'off'],
-    'default': 'off',
-    'help': 'enable VM state tracking'
-  },
   'objectprint': {
     'values': ['on', 'off'],
     'default': 'off',
     'help': 'enable object printing'
   },
-  'protectheap': {
-    'values': ['on', 'off'],
-    'default': 'off',
-    'help': 'enable heap protection'
-  },
   'profilingsupport': {
     'values': ['on', 'off'],
     'default': 'on',
@@ -1021,6 +1079,11 @@
     'default': 'off',
     'help': 'select profile guided optimization variant',
   },
+  'armeabi': {
+    'values': ['hard', 'softfp', 'soft'],
+    'default': 'softfp',
+    'help': 'generate calling convention according to selected ARM EABI variant'
+  },
   'mipsabi': {
     'values': ['hardfloat', 'softfloat', 'none'],
     'default': 'hardfloat',
@@ -1030,7 +1093,18 @@
     'values': ['mips32r2', 'mips32r1'],
     'default': 'mips32r2',
     'help': 'mips variant'
-  }
+  },
+  'compress_startup_data': {
+    'values': ['off', 'bz2'],
+    'default': 'off',
+    'help': 'compress startup data (snapshot) [Linux only]'
+  },
+  'vfp3': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'use vfp3 instructions when building the snapshot [Arm only]'
+  },
+
 }
 
 ALL_OPTIONS = dict(PLATFORM_OPTIONS, **SIMPLE_OPTIONS)
@@ -1139,8 +1213,8 @@
     return False
   if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
     Abort("Profiling on windows only supported for static library.")
-  if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
-    Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
+  if env['gdbjit'] == 'on' and ((env['os'] != 'linux' and env['os'] != 'macos') or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
+    Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux/OSX target.")
   if env['os'] == 'win32' and env['soname'] == 'on':
     Abort("Shared Object soname not applicable for Windows.")
   if env['soname'] == 'on' and env['library'] == 'static':
@@ -1153,6 +1227,8 @@
     print env['arch']
     print env['simulator']
     Abort("Option unalignedaccesses only supported for the ARM architecture.")
+  if env['os'] != 'linux' and env['compress_startup_data'] != 'off':
+    Abort("Startup data compression is only available on Linux")
   for (name, option) in ALL_OPTIONS.iteritems():
     if (not name in env):
       message = ("A value for option %s must be specified (%s)." %
@@ -1254,12 +1330,8 @@
     if 'msvcltcg' in ARGUMENTS:
       print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
     options['msvcltcg'] = 'on'
-    if (options['simulator'] == 'mips' and options['mipsabi'] != 'softfloat'):
-      # Print a warning if soft-float ABI is not selected for mips simulator
-      print "Warning: forcing soft-float mips ABI when running on simulator"
-      options['mipsabi'] = 'softfloat'
-    if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
-      options['mipsabi'] = 'none'
+  if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
+    options['mipsabi'] = 'none'
   if options['liveobjectlist'] == 'on':
     if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
       # Print a warning that liveobjectlist will implicitly enable the debugger
@@ -1337,10 +1409,12 @@
     env['SONAME'] = soname
 
   # Build the object files by invoking SCons recursively.
+  d8_env = Environment(tools=tools)
+  d8_env.Replace(**context.flags['d8'])
   (object_files, shell_files, mksnapshot, preparser_files) = env.SConscript(
     join('src', 'SConscript'),
     build_dir=join('obj', target_id),
-    exports='context tools',
+    exports='context tools d8_env',
     duplicate=False
   )
 
@@ -1361,16 +1435,20 @@
     pdb_name = library_name + '.dll.pdb'
     library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
     preparser_pdb_name = preparser_library_name + '.dll.pdb';
+    preparser_soname = 'lib' + preparser_library_name + '.so';
     preparser_library = env.SharedLibrary(preparser_library_name,
                                           preparser_files,
-                                          PDB=preparser_pdb_name)
+                                          PDB=preparser_pdb_name,
+                                          SONAME=preparser_soname)
   context.library_targets.append(library)
   context.library_targets.append(preparser_library)
 
-  d8_env = Environment(tools=tools)
-  d8_env.Replace(**context.flags['d8'])
   context.ApplyEnvOverrides(d8_env)
-  shell = d8_env.Program('d8' + suffix, object_files + shell_files)
+  if context.options['library'] == 'static':
+    shell = d8_env.Program('d8' + suffix, object_files + shell_files)
+  else:
+    shell = d8_env.Program('d8' + suffix, shell_files)
+    d8_env.Depends(shell, library)
   context.d8_targets.append(shell)
 
   for sample in context.samples:
@@ -1406,7 +1484,7 @@
   preparser_object = preparser_env.SConscript(
     join('preparser', 'SConscript'),
     build_dir=join('obj', 'preparser', target_id),
-    exports='context',
+    exports='context tools',
     duplicate=False
   )
   preparser_name = join('obj', 'preparser', target_id, 'preparser')
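
The SConstruct changes above add the armeabi, vfp3, mipsabi:hardfloat, mips_arch_variant, and compress_startup_data options (the Android target is gone, since that build now goes through the Android.*.mk files). Assuming the usual scons option=value convention this file uses, the new options would be exercised roughly as follows:

    # ARM build using the hard-float EABI variant with VFP3 instructions.
    scons arch=arm armeabi=hard vfp3=on
    # Linux build with bzip2-compressed startup data (Linux only, per the option help).
    scons compress_startup_data=bz2
    # MIPS hard-float build selecting the mips32r2 architecture variant.
    scons arch=mips mipsabi=hardfloat mips_arch_variant=mips32r2
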
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index a6c144f..bdd9c2b 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,5 +1,2 @@
-We are tracking the V8 3.2 release branch as used by the Chrome 12 release branch.
-
-We have synced V8 past the last revision used in Chrome 12, as we continue to take 3.2 updates.
-
-http://v8.googlecode.com/svn/branches/3.2@10110
+V8 3.6.6.19
+http://v8.googlecode.com/svn/branches/3.6@10446
diff --git a/benchmarks/crypto.js b/benchmarks/crypto.js
index ffa69b5..531ad45 100644
--- a/benchmarks/crypto.js
+++ b/benchmarks/crypto.js
@@ -1406,7 +1406,7 @@
 
 // Mix in the current time (w/milliseconds) into the pool
 function rng_seed_time() {
-  // Use pre-computed date to avoid making the benchmark 
+  // Use pre-computed date to avoid making the benchmark
   // results dependent on the current date.
   rng_seed_int(1122926989487);
 }
diff --git a/benchmarks/earley-boyer.js b/benchmarks/earley-boyer.js
index 1be480e..b2328d6 100644
--- a/benchmarks/earley-boyer.js
+++ b/benchmarks/earley-boyer.js
@@ -134,7 +134,7 @@
 /*** META ((export #t)) */
 function sc_any2String(o) {
     return jsstring2string(sc_toDisplayString(o));
-}    
+}
 
 /*** META ((export #t)
            (peephole (infix 2 2 "==="))
@@ -923,7 +923,7 @@
     tmp.cdr = l2;
     return l1;
 }
-    
+
 /*** META ((export #t)) */
 function sc_appendBang() {
     var res = null;
@@ -1163,7 +1163,7 @@
     "us": "\037",
     "sp": "\040",
     "del": "\177"};
-    
+
 sc_Char.prototype.toString = function() {
     return this.val;
 };
@@ -1533,7 +1533,7 @@
     }
     return l1_orig;
 }
-     
+
 /*** META ((export #t)) */
 function sc_forEach(proc, l1) {
     if (l1 === undefined)
@@ -1871,7 +1871,7 @@
 	evalStr += ", arguments[" + i + "]";
     evalStr +=")";
     return eval(evalStr);
-}    
+}
 
 // ======================== RegExp ====================
 /*** META ((export #t)) */
@@ -1883,9 +1883,9 @@
 function sc_pregexpMatch(re, s) {
     var reg = (re instanceof RegExp) ? re : sc_pregexp(re);
     var tmp = reg.exec(sc_string2jsstring(s));
-    
+
     if (tmp == null) return false;
-    
+
     var res = null;
     for (var i = tmp.length-1; i >= 0; i--) {
 	if (tmp[i] !== null) {
@@ -1896,7 +1896,7 @@
     }
     return res;
 }
-   
+
 /*** META ((export #t)) */
 function sc_pregexpReplace(re, s1, s2) {
    var reg;
@@ -1914,7 +1914,7 @@
 
    return jss1.replace(reg, jss2);
 }
-   
+
 /*** META ((export pregexp-replace*)) */
 function sc_pregexpReplaceAll(re, s1, s2) {
    var reg;
@@ -1945,7 +1945,7 @@
 
    return sc_vector2list(tmp);
 }
-   
+
 
 /* =========================================================================== */
 /* Other library stuff */
@@ -2136,7 +2136,7 @@
 sc_ErrorInputPort.prototype.isCharReady = function() {
     return false;
 };
-    
+
 
 /* .............. String port ..........................*/
 
@@ -2200,7 +2200,7 @@
 };
 sc_Tokenizer.prototype.nextToken = function() {
     var port = this.port;
-    
+
     function isNumberChar(c) {
 	return (c >= "0" && c <= "9");
     };
@@ -2280,7 +2280,7 @@
 	else
 	    return new sc_Token(12/*NUMBER*/, res - 0);
     };
-    
+
     function skipWhitespaceAndComments() {
 	var done = false;
 	while (!done) {
@@ -2299,7 +2299,7 @@
 	    }
 	}
     };
-    
+
     function readDot() {
 	if (isWhitespace(port.peekChar()))
 	    return new sc_Token(10/*DOT*/);
@@ -2429,7 +2429,7 @@
 
 	while (true) {
 	    var token = tokenizer.peekToken();
-	    
+
 	    switch (token.type) {
 	    case 2/*CLOSE_PAR*/:
 	    case 4/*CLOSE_BRACE*/:
@@ -2491,7 +2491,7 @@
 	else
 	    throw "bad reference: " + nb;
     };
-    
+
     var tokenizer = this.tokenizer;
 
     var token = tokenizer.readToken();
@@ -2499,7 +2499,7 @@
     // handle error
     if (token.type === 13/*ERROR*/)
 	throw token.val;
-    
+
     switch (token.type) {
     case 1/*OPEN_PAR*/:
     case 3/*OPEN_BRACE*/:
@@ -2550,7 +2550,7 @@
 	port = SC_DEFAULT_IN; // THREAD: shared var...
     var t = port.peekChar();
     return t === SC_EOF_OBJECT? t: new sc_Char(t);
-}    
+}
 /*** META ((export #t)
            (type bool))
 */
@@ -2722,7 +2722,7 @@
 function sc_getOutputString(sp) {
     return sc_jsstring2string(sp.res);
 }
-    
+
 
 function sc_ErrorOutputPort() {
 }
@@ -2852,7 +2852,7 @@
 	p = SC_DEFAULT_OUT;
     p.appendJSString("\n");
 }
-    
+
 /* ------------------ write-char ---------------------------------------------------*/
 
 /*** META ((export #t)) */
@@ -2927,7 +2927,7 @@
     }
 
     var res = "";
-    
+
     if (this[symb] !== undefined) { // implies > 0
 	this[symb + "use"] = true;
 	if (inList)
@@ -2939,10 +2939,10 @@
 
     if (!inList)
 	res += "(";
-    
+
     // print car
     res += sc_genToWriteCircleString(this.car, symb);
-    
+
     if (sc_isPair(this.cdr)) {
 	res += " " + this.cdr.sc_toWriteCircleString(symb, true);
     } else if (this.cdr !== null) {
@@ -3072,7 +3072,7 @@
 	       p.appendJSString(arguments[j].toString(2));
 	       i += 2; j++;
 	       break;
-	       
+
 	    case 37:
 	    case 110:
 	       // %, n
@@ -3186,7 +3186,7 @@
 function sc_number2symbol(x, radix) {
     return sc_SYMBOL_PREFIX + sc_number2jsstring(x, radix);
 }
-    
+
 /*** META ((export number->string integer->string)) */
 var sc_number2string = sc_number2jsstring;
 
diff --git a/benchmarks/regexp.js b/benchmarks/regexp.js
index 71b9e63..9c83142 100644
--- a/benchmarks/regexp.js
+++ b/benchmarks/regexp.js
@@ -33,7 +33,7 @@
 // the popularity of the pages where it occurs and the number of times
 // it is executed while loading each page.  Furthermore the literal
 // letters in the data are encoded using ROT13 in a way that does not
-// affect how the regexps match their input.  Finally the strings are 
+// affect how the regexps match their input.  Finally the strings are
 // scrambled to exercise the regexp engine on different input strings.
 
 
@@ -47,7 +47,7 @@
   regExpBenchmark = new RegExpBenchmark();
   RegExpRun(); // run once to get system initialized
 }
-  
+
 function RegExpRun() {
   regExpBenchmark.run();
 }
@@ -1759,6 +1759,6 @@
       runBlock11();
     }
   }
-  
+
   this.run = run;
 }
diff --git a/build/README.txt b/build/README.txt
index 7cd7e18..ea6287f 100644
--- a/build/README.txt
+++ b/build/README.txt
@@ -5,21 +5,62 @@
 based build system.
 
 To use this a checkout of GYP is needed inside this directory. From the root of
-the V8 project do the following
+the V8 project do the following:
 
 $ svn co http://gyp.googlecode.com/svn/trunk build/gyp
 
-To generate Makefiles and build 32-bit version on Linux:
+Note that for the command lines below, Debug is the default configuration;
+specifying it explicitly is therefore not required.
 
-$ GYP_DEFINES=target_arch=ia32 build/gyp_v8
-$ make
 
-To generate Makefiles and build 64-bit version on Linux:
+To generate Makefiles on Linux:
+-------------------------------
 
-$ GYP_DEFINES=target_arch=x64 build/gyp_v8
-$ make
+$ build/gyp_v8
 
-To generate Makefiles and build for the arm simulator on Linux:
+This will generate makefiles for ia32, x64 and the ARM simulator, named
+Makefile-ia32, Makefile-x64 and Makefile-armu respectively.
 
-$ build/gyp_v8 -I build/arm.gypi
-$ make
+To build and run for ia32 in the debug and release versions, do:
+
+$ make -f Makefile-ia32
+$ out/Debug/shell
+$ make -f Makefile-ia32 BUILDTYPE=Release
+$ out/Release/shell
+
+Change the makefile to build and run for the other architectures.
+
+
+To generate Xcode project files on Mac OS:
+------------------------------------------
+
+$ build/gyp_v8
+
+This will generate an Xcode project for the ia32 architecture. To build and run, do:
+
+$ xcodebuild -project build/all.xcodeproj
+$ samples/build/Debug/shell
+$ xcodebuild -project build/all.xcodeproj -configuration Release
+$ samples/build/Release/shell
+
+
+To generate Visual Studio solution and project files on Windows:
+----------------------------------------------------------------
+
+On Windows an additional third-party component is required: Cygwin, in the
+same version used by the Chromium project. It can be checked out from the
+Chromium repository. From the root of the V8 project do the following:
+
+> svn co http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844 third_party/cygwin
+
+Running GYP requires Python, and it is recommended to use the same version as
+the Chromium project. It can also be checked out from the Chromium repository.
+From the root of the V8 project do the following:
+
+> svn co http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111 third_party/python_26
+
+Now generate Visual Studio solution and project files for the ia32 architecture:
+
+> third_party\python_26\python build/gyp_v8
+
+Now open build\All.sln in Visual Studio.
diff --git a/build/all.gyp b/build/all.gyp
index 544e2c2..4b2fe52 100644
--- a/build/all.gyp
+++ b/build/all.gyp
@@ -1,4 +1,4 @@
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -8,10 +8,11 @@
       'target_name': 'All',
       'type': 'none',
       'dependencies': [
+        '../preparser/preparser.gyp:*',
         '../samples/samples.gyp:*',
+        '../src/d8.gyp:d8',
         '../test/cctest/cctest.gyp:*',
-        '../src/d8.gyp:*',
-      ]
+      ],
     }
   ]
 }
diff --git a/build/armu.gypi b/build/armu.gypi
index 72eb4d1..d15b8ab 100644
--- a/build/armu.gypi
+++ b/build/armu.gypi
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -27,6 +27,10 @@
 
 {
   'variables': {
+    'target_arch': 'ia32',
     'v8_target_arch': 'arm',
-  }
+    'armv7': 1,
+    'arm_neon': 0,
+    'arm_fpu': 'vfpv3',
+  },
 }
diff --git a/build/common.gypi b/build/common.gypi
index 3b5358e..4e896e0 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -25,58 +25,277 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# Shared definitions for all V8-related targets.
+
 {
   'variables': {
-    'library%': 'static_library',
-    'component%': 'static_library',
-    'visibility%': 'hidden',
-    'variables': {
-      'conditions': [
-        [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-          # This handles the Linux platforms we generally deal with. Anything
-          # else gets passed through, which probably won't work very well; such
-          # hosts should pass an explicit target_arch to gyp.
-          'host_arch%':
-            '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
-        }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
-          'host_arch%': 'ia32',
-        }],
-      ],
-    },
-    'host_arch%': '<(host_arch)',
-    'target_arch%': '<(host_arch)',
+    'use_system_v8%': 0,
+    'msvs_use_common_release': 0,
+    'gcc_version%': 'unknown',
+    'v8_compress_startup_data%': 'off',
     'v8_target_arch%': '<(target_arch)',
+
+    # Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
+    # generated by V8 to do unaligned memory access, and setting it to 'false'
+    # will ensure that the generated code will always do aligned memory
+    # accesses. The default value of 'default' will try to determine the correct
+    # setting. Note that for Intel architectures (ia32 and x64) unaligned memory
+    # access is allowed for all CPUs.
+    'v8_can_use_unaligned_accesses%': 'default',
+
+    # Setting 'v8_can_use_vfp_instructions' to 'true' will enable use of ARM VFP
+    # instructions in the V8 generated code. VFP instructions will be enabled
+    # both for the snapshot and for the ARM target. Leaving the default value
+    # of 'false' will avoid VFP instructions in the snapshot and use CPU feature
+    # probing when running on the target.
+    'v8_can_use_vfp_instructions%': 'false',
+
+    # Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
+    # EABI calling convention where double arguments are passed in VFP
+    # registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
+    # well when compiling for the ARM target.
+    'v8_use_arm_eabi_hardfloat%': 'false',
+
+    'v8_enable_debugger_support%': 1,
+
+    'v8_enable_disassembler%': 0,
+
+    'v8_enable_gdbjit%': 0,
+
+    # Enable profiling support. Only required on Windows.
+    'v8_enable_prof%': 0,
+
+    # Chrome needs this definition unconditionally. For standalone V8 builds,
+    # it's handled in build/standalone.gypi.
+    'want_separate_host_toolset%': 1,
+
+    'v8_use_snapshot%': 'true',
+    'host_os%': '<(OS)',
+    'v8_use_liveobjectlist%': 'false',
+
+    # For a shared library build, results in "libv8-<(soname_version).so".
+    'soname_version%': '',
   },
   'target_defaults': {
-    'default_configuration': 'Debug',
-    'configurations': {
-      'Debug': {
-        'cflags': [ '-g', '-O0' ],
-        'defines': [ 'ENABLE_DISASSEMBLER', 'DEBUG' ],
-      },
-      'Release': {
-        'cflags': [ '-O3', '-fomit-frame-pointer', '-fdata-sections', '-ffunction-sections' ],
-      },
-    },
-  },
-  'conditions': [
-    [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
-      'target_defaults': {
-        'cflags': [ '-Wall', '-pthread', '-fno-rtti', '-fno-exceptions' ],
-        'ldflags': [ '-pthread', ],
+    'conditions': [
+      ['v8_enable_debugger_support==1', {
+        'defines': ['ENABLE_DEBUGGER_SUPPORT',],
+      }],
+      ['v8_enable_disassembler==1', {
+        'defines': ['ENABLE_DISASSEMBLER',],
+      }],
+      ['v8_enable_gdbjit==1', {
+        'defines': ['ENABLE_GDB_JIT_INTERFACE',],
+      }],
+      ['OS!="mac"', {
+        # TODO(mark): The OS!="mac" conditional is temporary. It can be
+        # removed once the Mac Chromium build stops setting target_arch to
+        # ia32 and instead sets it to mac. Other checks in this file for
+        # OS=="mac" can be removed at that time as well. This can be cleaned
+        # up once http://crbug.com/44205 is fixed.
+        'conditions': [
+          ['v8_target_arch=="arm"', {
+            'defines': [
+              'V8_TARGET_ARCH_ARM',
+            ],
+            'conditions': [
+              [ 'v8_can_use_unaligned_accesses=="true"', {
+                'defines': [
+                  'CAN_USE_UNALIGNED_ACCESSES=1',
+                ],
+              }],
+              [ 'v8_can_use_unaligned_accesses=="false"', {
+                'defines': [
+                  'CAN_USE_UNALIGNED_ACCESSES=0',
+                ],
+              }],
+              [ 'v8_can_use_vfp_instructions=="true"', {
+                'defines': [
+                  'CAN_USE_VFP_INSTRUCTIONS',
+                ],
+              }],
+              [ 'v8_use_arm_eabi_hardfloat=="true"', {
+                'defines': [
+                  'USE_EABI_HARDFLOAT=1',
+                  'CAN_USE_VFP_INSTRUCTIONS',
+                ],
+                'cflags': [
+                  '-mfloat-abi=hard',
+                ],
+              }, {
+                'defines': [
+                  'USE_EABI_HARDFLOAT=0',
+                ],
+              }],
+              # The ARM assembler assumes the host is 32 bits,
+              # so force building 32-bit host tools.
+              ['host_arch=="x64"', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'cflags': ['-m32'],
+                    'ldflags': ['-m32'],
+                  }],
+                ],
+              }],
+            ],
+          }],
+          ['v8_target_arch=="ia32"', {
+            'defines': [
+              'V8_TARGET_ARCH_IA32',
+            ],
+          }],
+          ['v8_target_arch=="mips"', {
+            'defines': [
+              'V8_TARGET_ARCH_MIPS',
+            ],
+          }],
+          ['v8_target_arch=="x64"', {
+            'defines': [
+              'V8_TARGET_ARCH_X64',
+            ],
+          }],
+        ],
+      }],
+      ['v8_use_liveobjectlist=="true"', {
+        'defines': [
+          'ENABLE_DEBUGGER_SUPPORT',
+          'INSPECTOR',
+          'OBJECT_PRINT',
+          'LIVEOBJECTLIST',
+        ],
+      }],
+      ['v8_compress_startup_data=="bz2"', {
+        'defines': [
+          'COMPRESS_STARTUP_DATA_BZ2',
+        ],
+      }],
+      ['OS=="win" and v8_enable_prof==1', {
+        'msvs_settings': {
+          'VCLinkerTool': {
+            'GenerateMapFile': 'true',
+          },
+        },
+      }],
+      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
         'conditions': [
           [ 'target_arch=="ia32"', {
             'cflags': [ '-m32' ],
             'ldflags': [ '-m32' ],
           }],
-          [ 'OS=="linux"', {
-            'cflags': [ '-ansi' ],
+        ],
+      }],
+    ],
+    'configurations': {
+      'Debug': {
+        'defines': [
+          'DEBUG',
+          'ENABLE_DISASSEMBLER',
+          'V8_ENABLE_CHECKS',
+          'OBJECT_PRINT',
+        ],
+        'msvs_settings': {
+          'VCCLCompilerTool': {
+            'Optimization': '0',
+
+            'conditions': [
+              ['OS=="win" and component=="shared_library"', {
+                'RuntimeLibrary': '3',  # /MDd
+              }, {
+                'RuntimeLibrary': '1',  # /MTd
+              }],
+            ],
+          },
+          'VCLinkerTool': {
+            'LinkIncremental': '2',
+            # For future reference, the stack size needs to be increased
+            # when building for Windows 64-bit, otherwise some test cases
+            # can cause stack overflow.
+            # 'StackReserveSize': '297152',
+          },
+        },
+        'conditions': [
+          ['OS=="freebsd" or OS=="openbsd"', {
+            'cflags': [ '-I/usr/local/include' ],
           }],
-          [ 'visibility=="hidden"', {
-            'cflags': [ '-fvisibility=hidden' ],
+          ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+            'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
+                        '-Wnon-virtual-dtor' ],
           }],
         ],
       },
-    }],
-  ],
+      'Release': {
+        'conditions': [
+          ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+            'cflags!': [
+              '-O2',
+              '-Os',
+            ],
+            'cflags': [
+              '-fdata-sections',
+              '-ffunction-sections',
+              '-fomit-frame-pointer',
+              '-O3',
+            ],
+            'conditions': [
+              [ 'gcc_version==44', {
+                'cflags': [
+                  # Avoid crashes with gcc 4.4 in the v8 test suite.
+                  '-fno-tree-vrp',
+                ],
+              }],
+            ],
+          }],
+          ['OS=="freebsd" or OS=="openbsd"', {
+            'cflags': [ '-I/usr/local/include' ],
+          }],
+          ['OS=="mac"', {
+            'xcode_settings': {
+              'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
+
+              # -fstrict-aliasing.  Mainline gcc
+              # enables this at -O2 and above,
+              # but Apple gcc does not unless it
+              # is specified explicitly.
+              'GCC_STRICT_ALIASING': 'YES',
+            },
+          }],
+          ['OS=="win"', {
+            'msvs_configuration_attributes': {
+              'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
+              'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
+              'CharacterSet': '1',
+            },
+            'msvs_settings': {
+              'VCCLCompilerTool': {
+                'Optimization': '2',
+                'InlineFunctionExpansion': '2',
+                'EnableIntrinsicFunctions': 'true',
+                'FavorSizeOrSpeed': '0',
+                'OmitFramePointers': 'true',
+                'StringPooling': 'true',
+
+                'conditions': [
+                  ['OS=="win" and component=="shared_library"', {
+                    'RuntimeLibrary': '2',  #/MD
+                  }, {
+                    'RuntimeLibrary': '0',  #/MT
+                  }],
+                ],
+              },
+              'VCLinkerTool': {
+                'LinkIncremental': '1',
+                'OptimizeReferences': '2',
+                'OptimizeForWindows98': '1',
+                'EnableCOMDATFolding': '2',
+                # For future reference, the stack size needs to be
+                # increased when building for Windows 64-bit, otherwise
+                # some test cases can cause stack overflow.
+                # 'StackReserveSize': '297152',
+              },
+            },
+          }],
+        ],
+      },
+    },
+  },
 }
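
The v8_enable_debugger_support, v8_enable_disassembler and v8_enable_gdbjit variables in the common.gypi hunk above are turned into preprocessor defines that gate the corresponding code paths. A minimal sketch of how such a define is typically consumed on the C++ side; only the ENABLE_DEBUGGER_SUPPORT define comes from the .gypi above, the helper name is made up:

// Illustrative only: ENABLE_DEBUGGER_SUPPORT is emitted by the
// v8_enable_debugger_support==1 condition in common.gypi above.
#ifdef ENABLE_DEBUGGER_SUPPORT
static const bool kBuiltWithDebuggerSupport = true;
#else
static const bool kBuiltWithDebuggerSupport = false;
#endif

bool BuiltWithDebuggerSupport() {  // hypothetical query helper
  return kBuiltWithDebuggerSupport;
}
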
diff --git a/build/gyp_v8 b/build/gyp_v8
index 0c2221e..dfdbe3f 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -38,9 +38,13 @@
 script_dir = os.path.dirname(__file__)
 v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
 
+sys.path.insert(0, os.path.join(v8_root, 'tools'))
+import utils
+
 sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
 import gyp
 
+
 def apply_gyp_environment(file_path=None):
   """
   Reads in a *.gyp_env file and applies the valid keys to os.environ.
@@ -68,6 +72,7 @@
       else:
         os.environ[var] = val
 
+
 def additional_include_files(args=[]):
   """
   Returns a list of additional (.gypi) files to include, without
@@ -87,8 +92,8 @@
     if os.path.realpath(path) not in specified_includes:
       result.append(path)
 
-  # Always include common.gypi & features_override.gypi
-  AddInclude(os.path.join(script_dir, 'common.gypi'))
+  # Always include standalone.gypi
+  AddInclude(os.path.join(script_dir, 'standalone.gypi'))
 
   # Optionally add supplemental .gypi files if present.
   supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@@ -97,6 +102,14 @@
 
   return result
 
+
+def run_gyp(args):
+  rc = gyp.main(args)
+  if rc != 0:
+    print 'Error running GYP'
+    sys.exit(rc)
+
+
 if __name__ == '__main__':
   args = sys.argv[1:]
 
@@ -141,5 +154,20 @@
   print 'Updating projects from gyp files...'
   sys.stdout.flush()
 
-  # Off we go...
-  sys.exit(gyp.main(args))
+  # Generate for the architectures supported on the given platform.
+  gyp_args = list(args)
+  gyp_args.append('-Dtarget_arch=ia32')
+  if utils.GuessOS() == 'linux':
+    gyp_args.append('-S-ia32')
+  run_gyp(gyp_args)
+
+  if utils.GuessOS() == 'linux':
+    gyp_args = list(args)
+    gyp_args.append('-Dtarget_arch=x64')
+    gyp_args.append('-S-x64')
+    run_gyp(gyp_args)
+
+    gyp_args = list(args)
+    gyp_args.append('-I' + v8_root + '/build/armu.gypi')
+    gyp_args.append('-S-armu')
+    run_gyp(gyp_args)
diff --git a/build/standalone.gypi b/build/standalone.gypi
new file mode 100644
index 0000000..cb5e133
--- /dev/null
+++ b/build/standalone.gypi
@@ -0,0 +1,196 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Definitions to be used when building stand-alone V8 binaries.
+
+{
+  'variables': {
+    'library%': 'static_library',
+    'component%': 'static_library',
+    'visibility%': 'hidden',
+    'msvs_multi_core_compile%': '1',
+    'variables': {
+      'variables': {
+        'conditions': [
+          [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+            # This handles the Linux platforms we generally deal with. Anything
+            # else gets passed through, which probably won't work very well; such
+            # hosts should pass an explicit target_arch to gyp.
+            'host_arch%':
+              '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
+          }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
+            'host_arch%': 'ia32',
+          }],
+        ],
+      },
+      'host_arch%': '<(host_arch)',
+      'target_arch%': '<(host_arch)',
+      'v8_target_arch%': '<(target_arch)',
+    },
+    'host_arch%': '<(host_arch)',
+    'target_arch%': '<(target_arch)',
+    'v8_target_arch%': '<(v8_target_arch)',
+    'conditions': [
+      ['(v8_target_arch=="arm" and host_arch!="arm") or \
+        (v8_target_arch=="x64" and host_arch!="x64")', {
+        'want_separate_host_toolset': 1,
+      }, {
+        'want_separate_host_toolset': 0,
+      }],
+    ],
+  },
+  'target_defaults': {
+    'default_configuration': 'Debug',
+    'configurations': {
+      'Debug': {
+        'cflags': [ '-g', '-O0' ],
+      },
+    },
+  },
+  'conditions': [
+    [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
+      'target_defaults': {
+        'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
+                    '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
+                    '-fno-exceptions', '-pedantic' ],
+        'ldflags': [ '-pthread', ],
+        'conditions': [
+          [ 'OS=="linux"', {
+            'cflags': [ '-ansi' ],
+          }],
+          [ 'visibility=="hidden"', {
+            'cflags': [ '-fvisibility=hidden' ],
+          }],
+          [ 'component=="shared_library"', {
+            'cflags': [ '-fPIC', ],
+          }],
+        ],
+      },
+    }],  # 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"'
+    ['OS=="win"', {
+      'target_defaults': {
+        'defines': [
+          'WIN32',
+          '_CRT_SECURE_NO_DEPRECATE',
+          '_CRT_NONSTDC_NO_DEPRECATE',
+        ],
+        'conditions': [
+          ['component=="static_library"', {
+            'defines': [
+              '_HAS_EXCEPTIONS=0',
+            ],
+          }],
+        ],
+        'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
+        'msvs_disabled_warnings': [4355, 4800],
+        'msvs_settings': {
+          'VCCLCompilerTool': {
+            'MinimalRebuild': 'false',
+            'BufferSecurityCheck': 'true',
+            'EnableFunctionLevelLinking': 'true',
+            'RuntimeTypeInfo': 'false',
+            'WarningLevel': '3',
+            'WarnAsError': 'true',
+            'DebugInformationFormat': '3',
+            'Detect64BitPortabilityProblems': 'false',
+            'conditions': [
+              [ 'msvs_multi_core_compile', {
+                'AdditionalOptions': ['/MP'],
+              }],
+              ['component=="shared_library"', {
+                'ExceptionHandling': '1',  # /EHsc
+              }, {
+                'ExceptionHandling': '0',
+              }],
+            ],
+          },
+          'VCLibrarianTool': {
+            'AdditionalOptions': ['/ignore:4221'],
+          },
+          'VCLinkerTool': {
+            'AdditionalDependencies': [
+              'ws2_32.lib',
+            ],
+            'GenerateDebugInformation': 'true',
+            'MapFileName': '$(OutDir)\\$(TargetName).map',
+            'ImportLibrary': '$(OutDir)\\lib\\$(TargetName).lib',
+            'FixedBaseAddress': '1',
+            # LinkIncremental values:
+            #   0 == default
+            #   1 == /INCREMENTAL:NO
+            #   2 == /INCREMENTAL
+            'LinkIncremental': '1',
+            # SubSystem values:
+            #   0 == not set
+            #   1 == /SUBSYSTEM:CONSOLE
+            #   2 == /SUBSYSTEM:WINDOWS
+            'SubSystem': '1',
+          },
+        },
+      },
+    }],  # OS=="win"
+    ['OS=="mac"', {
+      'target_defaults': {
+        'xcode_settings': {
+          'ALWAYS_SEARCH_USER_PATHS': 'NO',
+          'GCC_C_LANGUAGE_STANDARD': 'ansi',        # -ansi
+          'GCC_CW_ASM_SYNTAX': 'NO',                # No -fasm-blocks
+          'GCC_DYNAMIC_NO_PIC': 'NO',               # No -mdynamic-no-pic
+                                                    # (Equivalent to -fPIC)
+          'GCC_ENABLE_CPP_EXCEPTIONS': 'NO',        # -fno-exceptions
+          'GCC_ENABLE_CPP_RTTI': 'NO',              # -fno-rtti
+          'GCC_ENABLE_PASCAL_STRINGS': 'NO',        # No -mpascal-strings
+          # GCC_INLINES_ARE_PRIVATE_EXTERN maps to -fvisibility-inlines-hidden
+          'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
+          'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES',      # -fvisibility=hidden
+          'GCC_THREADSAFE_STATICS': 'NO',           # -fno-threadsafe-statics
+          'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES',    # -Werror
+          'GCC_VERSION': '4.2',
+          'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',  # -Wnewline-eof
+          'MACOSX_DEPLOYMENT_TARGET': '10.4',       # -mmacosx-version-min=10.4
+          'PREBINDING': 'NO',                       # No -Wl,-prebind
+          'USE_HEADERMAP': 'NO',
+          'OTHER_CFLAGS': [
+            '-fno-strict-aliasing',
+          ],
+          'WARNING_CFLAGS': [
+            '-Wall',
+            '-Wendif-labels',
+            '-W',
+            '-Wno-unused-parameter',
+            '-Wnon-virtual-dtor',
+          ],
+        },
+        'target_conditions': [
+          ['_type!="static_library"', {
+            'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
+          }],
+        ],  # target_conditions
+      },  # target_defaults
+    }],  # OS=="mac"
+  ],
+}
diff --git a/include/v8-debug.h b/include/v8-debug.h
old mode 100755
new mode 100644
index 0bdff84..504cbfe
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -127,7 +127,7 @@
     /**
      * Get the context active when the debug event happened. Note this is not
      * the current active context as the JavaScript part of the debugger is
-     * running in it's own context which is entered at this point.
+     * running in its own context which is entered at this point.
      */
     virtual Handle<Context> GetEventContext() const = 0;
 
@@ -164,12 +164,13 @@
     /**
      * Get the context active when the debug event happened. Note this is not
      * the current active context as the JavaScript part of the debugger is
-     * running in it's own context which is entered at this point.
+     * running in its own context which is entered at this point.
      */
     virtual Handle<Context> GetEventContext() const = 0;
 
     /**
-     * Client data passed with the corresponding callbak whet it was registered.
+     * Client data passed with the corresponding callback when it was
+     * registered.
      */
     virtual Handle<Value> GetCallbackData() const = 0;
 
@@ -310,7 +311,7 @@
   * get access to information otherwise not available during normal JavaScript
   * execution e.g. details on stack frames. Receiver of the function call will
   * be the debugger context global object, however this is a subject to change.
-  * The following example show a JavaScript function which when passed to
+  * The following example shows a JavaScript function which when passed to
   * v8::Debug::Call will return the current line of JavaScript execution.
   *
   * \code
@@ -352,7 +353,7 @@
    * 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
    * to reading and processing debug messages;
    * 3. V8 is not running at all or has called some long-working C++ function;
-   * by default it means that processing of all debug message will be deferred
+   * by default it means that processing of all debug messages will be deferred
    * until V8 gets control again; however, embedding application may improve
    * this by manually calling this method.
    *
@@ -376,7 +377,7 @@
   static void ProcessDebugMessages();
 
   /**
-   * Debugger is running in it's own context which is entered while debugger
+   * Debugger is running in its own context which is entered while debugger
    * messages are being dispatched. This is an explicit getter for this
    * debugger context. Note that the content of the debugger context is subject
    * to change.
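
As the comments in v8-debug.h above note, debug messages may be deferred while V8 is idle or stuck in long-running C++ code unless the embedder pumps them explicitly. A minimal sketch of that pumping; the loop hook name is made up, only Debug::ProcessDebugMessages comes from this header:

#include <v8-debug.h>

// Illustrative idle hook (the function name is a placeholder): process
// pending debugger protocol messages so they are not deferred indefinitely
// while the embedder sits in its own event loop.
void OnEmbedderIdle() {
  v8::Debug::ProcessDebugMessages();
}
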
diff --git a/include/v8-preparser.h b/include/v8-preparser.h
index 4d46bad..f11d05e 100644
--- a/include/v8-preparser.h
+++ b/include/v8-preparser.h
@@ -66,7 +66,8 @@
 
 namespace v8 {
 
-
+// The result of preparsing is either a stack overflow error, or an opaque
+// blob of data that can be passed back into the parser.
 class V8EXPORT PreParserData {
  public:
   PreParserData(size_t size, const uint8_t* data)
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index db56e26..f67646f 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -206,7 +206,7 @@
 
 /**
  * HeapSnapshotEdge represents a directed connection between heap
- * graph nodes: from retaners to retained nodes.
+ * graph nodes: from retainers to retained nodes.
  */
 class V8EXPORT HeapGraphEdge {
  public:
@@ -269,17 +269,10 @@
 
   /**
    * Returns node id. For the same heap object, the id remains the same
-   * across all snapshots. Not applicable to aggregated heap snapshots
-   * as they only contain aggregated instances.
+   * across all snapshots.
    */
   uint64_t GetId() const;
 
-  /**
-   * Returns the number of instances. Only applicable to aggregated
-   * heap snapshots.
-   */
-  int GetInstancesCount() const;
-
   /** Returns node's own size, in bytes. */
   int GetSelfSize() const;
 
@@ -314,6 +307,12 @@
    * path from the snapshot root to the current node.
    */
   const HeapGraphNode* GetDominatorNode() const;
+
+  /**
+   * Finds and returns a value from the heap corresponding to this node,
+   * if the value is still reachable.
+   */
+  Handle<Value> GetHeapValue() const;
 };
 
 
@@ -323,9 +322,7 @@
 class V8EXPORT HeapSnapshot {
  public:
   enum Type {
-    kFull = 0,       // Heap snapshot with all instances and references.
-    kAggregated = 1  // Snapshot doesn't contain individual heap entries,
-                     // instead they are grouped by constructor name.
+    kFull = 0  // Heap snapshot with all instances and references.
   };
   enum SerializationFormat {
     kJSON = 0  // See format description near 'Serialize' method.
@@ -346,6 +343,12 @@
   /** Returns a node by its id. */
   const HeapGraphNode* GetNodeById(uint64_t id) const;
 
+  /** Returns the total node count in the snapshot. */
+  int GetNodesCount() const;
+
+  /** Returns a node by index. */
+  const HeapGraphNode* GetNode(int index) const;
+
   /**
    * Deletes the snapshot and removes it from HeapProfiler's list.
    * All pointers to nodes, edges and paths previously returned become
@@ -357,7 +360,7 @@
    * Prepare a serialized representation of the snapshot. The result
    * is written into the stream provided in chunks of specified size.
    * The total length of the serialized snapshot is unknown in
-   * advance, it is can be roughly equal to JS heap size (that means,
+   * advance, it can be roughly equal to JS heap size (that means,
    * it can be really big - tens of megabytes).
    *
    * For the JSON format, heap contents are represented as an object
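
The new GetNodesCount/GetNode/GetHeapValue methods make it possible to walk a full snapshot and map nodes back to live values. A hedged sketch, assuming an entered context and that HeapProfiler::TakeSnapshot (declared elsewhere in this header) is called with its default arguments; the helper name is made up:

#include <cstdio>
#include <v8.h>
#include <v8-profiler.h>

// Illustrative helper: report the self size of every snapshot node whose
// value is still reachable on the heap.
void DumpReachableNodeSizes() {
  v8::HandleScope scope;
  const v8::HeapSnapshot* snapshot =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("example"));
  for (int i = 0; i < snapshot->GetNodesCount(); ++i) {
    const v8::HeapGraphNode* node = snapshot->GetNode(i);
    v8::Handle<v8::Value> value = node->GetHeapValue();  // may be empty
    if (!value.IsEmpty()) {
      std::printf("node %llu: %d bytes\n",
                  static_cast<unsigned long long>(node->GetId()),
                  node->GetSelfSize());
    }
  }
}
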
diff --git a/include/v8.h b/include/v8.h
index d15d024..4b7f6e7 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -80,9 +80,11 @@
 
 class Context;
 class String;
+class StringObject;
 class Value;
 class Utils;
 class Number;
+class NumberObject;
 class Object;
 class Array;
 class Int32;
@@ -90,6 +92,7 @@
 class External;
 class Primitive;
 class Boolean;
+class BooleanObject;
 class Integer;
 class Function;
 class Date;
@@ -115,7 +118,7 @@
 }
 
 
-// --- W e a k  H a n d l e s
+// --- Weak Handles ---
 
 
 /**
@@ -131,7 +134,7 @@
                                       void* parameter);
 
 
-// --- H a n d l e s ---
+// --- Handles ---
 
 #define TYPE_CHECK(T, S)                                       \
   while (false) {                                              \
@@ -159,29 +162,28 @@
  *
  * It is safe to extract the object stored in the handle by
  * dereferencing the handle (for instance, to extract the Object* from
- * an Handle<Object>); the value will still be governed by a handle
+ * a Handle<Object>); the value will still be governed by a handle
  * behind the scenes and the same rules apply to these values as to
  * their handles.
  */
 template <class T> class Handle {
  public:
-
   /**
    * Creates an empty handle.
    */
-  inline Handle();
+  inline Handle() : val_(0) {}
 
   /**
    * Creates a new handle for the specified value.
    */
-  inline explicit Handle(T* val) : val_(val) { }
+  inline explicit Handle(T* val) : val_(val) {}
 
   /**
    * Creates a handle for the contents of the specified handle.  This
    * constructor allows you to pass handles as arguments by value and
    * to assign between handles.  However, if you try to assign between
    * incompatible handles, for instance from a Handle<String> to a
-   * Handle<Number> it will cause a compiletime error.  Assigning
+   * Handle<Number> it will cause a compile-time error.  Assigning
    * between compatible handles, for instance assigning a
    * Handle<String> to a variable declared as Handle<Value>, is legal
    * because String is a subclass of Value.
@@ -201,14 +203,14 @@
    */
   inline bool IsEmpty() const { return val_ == 0; }
 
-  inline T* operator->() const { return val_; }
-
-  inline T* operator*() const { return val_; }
-
   /**
    * Sets the handle to be empty. IsEmpty() will then return true.
    */
-  inline void Clear() { this->val_ = 0; }
+  inline void Clear() { val_ = 0; }
+
+  inline T* operator->() const { return val_; }
+
+  inline T* operator*() const { return val_; }
 
   /**
    * Checks whether two handles are the same.
@@ -312,7 +314,6 @@
  */
 template <class T> class Persistent : public Handle<T> {
  public:
-
   /**
    * Creates an empty persistent handle that doesn't point to any
    * storage cell.
@@ -325,7 +326,7 @@
    * handles as arguments by value and to assign between persistent
    * handles.  However, attempting to assign between incompatible
    * persistent handles, for instance from a Persistent<String> to a
-   * Persistent<Number> will cause a compiletime error.  Assigning
+   * Persistent<Number> will cause a compile-time error.  Assigning
    * between compatible persistent handles, for instance assigning a
    * Persistent<String> to a variable declared as Persistent<Value>,
    * is allowed as String is a subclass of Value.
@@ -371,7 +372,7 @@
   /**
    * Releases the storage cell referenced by this persistent handle.
    * Does not remove the reference to the cell from any handles.
-   * This handle's reference, and any any other references to the storage
+   * This handle's reference, and any other references to the storage
    * cell remain and IsEmpty will still return false.
    */
   inline void Dispose();
@@ -388,6 +389,15 @@
   inline void ClearWeak();
 
   /**
+   * Marks the reference to this object independent. The garbage collector
+   * is free to ignore any object groups containing this object. A weak
+   * callback for an independent handle should not assume that it will be
+   * preceded by a global GC prologue callback or followed by a global GC
+   * epilogue callback.
+   */
+  inline void MarkIndependent();
+
+  /**
    *Checks if the handle holds the only reference to an object.
    */
   inline bool IsNearDeath() const;
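
A minimal sketch of the new MarkIndependent call in the usual weak-handle pattern; Point, WrapPoint and OnWeakPoint are placeholder names, everything else is taken from the handle API above:

#include <v8.h>

// Illustrative weak-handle pattern around MarkIndependent.
struct Point { int x, y; };

void OnWeakPoint(v8::Persistent<v8::Value> handle, void* parameter) {
  delete static_cast<Point*>(parameter);
  handle.Dispose();
}

v8::Persistent<v8::Object> WrapPoint(v8::Handle<v8::Object> wrapper,
                                     Point* native) {
  v8::Persistent<v8::Object> handle = v8::Persistent<v8::Object>::New(wrapper);
  handle.MakeWeak(native, OnWeakPoint);
  // An independent handle may be reclaimed regardless of object groups and,
  // as documented above, without the global GC prologue/epilogue callbacks
  // bracketing its weak callback.
  handle.MarkIndependent();
  return handle;
}
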
@@ -483,7 +493,7 @@
 };
 
 
-// --- S p e c i a l   o b j e c t s ---
+// --- Special objects ---
 
 
 /**
@@ -577,7 +587,6 @@
  */
 class V8EXPORT Script {
  public:
-
   /**
    * Compiles the specified script (context-independent).
    *
@@ -775,7 +784,7 @@
   Local<Array> AsArray();
 
   /**
-   * Grab a snapshot of the the current JavaScript execution stack.
+   * Grab a snapshot of the current JavaScript execution stack.
    *
    * \param frame_limit The maximum number of stack frames we want to capture.
    * \param options Enumerates the set of things we will capture for each
@@ -834,14 +843,14 @@
   bool IsEval() const;
 
   /**
-   * Returns whther or not the associated function is called as a
+   * Returns whether or not the associated function is called as a
    * constructor via "new".
    */
   bool IsConstructor() const;
 };
 
 
-// --- V a l u e ---
+// --- Value ---
 
 
 /**
@@ -849,7 +858,6 @@
  */
 class Value : public Data {
  public:
-
   /**
    * Returns true if this value is the undefined value.  See ECMA-262
    * 4.3.10.
@@ -924,6 +932,26 @@
   V8EXPORT bool IsDate() const;
 
   /**
+   * Returns true if this value is a Boolean object.
+   */
+  V8EXPORT bool IsBooleanObject() const;
+
+  /**
+   * Returns true if this value is a Number object.
+   */
+  V8EXPORT bool IsNumberObject() const;
+
+  /**
+   * Returns true if this value is a String object.
+   */
+  V8EXPORT bool IsStringObject() const;
+
+  /**
+   * Returns true if this value is a NativeError.
+   */
+  V8EXPORT bool IsNativeError() const;
+
+  /**
    * Returns true if this value is a RegExp.
    */
   V8EXPORT bool IsRegExp() const;
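
The new predicates distinguish primitive values from their wrapper objects. A small hedged example; the Describe function is a placeholder, the predicates come from the hunk above:

// Illustrative type dispatch using the new Value predicates.
const char* Describe(v8::Handle<v8::Value> value) {
  if (value->IsStringObject())  return "String wrapper object";
  if (value->IsNumberObject())  return "Number wrapper object";
  if (value->IsBooleanObject()) return "Boolean wrapper object";
  if (value->IsNativeError())   return "native Error object";
  if (value->IsString())        return "primitive string";
  return "something else";
}
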
@@ -981,7 +1009,6 @@
  */
 class String : public Primitive {
  public:
-
   /**
    * Returns the number of characters in this string.
    */
@@ -1012,29 +1039,33 @@
    * \param length The number of characters to copy from the string.  For
    *    WriteUtf8 the number of bytes in the buffer.
    * \param nchars_ref The number of characters written, can be NULL.
-   * \param hints Various hints that might affect performance of this or
+   * \param options Various options that might affect performance of this or
    *    subsequent operations.
    * \return The number of characters copied to the buffer excluding the null
    *    terminator.  For WriteUtf8: The number of bytes copied to the buffer
-   *    including the null terminator.
+   *    including the null terminator (if written).
    */
-  enum WriteHints {
-    NO_HINTS = 0,
-    HINT_MANY_WRITES_EXPECTED = 1
+  enum WriteOptions {
+    NO_OPTIONS = 0,
+    HINT_MANY_WRITES_EXPECTED = 1,
+    NO_NULL_TERMINATION = 2
   };
 
+  // 16-bit character codes.
   V8EXPORT int Write(uint16_t* buffer,
                      int start = 0,
                      int length = -1,
-                     WriteHints hints = NO_HINTS) const;  // UTF-16
+                     int options = NO_OPTIONS) const;
+  // ASCII characters.
   V8EXPORT int WriteAscii(char* buffer,
                           int start = 0,
                           int length = -1,
-                          WriteHints hints = NO_HINTS) const;  // ASCII
+                          int options = NO_OPTIONS) const;
+  // UTF-8 encoded characters.
   V8EXPORT int WriteUtf8(char* buffer,
                          int length = -1,
                          int* nchars_ref = NULL,
-                         WriteHints hints = NO_HINTS) const;  // UTF-8
+                         int options = NO_OPTIONS) const;
 
   /**
    * A zero length string.
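
With the new NO_NULL_TERMINATION option the caller owns termination, so WriteUtf8 can fill a buffer exactly. A minimal sketch, assuming a live string handle; the helper name is made up:

// Illustrative helper: fill 'buffer' with up to 'capacity' bytes of UTF-8
// with no trailing NUL appended; returns the number of bytes written.
int CopyUtf8(v8::Handle<v8::String> str, char* buffer, int capacity) {
  return str->WriteUtf8(buffer, capacity, NULL,
                        v8::String::NO_NULL_TERMINATION);
}
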
@@ -1047,7 +1078,7 @@
   V8EXPORT bool IsExternal() const;
 
   /**
-   * Returns true if the string is both external and ascii
+   * Returns true if the string is both external and ASCII
    */
   V8EXPORT bool IsExternalAscii() const;
 
@@ -1104,11 +1135,11 @@
   };
 
   /**
-   * An ExternalAsciiStringResource is a wrapper around an ascii
+   * An ExternalAsciiStringResource is a wrapper around an ASCII
    * string buffer that resides outside V8's heap. Implement an
    * ExternalAsciiStringResource to manage the life cycle of the
    * underlying buffer.  Note that the string data must be immutable
-   * and that the data must be strict 7-bit ASCII, not Latin1 or
+   * and that the data must be strict (7-bit) ASCII, not Latin-1 or
    * UTF-8, which would require special treatment internally in the
    * engine and, in the case of UTF-8, do not allow efficient indexing.
    * Use String::New or convert to 16 bit data for non-ASCII.
@@ -1124,7 +1155,7 @@
     virtual ~ExternalAsciiStringResource() {}
     /** The string data from the underlying buffer.*/
     virtual const char* data() const = 0;
-    /** The number of ascii characters in the string.*/
+    /** The number of ASCII characters in the string.*/
     virtual size_t length() const = 0;
    protected:
     ExternalAsciiStringResource() {}
@@ -1137,7 +1168,7 @@
   inline ExternalStringResource* GetExternalStringResource() const;
 
   /**
-   * Get the ExternalAsciiStringResource for an external ascii string.
+   * Get the ExternalAsciiStringResource for an external ASCII string.
    * Returns NULL if IsExternalAscii() doesn't return true.
    */
   V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
@@ -1145,9 +1176,9 @@
   static inline String* Cast(v8::Value* obj);
 
   /**
-   * Allocates a new string from either utf-8 encoded or ascii data.
+   * Allocates a new string from either UTF-8 encoded or ASCII data.
    * The second parameter 'length' gives the buffer length.
-   * If the data is utf-8 encoded, the caller must
+   * If the data is UTF-8 encoded, the caller must
    * be careful to supply the length parameter.
    * If it is not given, the function calls
    * 'strlen' to determine the buffer length, it might be
@@ -1155,7 +1186,7 @@
    */
   V8EXPORT static Local<String> New(const char* data, int length = -1);
 
-  /** Allocates a new string from utf16 data.*/
+  /** Allocates a new string from 16-bit character codes.*/
   V8EXPORT static Local<String> New(const uint16_t* data, int length = -1);
 
   /** Creates a symbol. Returns one if it exists already.*/
@@ -1182,7 +1213,7 @@
    * Associate an external string resource with this string by transforming it
    * in place so that existing references to this string in the JavaScript heap
    * will use the external string resource. The external string resource's
-   * character contents needs to be equivalent to this string.
+   * character contents need to be equivalent to this string.
    * Returns true if the string has been changed to be an external string.
    * The string is not modified if the operation fails. See NewExternal for
    * information on the lifetime of the resource.
@@ -1190,7 +1221,7 @@
   V8EXPORT bool MakeExternal(ExternalStringResource* resource);
 
   /**
-   * Creates a new external string using the ascii data defined in the given
+   * Creates a new external string using the ASCII data defined in the given
    * resource. When the external string is no longer live on V8's heap the
    * resource will be disposed by calling its Dispose method. The caller of
    * this function should not otherwise delete or modify the resource. Neither
@@ -1204,7 +1235,7 @@
    * Associate an external string resource with this string by transforming it
    * in place so that existing references to this string in the JavaScript heap
    * will use the external string resource. The external string resource's
-   * character contents needs to be equivalent to this string.
+   * character contents need to be equivalent to this string.
    * Returns true if the string has been changed to be an external string.
    * The string is not modified if the operation fails. See NewExternal for
    * information on the lifetime of the resource.
@@ -1216,18 +1247,18 @@
    */
   V8EXPORT bool CanMakeExternal();
 
-  /** Creates an undetectable string from the supplied ascii or utf-8 data.*/
+  /** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
   V8EXPORT static Local<String> NewUndetectable(const char* data,
                                                 int length = -1);
 
-  /** Creates an undetectable string from the supplied utf-16 data.*/
+  /** Creates an undetectable string from the supplied 16-bit character codes.*/
   V8EXPORT static Local<String> NewUndetectable(const uint16_t* data,
                                                 int length = -1);
 
   /**
-   * Converts an object to a utf8-encoded character array.  Useful if
+   * Converts an object to a UTF-8-encoded character array.  Useful if
    * you want to print the object.  If conversion to a string fails
-   * (eg. due to an exception in the toString() method of the object)
+   * (e.g. due to an exception in the toString() method of the object)
    * then the length() method returns 0 and the * operator returns
    * NULL.
    */
@@ -1248,7 +1279,7 @@
   };
 
   /**
-   * Converts an object to an ascii string.
+   * Converts an object to an ASCII string.
    * Useful if you want to print the object.
    * If conversion to a string fails (eg. due to an exception in the toString()
    * method of the object) then the length() method returns 0 and the * operator
@@ -1308,7 +1339,7 @@
   static inline Number* Cast(v8::Value* obj);
  private:
   V8EXPORT Number();
-  static void CheckCast(v8::Value* obj);
+  V8EXPORT static void CheckCast(v8::Value* obj);
 };
 
 
@@ -1349,87 +1380,6 @@
 };
 
 
-/**
- * An instance of the built-in Date constructor (ECMA-262, 15.9).
- */
-class Date : public Value {
- public:
-  V8EXPORT static Local<Value> New(double time);
-
-  /**
-   * A specialization of Value::NumberValue that is more efficient
-   * because we know the structure of this object.
-   */
-  V8EXPORT double NumberValue() const;
-
-  static inline Date* Cast(v8::Value* obj);
-
-  /**
-   * Notification that the embedder has changed the time zone,
-   * daylight savings time, or other date / time configuration
-   * parameters.  V8 keeps a cache of various values used for
-   * date / time computation.  This notification will reset
-   * those cached values for the current context so that date /
-   * time configuration changes would be reflected in the Date
-   * object.
-   *
-   * This API should not be called more than needed as it will
-   * negatively impact the performance of date operations.
-   */
-  V8EXPORT static void DateTimeConfigurationChangeNotification();
-
- private:
-  V8EXPORT static void CheckCast(v8::Value* obj);
-};
-
-
-/**
- * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
- */
-class RegExp : public Value {
- public:
-  /**
-   * Regular expression flag bits. They can be or'ed to enable a set
-   * of flags.
-   */
-  enum Flags {
-    kNone = 0,
-    kGlobal = 1,
-    kIgnoreCase = 2,
-    kMultiline = 4
-  };
-
-  /**
-   * Creates a regular expression from the given pattern string and
-   * the flags bit field. May throw a JavaScript exception as
-   * described in ECMA-262, 15.10.4.1.
-   *
-   * For example,
-   *   RegExp::New(v8::String::New("foo"),
-   *               static_cast<RegExp::Flags>(kGlobal | kMultiline))
-   * is equivalent to evaluating "/foo/gm".
-   */
-  V8EXPORT static Local<RegExp> New(Handle<String> pattern,
-                                    Flags flags);
-
-  /**
-   * Returns the value of the source property: a string representing
-   * the regular expression.
-   */
-  V8EXPORT Local<String> GetSource() const;
-
-  /**
-   * Returns the flags bit field.
-   */
-  V8EXPORT Flags GetFlags() const;
-
-  static inline RegExp* Cast(v8::Value* obj);
-
- private:
-  V8EXPORT static void CheckCast(v8::Value* obj);
-};
-
-
 enum PropertyAttribute {
   None       = 0,
   ReadOnly   = 1 << 0,
@@ -1445,6 +1395,7 @@
   kExternalIntArray,
   kExternalUnsignedIntArray,
   kExternalFloatArray,
+  kExternalDoubleArray,
   kExternalPixelArray
 };
 
@@ -1511,6 +1462,13 @@
 
   V8EXPORT Local<Value> Get(uint32_t index);
 
+  /**
+   * Gets the property attributes of a property which can be None or
+   * any combination of ReadOnly, DontEnum and DontDelete. Returns
+   * None when the property doesn't exist.
+   */
+  V8EXPORT PropertyAttribute GetPropertyAttributes(Handle<Value> key);
+
   // TODO(1245389): Replace the type-specific versions of these
   // functions with generic ones that accept a Handle<Value> key.
   V8EXPORT bool Has(Handle<String> key);
@@ -1541,6 +1499,13 @@
   V8EXPORT Local<Array> GetPropertyNames();
 
   /**
+   * This function has the same functionality as GetPropertyNames but
+   * the returned array doesn't contain the names of properties from
+   * prototype objects.
+   */
+  V8EXPORT Local<Array> GetOwnPropertyNames();
+
+  /**
    * Get the prototype object.  This does not skip objects marked to
    * be skipped by __proto__ and it does not consult the security
    * handler.
@@ -1587,6 +1552,7 @@
   V8EXPORT void SetPointerInInternalField(int index, void* value);
 
   // Testers for local properties.
+  V8EXPORT bool HasOwnProperty(Handle<String> key);
   V8EXPORT bool HasRealNamedProperty(Handle<String> key);
   V8EXPORT bool HasRealIndexedProperty(uint32_t index);
   V8EXPORT bool HasRealNamedCallbackProperty(Handle<String> key);
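
Together, GetOwnPropertyNames, GetPropertyAttributes and HasOwnProperty (all added in the hunks above) cover the common "own properties only" introspection cases. A hedged sketch, assuming a HandleScope; the helper name is made up:

// Illustrative: count an object's own, non-read-only properties.
int CountWritableOwnProperties(v8::Handle<v8::Object> obj) {
  v8::Local<v8::Array> names = obj->GetOwnPropertyNames();
  int count = 0;
  for (uint32_t i = 0; i < names->Length(); ++i) {
    v8::Local<v8::String> key = names->Get(i)->ToString();
    if (!obj->HasOwnProperty(key)) continue;  // defensive; normally true
    if ((obj->GetPropertyAttributes(key) & v8::ReadOnly) == 0) ++count;
  }
  return count;
}
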
@@ -1619,8 +1585,8 @@
   V8EXPORT void TurnOnAccessCheck();
 
   /**
-   * Returns the identity hash for this object. The current implemenation uses
-   * a hidden property on the object to store the identity hash.
+   * Returns the identity hash for this object. The current implementation
+   * uses a hidden property on the object to store the identity hash.
    *
    * The return value will never be 0. Also, it is not guaranteed to be
    * unique.
@@ -1685,8 +1651,32 @@
   V8EXPORT ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
   V8EXPORT int GetIndexedPropertiesExternalArrayDataLength();
 
+  /**
+   * Checks whether a callback is set by the
+   * ObjectTemplate::SetCallAsFunctionHandler method.
+   * When an Object is callable this method returns true.
+   */
+  V8EXPORT bool IsCallable();
+
+  /**
+   * Call an Object as a function if a callback is set by the
+   * ObjectTemplate::SetCallAsFunctionHandler method.
+   */
+  V8EXPORT Local<Value> CallAsFunction(Handle<Object> recv,
+                                       int argc,
+                                       Handle<Value> argv[]);
+
+  /**
+   * Call an Object as a constructor if a callback is set by the
+   * ObjectTemplate::SetCallAsFunctionHandler method.
+   * Note: This method behaves like the Function::NewInstance method.
+   */
+  V8EXPORT Local<Value> CallAsConstructor(int argc,
+                                          Handle<Value> argv[]);
+
   V8EXPORT static Local<Object> New();
   static inline Object* Cast(Value* obj);
+
  private:
   V8EXPORT Object();
   V8EXPORT static void CheckCast(Value* obj);
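
A minimal sketch of calling a callable non-Function object via the methods added above, assuming an entered context and a HandleScope; exception handling is omitted and the helper name is made up:

// Illustrative: invoke 'obj' as a function if the embedder made it callable
// via ObjectTemplate::SetCallAsFunctionHandler.
v8::Handle<v8::Value> InvokeIfCallable(v8::Handle<v8::Object> obj,
                                       v8::Handle<v8::Object> recv) {
  if (!obj->IsCallable()) return v8::Handle<v8::Value>();  // empty handle
  v8::Handle<v8::Value> argv[] = { v8::Integer::New(42) };
  return obj->CallAsFunction(recv, 1, argv);
}
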
@@ -1723,7 +1713,7 @@
   static inline Array* Cast(Value* obj);
  private:
   V8EXPORT Array();
-  static void CheckCast(Value* obj);
+  V8EXPORT static void CheckCast(Value* obj);
 };
 
 
@@ -1755,6 +1745,144 @@
 
 
 /**
+ * An instance of the built-in Date constructor (ECMA-262, 15.9).
+ */
+class Date : public Object {
+ public:
+  V8EXPORT static Local<Value> New(double time);
+
+  /**
+   * A specialization of Value::NumberValue that is more efficient
+   * because we know the structure of this object.
+   */
+  V8EXPORT double NumberValue() const;
+
+  static inline Date* Cast(v8::Value* obj);
+
+  /**
+   * Notification that the embedder has changed the time zone,
+   * daylight savings time, or other date / time configuration
+   * parameters.  V8 keeps a cache of various values used for
+   * date / time computation.  This notification will reset
+   * those cached values for the current context so that date /
+   * time configuration changes would be reflected in the Date
+   * object.
+   *
+   * This API should not be called more than needed as it will
+   * negatively impact the performance of date operations.
+   */
+  V8EXPORT static void DateTimeConfigurationChangeNotification();
+
+ private:
+  V8EXPORT static void CheckCast(v8::Value* obj);
+};
+
+
+/**
+ * A Number object (ECMA-262, 4.3.21).
+ */
+class NumberObject : public Object {
+ public:
+  V8EXPORT static Local<Value> New(double value);
+
+  /**
+   * Returns the Number held by the object.
+   */
+  V8EXPORT double NumberValue() const;
+
+  static inline NumberObject* Cast(v8::Value* obj);
+
+ private:
+  V8EXPORT static void CheckCast(v8::Value* obj);
+};
+
+
+/**
+ * A Boolean object (ECMA-262, 4.3.15).
+ */
+class BooleanObject : public Object {
+ public:
+  V8EXPORT static Local<Value> New(bool value);
+
+  /**
+   * Returns the Boolean held by the object.
+   */
+  V8EXPORT bool BooleanValue() const;
+
+  static inline BooleanObject* Cast(v8::Value* obj);
+
+ private:
+  V8EXPORT static void CheckCast(v8::Value* obj);
+};
+
+
+/**
+ * A String object (ECMA-262, 4.3.18).
+ */
+class StringObject : public Object {
+ public:
+  V8EXPORT static Local<Value> New(Handle<String> value);
+
+  /**
+   * Returns the String held by the object.
+   */
+  V8EXPORT Local<String> StringValue() const;
+
+  static inline StringObject* Cast(v8::Value* obj);
+
+ private:
+  V8EXPORT static void CheckCast(v8::Value* obj);
+};
+
+
+/**
+ * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
+ */
+class RegExp : public Object {
+ public:
+  /**
+   * Regular expression flag bits. They can be or'ed to enable a set
+   * of flags.
+   */
+  enum Flags {
+    kNone = 0,
+    kGlobal = 1,
+    kIgnoreCase = 2,
+    kMultiline = 4
+  };
+
+  /**
+   * Creates a regular expression from the given pattern string and
+   * the flags bit field. May throw a JavaScript exception as
+   * described in ECMA-262, 15.10.4.1.
+   *
+   * For example,
+   *   RegExp::New(v8::String::New("foo"),
+   *               static_cast<RegExp::Flags>(kGlobal | kMultiline))
+   * is equivalent to evaluating "/foo/gm".
+   */
+  V8EXPORT static Local<RegExp> New(Handle<String> pattern,
+                                    Flags flags);
+
+  /**
+   * Returns the value of the source property: a string representing
+   * the regular expression.
+   */
+  V8EXPORT Local<String> GetSource() const;
+
+  /**
+   * Returns the flags bit field.
+   */
+  V8EXPORT Flags GetFlags() const;
+
+  static inline RegExp* Cast(v8::Value* obj);
+
+ private:
+  V8EXPORT static void CheckCast(v8::Value* obj);
+};
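
A short sketch of creating and inspecting the new wrapper and RegExp types (hedged: assumes an entered Context and an active HandleScope):

    // Sketch only.
    // Date::New takes milliseconds since the epoch, as in JS Date.
    v8::Local<v8::Value> date = v8::Date::New(1302790800000.0);
    double ms = v8::Date::Cast(*date)->NumberValue();

    v8::Local<v8::Value> boxed = v8::NumberObject::New(42.0);
    double n = v8::NumberObject::Cast(*boxed)->NumberValue();

    // Equivalent to evaluating "/foo/gm".
    v8::Local<v8::RegExp> re = v8::RegExp::New(
        v8::String::New("foo"),
        static_cast<v8::RegExp::Flags>(v8::RegExp::kGlobal |
                                       v8::RegExp::kMultiline));
    v8::RegExp::Flags flags = re->GetFlags();        // kGlobal | kMultiline
    v8::Local<v8::String> source = re->GetSource();  // "foo"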
+
+
+/**
  * A JavaScript value that wraps a C++ void*.  This type of value is
  * mainly used to associate C++ data structures with JavaScript
  * objects.
@@ -1781,7 +1909,7 @@
 };
 
 
-// --- T e m p l a t e s ---
+// --- Templates ---
 
 
 /**
@@ -2107,6 +2235,12 @@
   void SetHiddenPrototype(bool value);
 
   /**
+   * Sets the ReadOnly flag in the attributes of the 'prototype' property
+   * of functions created from this FunctionTemplate to true.
+   */
+  void ReadOnlyPrototype();
+
+  /**
    * Returns true if the given object is an instance of this function
    * template.
    */
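
A brief sketch of the new ReadOnlyPrototype call (hedged: `MyConstructor` is a hypothetical invocation callback; assumes an entered Context and HandleScope in the caller):

    // Sketch only; MyConstructor is a hypothetical callback.
    static v8::Handle<v8::Value> MyConstructor(const v8::Arguments& args) {
      return args.This();
    }

    static v8::Local<v8::Function> MakeConstructor() {
      v8::Local<v8::FunctionTemplate> tmpl =
          v8::FunctionTemplate::New(MyConstructor);
      // The 'prototype' property of functions created from tmpl is read-only.
      tmpl->ReadOnlyPrototype();
      return tmpl->GetFunction();
    }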
@@ -2218,7 +2352,7 @@
    *
    * \param getter The callback to invoke when getting a property.
    * \param setter The callback to invoke when setting a property.
-   * \param query The callback to invoke to check is an object has a property.
+   * \param query The callback to invoke to check if an object has a property.
    * \param deleter The callback to invoke when deleting a property.
    * \param enumerator The callback to invoke to enumerate all the indexed
    *   properties of an object.
@@ -2315,7 +2449,7 @@
 };
 
 
-// --- E x t e n s i o n s ---
+// --- Extensions ---
 
 
 /**
@@ -2367,7 +2501,7 @@
 };
 
 
-// --- S t a t i c s ---
+// --- Statics ---
 
 
 Handle<Primitive> V8EXPORT Undefined();
@@ -2408,7 +2542,7 @@
 bool V8EXPORT SetResourceConstraints(ResourceConstraints* constraints);
 
 
-// --- E x c e p t i o n s ---
+// --- Exceptions ---
 
 
 typedef void (*FatalErrorCallback)(const char* location, const char* message);
@@ -2439,7 +2573,7 @@
 };
 
 
-// --- C o u n t e r s  C a l l b a c k s ---
+// --- Counters Callbacks ---
 
 typedef int* (*CounterLookupCallback)(const char* name);
 
@@ -2450,7 +2584,7 @@
 
 typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
 
-// --- M e m o r y  A l l o c a t i o n   C a l l b a c k ---
+// --- Memory Allocation Callback ---
   enum ObjectSpace {
     kObjectSpaceNewSpace = 1 << 0,
     kObjectSpaceOldPointerSpace = 1 << 1,
@@ -2474,12 +2608,20 @@
                                          AllocationAction action,
                                          int size);
 
-// --- F a i l e d A c c e s s C h e c k C a l l b a c k ---
+// --- Failed Access Check Callback ---
 typedef void (*FailedAccessCheckCallback)(Local<Object> target,
                                           AccessType type,
                                           Local<Value> data);
 
-// --- G a r b a g e C o l l e c t i o n  C a l l b a c k s
+// --- AllowCodeGenerationFromStrings callbacks ---
+
+/**
+ * Callback to check if code generation from strings is allowed. See
+ * Context::AllowCodeGenerationFromStrings.
+ */
+typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context);
+
+// --- Garbage Collection Callbacks ---
 
 /**
  * Applications can register callback functions which will be called
@@ -2506,23 +2648,6 @@
 
 
 /**
- * Profiler modules.
- *
- * In V8, profiler consists of several modules: CPU profiler, and different
- * kinds of heap profiling. Each can be turned on / off independently.
- * When PROFILER_MODULE_HEAP_SNAPSHOT flag is passed to ResumeProfilerEx,
- * modules are enabled only temporarily for making a snapshot of the heap.
- */
-enum ProfilerModules {
-  PROFILER_MODULE_NONE            = 0,
-  PROFILER_MODULE_CPU             = 1,
-  PROFILER_MODULE_HEAP_STATS      = 1 << 1,
-  PROFILER_MODULE_JS_CONSTRUCTORS = 1 << 2,
-  PROFILER_MODULE_HEAP_SNAPSHOT   = 1 << 16
-};
-
-
-/**
  * Collection of V8 heap information.
  *
  * Instances of this class can be passed to v8::V8::HeapStatistics to
@@ -2628,8 +2753,18 @@
    */
   void Dispose();
 
- private:
+  /**
+   * Associate embedder-specific data with the isolate.
+   */
+  void SetData(void* data);
 
+  /**
+   * Retrieve embedder-specific data from the isolate.
+   * Returns NULL if SetData has never been called.
+   */
+  void* GetData();
+
+ private:
   Isolate();
   Isolate(const Isolate&);
   ~Isolate();
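
A minimal sketch of the new embedder-data slot (hedged: EmbedderState is a hypothetical embedder-defined type):

    // Sketch only; EmbedderState is hypothetical embedder code.
    struct EmbedderState { int request_count; };

    v8::Isolate* isolate = v8::Isolate::New();
    isolate->SetData(new EmbedderState());

    // Later, anywhere the isolate pointer is available:
    EmbedderState* state = static_cast<EmbedderState*>(isolate->GetData());
    if (state != NULL) state->request_count++;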
@@ -2639,6 +2774,50 @@
 };
 
 
+class StartupData {
+ public:
+  enum CompressionAlgorithm {
+    kUncompressed,
+    kBZip2
+  };
+
+  const char* data;
+  int compressed_size;
+  int raw_size;
+};
+
+
+/**
+ * A helper class for driving V8 startup data decompression.  It is based on
+ * "CompressedStartupData" API functions from the V8 class.  It isn't mandatory
+ * for an embedder to use this class; instead, the API functions can be used
+ * directly.
+ *
+ * For an example of using this class, see the "shell.cc" sample application.
+ */
+class V8EXPORT StartupDataDecompressor {  // NOLINT
+ public:
+  StartupDataDecompressor();
+  virtual ~StartupDataDecompressor();
+  int Decompress();
+
+ protected:
+  virtual int DecompressData(char* raw_data,
+                             int* raw_data_size,
+                             const char* compressed_data,
+                             int compressed_data_size) = 0;
+
+ private:
+  char** raw_data;
+};
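
A hedged sketch of a concrete decompressor (the use of libbz2 and BZ2_bzBuffToBuffDecompress is an assumption about the embedder's build, matching the kBZip2 algorithm tag; it is not part of the V8 API):

    // Sketch only; assumes the embedder links against libbz2.
    #include <bzlib.h>

    class BZip2Decompressor : public v8::StartupDataDecompressor {
     protected:
      virtual int DecompressData(char* raw_data,
                                 int* raw_data_size,
                                 const char* compressed_data,
                                 int compressed_data_size) {
        unsigned int dest_len = static_cast<unsigned int>(*raw_data_size);
        int result = BZ2_bzBuffToBuffDecompress(
            raw_data, &dest_len,
            const_cast<char*>(compressed_data), compressed_data_size,
            0, 0);
        *raw_data_size = static_cast<int>(dest_len);
        return result;  // BZ_OK (0) on success
      }
    };

    // Before v8::V8::Initialize():
    //   BZip2Decompressor decompressor;
    //   if (decompressor.Decompress() != 0) { /* handle the error */ }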
+
+
+/**
+ * EntropySource is used as a callback function when v8 needs a source
+ * of entropy.
+ */
+typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
+
 /**
  * Container class for static utility functions.
  */
@@ -2648,13 +2827,20 @@
   static void SetFatalErrorHandler(FatalErrorCallback that);
 
   /**
+   * Set the callback to invoke to check if code generation from
+   * strings should be allowed.
+   */
+  static void SetAllowCodeGenerationFromStringsCallback(
+      AllowCodeGenerationFromStringsCallback that);
+
+  /**
    * Ignore out-of-memory exceptions.
    *
    * V8 running out of memory is treated as a fatal error by default.
    * This means that the fatal error handler is called and that V8 is
    * terminated.
    *
-   * IgnoreOutOfMemoryException can be used to not treat a
+   * IgnoreOutOfMemoryException can be used to not treat an
    * out-of-memory situation as a fatal error.  This way, the contexts
    * that did not cause the out of memory problem might be able to
    * continue execution.
@@ -2668,9 +2854,33 @@
   static bool IsDead();
 
   /**
+   * The following 4 functions are to be used when V8 is built with
+   * the 'compress_startup_data' flag enabled. In this case, the
+   * embedder must decompress startup data prior to initializing V8.
+   *
+   * This is how the interaction with V8 should look:
+   *   int compressed_data_count = v8::V8::GetCompressedStartupDataCount();
+   *   v8::StartupData* compressed_data =
+   *     new v8::StartupData[compressed_data_count];
+   *   v8::V8::GetCompressedStartupData(compressed_data);
+   *   ... decompress data (compressed_data can be updated in-place) ...
+   *   v8::V8::SetDecompressedStartupData(compressed_data);
+   *   ... now V8 can be initialized
+   *   ... make sure the decompressed data stays valid until V8 shutdown
+   *
+   * A helper class StartupDataDecompressor is provided. It implements
+   * the protocol of the interaction described above, and can be used in
+   * most cases instead of calling these API functions directly.
+   */
+  static StartupData::CompressionAlgorithm GetCompressedStartupDataAlgorithm();
+  static int GetCompressedStartupDataCount();
+  static void GetCompressedStartupData(StartupData* compressed_data);
+  static void SetDecompressedStartupData(StartupData* decompressed_data);
+
+  /**
    * Adds a message listener.
    *
-   * The same message listener can be added more than once and it that
+   * The same message listener can be added more than once and in that
    * case it will be called more than once for each message.
    */
   static bool AddMessageListener(MessageCallback that,
@@ -2833,6 +3043,12 @@
   static bool Initialize();
 
   /**
+   * Allows the host application to provide a callback which can be used
+   * as a source of entropy for random number generators.
+   */
+  static void SetEntropySource(EntropySource source);
+
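
A minimal sketch of an entropy callback (hedged: reading /dev/urandom is a POSIX-specific choice by the embedder, not something V8 requires):

    // Sketch only; POSIX-specific.
    #include <stdio.h>

    static bool UrandomEntropySource(unsigned char* buffer, size_t length) {
      FILE* fp = fopen("/dev/urandom", "rb");
      if (fp == NULL) return false;
      size_t read_bytes = fread(buffer, 1, length, fp);
      fclose(fp);
      return read_bytes == length;
    }

    // Before v8::V8::Initialize():
    //   v8::V8::SetEntropySource(UrandomEntropySource);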
+  /**
    * Adjusts the amount of registered external memory.  Used to give
    * V8 an indication of the amount of externally allocated memory
    * that is kept alive by JavaScript objects.  V8 uses this to decide
@@ -2871,65 +3087,6 @@
   static bool IsProfilerPaused();
 
   /**
-   * Resumes specified profiler modules. Can be called several times to
-   * mark the opening of a profiler events block with the given tag.
-   *
-   * "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
-   * See ProfilerModules enum.
-   *
-   * \param flags Flags specifying profiler modules.
-   * \param tag Profile tag.
-   */
-  static void ResumeProfilerEx(int flags, int tag = 0);
-
-  /**
-   * Pauses specified profiler modules. Each call to "PauseProfilerEx" closes
-   * a block of profiler events opened by a call to "ResumeProfilerEx" with the
-   * same tag value. There is no need for blocks to be properly nested.
-   * The profiler is paused when the last opened block is closed.
-   *
-   * "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
-   * See ProfilerModules enum.
-   *
-   * \param flags Flags specifying profiler modules.
-   * \param tag Profile tag.
-   */
-  static void PauseProfilerEx(int flags, int tag = 0);
-
-  /**
-   * Returns active (resumed) profiler modules.
-   * See ProfilerModules enum.
-   *
-   * \returns active profiler modules.
-   */
-  static int GetActiveProfilerModules();
-
-  /**
-   * If logging is performed into a memory buffer (via --logfile=*), allows to
-   * retrieve previously written messages. This can be used for retrieving
-   * profiler log data in the application. This function is thread-safe.
-   *
-   * Caller provides a destination buffer that must exist during GetLogLines
-   * call. Only whole log lines are copied into the buffer.
-   *
-   * \param from_pos specified a point in a buffer to read from, 0 is the
-   *   beginning of a buffer. It is assumed that caller updates its current
-   *   position using returned size value from the previous call.
-   * \param dest_buf destination buffer for log data.
-   * \param max_size size of the destination buffer.
-   * \returns actual size of log data copied into buffer.
-   */
-  static int GetLogLines(int from_pos, char* dest_buf, int max_size);
-
-  /**
-   * The minimum allowed size for a log lines buffer.  If the size of
-   * the buffer given will not be enough to hold a line of the maximum
-   * length, an attempt to find a log line end in GetLogLines will
-   * fail, and an empty result will be returned.
-   */
-  static const int kMinimumSizeForLogLinesBuffer = 2048;
-
-  /**
    * Retrieve the V8 thread id of the calling thread.
    *
    * The thread id for a thread should only be retrieved after the V8
@@ -2950,7 +3107,7 @@
    * The termination is achieved by throwing an exception that is
    * uncatchable by JavaScript exception handlers.  Termination
    * exceptions act as if they were caught by a C++ TryCatch exception
-   * handlers.  If forceful termination is used, any C++ TryCatch
+   * handler.  If forceful termination is used, any C++ TryCatch
    * exception handler that catches an exception should check if that
    * exception is a termination exception and immediately return if
    * that is the case.  Returning immediately in that case will
@@ -2982,8 +3139,10 @@
    * because of a call to TerminateExecution.  In that case there are
    * still JavaScript frames on the stack and the termination
    * exception is still active.
+   *
+   * \param isolate The isolate in which to check.
    */
-  static bool IsExecutionTerminating();
+  static bool IsExecutionTerminating(Isolate* isolate = NULL);
 
   /**
    * Releases any resources used by v8 and stops any utility threads
@@ -3034,6 +3193,7 @@
                        void* data,
                        WeakReferenceCallback);
   static void ClearWeak(internal::Object** global_handle);
+  static void MarkIndependent(internal::Object** global_handle);
   static bool IsGlobalNearDeath(internal::Object** global_handle);
   static bool IsGlobalWeak(internal::Object** global_handle);
   static void SetWrapperClassId(internal::Object** global_handle,
@@ -3051,7 +3211,6 @@
  */
 class V8EXPORT TryCatch {
  public:
-
   /**
    * Creates a new try/catch block and registers it with v8.
    */
@@ -3143,6 +3302,7 @@
   void SetCaptureMessage(bool value);
 
  private:
+  v8::internal::Isolate* isolate_;
   void* next_;
   void* exception_;
   void* message_;
@@ -3155,7 +3315,7 @@
 };
 
 
-// --- C o n t e x t ---
+// --- Context ---
 
 
 /**
@@ -3291,6 +3451,21 @@
   Local<Value> GetData();
 
   /**
+   * Control whether code generation from strings is allowed. Calling
+   * this method with false will disable 'eval' and the 'Function'
+   * constructor for code running in this context. If 'eval' or the
+   * 'Function' constructor is used, an exception will be thrown.
+   *
+   * If code generation from strings is not allowed, the
+   * V8::AllowCodeGenerationFromStrings callback will be invoked, if
+   * set, before blocking the call to 'eval' or the 'Function'
+   * constructor. If that callback returns true, the call will be
+   * allowed; otherwise an exception will be thrown. If no callback is
+   * set, an exception will be thrown.
+   */
+  void AllowCodeGenerationFromStrings(bool allow);
+
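
A minimal sketch combining the per-context switch with the global callback (hedged: the always-refuse policy is just a hypothetical example):

    // Sketch only; the policy here is a hypothetical example.
    static bool RefuseCodeGeneration(v8::Local<v8::Context> context) {
      return false;  // never allow 'eval' / Function() in locked-down contexts
    }

    // At startup:
    //   v8::V8::SetAllowCodeGenerationFromStringsCallback(RefuseCodeGeneration);
    // Per context:
    //   context->AllowCodeGenerationFromStrings(false);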
+  /**
    * Stack-allocated class which sets the execution context for all
    * operations executed within a local scope.
    */
@@ -3321,41 +3496,37 @@
  * to the user of V8 to ensure (perhaps with locking) that this
  * constraint is not violated.
  *
- * More then one thread and multiple V8 isolates can be used
- * without any locking if each isolate is created and accessed
- * by a single thread only. For example, one thread can use
- * multiple isolates or multiple threads can each create and run
- * their own isolate.
+ * v8::Locker is a scoped lock object. While it's active (i.e. between its
+ * construction and destruction) the current thread is allowed to use the
+ * locked isolate. V8 guarantees that an isolate can be locked by at most
+ * one thread at any time. In other words, the scope of a v8::Locker is a
+ * critical section.
  *
- * If you wish to start using V8 isolate in more then one thread
- * you can do this by constructing a v8::Locker object to guard
- * access to the isolate. After the code using V8 has completed
- * for the current thread you can call the destructor.  This can
- * be combined with C++ scope-based construction as follows
- * (assumes the default isolate that is used if not specified as
- * a parameter for the Locker):
- *
- * \code
+ * Sample usage:
+ * \code
  * ...
  * {
- *   v8::Locker locker;
+ *   v8::Locker locker(isolate);
+ *   v8::Isolate::Scope isolate_scope(isolate);
  *   ...
- *   // Code using V8 goes here.
+ *   // Code using V8 and isolate goes here.
  *   ...
  * } // Destructor called here
  * \endcode
  *
- * If you wish to stop using V8 in a thread A you can do this by either
+ * If you wish to stop using V8 in a thread A you can do this either
  * by destroying the v8::Locker object as above or by constructing a
  * v8::Unlocker object:
  *
  * \code
  * {
- *   v8::Unlocker unlocker;
+ *   isolate->Exit();
+ *   v8::Unlocker unlocker(isolate);
  *   ...
  *   // Code not using V8 goes here while V8 can run in another thread.
  *   ...
  * } // Destructor called here.
+ * isolate->Enter();
  * \endcode
  *
  * The Unlocker object is intended for use in a long-running callback
@@ -3375,38 +3546,51 @@
  * \code
  * // V8 not locked.
  * {
- *   v8::Locker locker;
+ *   v8::Locker locker(isolate);
+ *   Isolate::Scope isolate_scope(isolate);
  *   // V8 locked.
  *   {
- *     v8::Locker another_locker;
+ *     v8::Locker another_locker(isolate);
  *     // V8 still locked (2 levels).
  *     {
- *       v8::Unlocker unlocker;
+ *       isolate->Exit();
+ *       v8::Unlocker unlocker(isolate);
  *       // V8 not locked.
  *     }
+ *     isolate->Enter();
  *     // V8 locked again (2 levels).
  *   }
  *   // V8 still locked (1 level).
  * }
  * // V8 Now no longer locked.
  * \endcode
+ *
+ *
  */
 class V8EXPORT Unlocker {
  public:
-  Unlocker();
+  /**
+   * Initialize Unlocker for a given Isolate. NULL means default isolate.
+   */
+  explicit Unlocker(Isolate* isolate = NULL);
   ~Unlocker();
+ private:
+  internal::Isolate* isolate_;
 };
 
 
 class V8EXPORT Locker {
  public:
-  Locker();
+  /**
+   * Initialize Locker for a given Isolate. NULL means default isolate.
+   */
+  explicit Locker(Isolate* isolate = NULL);
   ~Locker();
 
   /**
    * Start preemption.
    *
-   * When preemption is started, a timer is fired every n milli seconds
+   * When preemption is started, a timer is fired every n milliseconds
    * that will switch between multiple threads that are in contention
    * for the V8 lock.
    */
@@ -3418,18 +3602,20 @@
   static void StopPreemption();
 
   /**
-   * Returns whether or not the locker is locked by the current thread.
+   * Returns whether or not the locker for a given isolate, or the default
+   * isolate if NULL is given, is locked by the current thread.
    */
-  static bool IsLocked();
+  static bool IsLocked(Isolate* isolate = NULL);
 
   /**
    * Returns whether v8::Locker is being used by this V8 instance.
    */
-  static bool IsActive() { return active_; }
+  static bool IsActive();
 
  private:
   bool has_lock_;
   bool top_level_;
+  internal::Isolate* isolate_;
 
   static bool active_;
 
@@ -3486,7 +3672,7 @@
 };
 
 
-// --- I m p l e m e n t a t i o n ---
+// --- Implementation ---
 
 
 namespace internal {
@@ -3571,7 +3757,6 @@
  */
 class Internals {
  public:
-
   // These values match non-compiler-dependent values defined within
   // the implementation of v8.
   static const int kHeapObjectMapOffset = 0;
@@ -3579,14 +3764,14 @@
   static const int kStringResourceOffset =
       InternalConstants<kApiPointerSize>::kStringResourceOffset;
 
-  static const int kProxyProxyOffset = kApiPointerSize;
+  static const int kForeignAddressOffset = kApiPointerSize;
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;
 
-  static const int kJSObjectType = 0xa0;
+  static const int kJSObjectType = 0xa3;
   static const int kFirstNonstringType = 0x80;
-  static const int kProxyType = 0x85;
+  static const int kForeignType = 0x85;
 
   static inline bool HasHeapObjectTag(internal::Object* value) {
     return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
@@ -3615,8 +3800,8 @@
   static inline void* GetExternalPointer(internal::Object* obj) {
     if (HasSmiTag(obj)) {
       return GetExternalPointerFromSmi(obj);
-    } else if (GetInstanceType(obj) == kProxyType) {
-      return ReadField<void*>(obj, kProxyProxyOffset);
+    } else if (GetInstanceType(obj) == kForeignType) {
+      return ReadField<void*>(obj, kForeignAddressOffset);
     } else {
       return NULL;
     }
@@ -3646,10 +3831,6 @@
 
 
 template <class T>
-Handle<T>::Handle() : val_(0) { }
-
-
-template <class T>
 Local<T>::Local() : Handle<T>() { }
 
 
@@ -3711,6 +3892,11 @@
 }
 
 template <class T>
+void Persistent<T>::MarkIndependent() {
+  V8::MarkIndependent(reinterpret_cast<internal::Object**>(**this));
+}
+
+template <class T>
 void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
   V8::SetWrapperClassId(reinterpret_cast<internal::Object**>(**this), class_id);
 }
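
A hedged sketch of MarkIndependent combined with MakeWeak (assumes an entered Context and HandleScope; the weak callback and the int payload are hypothetical embedder code):

    // Sketch only.
    static void WrapperWeakCallback(v8::Persistent<v8::Value> handle,
                                    void* data) {
      delete static_cast<int*>(data);  // free the native payload
      handle.Dispose();
    }

    v8::Persistent<v8::Object> wrapper =
        v8::Persistent<v8::Object>::New(v8::Object::New());
    wrapper.MakeWeak(new int(0), WrapperWeakCallback);
    wrapper.MarkIndependent();  // eligible for collection by the scavenger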
@@ -3923,6 +4109,30 @@
 }
 
 
+StringObject* StringObject::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<StringObject*>(value);
+}
+
+
+NumberObject* NumberObject::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<NumberObject*>(value);
+}
+
+
+BooleanObject* BooleanObject::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<BooleanObject*>(value);
+}
+
+
 RegExp* RegExp::Cast(v8::Value* value) {
 #ifdef V8_ENABLE_CHECKS
   CheckCast(value);
diff --git a/preparser/SConscript b/preparser/SConscript
index 1d51e82..10b3953 100644
--- a/preparser/SConscript
+++ b/preparser/SConscript
@@ -26,10 +26,10 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 from os.path import join
-Import('context')
+Import('context tools')
 
 def ConfigureObjectFiles():
-  env = Environment()
+  env = Environment(tools=tools)
   env.Replace(**context.flags['preparser'])
   context.ApplyEnvOverrides(env)
   return env.Object('preparser-process.cc')
diff --git a/preparser/preparser-process.cc b/preparser/preparser-process.cc
index fb6e386..e67851c 100644
--- a/preparser/preparser-process.cc
+++ b/preparser/preparser-process.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,23 +28,38 @@
 #include <stdlib.h>
 #include <stdarg.h>
 #include <stdio.h>
+#include <string.h>
 
 #include "../include/v8stdint.h"
 #include "../include/v8-preparser.h"
 
+#include "../src/preparse-data-format.h"
+
+namespace i = v8::internal;
+
 // This file is only used for testing the stand-alone preparser
 // library.
-// The first (and only) argument must be the path of a JavaScript file.
-// This file is preparsed and the resulting preparser data is written
-// to stdout. Diagnostic output is output on stderr.
-// The file must contain only ASCII characters (UTF-8 isn't supported).
+// The first argument must be the path of a JavaScript source file, or
+// the flag "-e", in which case the next argument is the source of a
+// JavaScript program.
+// Optionally, this can be followed by the word "throws" (case sensitive),
+// which signals that the parsing is expected to throw; the default is
+// to expect the parsing not to throw.
+// The command line can further be followed by a message text (the
+// *type* of the exception to throw), and even more optionally, the
+// start and end position reported with the exception.
+//
+// This source file is preparsed and tested against the expectations, and if
+// successful, the resulting preparser data is written to stdout.
+// Diagnostic output is output on stderr.
+// The source file must contain only ASCII characters (UTF-8 isn't supported).
 // The file is read into memory, so it should have a reasonable size.
 
 
 // Adapts an ASCII string to the UnicodeInputStream interface.
 class AsciiInputStream : public v8::UnicodeInputStream {
  public:
-  AsciiInputStream(uint8_t* buffer, size_t length)
+  AsciiInputStream(const uint8_t* buffer, size_t length)
       : buffer_(buffer),
         end_offset_(static_cast<int>(length)),
         offset_(0) { }
@@ -97,73 +112,268 @@
 }
 
 
+class PreparseDataInterpreter {
+ public:
+  PreparseDataInterpreter(const uint8_t* data, int length)
+      : data_(data), length_(length), message_(NULL) { }
+
+  ~PreparseDataInterpreter() {
+    if (message_ != NULL) delete[] message_;
+  }
+
+  bool valid() {
+    int header_length =
+      i::PreparseDataConstants::kHeaderSize * sizeof(int);  // NOLINT
+    return length_ >= header_length;
+  }
+
+  bool throws() {
+    return valid() &&
+        word(i::PreparseDataConstants::kHasErrorOffset) != 0;
+  }
+
+  const char* message() {
+    if (message_ != NULL) return message_;
+    if (!throws()) return NULL;
+    int text_pos = i::PreparseDataConstants::kHeaderSize +
+                   i::PreparseDataConstants::kMessageTextPos;
+    int length = word(text_pos);
+    char* buffer = new char[length + 1];
+    for (int i = 1; i <= length; i++) {
+      int character = word(text_pos + i);
+      buffer[i - 1] = character;
+    }
+    buffer[length] = '\0';
+    message_ = buffer;
+    return buffer;
+  }
+
+  int beg_pos() {
+    if (!throws()) return -1;
+    return word(i::PreparseDataConstants::kHeaderSize +
+                i::PreparseDataConstants::kMessageStartPos);
+  }
+
+  int end_pos() {
+    if (!throws()) return -1;
+    return word(i::PreparseDataConstants::kHeaderSize +
+                i::PreparseDataConstants::kMessageEndPos);
+  }
+
+ private:
+  int word(int offset) {
+    const int* word_data = reinterpret_cast<const int*>(data_);
+    if (word_data + offset < reinterpret_cast<const int*>(data_ + length_)) {
+      return word_data[offset];
+    }
+    return -1;
+  }
+
+  const uint8_t* const data_;
+  const int length_;
+  const char* message_;
+};
+
+
 template <typename T>
 class ScopedPointer {
  public:
+  explicit ScopedPointer() : pointer_(NULL) {}
   explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
-  ~ScopedPointer() { delete[] pointer_; }
+  ~ScopedPointer() { if (pointer_ != NULL) delete[] pointer_; }
   T& operator[](int index) { return pointer_[index]; }
   T* operator*() { return pointer_ ;}
+  T* operator=(T* new_value) {
+    if (pointer_ != NULL) delete[] pointer_;
+    pointer_ = new_value;
+    return new_value;
+  }
  private:
   T* pointer_;
 };
 
 
-int main(int argc, char* argv[]) {
-  // Check for filename argument.
-  if (argc < 2) {
-    fprintf(stderr, "ERROR: No filename on command line.\n");
-    fflush(stderr);
-    return EXIT_FAILURE;
-  }
-  const char* filename = argv[1];
 
-  // Open JS file.
-  FILE* input = fopen(filename, "rb");
-  if (input == NULL) {
-    perror("ERROR: Error opening file");
+void fail(v8::PreParserData* data, const char* message, ...) {
+  va_list args;
+  va_start(args, message);
+  vfprintf(stderr, message, args);
+  va_end(args);
+  fflush(stderr);
+  // Print preparser data to stdout, guarding against calls such as
+  // fail(NULL, ...) that happen before any preparser data exists.
+  uint32_t size = (data != NULL) ? data->size() : 0;
+  fprintf(stderr, "LOG: data size: %u\n", size);
+  if (data != NULL && !WriteBuffer(stdout, data->data(), size)) {
+    perror("ERROR: Writing data");
     fflush(stderr);
-    return EXIT_FAILURE;
   }
+  exit(EXIT_FAILURE);
+}
 
-  // Find length of JS file.
-  if (fseek(input, 0, SEEK_END) != 0) {
-    perror("ERROR: Error during seek");
-    fflush(stderr);
-    return EXIT_FAILURE;
-  }
-  size_t length = static_cast<size_t>(ftell(input));
-  rewind(input);
 
-  // Read JS file into memory buffer.
-  ScopedPointer<uint8_t> buffer(new uint8_t[length]);
-  if (!ReadBuffer(input, *buffer, length)) {
-    perror("ERROR: Reading file");
-    fflush(stderr);
-    return EXIT_FAILURE;
+bool IsFlag(const char* arg) {
+  // Anything starting with '-' is considered a flag.
+  // It's summarily ignored for now.
+  return arg[0] == '-';
+}
+
+
+struct ExceptionExpectation {
+  ExceptionExpectation()
+      : throws(false), type(NULL), beg_pos(-1), end_pos(-1) { }
+  bool throws;
+  const char* type;
+  int beg_pos;
+  int end_pos;
+};
+
+
+void CheckException(v8::PreParserData* data,
+                    ExceptionExpectation* expects) {
+  PreparseDataInterpreter reader(data->data(), data->size());
+  if (expects->throws) {
+    if (!reader.throws()) {
+      if (expects->type == NULL) {
+        fail(data, "Didn't throw as expected\n");
+      } else {
+        fail(data, "Didn't throw \"%s\" as expected\n", expects->type);
+      }
+    }
+    if (expects->type != NULL) {
+      const char* actual_message = reader.message();
+      if (strcmp(expects->type, actual_message)) {
+        fail(data, "Wrong error message. Expected <%s>, found <%s> at %d..%d\n",
+             expects->type, actual_message, reader.beg_pos(), reader.end_pos());
+      }
+    }
+    if (expects->beg_pos >= 0) {
+      if (expects->beg_pos != reader.beg_pos()) {
+        fail(data, "Wrong error start position: Expected %i, found %i\n",
+             expects->beg_pos, reader.beg_pos());
+      }
+    }
+    if (expects->end_pos >= 0) {
+      if (expects->end_pos != reader.end_pos()) {
+        fail(data, "Wrong error end position: Expected %i, found %i\n",
+             expects->end_pos, reader.end_pos());
+      }
+    }
+  } else if (reader.throws()) {
+    const char* message = reader.message();
+    fail(data, "Throws unexpectedly with message: %s at location %d-%d\n",
+         message, reader.beg_pos(), reader.end_pos());
   }
-  fclose(input);
+}
+
+
+ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
+  ExceptionExpectation expects;
+
+  // Parse exception expectations from (the remainder of) the command line.
+  int arg_index = 0;
+  // Skip any flags.
+  while (argc > arg_index && IsFlag(argv[arg_index])) arg_index++;
+  if (argc > arg_index) {
+    if (strncmp("throws", argv[arg_index], 7)) {
+      // First argument after filename, if present, must be the verbatim
+      // "throws", marking that the preparsing should fail with an exception.
+      fail(NULL, "ERROR: Extra arguments not prefixed by \"throws\".\n");
+    }
+    expects.throws = true;
+    do {
+      arg_index++;
+    } while (argc > arg_index && IsFlag(argv[arg_index]));
+    if (argc > arg_index) {
+      // Next argument is the exception type identifier.
+      expects.type = argv[arg_index];
+      do {
+        arg_index++;
+      } while (argc > arg_index && IsFlag(argv[arg_index]));
+      if (argc > arg_index) {
+        expects.beg_pos = atoi(argv[arg_index]);  // NOLINT
+        do {
+          arg_index++;
+        } while (argc > arg_index && IsFlag(argv[arg_index]));
+        if (argc > arg_index) {
+          expects.end_pos = atoi(argv[arg_index]);  // NOLINT
+        }
+      }
+    }
+  }
+  return expects;
+}
+
+
+int main(int argc, const char* argv[]) {
+  // Parse command line.
+  // Format:  preparser (<scriptfile> | -e "<source>")
+  //                    ["throws" [<exn-type> [<start> [<end>]]]]
+  // Any flags (except an initial -e) are ignored.
+
+  // Check for mandatory filename argument.
+  int arg_index = 1;
+  if (argc <= arg_index) {
+    fail(NULL, "ERROR: No filename on command line.\n");
+  }
+  const uint8_t* source = NULL;
+  const char* filename = argv[arg_index];
+  if (!strcmp(filename, "-e")) {
+    arg_index++;
+    if (argc <= arg_index) {
+      fail(NULL, "ERROR: No source after -e on command line.\n");
+    }
+    source = reinterpret_cast<const uint8_t*>(argv[arg_index]);
+  }
+  // Check remainder of command line for exception expectations.
+  arg_index++;
+  ExceptionExpectation expects =
+      ParseExpectation(argc - arg_index, argv + arg_index);
+
+  ScopedPointer<uint8_t> buffer;
+  size_t length;
+
+  if (source == NULL) {
+    // Open JS file.
+    FILE* input = fopen(filename, "rb");
+    if (input == NULL) {
+      perror("ERROR: Error opening file");
+      fflush(stderr);
+      return EXIT_FAILURE;
+    }
+    // Find length of JS file.
+    if (fseek(input, 0, SEEK_END) != 0) {
+      perror("ERROR: Error during seek");
+      fflush(stderr);
+      return EXIT_FAILURE;
+    }
+    length = static_cast<size_t>(ftell(input));
+    rewind(input);
+    // Read JS file into memory buffer.
+    buffer = new uint8_t[length];
+    if (!ReadBuffer(input, *buffer, length)) {
+      perror("ERROR: Reading file");
+      fflush(stderr);
+      return EXIT_FAILURE;
+    }
+    fclose(input);
+    source = *buffer;
+  } else {
+    length = strlen(reinterpret_cast<const char*>(source));
+  }
 
   // Preparse input file.
-  AsciiInputStream input_buffer(*buffer, length);
+  AsciiInputStream input_buffer(source, length);
   size_t kMaxStackSize = 64 * 1024 * sizeof(void*);  // NOLINT
   v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
 
   // Fail if stack overflow.
   if (data.stack_overflow()) {
-    fprintf(stderr, "ERROR: Stack overflow\n");
-    fflush(stderr);
-    return EXIT_FAILURE;
+    fail(&data, "ERROR: Stack overflow\n");
   }
 
-  // Print preparser data to stdout.
-  uint32_t size = data.size();
-  fprintf(stderr, "LOG: Success, data size: %u\n", size);
-  fflush(stderr);
-  if (!WriteBuffer(stdout, data.data(), size)) {
-    perror("ERROR: Writing data");
-    return EXIT_FAILURE;
-  }
+  // Check that the expected exception is thrown, if an exception is
+  // expected.
+  CheckException(&data, &expects);
 
   return EXIT_SUCCESS;
 }
diff --git a/preparser/preparser.gyp b/preparser/preparser.gyp
new file mode 100644
index 0000000..0b03382
--- /dev/null
+++ b/preparser/preparser.gyp
@@ -0,0 +1,42 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'includes': ['../build/common.gypi'],
+  'targets': [
+    {
+      'target_name': 'preparser',
+      'type': 'executable',
+      'dependencies': [
+        '../tools/gyp/v8.gyp:preparser_lib',
+      ],
+      'sources': [
+        'preparser-process.cc',
+      ],
+    },
+  ],
+}
diff --git a/samples/SConscript b/samples/SConscript
index 31990b6..84c48c9 100644
--- a/samples/SConscript
+++ b/samples/SConscript
@@ -26,10 +26,10 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 from os.path import join
-Import('sample context')
+Import('sample context tools')
 
 def ConfigureObjectFiles():
-  env = Environment()
+  env = Environment(tools=tools)
   env.Replace(**context.flags['sample'])
   context.ApplyEnvOverrides(env)
   return env.Object(sample + '.cc')
diff --git a/samples/process.cc b/samples/process.cc
index 6be4ea5..c0cee4c 100644
--- a/samples/process.cc
+++ b/samples/process.cc
@@ -30,6 +30,10 @@
 #include <string>
 #include <map>
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#error Using compressed startup data is not supported for this sample
+#endif
+
 using namespace std;
 using namespace v8;
 
@@ -73,7 +77,6 @@
  */
 class JsHttpRequestProcessor : public HttpRequestProcessor {
  public:
-
   // Creates a new processor that processes requests by invoking the
   // Process function of the JavaScript script given as an argument.
   explicit JsHttpRequestProcessor(Handle<String> script) : script_(script) { }
@@ -84,7 +87,6 @@
   virtual bool Process(HttpRequest* req);
 
  private:
-
   // Execute the script associated with this processor and extract the
   // Process function.  Returns true if this succeeded, otherwise false.
   bool ExecuteScript(Handle<String> script);
@@ -531,7 +533,7 @@
                   string* file) {
   for (int i = 1; i < argc; i++) {
     string arg = argv[i];
-    int index = arg.find('=', 0);
+    size_t index = arg.find('=', 0);
     if (index == string::npos) {
       *file = arg;
     } else {
diff --git a/samples/samples.gyp b/samples/samples.gyp
index f383ee2..55b2a98 100644
--- a/samples/samples.gyp
+++ b/samples/samples.gyp
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -26,23 +26,25 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
+  'includes': ['../build/common.gypi'],
+  'target_defaults': {
+    'type': 'executable',
+    'dependencies': [
+      '../tools/gyp/v8.gyp:v8',
+    ],
+    'include_dirs': [
+      '../include',
+    ],
+  },
   'targets': [
     {
       'target_name': 'shell',
-      'type': 'executable',
-      'dependencies': [
-        '../tools/gyp/v8.gyp:v8',
-      ],
       'sources': [
         'shell.cc',
       ],
     },
     {
       'target_name': 'process',
-      'type': 'executable',
-      'dependencies': [
-        '../tools/gyp/v8.gyp:v8',
-      ],
       'sources': [
         'process.cc',
       ],
diff --git a/samples/shell.cc b/samples/shell.cc
index 222eeda..b40eca2 100644
--- a/samples/shell.cc
+++ b/samples/shell.cc
@@ -26,36 +26,28 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <v8.h>
-#include <v8-testing.h>
 #include <assert.h>
 #include <fcntl.h>
 #include <string.h>
 #include <stdio.h>
 #include <stdlib.h>
 
-// When building with V8 in a shared library we cannot use functions which
-// is not explicitly a part of the public V8 API. This extensive use of
-// #ifndef USING_V8_SHARED/#endif is a hack until we can resolve whether to
-// still use the shell sample for testing or change to use the developer
-// shell d8 TODO(1272).
-#ifndef USING_V8_SHARED
-#include "../src/v8.h"
-#endif  // USING_V8_SHARED
-
-#if !defined(_WIN32) && !defined(_WIN64)
-#include <unistd.h>  // NOLINT
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#error Using compressed startup data is not supported for this sample
 #endif
 
-static void ExitShell(int exit_code) {
-  // Use _exit instead of exit to avoid races between isolate
-  // threads and static destructors.
-  fflush(stdout);
-  fflush(stderr);
-  _exit(exit_code);
-}
+/**
+ * This sample program shows how to implement a simple JavaScript shell
+ * based on V8.  This includes initializing V8 with command line options,
+ * creating global functions, and compiling and executing strings.
+ *
+ * For a more sophisticated shell, consider using the debug shell D8.
+ */
+
 
 v8::Persistent<v8::Context> CreateShellContext();
 void RunShell(v8::Handle<v8::Context> context);
+int RunMain(int argc, char* argv[]);
 bool ExecuteString(v8::Handle<v8::String> source,
                    v8::Handle<v8::Value> name,
                    bool print_result,
@@ -65,258 +57,27 @@
 v8::Handle<v8::Value> Load(const v8::Arguments& args);
 v8::Handle<v8::Value> Quit(const v8::Arguments& args);
 v8::Handle<v8::Value> Version(const v8::Arguments& args);
-v8::Handle<v8::Value> Int8Array(const v8::Arguments& args);
-v8::Handle<v8::Value> Uint8Array(const v8::Arguments& args);
-v8::Handle<v8::Value> Int16Array(const v8::Arguments& args);
-v8::Handle<v8::Value> Uint16Array(const v8::Arguments& args);
-v8::Handle<v8::Value> Int32Array(const v8::Arguments& args);
-v8::Handle<v8::Value> Uint32Array(const v8::Arguments& args);
-v8::Handle<v8::Value> Float32Array(const v8::Arguments& args);
-v8::Handle<v8::Value> PixelArray(const v8::Arguments& args);
 v8::Handle<v8::String> ReadFile(const char* name);
 void ReportException(v8::TryCatch* handler);
 
 
-static bool last_run = true;
-
-class SourceGroup {
- public:
-  SourceGroup() :
-#ifndef USING_V8_SHARED
-                  next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
-                  done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
-                  thread_(NULL),
-#endif  // USING_V8_SHARED
-                  argv_(NULL),
-                  begin_offset_(0),
-                  end_offset_(0) { }
-
-  void Begin(char** argv, int offset) {
-    argv_ = const_cast<const char**>(argv);
-    begin_offset_ = offset;
-  }
-
-  void End(int offset) { end_offset_ = offset; }
-
-  void Execute() {
-    for (int i = begin_offset_; i < end_offset_; ++i) {
-      const char* arg = argv_[i];
-      if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
-        // Execute argument given to -e option directly.
-        v8::HandleScope handle_scope;
-        v8::Handle<v8::String> file_name = v8::String::New("unnamed");
-        v8::Handle<v8::String> source = v8::String::New(argv_[i + 1]);
-        if (!ExecuteString(source, file_name, false, true)) {
-          ExitShell(1);
-          return;
-        }
-        ++i;
-      } else if (arg[0] == '-') {
-        // Ignore other options. They have been parsed already.
-      } else {
-        // Use all other arguments as names of files to load and run.
-        v8::HandleScope handle_scope;
-        v8::Handle<v8::String> file_name = v8::String::New(arg);
-        v8::Handle<v8::String> source = ReadFile(arg);
-        if (source.IsEmpty()) {
-          printf("Error reading '%s'\n", arg);
-          continue;
-        }
-        if (!ExecuteString(source, file_name, false, true)) {
-          ExitShell(1);
-          return;
-        }
-      }
-    }
-  }
-
-#ifndef USING_V8_SHARED
-  void StartExecuteInThread() {
-    if (thread_ == NULL) {
-      thread_ = new IsolateThread(this);
-      thread_->Start();
-    }
-    next_semaphore_->Signal();
-  }
-
-  void WaitForThread() {
-    if (thread_ == NULL) return;
-    if (last_run) {
-      thread_->Join();
-      thread_ = NULL;
-    } else {
-      done_semaphore_->Wait();
-    }
-  }
-#endif  // USING_V8_SHARED
-
- private:
-#ifndef USING_V8_SHARED
-  static v8::internal::Thread::Options GetThreadOptions() {
-    v8::internal::Thread::Options options;
-    options.name = "IsolateThread";
-    // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
-    // which is not enough to parse the big literal expressions used in tests.
-    // The stack size should be at least StackGuard::kLimitSize + some
-    // OS-specific padding for thread startup code.
-    options.stack_size = 2 << 20;  // 2 Mb seems to be enough
-    return options;
-  }
-
-  class IsolateThread : public v8::internal::Thread {
-   public:
-    explicit IsolateThread(SourceGroup* group)
-        : v8::internal::Thread(NULL, GetThreadOptions()), group_(group) {}
-
-    virtual void Run() {
-      group_->ExecuteInThread();
-    }
-
-   private:
-    SourceGroup* group_;
-  };
-
-  void ExecuteInThread() {
-    v8::Isolate* isolate = v8::Isolate::New();
-    do {
-      if (next_semaphore_ != NULL) next_semaphore_->Wait();
-      {
-        v8::Isolate::Scope iscope(isolate);
-        v8::HandleScope scope;
-        v8::Persistent<v8::Context> context = CreateShellContext();
-        {
-          v8::Context::Scope cscope(context);
-          Execute();
-        }
-        context.Dispose();
-      }
-      if (done_semaphore_ != NULL) done_semaphore_->Signal();
-    } while (!last_run);
-    isolate->Dispose();
-  }
-
-  v8::internal::Semaphore* next_semaphore_;
-  v8::internal::Semaphore* done_semaphore_;
-  v8::internal::Thread* thread_;
-#endif  // USING_V8_SHARED
-
-  const char** argv_;
-  int begin_offset_;
-  int end_offset_;
-};
+static bool run_shell;
 
 
-static SourceGroup* isolate_sources = NULL;
-
-
-int RunMain(int argc, char* argv[]) {
+int main(int argc, char* argv[]) {
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+  run_shell = (argc == 1);
   v8::HandleScope handle_scope;
   v8::Persistent<v8::Context> context = CreateShellContext();
-  // Enter the newly created execution environment.
-  context->Enter();
   if (context.IsEmpty()) {
     printf("Error creating context\n");
     return 1;
   }
-
-  bool run_shell = (argc == 1);
-  int num_isolates = 1;
-  for (int i = 1; i < argc; i++) {
-    if (strcmp(argv[i], "--isolate") == 0) {
-#ifndef USING_V8_SHARED
-      ++num_isolates;
-#else  // USING_V8_SHARED
-      printf("Error: --isolate not supported when linked with shared "
-             "library\n");
-      ExitShell(1);
-#endif  // USING_V8_SHARED
-    }
-  }
-  if (isolate_sources == NULL) {
-    isolate_sources = new SourceGroup[num_isolates];
-    SourceGroup* current = isolate_sources;
-    current->Begin(argv, 1);
-    for (int i = 1; i < argc; i++) {
-      const char* str = argv[i];
-      if (strcmp(str, "--isolate") == 0) {
-        current->End(i);
-        current++;
-        current->Begin(argv, i + 1);
-      } else if (strcmp(str, "--shell") == 0) {
-        run_shell = true;
-      } else if (strcmp(str, "-f") == 0) {
-        // Ignore any -f flags for compatibility with the other stand-
-        // alone JavaScript engines.
-        continue;
-      } else if (strncmp(str, "--", 2) == 0) {
-        printf("Warning: unknown flag %s.\nTry --help for options\n", str);
-      }
-    }
-    current->End(argc);
-  }
-#ifndef USING_V8_SHARED
-  for (int i = 1; i < num_isolates; ++i) {
-    isolate_sources[i].StartExecuteInThread();
-  }
-#endif  // USING_V8_SHARED
-  isolate_sources[0].Execute();
+  context->Enter();
+  int result = RunMain(argc, argv);
   if (run_shell) RunShell(context);
-#ifndef USING_V8_SHARED
-  for (int i = 1; i < num_isolates; ++i) {
-    isolate_sources[i].WaitForThread();
-  }
-#endif  // USING_V8_SHARED
-  if (last_run) {
-    delete[] isolate_sources;
-    isolate_sources = NULL;
-  }
   context->Exit();
   context.Dispose();
-  return 0;
-}
-
-
-int main(int argc, char* argv[]) {
-  // Figure out if we're requested to stress the optimization
-  // infrastructure by running tests multiple times and forcing
-  // optimization in the last run.
-  bool FLAG_stress_opt = false;
-  bool FLAG_stress_deopt = false;
-  for (int i = 0; i < argc; i++) {
-    if (strcmp(argv[i], "--stress-opt") == 0) {
-      FLAG_stress_opt = true;
-      argv[i] = NULL;
-    } else if (strcmp(argv[i], "--stress-deopt") == 0) {
-      FLAG_stress_deopt = true;
-      argv[i] = NULL;
-    } else if (strcmp(argv[i], "--noalways-opt") == 0) {
-      // No support for stressing if we can't use --always-opt.
-      FLAG_stress_opt = false;
-      FLAG_stress_deopt = false;
-      break;
-    }
-  }
-
-  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
-  int result = 0;
-  if (FLAG_stress_opt || FLAG_stress_deopt) {
-    v8::Testing::SetStressRunType(FLAG_stress_opt
-                                  ? v8::Testing::kStressTypeOpt
-                                  : v8::Testing::kStressTypeDeopt);
-    int stress_runs = v8::Testing::GetStressRuns();
-    for (int i = 0; i < stress_runs && result == 0; i++) {
-      printf("============ Stress %d/%d ============\n",
-             i + 1, stress_runs);
-      v8::Testing::PrepareStressRun(i);
-      last_run = (i == stress_runs - 1);
-      result = RunMain(argc, argv);
-    }
-    printf("======== Full Deoptimization =======\n");
-    v8::Testing::DeoptimizeAll();
-  } else {
-    result = RunMain(argc, argv);
-  }
   v8::V8::Dispose();
   return result;
 }
@@ -344,24 +105,6 @@
   // Bind the 'version' function
   global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
 
-  // Bind the handlers for external arrays.
-  global->Set(v8::String::New("Int8Array"),
-              v8::FunctionTemplate::New(Int8Array));
-  global->Set(v8::String::New("Uint8Array"),
-              v8::FunctionTemplate::New(Uint8Array));
-  global->Set(v8::String::New("Int16Array"),
-              v8::FunctionTemplate::New(Int16Array));
-  global->Set(v8::String::New("Uint16Array"),
-              v8::FunctionTemplate::New(Uint16Array));
-  global->Set(v8::String::New("Int32Array"),
-              v8::FunctionTemplate::New(Int32Array));
-  global->Set(v8::String::New("Uint32Array"),
-              v8::FunctionTemplate::New(Uint32Array));
-  global->Set(v8::String::New("Float32Array"),
-              v8::FunctionTemplate::New(Float32Array));
-  global->Set(v8::String::New("PixelArray"),
-              v8::FunctionTemplate::New(PixelArray));
-
   return v8::Context::New(NULL, global);
 }
 
@@ -435,7 +178,9 @@
   // If no arguments are given args[0] will yield undefined which
   // converts to the integer value 0.
   int exit_code = args[0]->Int32Value();
-  ExitShell(exit_code);
+  fflush(stdout);
+  fflush(stderr);
+  exit(exit_code);
   return v8::Undefined();
 }
 
@@ -445,78 +190,6 @@
 }
 
 
-void ExternalArrayWeakCallback(v8::Persistent<v8::Value> object, void* data) {
-  free(data);
-  object.Dispose();
-}
-
-
-v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
-                                          v8::ExternalArrayType type,
-                                          int element_size) {
-  if (args.Length() != 1) {
-    return v8::ThrowException(
-        v8::String::New("Array constructor needs one parameter."));
-  }
-  int length = args[0]->Int32Value();
-  void* data = malloc(length * element_size);
-  memset(data, 0, length * element_size);
-  v8::Handle<v8::Object> array = v8::Object::New();
-  v8::Persistent<v8::Object> persistent_array =
-      v8::Persistent<v8::Object>::New(array);
-  persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
-  array->SetIndexedPropertiesToExternalArrayData(data, type, length);
-  array->Set(v8::String::New("length"), v8::Int32::New(length),
-             v8::ReadOnly);
-  array->Set(v8::String::New("BYTES_PER_ELEMENT"),
-             v8::Int32::New(element_size));
-  return array;
-}
-
-
-v8::Handle<v8::Value> Int8Array(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
-}
-
-
-v8::Handle<v8::Value> Uint8Array(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalUnsignedByteArray,
-                             sizeof(uint8_t));
-}
-
-
-v8::Handle<v8::Value> Int16Array(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalShortArray, sizeof(int16_t));
-}
-
-
-v8::Handle<v8::Value> Uint16Array(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalUnsignedShortArray,
-                             sizeof(uint16_t));
-}
-
-v8::Handle<v8::Value> Int32Array(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalIntArray, sizeof(int32_t));
-}
-
-
-v8::Handle<v8::Value> Uint32Array(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalUnsignedIntArray,
-                             sizeof(uint32_t));
-}
-
-
-v8::Handle<v8::Value> Float32Array(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalFloatArray,
-                             sizeof(float));  // NOLINT
-}
-
-
-v8::Handle<v8::Value> PixelArray(const v8::Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalPixelArray, sizeof(uint8_t));
-}
-
-
 // Reads a file into a v8 string.
 v8::Handle<v8::String> ReadFile(const char* name) {
   FILE* file = fopen(name, "rb");
@@ -539,22 +212,52 @@
 }
 
 
+// Process remaining command line arguments and execute files
+int RunMain(int argc, char* argv[]) {
+  for (int i = 1; i < argc; i++) {
+    const char* str = argv[i];
+    if (strcmp(str, "--shell") == 0) {
+      run_shell = true;
+    } else if (strcmp(str, "-f") == 0) {
+      // Ignore any -f flags for compatibility with the other stand-
+      // alone JavaScript engines.
+      continue;
+    } else if (strncmp(str, "--", 2) == 0) {
+      printf("Warning: unknown flag %s.\nTry --help for options\n", str);
+    } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
+      // Execute argument given to -e option directly.
+      v8::Handle<v8::String> file_name = v8::String::New("unnamed");
+      v8::Handle<v8::String> source = v8::String::New(argv[++i]);
+      if (!ExecuteString(source, file_name, false, true)) return 1;
+    } else {
+      // Use all other arguments as names of files to load and run.
+      v8::Handle<v8::String> file_name = v8::String::New(str);
+      v8::Handle<v8::String> source = ReadFile(str);
+      if (source.IsEmpty()) {
+        printf("Error reading '%s'\n", str);
+        continue;
+      }
+      if (!ExecuteString(source, file_name, false, true)) return 1;
+    }
+  }
+  return 0;
+}
+
+
 // The read-eval-execute loop of the shell.
 void RunShell(v8::Handle<v8::Context> context) {
-  printf("V8 version %s\n", v8::V8::GetVersion());
+  printf("V8 version %s [sample shell]\n", v8::V8::GetVersion());
   static const int kBufferSize = 256;
   // Enter the execution environment before evaluating any code.
   v8::Context::Scope context_scope(context);
+  v8::Local<v8::String> name(v8::String::New("(shell)"));
   while (true) {
     char buffer[kBufferSize];
     printf("> ");
     char* str = fgets(buffer, kBufferSize, stdin);
     if (str == NULL) break;
     v8::HandleScope handle_scope;
-    ExecuteString(v8::String::New(str),
-                  v8::String::New("(shell)"),
-                  true,
-                  true);
+    ExecuteString(v8::String::New(str), name, true, true);
   }
   printf("\n");
 }
diff --git a/src/SConscript b/src/SConscript
old mode 100755
new mode 100644
index 06ee907..52607f1
--- a/src/SConscript
+++ b/src/SConscript
@@ -32,6 +32,7 @@
 import js2c
 Import('context')
 Import('tools')
+Import('d8_env')
 
 
 SOURCES = {
@@ -65,10 +66,10 @@
     disassembler.cc
     diy-fp.cc
     dtoa.cc
+    elements.cc
     execution.cc
     factory.cc
     flags.cc
-    frame-element.cc
     frames.cc
     full-codegen.cc
     func-name-inferrer.cc
@@ -110,8 +111,8 @@
     runtime.cc
     runtime-profiler.cc
     safepoint-table.cc
-    scanner-base.cc
     scanner.cc
+    scanner-character-streams.cc
     scopeinfo.cc
     scopes.cc
     serialize.cc
@@ -122,13 +123,14 @@
     strtod.cc
     stub-cache.cc
     token.cc
-    top.cc
     type-info.cc
     unicode.cc
     utils.cc
     v8-counters.cc
     v8.cc
+    v8conversions.cc
     v8threads.cc
+    v8utils.cc
     variables.cc
     version.cc
     zone.cc
@@ -220,7 +222,7 @@
   'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
   'os:cygwin':  ['platform-cygwin.cc', 'platform-posix.cc'],
   'os:nullos':  ['platform-nullos.cc'],
-  'os:win32':   ['platform-win32.cc'],
+  'os:win32':   ['platform-win32.cc', 'win32-math.cc'],
   'mode:release': [],
   'mode:debug': [
     'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
@@ -231,18 +233,36 @@
 PREPARSER_SOURCES = {
   'all': Split("""
     allocation.cc
+    bignum.cc
+    bignum-dtoa.cc
+    cached-powers.cc
+    conversions.cc
+    diy-fp.cc
+    dtoa.cc
+    fast-dtoa.cc
+    fixed-dtoa.cc
     hashmap.cc
     preparse-data.cc
     preparser.cc
     preparser-api.cc
-    scanner-base.cc
+    scanner.cc
+    strtod.cc
     token.cc
     unicode.cc
-    """)
+    utils.cc
+    """),
+  'os:win32': ['win32-math.cc']
 }
 
 
-D8_FILES = {
+D8_LIGHT_FILES = {
+  'all': [
+    'd8.cc'
+  ]
+}
+
+
+D8_FULL_FILES = {
   'all': [
     'd8.cc', 'd8-debug.cc'
   ],
@@ -297,6 +317,12 @@
 '''.split()
 
 
+EXPERIMENTAL_LIBRARY_FILES = '''
+proxy.js
+weakmap.js
+'''.split()
+
+
 def Abort(message):
   print message
   sys.exit(1)
@@ -309,21 +335,41 @@
   env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
   env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
 
+  def BuildJS2CEnv(type):
+    js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }
+    if 'COMPRESS_STARTUP_DATA_BZ2' in env['CPPDEFINES']:
+      js2c_env['COMPRESSION'] = 'bz2'
+    return js2c_env
+
   # Build the standard platform-independent source files.
   source_files = context.GetRelevantSources(SOURCES)
-
-  d8_files = context.GetRelevantSources(D8_FILES)
-  d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
+  d8_js = env.JS2C('d8-js.cc', 'd8.js', **{'TYPE': 'D8', 'COMPRESSION': 'off'})
   d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
-  d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
+  if context.options['library'] == 'shared':
+    d8_files = context.GetRelevantSources(D8_LIGHT_FILES)
+    d8_objs = []
+  else:
+    d8_files = context.GetRelevantSources(D8_FULL_FILES)
+    d8_objs = [d8_js_obj]
+  d8_objs.append(context.ConfigureObject(d8_env, [d8_files]))
 
   # Combine the JavaScript library files into a single C++ file and
   # compile it.
   library_files = [s for s in LIBRARY_FILES]
   library_files.append('macros.py')
-  libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
+  libraries_src = env.JS2C(
+    ['libraries.cc'], library_files, **BuildJS2CEnv('CORE'))
   libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
 
+  # Combine the experimental JavaScript library files into a C++ file
+  # and compile it.
+  experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
+  experimental_library_files.append('macros.py')
+  experimental_libraries_src = env.JS2C(['experimental-libraries.cc'],
+                                        experimental_library_files,
+                                        **BuildJS2CEnv('EXPERIMENTAL'))
+  experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
+
   source_objs = context.ConfigureObject(env, source_files)
   non_snapshot_files = [source_objs]
 
@@ -340,7 +386,7 @@
   mksnapshot_env = env.Copy()
   mksnapshot_env.Replace(**context.flags['mksnapshot'])
   mksnapshot_src = 'mksnapshot.cc'
-  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
   if context.use_snapshot:
     if context.build_snapshot:
       snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
@@ -349,7 +395,7 @@
     snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
   else:
     snapshot_obj = empty_snapshot_obj
-  library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
+  library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
   return (library_objs, d8_objs, [mksnapshot], preparser_objs)
 
 
diff --git a/src/accessors.cc b/src/accessors.cc
index 7fa6982..951209d 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,6 +32,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
+#include "list-inl.h"
 #include "safepoint-table.h"
 #include "scopeinfo.h"
 
@@ -598,6 +599,7 @@
   if (!found_it) return isolate->heap()->undefined_value();
   Handle<JSFunction> function(holder, isolate);
 
+  if (function->shared()->native()) return isolate->heap()->null_value();
   // Find the top invocation of the function by traversing frames.
   List<JSFunction*> functions(2);
   for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
@@ -679,6 +681,52 @@
 }
 
 
+class FrameFunctionIterator {
+ public:
+  FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
+      : frame_iterator_(isolate),
+        functions_(2),
+        index_(0) {
+    GetFunctions();
+  }
+  JSFunction* next() {
+    if (functions_.length() == 0) return NULL;
+    JSFunction* next_function = functions_[index_];
+    index_--;
+    if (index_ < 0) {
+      GetFunctions();
+    }
+    return next_function;
+  }
+
+  // Iterate through functions until the first occurrence of 'function'.
+  // Returns true if 'function' is found, and false if the iterator ends
+  // without finding it.
+  bool Find(JSFunction* function) {
+    JSFunction* next_function;
+    do {
+      next_function = next();
+      if (next_function == function) return true;
+    } while (next_function != NULL);
+    return false;
+  }
+
+ private:
+  void GetFunctions() {
+    functions_.Rewind(0);
+    if (frame_iterator_.done()) return;
+    JavaScriptFrame* frame = frame_iterator_.frame();
+    frame->GetFunctions(&functions_);
+    ASSERT(functions_.length() > 0);
+    frame_iterator_.Advance();
+    index_ = functions_.length() - 1;
+  }
+  JavaScriptFrameIterator frame_iterator_;
+  List<JSFunction*> functions_;
+  int index_;
+};
+
+
 MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
   Isolate* isolate = Isolate::Current();
   HandleScope scope(isolate);
@@ -686,40 +734,33 @@
   bool found_it = false;
   JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
   if (!found_it) return isolate->heap()->undefined_value();
+  if (holder->shared()->native()) return isolate->heap()->null_value();
   Handle<JSFunction> function(holder, isolate);
 
-  List<JSFunction*> functions(2);
-  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
-    JavaScriptFrame* frame = it.frame();
-    frame->GetFunctions(&functions);
-    for (int i = functions.length() - 1; i >= 0; i--) {
-      if (functions[i] == *function) {
-        // Once we have found the frame, we need to go to the caller
-        // frame. This may require skipping through a number of top-level
-        // frames, e.g. frames for scripts not functions.
-        if (i > 0) {
-          ASSERT(!functions[i - 1]->shared()->is_toplevel());
-          return CheckNonStrictCallerOrThrow(isolate, functions[i - 1]);
-        } else {
-          for (it.Advance(); !it.done(); it.Advance()) {
-            frame = it.frame();
-            functions.Rewind(0);
-            frame->GetFunctions(&functions);
-            if (!functions.last()->shared()->is_toplevel()) {
-              return CheckNonStrictCallerOrThrow(isolate, functions.last());
-            }
-            ASSERT(functions.length() == 1);
-          }
-          if (it.done()) return isolate->heap()->null_value();
-          break;
-        }
-      }
-    }
-    functions.Rewind(0);
+  FrameFunctionIterator it(isolate, no_alloc);
+
+  // Find the function from the frames.
+  if (!it.Find(*function)) {
+    // No frame corresponding to the given function found. Return null.
+    return isolate->heap()->null_value();
   }
 
-  // No frame corresponding to the given function found. Return null.
-  return isolate->heap()->null_value();
+  // Find previously called non-toplevel function.
+  JSFunction* caller;
+  do {
+    caller = it.next();
+    if (caller == NULL) return isolate->heap()->null_value();
+  } while (caller->shared()->is_toplevel());
+
+  // If caller is a built-in function and caller's caller is also built-in,
+  // use that instead.
+  JSFunction* potential_caller = caller;
+  while (potential_caller != NULL && potential_caller->IsBuiltin()) {
+    caller = potential_caller;
+    potential_caller = it.next();
+  }
+
+  return CheckNonStrictCallerOrThrow(isolate, caller);
 }
 
 
diff --git a/src/accessors.h b/src/accessors.h
index 14ccc8f..385536d 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -28,6 +28,8 @@
 #ifndef V8_ACCESSORS_H_
 #define V8_ACCESSORS_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/api.cc b/src/api.cc
index 3ae6304..39c0d02 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,13 +35,16 @@
 #include "debug.h"
 #include "deoptimizer.h"
 #include "execution.h"
+#include "flags.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "messages.h"
+#include "natives.h"
 #include "parser.h"
 #include "platform.h"
 #include "profile-generator-inl.h"
 #include "runtime-profiler.h"
+#include "scanner-character-streams.h"
 #include "serialize.h"
 #include "snapshot.h"
 #include "v8threads.h"
@@ -53,17 +56,11 @@
 
 #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
 
-// TODO(isolates): avoid repeated TLS reads in function prologues.
-#ifdef ENABLE_VMSTATE_TRACKING
 #define ENTER_V8(isolate)                                        \
   ASSERT((isolate)->IsInitialized());                           \
   i::VMState __state__((isolate), i::OTHER)
 #define LEAVE_V8(isolate) \
   i::VMState __state__((isolate), i::EXTERNAL)
-#else
-#define ENTER_V8(isolate) ((void) 0)
-#define LEAVE_V8(isolate) ((void) 0)
-#endif
 
 namespace v8 {
 
@@ -98,13 +95,11 @@
     }                                                                          \
   } while (false)
 
-// TODO(isolates): Add a parameter to this macro for an isolate.
 
-#define API_ENTRY_CHECK(msg)                                                   \
+#define API_ENTRY_CHECK(isolate, msg)                                          \
   do {                                                                         \
     if (v8::Locker::IsActive()) {                                              \
-      ApiCheck(i::Isolate::Current()->thread_manager()->                       \
-                  IsLockedByCurrentThread(),                                   \
+      ApiCheck(isolate->thread_manager()->IsLockedByCurrentThread(),           \
                msg,                                                            \
                "Entering the V8 API without proper locking in place");         \
     }                                                                          \
@@ -116,9 +111,7 @@
 
 static void DefaultFatalErrorHandler(const char* location,
                                      const char* message) {
-#ifdef ENABLE_VMSTATE_TRACKING
   i::VMState __state__(i::Isolate::Current(), i::OTHER);
-#endif
   API_Fatal(location, message);
 }
 
@@ -177,8 +170,8 @@
   heap_stats.pending_global_handle_count = &pending_global_handle_count;
   int near_death_global_handle_count;
   heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
-  int destroyed_global_handle_count;
-  heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
+  int free_global_handle_count;
+  heap_stats.free_global_handle_count = &free_global_handle_count;
   intptr_t memory_allocator_size;
   heap_stats.memory_allocator_size = &memory_allocator_size;
   intptr_t memory_allocator_capacity;
@@ -290,6 +283,7 @@
   if (isolate != NULL) {
     if (isolate->IsInitialized()) return true;
   }
+  ASSERT(isolate == i::Isolate::Current());
   return ApiCheck(InitializeHelper(), location, "Error initializing V8");
 }
 
@@ -311,12 +305,145 @@
 }
 
 
+StartupDataDecompressor::StartupDataDecompressor()
+    : raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
+  for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
+    raw_data[i] = NULL;
+  }
+}
+
+
+StartupDataDecompressor::~StartupDataDecompressor() {
+  for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
+    i::DeleteArray(raw_data[i]);
+  }
+  i::DeleteArray(raw_data);
+}
+
+
+int StartupDataDecompressor::Decompress() {
+  int compressed_data_count = V8::GetCompressedStartupDataCount();
+  StartupData* compressed_data =
+      i::NewArray<StartupData>(compressed_data_count);
+  V8::GetCompressedStartupData(compressed_data);
+  for (int i = 0; i < compressed_data_count; ++i) {
+    char* decompressed = raw_data[i] =
+        i::NewArray<char>(compressed_data[i].raw_size);
+    if (compressed_data[i].compressed_size != 0) {
+      int result = DecompressData(decompressed,
+                                  &compressed_data[i].raw_size,
+                                  compressed_data[i].data,
+                                  compressed_data[i].compressed_size);
+      if (result != 0) return result;
+    } else {
+      ASSERT_EQ(0, compressed_data[i].raw_size);
+    }
+    compressed_data[i].data = decompressed;
+  }
+  V8::SetDecompressedStartupData(compressed_data);
+  return 0;
+}
+
+
+StartupData::CompressionAlgorithm V8::GetCompressedStartupDataAlgorithm() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  return StartupData::kBZip2;
+#else
+  return StartupData::kUncompressed;
+#endif
+}
+
+
+enum CompressedStartupDataItems {
+  kSnapshot = 0,
+  kSnapshotContext,
+  kLibraries,
+  kExperimentalLibraries,
+  kCompressedStartupDataCount
+};
+
+int V8::GetCompressedStartupDataCount() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  return kCompressedStartupDataCount;
+#else
+  return 0;
+#endif
+}
+
+
+void V8::GetCompressedStartupData(StartupData* compressed_data) {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  compressed_data[kSnapshot].data =
+      reinterpret_cast<const char*>(i::Snapshot::data());
+  compressed_data[kSnapshot].compressed_size = i::Snapshot::size();
+  compressed_data[kSnapshot].raw_size = i::Snapshot::raw_size();
+
+  compressed_data[kSnapshotContext].data =
+      reinterpret_cast<const char*>(i::Snapshot::context_data());
+  compressed_data[kSnapshotContext].compressed_size =
+      i::Snapshot::context_size();
+  compressed_data[kSnapshotContext].raw_size = i::Snapshot::context_raw_size();
+
+  i::Vector<const i::byte> libraries_source = i::Natives::GetScriptsSource();
+  compressed_data[kLibraries].data =
+      reinterpret_cast<const char*>(libraries_source.start());
+  compressed_data[kLibraries].compressed_size = libraries_source.length();
+  compressed_data[kLibraries].raw_size = i::Natives::GetRawScriptsSize();
+
+  i::Vector<const i::byte> exp_libraries_source =
+      i::ExperimentalNatives::GetScriptsSource();
+  compressed_data[kExperimentalLibraries].data =
+      reinterpret_cast<const char*>(exp_libraries_source.start());
+  compressed_data[kExperimentalLibraries].compressed_size =
+      exp_libraries_source.length();
+  compressed_data[kExperimentalLibraries].raw_size =
+      i::ExperimentalNatives::GetRawScriptsSize();
+#endif
+}
+
+
+void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  ASSERT_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
+  i::Snapshot::set_raw_data(
+      reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
+
+  ASSERT_EQ(i::Snapshot::context_raw_size(),
+            decompressed_data[kSnapshotContext].raw_size);
+  i::Snapshot::set_context_raw_data(
+      reinterpret_cast<const i::byte*>(
+          decompressed_data[kSnapshotContext].data));
+
+  ASSERT_EQ(i::Natives::GetRawScriptsSize(),
+            decompressed_data[kLibraries].raw_size);
+  i::Vector<const char> libraries_source(
+      decompressed_data[kLibraries].data,
+      decompressed_data[kLibraries].raw_size);
+  i::Natives::SetRawScriptsSource(libraries_source);
+
+  ASSERT_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
+            decompressed_data[kExperimentalLibraries].raw_size);
+  i::Vector<const char> exp_libraries_source(
+      decompressed_data[kExperimentalLibraries].data,
+      decompressed_data[kExperimentalLibraries].raw_size);
+  i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
+#endif
+}
+
+
 void V8::SetFatalErrorHandler(FatalErrorCallback that) {
   i::Isolate* isolate = EnterIsolateIfNeeded();
   isolate->set_exception_behavior(that);
 }
 
 
+void V8::SetAllowCodeGenerationFromStringsCallback(
+    AllowCodeGenerationFromStringsCallback callback) {
+  i::Isolate* isolate = EnterIsolateIfNeeded();
+  isolate->set_allow_code_gen_callback(callback);
+}
+
+
 #ifdef DEBUG
 void ImplementationUtilities::ZapHandleRange(i::Object** begin,
                                              i::Object** end) {
@@ -477,6 +604,13 @@
 }
 
 
+void V8::MarkIndependent(i::Object** object) {
+  i::Isolate* isolate = i::Isolate::Current();
+  LOG_API(isolate, "MakeIndependent");
+  isolate->global_handles()->MarkIndependent(object);
+}
+
+
 bool V8::IsGlobalNearDeath(i::Object** obj) {
   i::Isolate* isolate = i::Isolate::Current();
   LOG_API(isolate, "IsGlobalNearDeath");
@@ -504,8 +638,8 @@
 
 
 HandleScope::HandleScope() {
-  API_ENTRY_CHECK("HandleScope::HandleScope");
   i::Isolate* isolate = i::Isolate::Current();
+  API_ENTRY_CHECK(isolate, "HandleScope::HandleScope");
   v8::ImplementationUtilities::HandleScopeData* current =
       isolate->handle_scope_data();
   isolate_ = isolate;
@@ -561,12 +695,11 @@
 
 
 void Context::Enter() {
-  // TODO(isolates): Context should have a pointer to isolate.
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
   if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
   ENTER_V8(isolate);
 
-  i::Handle<i::Context> env = Utils::OpenHandle(this);
   isolate->handle_scope_implementer()->EnterContext(env);
 
   isolate->handle_scope_implementer()->SaveContext(isolate->context());
@@ -575,7 +708,9 @@
 
 
 void Context::Exit() {
-  // TODO(isolates): Context should have a pointer to isolate.
+  // Exit is essentially a static function and doesn't use the
+  // receiver, so we have to get the current isolate from the thread
+  // local.
   i::Isolate* isolate = i::Isolate::Current();
   if (!isolate->IsInitialized()) return;
 
@@ -593,41 +728,31 @@
 
 
 void Context::SetData(v8::Handle<String> data) {
-  // TODO(isolates): Context should have a pointer to isolate.
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
   if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
-  ENTER_V8(isolate);
-  {
-    i::HandleScope scope(isolate);
-    i::Handle<i::Context> env = Utils::OpenHandle(this);
-    i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
-    ASSERT(env->IsGlobalContext());
-    if (env->IsGlobalContext()) {
-      env->set_data(*raw_data);
-    }
+  i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
+  ASSERT(env->IsGlobalContext());
+  if (env->IsGlobalContext()) {
+    env->set_data(*raw_data);
   }
 }
 
 
 v8::Local<v8::Value> Context::GetData() {
-  // TODO(isolates): Context should have a pointer to isolate.
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
   if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
     return v8::Local<Value>();
   }
-  ENTER_V8(isolate);
   i::Object* raw_result = NULL;
-  {
-    i::HandleScope scope(isolate);
-    i::Handle<i::Context> env = Utils::OpenHandle(this);
-    ASSERT(env->IsGlobalContext());
-    if (env->IsGlobalContext()) {
-      raw_result = env->data();
-    } else {
-      return Local<Value>();
-    }
+  ASSERT(env->IsGlobalContext());
+  if (env->IsGlobalContext()) {
+    raw_result = env->data();
+  } else {
+    return Local<Value>();
   }
-  i::Handle<i::Object> result(raw_result);
+  i::Handle<i::Object> result(raw_result, isolate);
   return Utils::ToLocal(result);
 }
 
@@ -856,6 +981,7 @@
 int TypeSwitch::match(v8::Handle<Value> value) {
   i::Isolate* isolate = i::Isolate::Current();
   LOG_API(isolate, "TypeSwitch::match");
+  USE(isolate);
   i::Handle<i::Object> obj = Utils::OpenHandle(*value);
   i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
   i::FixedArray* types = i::FixedArray::cast(info->types());
@@ -867,9 +993,9 @@
 }
 
 
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do {  \
-    i::Handle<i::Object> proxy = FromCData(cdata);  \
-    (obj)->setter(*proxy);                          \
+#define SET_FIELD_WRAPPED(obj, setter, cdata) do {    \
+    i::Handle<i::Object> foreign = FromCData(cdata);  \
+    (obj)->setter(*foreign);                          \
   } while (false)
 
 
@@ -975,6 +1101,16 @@
 }
 
 
+void FunctionTemplate::ReadOnlyPrototype() {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
+    return;
+  }
+  ENTER_V8(isolate);
+  Utils::OpenHandle(this)->set_read_only_prototype(true);
+}
+
+
 void FunctionTemplate::SetNamedInstancePropertyHandler(
       NamedPropertyGetter getter,
       NamedPropertySetter setter,
@@ -1271,7 +1407,7 @@
 ScriptData* ScriptData::PreCompile(const char* input, int length) {
   i::Utf8ToUC16CharacterStream stream(
       reinterpret_cast<const unsigned char*>(input), length);
-  return i::ParserApi::PreParse(&stream, NULL);
+  return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
 }
 
 
@@ -1280,10 +1416,10 @@
   if (str->IsExternalTwoByteString()) {
     i::ExternalTwoByteStringUC16CharacterStream stream(
       i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL);
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
   } else {
     i::GenericStringUC16CharacterStream stream(str, 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL);
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
   }
 }
 
@@ -1320,31 +1456,35 @@
   ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
   LOG_API(isolate, "Script::New");
   ENTER_V8(isolate);
-  i::Handle<i::String> str = Utils::OpenHandle(*source);
-  i::Handle<i::Object> name_obj;
-  int line_offset = 0;
-  int column_offset = 0;
-  if (origin != NULL) {
-    if (!origin->ResourceName().IsEmpty()) {
-      name_obj = Utils::OpenHandle(*origin->ResourceName());
+  i::SharedFunctionInfo* raw_result = NULL;
+  { i::HandleScope scope(isolate);
+    i::Handle<i::String> str = Utils::OpenHandle(*source);
+    i::Handle<i::Object> name_obj;
+    int line_offset = 0;
+    int column_offset = 0;
+    if (origin != NULL) {
+      if (!origin->ResourceName().IsEmpty()) {
+        name_obj = Utils::OpenHandle(*origin->ResourceName());
+      }
+      if (!origin->ResourceLineOffset().IsEmpty()) {
+        line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+      }
+      if (!origin->ResourceColumnOffset().IsEmpty()) {
+        column_offset =
+            static_cast<int>(origin->ResourceColumnOffset()->Value());
+      }
     }
-    if (!origin->ResourceLineOffset().IsEmpty()) {
-      line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+    EXCEPTION_PREAMBLE(isolate);
+    i::ScriptDataImpl* pre_data_impl =
+        static_cast<i::ScriptDataImpl*>(pre_data);
+    // We assert that the pre-data is sane; in release mode we can
+    // actually handle it if it turns out not to be.
+    ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
+    // If the pre-data isn't sane, we simply ignore it.
+    if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
+      pre_data_impl = NULL;
     }
-    if (!origin->ResourceColumnOffset().IsEmpty()) {
-      column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
-    }
-  }
-  EXCEPTION_PREAMBLE(isolate);
-  i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
-  // We assert that the pre-data is sane, even though we can actually
-  // handle it if it turns out not to be in release mode.
-  ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
-  // If the pre-data isn't sane we simply ignore it
-  if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
-    pre_data_impl = NULL;
-  }
-  i::Handle<i::SharedFunctionInfo> result =
+    i::Handle<i::SharedFunctionInfo> result =
       i::Compiler::Compile(str,
                            name_obj,
                            line_offset,
@@ -1353,8 +1493,11 @@
                            pre_data_impl,
                            Utils::OpenHandle(*script_data),
                            i::NOT_NATIVES_CODE);
-  has_pending_exception = result.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+    has_pending_exception = result.is_null();
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+    raw_result = *result;
+  }
+  i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
   return Local<Script>(ToApi<Script>(result));
 }
 
@@ -1476,26 +1619,27 @@
 
 
 v8::TryCatch::TryCatch()
-    : next_(i::Isolate::Current()->try_catch_handler_address()),
-      exception_(HEAP->the_hole_value()),
+    : isolate_(i::Isolate::Current()),
+      next_(isolate_->try_catch_handler_address()),
+      exception_(isolate_->heap()->the_hole_value()),
       message_(i::Smi::FromInt(0)),
       is_verbose_(false),
       can_continue_(true),
       capture_message_(true),
       rethrow_(false) {
-  i::Isolate::Current()->RegisterTryCatchHandler(this);
+  isolate_->RegisterTryCatchHandler(this);
 }
 
 
 v8::TryCatch::~TryCatch() {
-  i::Isolate* isolate = i::Isolate::Current();
+  ASSERT(isolate_ == i::Isolate::Current());
   if (rethrow_) {
     v8::HandleScope scope;
     v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
-    isolate->UnregisterTryCatchHandler(this);
+    isolate_->UnregisterTryCatchHandler(this);
     v8::ThrowException(exc);
   } else {
-    isolate->UnregisterTryCatchHandler(this);
+    isolate_->UnregisterTryCatchHandler(this);
   }
 }
 
@@ -1518,10 +1662,11 @@
 
 
 v8::Local<Value> v8::TryCatch::Exception() const {
+  ASSERT(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     // Check for out of memory exception.
     i::Object* exception = reinterpret_cast<i::Object*>(exception_);
-    return v8::Utils::ToLocal(i::Handle<i::Object>(exception));
+    return v8::Utils::ToLocal(i::Handle<i::Object>(exception, isolate_));
   } else {
     return v8::Local<Value>();
   }
@@ -1529,15 +1674,17 @@
 
 
 v8::Local<Value> v8::TryCatch::StackTrace() const {
+  ASSERT(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
     if (!raw_obj->IsJSObject()) return v8::Local<Value>();
-    v8::HandleScope scope;
-    i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj));
-    i::Handle<i::String> name = FACTORY->LookupAsciiSymbol("stack");
-    if (!obj->HasProperty(*name))
-      return v8::Local<Value>();
-    return scope.Close(v8::Utils::ToLocal(i::GetProperty(obj, name)));
+    i::HandleScope scope(isolate_);
+    i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
+    i::Handle<i::String> name = isolate_->factory()->LookupAsciiSymbol("stack");
+    if (!obj->HasProperty(*name)) return v8::Local<Value>();
+    i::Handle<i::Object> value = i::GetProperty(obj, name);
+    if (value.is_null()) return v8::Local<Value>();
+    return v8::Utils::ToLocal(scope.CloseAndEscape(value));
   } else {
     return v8::Local<Value>();
   }
@@ -1545,9 +1692,10 @@
 
 
 v8::Local<v8::Message> v8::TryCatch::Message() const {
+  ASSERT(isolate_ == i::Isolate::Current());
   if (HasCaught() && message_ != i::Smi::FromInt(0)) {
     i::Object* message = reinterpret_cast<i::Object*>(message_);
-    return v8::Utils::MessageToLocal(i::Handle<i::Object>(message));
+    return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
   } else {
     return v8::Local<v8::Message>();
   }
@@ -1555,7 +1703,8 @@
 
 
 void v8::TryCatch::Reset() {
-  exception_ = HEAP->the_hole_value();
+  ASSERT(isolate_ == i::Isolate::Current());
+  exception_ = isolate_->heap()->the_hole_value();
   message_ = i::Smi::FromInt(0);
 }
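
v8::TryCatch now remembers its isolate at construction time and uses it in Exception(), StackTrace(), Message() and Reset(); StackTrace() additionally tolerates a missing or failing 'stack' property. A hedged usage sketch of the (unchanged) embedder-facing pattern, assuming an entered context and <cstdio>:

v8::HandleScope handle_scope;
v8::TryCatch try_catch;
v8::Handle<v8::Script> script =
    v8::Script::Compile(v8::String::New("throw new Error('boom');"));
script->Run();
if (try_catch.HasCaught()) {
  v8::String::Utf8Value exception(try_catch.Exception());
  printf("caught: %s\n", *exception);
  v8::Handle<v8::Value> trace = try_catch.StackTrace();
  if (!trace.IsEmpty()) {  // StackTrace() may legitimately return an empty handle
    v8::String::Utf8Value trace_text(trace);
    printf("%s\n", *trace_text);
  }
}
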
 
@@ -1989,7 +2138,7 @@
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
     return false;
   }
-  return Utils::OpenHandle(this)->IsProxy();
+  return Utils::OpenHandle(this)->IsForeign();
 }
 
 
@@ -2025,6 +2174,65 @@
 }
 
 
+bool Value::IsStringObject() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->HasSpecificClassOf(isolate->heap()->String_symbol());
+}
+
+
+bool Value::IsNumberObject() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->HasSpecificClassOf(isolate->heap()->Number_symbol());
+}
+
+
+static i::Object* LookupBuiltin(i::Isolate* isolate,
+                                const char* builtin_name) {
+  i::Handle<i::String> symbol =
+      isolate->factory()->LookupAsciiSymbol(builtin_name);
+  i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object();
+  return builtins->GetPropertyNoExceptionThrown(*symbol);
+}
+
+
+static bool CheckConstructor(i::Isolate* isolate,
+                             i::Handle<i::JSObject> obj,
+                             const char* class_name) {
+  return obj->map()->constructor() == LookupBuiltin(isolate, class_name);
+}
+
+
+bool Value::IsNativeError() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Value::IsNativeError()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsJSObject()) {
+    i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
+    return CheckConstructor(isolate, js_obj, "$Error") ||
+        CheckConstructor(isolate, js_obj, "$EvalError") ||
+        CheckConstructor(isolate, js_obj, "$RangeError") ||
+        CheckConstructor(isolate, js_obj, "$ReferenceError") ||
+        CheckConstructor(isolate, js_obj, "$SyntaxError") ||
+        CheckConstructor(isolate, js_obj, "$TypeError") ||
+        CheckConstructor(isolate, js_obj, "$URIError");
+  } else {
+    return false;
+  }
+}
+
+
+bool Value::IsBooleanObject() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol());
+}
+
+
 bool Value::IsRegExp() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -2150,7 +2358,7 @@
 void External::CheckCast(v8::Value* that) {
   if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  ApiCheck(obj->IsProxy(),
+  ApiCheck(obj->IsForeign(),
            "v8::External::Cast()",
            "Could not convert to external");
 }
@@ -2220,6 +2428,36 @@
 }
 
 
+void v8::StringObject::CheckCast(v8::Value* that) {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_symbol()),
+           "v8::StringObject::Cast()",
+           "Could not convert to StringObject");
+}
+
+
+void v8::NumberObject::CheckCast(v8::Value* that) {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_symbol()),
+           "v8::NumberObject::Cast()",
+           "Could not convert to NumberObject");
+}
+
+
+void v8::BooleanObject::CheckCast(v8::Value* that) {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol()),
+           "v8::BooleanObject::Cast()",
+           "Could not convert to BooleanObject");
+}
+
+
 void v8::RegExp::CheckCast(v8::Value* that) {
   if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -2563,6 +2801,26 @@
 }
 
 
+PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::GetPropertyAttribute()",
+             return static_cast<PropertyAttribute>(NONE));
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  if (!key_obj->IsString()) {
+    EXCEPTION_PREAMBLE(isolate);
+    key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
+  }
+  i::Handle<i::String> key_string = i::Handle<i::String>::cast(key_obj);
+  PropertyAttributes result = self->GetPropertyAttribute(*key_string);
+  if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
+  return static_cast<PropertyAttribute>(result);
+}
+
+
 Local<Value> v8::Object::GetPrototype() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::GetPrototype()",
@@ -2628,6 +2886,25 @@
 }
 
 
+Local<Array> v8::Object::GetOwnPropertyNames() {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyNames()",
+             return Local<v8::Array>());
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::FixedArray> value =
+      i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
+  // Because we use caching to speed up enumeration, it is important
+  // never to change the result of the basic enumeration function, so
+  // we clone the result.
+  i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
+  i::Handle<i::JSArray> result =
+      isolate->factory()->NewJSArrayWithElements(elms);
+  return Utils::ToLocal(scope.CloseAndEscape(result));
+}
+
+
 Local<String> v8::Object::ObjectProtoToString() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::ObjectProtoToString()",
@@ -2751,6 +3028,15 @@
 }
 
 
+bool v8::Object::HasOwnProperty(Handle<String> key) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
+             return false);
+  return Utils::OpenHandle(this)->HasLocalProperty(
+      *Utils::OpenHandle(*key));
+}
+
+
 bool v8::Object::HasRealNamedProperty(Handle<String> key) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
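
The three Object queries added above (GetPropertyAttributes, GetOwnPropertyNames, HasOwnProperty) mirror Object.getOwnPropertyNames and hasOwnProperty on the JavaScript side. A minimal sketch of the embedder-side calls, assuming an entered context and <cstdio>:

v8::HandleScope handle_scope;
v8::Handle<v8::Object> obj = v8::Object::New();
obj->Set(v8::String::New("x"), v8::Number::New(1));
obj->Set(v8::String::New("y"), v8::Number::New(2), v8::ReadOnly);

bool has_x = obj->HasOwnProperty(v8::String::New("x"));        // true
v8::PropertyAttribute attr =
    obj->GetPropertyAttributes(v8::String::New("y"));          // ReadOnly
v8::Handle<v8::Array> names = obj->GetOwnPropertyNames();      // ["x", "y"]
for (uint32_t i = 0; i < names->Length(); i++) {
  v8::String::Utf8Value name(names->Get(i));
  printf("own property: %s\n", *name);
}
printf("has x: %d, attributes of y: %d\n", has_x, static_cast<int>(attr));
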
@@ -2886,10 +3172,9 @@
   i::Object* constructor = object->map()->constructor();
   i::JSFunction* function;
   if (!constructor->IsJSFunction()) {
-    // API functions have null as a constructor,
+    // Functions have null as a constructor,
     // but any JSFunction knows its context immediately.
-    ASSERT(object->IsJSFunction() &&
-           i::JSFunction::cast(object)->shared()->IsApiFunction());
+    ASSERT(object->IsJSFunction());
     function = i::JSFunction::cast(object);
   } else {
     function = i::JSFunction::cast(constructor);
@@ -2915,39 +3200,7 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
-  if (!hidden_props_obj->IsJSObject()) {
-    // We failed to create hidden properties.  That's a detached
-    // global proxy.
-    ASSERT(hidden_props_obj->IsUndefined());
-    return 0;
-  }
-  i::Handle<i::JSObject> hidden_props =
-      i::Handle<i::JSObject>::cast(hidden_props_obj);
-  i::Handle<i::String> hash_symbol = isolate->factory()->identity_hash_symbol();
-  if (hidden_props->HasLocalProperty(*hash_symbol)) {
-    i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
-    CHECK(!hash.is_null());
-    CHECK(hash->IsSmi());
-    return i::Smi::cast(*hash)->value();
-  }
-
-  int hash_value;
-  int attempts = 0;
-  do {
-    // Generate a random 32-bit hash value but limit range to fit
-    // within a smi.
-    hash_value = i::V8::Random(self->GetIsolate()) & i::Smi::kMaxValue;
-    attempts++;
-  } while (hash_value == 0 && attempts < 30);
-  hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
-  CHECK(!i::SetLocalPropertyIgnoreAttributes(
-          hidden_props,
-          hash_symbol,
-          i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
-          static_cast<PropertyAttributes>(None)).is_null());
-
-  return hash_value;
+  return i::GetIdentityHash(self);
 }
 
 
@@ -2958,7 +3211,9 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+      self,
+      i::JSObject::ALLOW_CREATION));
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
   EXCEPTION_PREAMBLE(isolate);
@@ -2980,7 +3235,9 @@
              return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+      self,
+      i::JSObject::OMIT_CREATION));
   if (hidden_props->IsUndefined()) {
     return v8::Local<v8::Value>();
   }
@@ -3002,7 +3259,9 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+      self,
+      i::JSObject::OMIT_CREATION));
   if (hidden_props->IsUndefined()) {
     return true;
   }
@@ -3014,6 +3273,42 @@
 
 namespace {
 
+static i::ElementsKind GetElementsKindFromExternalArrayType(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+      return i::EXTERNAL_BYTE_ELEMENTS;
+      break;
+    case kExternalUnsignedByteArray:
+      return i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+      break;
+    case kExternalShortArray:
+      return i::EXTERNAL_SHORT_ELEMENTS;
+      break;
+    case kExternalUnsignedShortArray:
+      return i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+      break;
+    case kExternalIntArray:
+      return i::EXTERNAL_INT_ELEMENTS;
+      break;
+    case kExternalUnsignedIntArray:
+      return i::EXTERNAL_UNSIGNED_INT_ELEMENTS;
+      break;
+    case kExternalFloatArray:
+      return i::EXTERNAL_FLOAT_ELEMENTS;
+      break;
+    case kExternalDoubleArray:
+      return i::EXTERNAL_DOUBLE_ELEMENTS;
+      break;
+    case kExternalPixelArray:
+      return i::EXTERNAL_PIXEL_ELEMENTS;
+      break;
+  }
+  UNREACHABLE();
+  return i::DICTIONARY_ELEMENTS;
+}
+
+
 void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
                                   void* data,
                                   ExternalArrayType array_type,
@@ -3032,9 +3327,9 @@
       elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
   if (cant_reuse_map) {
     i::Handle<i::Map> external_array_map =
-        isolate->factory()->GetExternalArrayElementsMap(
+        isolate->factory()->GetElementsTransitionMap(
             i::Handle<i::Map>(object->map()),
-            array_type,
+            GetElementsKindFromExternalArrayType(array_type),
             object->HasFastProperties());
     object->set_map(*external_array_map);
   }
@@ -3096,6 +3391,7 @@
   }
 }
 
+
 void v8::Object::SetIndexedPropertiesToExternalArrayData(
     void* data,
     ExternalArrayType array_type,
@@ -3161,6 +3457,8 @@
       return kExternalUnsignedIntArray;
     case i::EXTERNAL_FLOAT_ARRAY_TYPE:
       return kExternalFloatArray;
+    case i::EXTERNAL_DOUBLE_ARRAY_TYPE:
+      return kExternalDoubleArray;
     case i::EXTERNAL_PIXEL_ARRAY_TYPE:
       return kExternalPixelArray;
     default:
@@ -3182,6 +3480,85 @@
 }
 
 
+bool v8::Object::IsCallable() {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::IsCallable()", return false);
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  if (obj->IsJSFunction()) return true;
+  return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
+}
+
+
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
+                                        v8::Handle<v8::Value> argv[]) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
+             return Local<v8::Value>());
+  LOG_API(isolate, "Object::CallAsFunction");
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+  STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
+  if (obj->IsJSFunction()) {
+    fun = i::Handle<i::JSFunction>::cast(obj);
+  } else {
+    EXCEPTION_PREAMBLE(isolate);
+    i::Handle<i::Object> delegate =
+        i::Execution::TryGetFunctionDelegate(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+    fun = i::Handle<i::JSFunction>::cast(delegate);
+    recv_obj = obj;
+  }
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> returned =
+      i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+  return Utils::ToLocal(scope.CloseAndEscape(returned));
+}
+
+
+Local<v8::Value> Object::CallAsConstructor(int argc,
+                                           v8::Handle<v8::Value> argv[]) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::CallAsConstructor()",
+             return Local<v8::Object>());
+  LOG_API(isolate, "Object::CallAsConstructor");
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  if (obj->IsJSFunction()) {
+    i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
+    EXCEPTION_PREAMBLE(isolate);
+    i::Handle<i::Object> returned =
+        i::Execution::New(fun, argc, args, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+    return Utils::ToLocal(scope.CloseAndEscape(
+        i::Handle<i::JSObject>::cast(returned)));
+  }
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> delegate =
+      i::Execution::TryGetConstructorDelegate(obj, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+  if (!delegate->IsUndefined()) {
+    i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
+    EXCEPTION_PREAMBLE(isolate);
+    i::Handle<i::Object> returned =
+        i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+    ASSERT(!delegate->IsUndefined());
+    return Utils::ToLocal(scope.CloseAndEscape(returned));
+  }
+  return Local<v8::Object>();
+}
+
+
 Local<v8::Object> Function::NewInstance() const {
   return NewInstance(0, NULL);
 }
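
IsCallable, CallAsFunction and CallAsConstructor let an embedder invoke any object that has a call or construct delegate, not only plain JSFunctions. A hedged sketch, assuming an entered context:

v8::HandleScope handle_scope;
v8::Handle<v8::Value> fn_val = v8::Script::Compile(
    v8::String::New("(function (a, b) { return a + b; })"))->Run();
v8::Handle<v8::Object> fn = fn_val->ToObject();
if (fn->IsCallable()) {
  v8::Handle<v8::Value> argv[] = { v8::Number::New(2), v8::Number::New(3) };
  v8::Handle<v8::Value> sum =
      fn->CallAsFunction(v8::Context::GetCurrent()->Global(), 2, argv);
  v8::Handle<v8::Value> instance = fn->CallAsConstructor(2, argv);
  printf("sum = %g, constructed an object: %d\n",
         sum->NumberValue(), instance->IsObject());
}
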
@@ -3233,6 +3610,7 @@
 void Function::SetName(v8::Handle<v8::String> name) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
+  USE(isolate);
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   func->shared()->set_name(*Utils::OpenHandle(*name));
 }
@@ -3288,7 +3666,7 @@
 int String::WriteUtf8(char* buffer,
                       int capacity,
                       int* nchars_ref,
-                      WriteHints hints) const {
+                      int options) const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
   LOG_API(isolate, "String::WriteUtf8");
@@ -3296,7 +3674,7 @@
   i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
   i::Handle<i::String> str = Utils::OpenHandle(this);
   isolate->string_tracker()->RecordWrite(str);
-  if (hints & HINT_MANY_WRITES_EXPECTED) {
+  if (options & HINT_MANY_WRITES_EXPECTED) {
     // Flatten the string for efficiency.  This applies whether we are
     // using StringInputBuffer or Get(i) to access the characters.
     str->TryFlatten();
@@ -3336,7 +3714,8 @@
     }
   }
   if (nchars_ref != NULL) *nchars_ref = nchars;
-  if (i == len && (capacity == -1 || pos < capacity))
+  if (!(options & NO_NULL_TERMINATION) &&
+      (i == len && (capacity == -1 || pos < capacity)))
     buffer[pos++] = '\0';
   return pos;
 }
@@ -3345,7 +3724,7 @@
 int String::WriteAscii(char* buffer,
                        int start,
                        int length,
-                       WriteHints hints) const {
+                       int options) const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
   LOG_API(isolate, "String::WriteAscii");
@@ -3354,7 +3733,7 @@
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
   isolate->string_tracker()->RecordWrite(str);
-  if (hints & HINT_MANY_WRITES_EXPECTED) {
+  if (options & HINT_MANY_WRITES_EXPECTED) {
     // Flatten the string for efficiency.  This applies whether we are
     // using StringInputBuffer or Get(i) to access the characters.
     str->TryFlatten();
@@ -3370,7 +3749,7 @@
     if (c == '\0') c = ' ';
     buffer[i] = c;
   }
-  if (length == -1 || i < length)
+  if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length))
     buffer[i] = '\0';
   return i;
 }
@@ -3379,7 +3758,7 @@
 int String::Write(uint16_t* buffer,
                   int start,
                   int length,
-                  WriteHints hints) const {
+                  int options) const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
   LOG_API(isolate, "String::Write");
@@ -3387,7 +3766,7 @@
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
   isolate->string_tracker()->RecordWrite(str);
-  if (hints & HINT_MANY_WRITES_EXPECTED) {
+  if (options & HINT_MANY_WRITES_EXPECTED) {
     // Flatten the string for efficiency.  This applies whether we are
     // using StringInputBuffer or Get(i) to access the characters.
     str->TryFlatten();
@@ -3397,7 +3776,8 @@
     end = str->length();
   if (end < 0) return 0;
   i::String::WriteToFlat(*str, buffer, start, end);
-  if (length == -1 || end - start < length) {
+  if (!(options & NO_NULL_TERMINATION) &&
+      (length == -1 || end - start < length)) {
     buffer[end - start] = '\0';
   }
   return end - start;
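
The Write/WriteAscii/WriteUtf8 family now takes an int options bitfield instead of WriteHints, so HINT_MANY_WRITES_EXPECTED can be combined with the new NO_NULL_TERMINATION flag. An illustrative call, assuming str is a v8::Handle<v8::String>:

// Copy into a length-tracked buffer without the trailing '\0'.
char buffer[64];
int nchars = 0;
int written = str->WriteUtf8(buffer,
                             static_cast<int>(sizeof(buffer)),
                             &nchars,
                             v8::String::HINT_MANY_WRITES_EXPECTED |
                                 v8::String::NO_NULL_TERMINATION);
// 'written' is the number of bytes used, 'nchars' the number of characters.

Passing combined flags as a plain int is exactly why the signatures change from WriteHints to int options in the hunks above.
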
@@ -3570,11 +3950,11 @@
     Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
   } else {
     HandleScope scope;
-    i::Handle<i::Proxy> proxy =
-        isolate->factory()->NewProxy(
+    i::Handle<i::Foreign> foreign =
+        isolate->factory()->NewForeign(
             reinterpret_cast<i::Address>(value), i::TENURED);
-    if (!proxy.is_null())
-        Utils::OpenHandle(this)->SetInternalField(index, *proxy);
+    if (!foreign.is_null())
+        Utils::OpenHandle(this)->SetInternalField(index, *foreign);
   }
   ASSERT_EQ(value, GetPointerFromInternalField(index));
 }
@@ -3592,6 +3972,11 @@
 }
 
 
+void v8::V8::SetEntropySource(EntropySource source) {
+  i::V8::SetEntropySource(source);
+}
+
+
 bool v8::V8::Dispose() {
   i::Isolate* isolate = i::Isolate::Current();
   if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
@@ -3707,6 +4092,7 @@
 
     // Create the environment.
     env = isolate->bootstrapper()->CreateEnvironment(
+        isolate,
         Utils::OpenHandle(*global_object),
         proxy_template,
         extensions);
@@ -3851,6 +4237,20 @@
 }
 
 
+void Context::AllowCodeGenerationFromStrings(bool allow) {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
+    return;
+  }
+  ENTER_V8(isolate);
+  i::Object** ctx = reinterpret_cast<i::Object**>(this);
+  i::Handle<i::Context> context =
+      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  context->set_allow_code_gen_from_strings(
+      allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
+}
+
+
 void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
   i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
 }
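
Context::AllowCodeGenerationFromStrings is the per-context switch behind the SetAllowCodeGenerationFromStringsCallback hook added earlier in this file: with generation disabled, eval and the Function constructor are rejected unless the embedder's callback (if installed) approves them. A hedged sketch:

v8::HandleScope handle_scope;
v8::Persistent<v8::Context> context = v8::Context::New();
context->AllowCodeGenerationFromStrings(false);
v8::Context::Scope context_scope(context);
v8::TryCatch try_catch;
v8::Script::Compile(v8::String::New("eval('1 + 1')"))->Run();
// With code generation from strings disabled, the eval above throws.
printf("eval blocked: %d\n", try_catch.HasCaught());
context.Dispose();
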
@@ -3895,11 +4295,11 @@
 
 
 static Local<External> ExternalNewImpl(void* data) {
-  return Utils::ToLocal(FACTORY->NewProxy(static_cast<i::Address>(data)));
+  return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
 }
 
 static void* ExternalValueImpl(i::Handle<i::Object> obj) {
-  return reinterpret_cast<void*>(i::Proxy::cast(*obj)->proxy());
+  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
 }
 
 
@@ -3924,8 +4324,8 @@
   i::Object* value = obj->GetInternalField(index);
   if (value->IsSmi()) {
     return i::Internals::GetExternalPointerFromSmi(value);
-  } else if (value->IsProxy()) {
-    return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
+  } else if (value->IsForeign()) {
+    return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
   } else {
     return NULL;
   }
@@ -3938,7 +4338,7 @@
   void* result;
   if (obj->IsSmi()) {
     result = i::Internals::GetExternalPointerFromSmi(*obj);
-  } else if (obj->IsProxy()) {
+  } else if (obj->IsForeign()) {
     result = ExternalValueImpl(obj);
   } else {
     result = NULL;
@@ -4088,6 +4488,9 @@
   if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
     return false;
   }
+  if (isolate->heap()->IsInGCPostProcessing()) {
+    return false;
+  }
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
     isolate->heap()->external_string_table()->AddString(*obj);
@@ -4120,6 +4523,9 @@
   if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
     return false;
   }
+  if (isolate->heap()->IsInGCPostProcessing()) {
+    return false;
+  }
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
     isolate->heap()->external_string_table()->AddString(*obj);
@@ -4154,6 +4560,73 @@
 }
 
 
+Local<v8::Value> v8::NumberObject::New(double value) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(isolate, "v8::NumberObject::New()");
+  LOG_API(isolate, "NumberObject::New");
+  ENTER_V8(isolate);
+  i::Handle<i::Object> number = isolate->factory()->NewNumber(value);
+  i::Handle<i::Object> obj = isolate->factory()->ToObject(number);
+  return Utils::ToLocal(obj);
+}
+
+
+double v8::NumberObject::NumberValue() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::NumberObject::NumberValue()")) return 0;
+  LOG_API(isolate, "NumberObject::NumberValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+  return jsvalue->value()->Number();
+}
+
+
+Local<v8::Value> v8::BooleanObject::New(bool value) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()");
+  LOG_API(isolate, "BooleanObject::New");
+  ENTER_V8(isolate);
+  i::Handle<i::Object> boolean(value ? isolate->heap()->true_value()
+                                     : isolate->heap()->false_value());
+  i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean);
+  return Utils::ToLocal(obj);
+}
+
+
+bool v8::BooleanObject::BooleanValue() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::BooleanObject::BooleanValue()")) return 0;
+  LOG_API(isolate, "BooleanObject::BooleanValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+  return jsvalue->value()->IsTrue();
+}
+
+
+Local<v8::Value> v8::StringObject::New(Handle<String> value) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(isolate, "v8::StringObject::New()");
+  LOG_API(isolate, "StringObject::New");
+  ENTER_V8(isolate);
+  i::Handle<i::Object> obj =
+      isolate->factory()->ToObject(Utils::OpenHandle(*value));
+  return Utils::ToLocal(obj);
+}
+
+
+Local<v8::String> v8::StringObject::StringValue() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::StringObject::StringValue()")) {
+    return Local<v8::String>();
+  }
+  LOG_API(isolate, "StringObject::StringValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+  return Utils::ToLocal(
+      i::Handle<i::String>(i::String::cast(jsvalue->value())));
+}
+
+
 Local<v8::Value> v8::Date::New(double time) {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::Date::New()");
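
NumberObject, BooleanObject and StringObject give embedders direct access to the JavaScript wrapper objects (the result of new Number(..) and friends), pairing with the Value::IsNumberObject/IsBooleanObject/IsStringObject predicates added earlier in this file. A minimal usage sketch, assuming an entered context:

v8::HandleScope handle_scope;
v8::Handle<v8::Value> n = v8::NumberObject::New(3.14);                  // new Number(3.14)
v8::Handle<v8::Value> b = v8::BooleanObject::New(true);                 // new Boolean(true)
v8::Handle<v8::Value> s = v8::StringObject::New(v8::String::New("hi")); // new String("hi")

if (n->IsNumberObject()) {
  printf("unboxed number: %g\n", v8::NumberObject::Cast(*n)->NumberValue());
}
if (b->IsBooleanObject()) {
  printf("unboxed boolean: %d\n", v8::BooleanObject::Cast(*b)->BooleanValue());
}
if (s->IsStringObject()) {
  v8::String::Utf8Value text(v8::StringObject::Cast(*s)->StringValue());
  printf("unboxed string: %s\n", *text);
}
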
@@ -4383,7 +4856,7 @@
   i::HandleScope scope(isolate);
   NeanderArray listeners(isolate->factory()->message_listeners());
   NeanderObject obj(2);
-  obj.set(0, *isolate->factory()->NewProxy(FUNCTION_ADDR(that)));
+  obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
   obj.set(1, data.IsEmpty() ?
              isolate->heap()->undefined_value() :
              *Utils::OpenHandle(*data));
@@ -4403,8 +4876,8 @@
     if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones
 
     NeanderObject listener(i::JSObject::cast(listeners.get(i)));
-    i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
-    if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
+    i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+    if (callback_obj->address() == FUNCTION_ADDR(that)) {
       listeners.set(i, isolate->heap()->undefined_value());
     }
   }
@@ -4551,73 +5024,20 @@
 
 
 void V8::PauseProfiler() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  PauseProfilerEx(PROFILER_MODULE_CPU);
-#endif
+  i::Isolate* isolate = i::Isolate::Current();
+  isolate->logger()->PauseProfiler();
 }
 
 
 void V8::ResumeProfiler() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  ResumeProfilerEx(PROFILER_MODULE_CPU);
-#endif
+  i::Isolate* isolate = i::Isolate::Current();
+  isolate->logger()->ResumeProfiler();
 }
 
 
 bool V8::IsProfilerPaused() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  return LOGGER->GetActiveProfilerModules() & PROFILER_MODULE_CPU;
-#else
-  return true;
-#endif
-}
-
-
-void V8::ResumeProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   i::Isolate* isolate = i::Isolate::Current();
-  if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
-    // Snapshot mode: resume modules, perform GC, then pause only
-    // those modules which haven't been started prior to making a
-    // snapshot.
-
-    // Make a GC prior to taking a snapshot.
-    isolate->heap()->CollectAllGarbage(false);
-    // Reset snapshot flag and CPU module flags.
-    flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
-    const int current_flags = isolate->logger()->GetActiveProfilerModules();
-    isolate->logger()->ResumeProfiler(flags, tag);
-    isolate->heap()->CollectAllGarbage(false);
-    isolate->logger()->PauseProfiler(~current_flags & flags, tag);
-  } else {
-    isolate->logger()->ResumeProfiler(flags, tag);
-  }
-#endif
-}
-
-
-void V8::PauseProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  LOGGER->PauseProfiler(flags, tag);
-#endif
-}
-
-
-int V8::GetActiveProfilerModules() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  return LOGGER->GetActiveProfilerModules();
-#else
-  return PROFILER_MODULE_NONE;
-#endif
-}
-
-
-int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
-  return LOGGER->GetLogLines(from_pos, dest_buf, max_size);
-#endif
-  return 0;
+  return isolate->logger()->IsProfilerPaused();
 }
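With the ENABLE_LOGGING_AND_PROFILING guards removed, these entry points always delegate to the per-isolate logger. A minimal sketch of the embedder-facing calls, assuming V8 has already been initialized:

    v8::V8::ResumeProfiler();                  // start collecting CPU ticks
    // ... run the code to be profiled ...
    v8::V8::PauseProfiler();
    bool paused = v8::V8::IsProfilerPaused();  // true at this point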
 
 
@@ -4631,7 +5051,7 @@
 void V8::TerminateExecution(int thread_id) {
   i::Isolate* isolate = i::Isolate::Current();
   if (!isolate->IsInitialized()) return;
-  API_ENTRY_CHECK("V8::TerminateExecution()");
+  API_ENTRY_CHECK(isolate, "V8::TerminateExecution()");
   // If the thread_id identifies the current thread just terminate
   // execution right away.  Otherwise, ask the thread manager to
   // terminate the thread with the given id if any.
@@ -4654,9 +5074,10 @@
 }
 
 
-bool V8::IsExecutionTerminating() {
-  i::Isolate* isolate = i::Isolate::Current();
-  return IsExecutionTerminatingCheck(isolate);
+bool V8::IsExecutionTerminating(Isolate* isolate) {
+  i::Isolate* i_isolate = isolate != NULL ?
+      reinterpret_cast<i::Isolate*>(isolate) : i::Isolate::Current();
+  return IsExecutionTerminatingCheck(i_isolate);
 }
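IsExecutionTerminating now takes an explicit isolate; passing NULL (assumed to be the default argument in v8.h) falls back to the current one. A hedged sketch, where worker_isolate is a hypothetical v8::Isolate* owned by the embedder:

    if (v8::V8::IsExecutionTerminating(worker_isolate)) {
      // A TerminateExecution() request is pending for that isolate:
      // unwind without re-entering JavaScript.
    }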
 
 
@@ -4695,26 +5116,30 @@
 }
 
 
-String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
+void Isolate::SetData(void* data) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->SetData(data);
+}
+
+void* Isolate::GetData() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->GetData();
+}
+
+
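SetData/GetData give embedders one opaque pointer slot per isolate. A minimal sketch, where EmbedderState and isolate are the embedder's own (hypothetical) struct and v8::Isolate*:

    struct EmbedderState { int request_count; };

    EmbedderState* state = new EmbedderState();
    isolate->SetData(state);
    // ... later, anywhere the same v8::Isolate* is available ...
    EmbedderState* s = static_cast<EmbedderState*>(isolate->GetData());
    s->request_count++;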
+String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
+    : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
-  if (obj.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-    return;
-  }
+  if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
-  if (str.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-  } else {
-    length_ = str->Utf8Length();
-    str_ = i::NewArray<char>(length_ + 1);
-    str->WriteUtf8(str_);
-  }
+  if (str.IsEmpty()) return;
+  length_ = str->Utf8Length();
+  str_ = i::NewArray<char>(length_ + 1);
+  str->WriteUtf8(str_);
 }
 
 
@@ -4723,26 +5148,19 @@
 }
 
 
-String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj) {
+String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
+    : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
-  if (obj.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-    return;
-  }
+  if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
-  if (str.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-  } else {
-    length_ = str->Length();
-    str_ = i::NewArray<char>(length_ + 1);
-    str->WriteAscii(str_);
-  }
+  if (str.IsEmpty()) return;
+  length_ = str->Length();
+  str_ = i::NewArray<char>(length_ + 1);
+  str->WriteAscii(str_);
 }
 
 
@@ -4751,26 +5169,19 @@
 }
 
 
-String::Value::Value(v8::Handle<v8::Value> obj) {
+String::Value::Value(v8::Handle<v8::Value> obj)
+    : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
-  if (obj.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-    return;
-  }
+  if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
-  if (str.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-  } else {
-    length_ = str->Length();
-    str_ = i::NewArray<uint16_t>(length_ + 1);
-    str->Write(str_);
-  }
+  if (str.IsEmpty()) return;
+  length_ = str->Length();
+  str_ = i::NewArray<uint16_t>(length_ + 1);
+  str->Write(str_);
 }
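All three value classes now initialize str_ and length_ in the constructor, so the early returns (dead VM, empty handle, failed ToString) consistently leave a NULL/0 result instead of relying on each branch to reset the members. A minimal usage sketch, assuming an open HandleScope, an entered Context, and some v8::Handle<v8::Value> named value:

    v8::String::Utf8Value utf8(value);
    if (*utf8 != NULL) {
      printf("%.*s\n", utf8.length(), *utf8);   // conversion succeeded
    }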
 
 
@@ -4884,11 +5295,12 @@
   isolate->set_debug_event_callback(that);
 
   i::HandleScope scope(isolate);
-  i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
+  i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
   if (that != NULL) {
-    proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
+    foreign =
+        isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
   }
-  isolate->debugger()->SetEventListener(proxy, Utils::OpenHandle(*data));
+  isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
   return true;
 }
 
@@ -4899,12 +5311,11 @@
   ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
+  i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
   if (that != NULL) {
-    proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(that));
+    foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
   }
-  isolate->debugger()->SetEventListener(proxy,
-                                                      Utils::OpenHandle(*data));
+  isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
   return true;
 }
 
@@ -5090,8 +5501,6 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 Handle<String> CpuProfileNode::GetFunctionName() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
@@ -5278,6 +5687,7 @@
       reinterpret_cast<const i::HeapGraphEdge*>(edge));
 }
 
+
 HeapGraphEdge::Type HeapGraphEdge::GetType() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
@@ -5346,19 +5756,10 @@
 uint64_t HeapGraphNode::GetId() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
-  ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
   return ToInternal(this)->id();
 }
 
 
-int HeapGraphNode::GetInstancesCount() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphNode::GetInstancesCount");
-  ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
-  return static_cast<int>(ToInternal(this)->id());
-}
-
-
 int HeapGraphNode::GetSelfSize() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
@@ -5410,6 +5811,16 @@
 }
 
 
+v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
+  i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
+  return v8::Handle<Value>(!object.is_null() ?
+                           ToApi<Value>(object) : ToApi<Value>(
+                               isolate->factory()->undefined_value()));
+}
+
+
 static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
   return const_cast<i::HeapSnapshot*>(
       reinterpret_cast<const i::HeapSnapshot*>(snapshot));
@@ -5465,6 +5876,21 @@
 }
 
 
+int HeapSnapshot::GetNodesCount() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
+  return ToInternal(this)->entries()->length();
+}
+
+
+const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
+  return reinterpret_cast<const HeapGraphNode*>(
+      ToInternal(this)->entries()->at(index));
+}
+
+
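GetNodesCount/GetNode allow a flat walk over every entry in a snapshot, and GetHeapValue (added above) maps an entry back to a live handle, or undefined if there is none. A hedged sketch, where snapshot is a const v8::HeapSnapshot* already taken via the heap profiler:

    for (int i = 0; i < snapshot->GetNodesCount(); i++) {
      const v8::HeapGraphNode* node = snapshot->GetNode(i);
      v8::Handle<v8::Value> value = node->GetHeapValue();
      if (!value->IsUndefined()) {
        // 'value' is the live heap object behind this snapshot entry.
      }
    }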
 void HeapSnapshot::Serialize(OutputStream* stream,
                              HeapSnapshot::SerializationFormat format) const {
   i::Isolate* isolate = i::Isolate::Current();
@@ -5516,9 +5942,6 @@
     case HeapSnapshot::kFull:
       internal_type = i::HeapSnapshot::kFull;
       break;
-    case HeapSnapshot::kAggregated:
-      internal_type = i::HeapSnapshot::kAggregated;
-      break;
     default:
       UNREACHABLE();
   }
@@ -5541,7 +5964,6 @@
                                                              callback);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 
 v8::Testing::StressType internal::Testing::stress_type_ =
@@ -5619,9 +6041,8 @@
 
 
 char* HandleScopeImplementer::ArchiveThread(char* storage) {
-  Isolate* isolate = Isolate::Current();
   v8::ImplementationUtilities::HandleScopeData* current =
-      isolate->handle_scope_data();
+      isolate_->handle_scope_data();
   handle_scope_data_ = *current;
   memcpy(storage, this, sizeof(*this));
 
@@ -5639,7 +6060,7 @@
 
 char* HandleScopeImplementer::RestoreThread(char* storage) {
   memcpy(this, storage, sizeof(*this));
-  *Isolate::Current()->handle_scope_data() = handle_scope_data_;
+  *isolate_->handle_scope_data() = handle_scope_data_;
   return storage + ArchiveSpacePerThread();
 }
 
@@ -5665,7 +6086,7 @@
 
 void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
   v8::ImplementationUtilities::HandleScopeData* current =
-      Isolate::Current()->handle_scope_data();
+      isolate_->handle_scope_data();
   handle_scope_data_ = *current;
   IterateThis(v);
 }
diff --git a/src/api.h b/src/api.h
index d38a1d5..07723cb 100644
--- a/src/api.h
+++ b/src/api.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -115,14 +115,14 @@
 template <typename T> static inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return reinterpret_cast<T>(
-      reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
+      reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
 }
 
 
 template <typename T>
 static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
-  return FACTORY->NewProxy(
+  return FACTORY->NewForeign(
       reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
 }
 
@@ -182,7 +182,7 @@
   static inline Local<Array> ToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
   static inline Local<External> ToLocal(
-      v8::internal::Handle<v8::internal::Proxy> obj);
+      v8::internal::Handle<v8::internal::Foreign> obj);
   static inline Local<Message> MessageToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<StackTrace> StackTraceToLocal(
@@ -236,7 +236,7 @@
       OpenHandle(const v8::Signature* sig);
   static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
       OpenHandle(const v8::TypeSwitch* that);
-  static inline v8::internal::Handle<v8::internal::Proxy>
+  static inline v8::internal::Handle<v8::internal::Foreign>
       OpenHandle(const v8::External* that);
 };
 
@@ -273,7 +273,7 @@
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Proxy, External)
+MAKE_TO_LOCAL(ToLocal, Foreign, External)
 MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
 MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
 MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -311,7 +311,7 @@
 MAKE_OPEN_HANDLE(Function, JSFunction)
 MAKE_OPEN_HANDLE(Message, JSObject)
 MAKE_OPEN_HANDLE(Context, Context)
-MAKE_OPEN_HANDLE(External, Proxy)
+MAKE_OPEN_HANDLE(External, Foreign)
 MAKE_OPEN_HANDLE(StackTrace, JSArray)
 MAKE_OPEN_HANDLE(StackFrame, JSObject)
 
@@ -396,16 +396,20 @@
 // data. In multithreaded V8 programs this data is copied in and out of storage
 // so that the currently executing thread always has its own copy of this
 // data.
-ISOLATED_CLASS HandleScopeImplementer {
+class HandleScopeImplementer {
  public:
-
-  HandleScopeImplementer()
-      : blocks_(0),
+  explicit HandleScopeImplementer(Isolate* isolate)
+      : isolate_(isolate),
+        blocks_(0),
         entered_contexts_(0),
         saved_contexts_(0),
         spare_(NULL),
         call_depth_(0) { }
 
+  ~HandleScopeImplementer() {
+    DeleteArray(spare_);
+  }
+
   // Threading support for handle data.
   static int ArchiveSpacePerThread();
   char* RestoreThread(char* from);
@@ -460,6 +464,7 @@
     ASSERT(call_depth_ == 0);
   }
 
+  Isolate* isolate_;
   List<internal::Object**> blocks_;
   // Used as a stack to keep track of entered contexts.
   List<Handle<Object> > entered_contexts_;
diff --git a/src/apinatives.js b/src/apinatives.js
index ca2bbf5..e94da9f 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -49,7 +49,10 @@
       return InstantiateFunction(data, name);
     case kNewObjectTag:
       var Constructor = %GetTemplateField(data, kApiConstructorOffset);
-      var result = Constructor ? new (Instantiate(Constructor))() : {};
+      // Note: Do not directly use a function template as a condition; our
+      // internal ToBoolean doesn't handle that!
+      var result = typeof Constructor === 'undefined' ?
+          {} : new (Instantiate(Constructor))();
       ConfigureTemplateInstance(result, data);
       result = %ToFastProperties(result);
       return result;
@@ -73,10 +76,19 @@
       if (name) %FunctionSetName(fun, name);
       cache[serialNumber] = fun;
       var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
-      fun.prototype = prototype ? Instantiate(prototype) : {};
+      var flags = %GetTemplateField(data, kApiFlagOffset);
+      // Note: Do not directly use an object template as a condition; our
+      // internal ToBoolean doesn't handle that!
+      fun.prototype = typeof prototype === 'undefined' ?
+          {} : Instantiate(prototype);
+      if (flags & (1 << kReadOnlyPrototypeBit)) {
+        %FunctionSetReadOnlyPrototype(fun);
+      }
       %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
       var parent = %GetTemplateField(data, kApiParentTemplateOffset);
-      if (parent) {
+      // Note: Do not directly use a function template as a condition; our
+      // internal ToBoolean doesn't handle that!
+      if (!(typeof parent === 'undefined')) {
         var parent_fun = Instantiate(parent);
         fun.prototype.__proto__ = parent_fun.prototype;
       }
diff --git a/src/arguments.h b/src/arguments.h
index a7a30e2..e9a3270 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -28,6 +28,8 @@
 #ifndef V8_ARGUMENTS_H_
 #define V8_ARGUMENTS_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -61,10 +63,19 @@
     return Handle<S>(reinterpret_cast<S**>(value));
   }
 
+  int smi_at(int index) {
+    return Smi::cast((*this)[index])->value();
+  }
+
+  double number_at(int index) {
+    return (*this)[index]->Number();
+  }
+
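smi_at/number_at are small conveniences over the existing operator[]. A hedged sketch of a runtime-function body that uses them (the surrounding RUNTIME_FUNCTION plumbing is assumed and not shown):

    int index = args.smi_at(0);        // same as Smi::cast(args[0])->value()
    double scale = args.number_at(1);  // same as args[1]->Number()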
   // Get the total number of arguments including the receiver.
   int length() const { return length_; }
 
   Object** arguments() { return arguments_; }
+
  private:
   int length_;
   Object** arguments_;
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 3e19a45..5ad7b5a 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license above has been modified
 // significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 #ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
 #define V8_ARM_ASSEMBLER_ARM_INL_H_
@@ -46,6 +46,13 @@
 namespace internal {
 
 
+int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
+  ASSERT(!reg.is(kDoubleRegZero));
+  ASSERT(!reg.is(kScratchDoubleReg));
+  return reg.code();
+}
+
+
 void RelocInfo::apply(intptr_t delta) {
   if (RelocInfo::IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index fd8e8b5..0ec3692 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -51,24 +51,30 @@
 unsigned CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-#ifdef __arm__
+// Get the CPU features enabled by the build. For cross compilation the
+// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
+// can be defined to enable ARMv7 and VFPv3 instructions when building the
+// snapshot.
 static uint64_t CpuFeaturesImpliedByCompiler() {
   uint64_t answer = 0;
 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
   answer |= 1u << ARMv7;
 #endif  // def CAN_USE_ARMV7_INSTRUCTIONS
+#ifdef CAN_USE_VFP_INSTRUCTIONS
+  answer |= 1u << VFP3 | 1u << ARMv7;
+#endif  // def CAN_USE_VFP_INSTRUCTIONS
+
+#ifdef __arm__
   // If the compiler is allowed to use VFP then we can use VFP too in our code
   // generation even when generating snapshots.  This won't work for cross
   // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
   answer |= 1u << VFP3 | 1u << ARMv7;
 #endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
-#ifdef CAN_USE_VFP_INSTRUCTIONS
-  answer |= 1u << VFP3 | 1u << ARMv7;
-#endif  // def CAN_USE_VFP_INSTRUCTIONS
+#endif  // def __arm__
+
   return answer;
 }
-#endif  // def __arm__
 
 
 void CpuFeatures::Probe() {
@@ -76,6 +82,18 @@
 #ifdef DEBUG
   initialized_ = true;
 #endif
+
+  // Get the features implied by the OS and the compiler settings. This is the
+  // minimal set of features which is also allowed for generated code in the
+  // snapshot.
+  supported_ |= OS::CpuFeaturesImpliedByPlatform();
+  supported_ |= CpuFeaturesImpliedByCompiler();
+
+  if (Serializer::enabled()) {
+    // No probing for features if we might serialize (generate snapshot).
+    return;
+  }
+
 #ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
   // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
@@ -87,13 +105,8 @@
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  if (Serializer::enabled()) {
-    supported_ |= OS::CpuFeaturesImpliedByPlatform();
-    supported_ |= CpuFeaturesImpliedByCompiler();
-    return;  // No features if we might serialize.
-  }
-
-  if (OS::ArmCpuHasFeature(VFP3)) {
+  // Probe for additional features not already known to be available.
+  if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
     // This implementation also sets the VFP flags if runtime
     // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
     // 0406B, page A1-6.
@@ -101,7 +114,7 @@
     found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
   }
 
-  if (OS::ArmCpuHasFeature(ARMv7)) {
+  if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
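The reordered probing above first takes the build-implied bits and then only adds bits that are still missing (and never probes at all while serializing a snapshot). A self-contained toy illustration of that bitmask pattern, using its own enum rather than V8's:

    enum ToyFeature { TOY_VFP3 = 0, TOY_ARMV7 = 1 };
    unsigned supported = 0;
    supported |= 1u << TOY_ARMV7;          // implied by compiler/platform
    bool serializing = false;              // building a snapshot?
    if (!serializing && !(supported & (1u << TOY_VFP3))) {
      supported |= 1u << TOY_VFP3;         // found only by runtime probing
    }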
@@ -276,9 +289,7 @@
 Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
     : AssemblerBase(arg_isolate),
       positions_recorder_(this),
-      allow_peephole_optimization_(false),
       emit_debug_code_(FLAG_debug_code) {
-  allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -309,12 +320,13 @@
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-  num_prinfo_ = 0;
+  num_pending_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
-  last_const_pool_end_ = 0;
+  first_const_pool_use_ = -1;
   last_bound_pos_ = 0;
+  ClearRecordedAstId();
 }
 
 
@@ -334,7 +346,7 @@
 void Assembler::GetCode(CodeDesc* desc) {
   // Emit constant pool if necessary.
   CheckConstPool(true, false);
-  ASSERT(num_prinfo_ == 0);
+  ASSERT(num_pending_reloc_info_ == 0);
 
   // Setup code descriptor.
   desc->buffer = buffer_;
@@ -680,11 +692,11 @@
 void Assembler::next(Label* L) {
   ASSERT(L->is_linked());
   int link = target_at(L->pos());
-  if (link > 0) {
-    L->link_to(link);
-  } else {
-    ASSERT(link == kEndOfChain);
+  if (link == kEndOfChain) {
     L->Unuse();
+  } else {
+    ASSERT(link >= 0);
+    L->link_to(link);
   }
 }
 
@@ -861,7 +873,7 @@
   emit(instr | rn.code()*B16 | rd.code()*B12);
   if (rn.is(pc) || x.rm_.is(pc)) {
     // Block constant pool emission for one instruction after reading pc.
-    BlockConstPoolBefore(pc_offset() + kInstrSize);
+    BlockConstPoolFor(1);
   }
 }
 
@@ -985,7 +997,7 @@
 
   // Block the emission of the constant pool, since the branch instruction must
   // be emitted at the pc offset recorded by the label.
-  BlockConstPoolBefore(pc_offset() + kInstrSize);
+  BlockConstPoolFor(1);
   return target_pos - (pc_offset() + kPcLoadDelta);
 }
 
@@ -1082,20 +1094,6 @@
 void Assembler::add(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
   addrmod1(cond | ADD | s, src1, dst, src2);
-
-  // Eliminate pattern: push(r), pop()
-  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
-  //   add(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  if (can_peephole_optimize(2) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
 }
 
 
@@ -1400,195 +1398,11 @@
     positions_recorder()->WriteRecordedPositions();
   }
   addrmod2(cond | B26 | L, dst, src);
-
-  // Eliminate pattern: push(ry), pop(rx)
-  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
-  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
-  // Both instructions can be eliminated if ry = rx.
-  // If ry != rx, a register copy from ry to rx is inserted
-  // after eliminating the push and the pop instructions.
-  if (can_peephole_optimize(2)) {
-    Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(push_instr) && IsPop(pop_instr)) {
-      if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
-        // For consecutive push and pop on different registers,
-        // we delete both the push & pop and insert a register move.
-        // push ry, pop rx --> mov rx, ry
-        Register reg_pushed, reg_popped;
-        reg_pushed = GetRd(push_instr);
-        reg_popped = GetRd(pop_instr);
-        pc_ -= 2 * kInstrSize;
-        // Insert a mov instruction, which is better than a pair of push & pop
-        mov(reg_popped, reg_pushed);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
-                 pc_offset());
-        }
-      } else {
-        // For consecutive push and pop on the same register,
-        // both the push and the pop can be deleted.
-        pc_ -= 2 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(2)) {
-    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if ((IsStrRegFpOffset(str_instr) &&
-         IsLdrRegFpOffset(ldr_instr)) ||
-       (IsStrRegFpNegOffset(str_instr) &&
-         IsLdrRegFpNegOffset(ldr_instr))) {
-      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
-            (str_instr & kLdrStrInstrArgumentMask)) {
-        // Pattern: Ldr/str same fp+offset, same register.
-        //
-        // The following:
-        // str rx, [fp, #-12]
-        // ldr rx, [fp, #-12]
-        //
-        // Becomes:
-        // str rx, [fp, #-12]
-
-        pc_ -= 1 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
-        }
-      } else if ((ldr_instr & kLdrStrOffsetMask) ==
-                 (str_instr & kLdrStrOffsetMask)) {
-        // Pattern: Ldr/str same fp+offset, different register.
-        //
-        // The following:
-        // str rx, [fp, #-12]
-        // ldr ry, [fp, #-12]
-        //
-        // Becomes:
-        // str rx, [fp, #-12]
-        // mov ry, rx
-
-        Register reg_stored, reg_loaded;
-        reg_stored = GetRd(str_instr);
-        reg_loaded = GetRd(ldr_instr);
-        pc_ -= 1 * kInstrSize;
-        // Insert a mov instruction, which is better than ldr.
-        mov(reg_loaded, reg_stored);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(3)) {
-    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
-    if (IsPush(mem_write_instr) &&
-        IsPop(mem_read_instr)) {
-      if ((IsLdrRegFpOffset(ldr_instr) ||
-        IsLdrRegFpNegOffset(ldr_instr))) {
-        if (Instruction::RdValue(mem_write_instr) ==
-                                  Instruction::RdValue(mem_read_instr)) {
-          // Pattern: push & pop from/to same register,
-          // with a fp+offset ldr in between
-          //
-          // The following:
-          // str rx, [sp, #-4]!
-          // ldr rz, [fp, #-24]
-          // ldr rx, [sp], #+4
-          //
-          // Becomes:
-          // if(rx == rz)
-          //   delete all
-          // else
-          //   ldr rz, [fp, #-24]
-
-          if (Instruction::RdValue(mem_write_instr) ==
-              Instruction::RdValue(ldr_instr)) {
-            pc_ -= 3 * kInstrSize;
-          } else {
-            pc_ -= 3 * kInstrSize;
-            // Reinsert back the ldr rz.
-            emit(ldr_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
-          }
-        } else {
-          // Pattern: push & pop from/to different registers
-          // with a fp+offset ldr in between
-          //
-          // The following:
-          // str rx, [sp, #-4]!
-          // ldr rz, [fp, #-24]
-          // ldr ry, [sp], #+4
-          //
-          // Becomes:
-          // if(ry == rz)
-          //   mov ry, rx;
-          // else if(rx != rz)
-          //   ldr rz, [fp, #-24]
-          //   mov ry, rx
-          // else if((ry != rz) || (rx == rz)) becomes:
-          //   mov ry, rx
-          //   ldr rz, [fp, #-24]
-
-          Register reg_pushed, reg_popped;
-          if (Instruction::RdValue(mem_read_instr) ==
-              Instruction::RdValue(ldr_instr)) {
-            reg_pushed = GetRd(mem_write_instr);
-            reg_popped = GetRd(mem_read_instr);
-            pc_ -= 3 * kInstrSize;
-            mov(reg_popped, reg_pushed);
-          } else if (Instruction::RdValue(mem_write_instr) !=
-                     Instruction::RdValue(ldr_instr)) {
-            reg_pushed = GetRd(mem_write_instr);
-            reg_popped = GetRd(mem_read_instr);
-            pc_ -= 3 * kInstrSize;
-            emit(ldr_instr);
-            mov(reg_popped, reg_pushed);
-          } else if ((Instruction::RdValue(mem_read_instr) !=
-                      Instruction::RdValue(ldr_instr)) ||
-                     (Instruction::RdValue(mem_write_instr) ==
-                      Instruction::RdValue(ldr_instr))) {
-            reg_pushed = GetRd(mem_write_instr);
-            reg_popped = GetRd(mem_read_instr);
-            pc_ -= 3 * kInstrSize;
-            mov(reg_popped, reg_pushed);
-            emit(ldr_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
-          }
-        }
-      }
-    }
-  }
 }
 
 
 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
   addrmod2(cond | B26, src, dst);
-
-  // Eliminate pattern: pop(), push(r)
-  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
-  // ->  str r, [sp, 0], al
-  if (can_peephole_optimize(2) &&
-     // Pattern.
-     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
-     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
-    pc_ -= 2 * kInstrSize;
-    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
 }
 
 
@@ -1679,15 +1493,17 @@
 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #ifndef __arm__
   ASSERT(code >= kDefaultStopCode);
-  // The Simulator will handle the stop instruction and get the message address.
-  // It expects to find the address just after the svc instruction.
-  BlockConstPoolFor(2);
-  if (code >= 0) {
-    svc(kStopCode + code, cond);
-  } else {
-    svc(kStopCode + kMaxStopCode, cond);
+  {
+    // The Simulator will handle the stop instruction and get the message
+    // address. It expects to find the address just after the svc instruction.
+    BlockConstPoolScope block_const_pool(this);
+    if (code >= 0) {
+      svc(kStopCode + code, cond);
+    } else {
+      svc(kStopCode + kMaxStopCode, cond);
+    }
+    emit(reinterpret_cast<Instr>(msg));
   }
-  emit(reinterpret_cast<Instr>(msg));
 #else  // def __arm__
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   if (cond != al) {
@@ -2592,11 +2408,6 @@
 }
 
 
-void Assembler::BlockConstPoolFor(int instructions) {
-  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
 // Debugging.
 void Assembler::RecordJSReturn() {
   positions_recorder()->WriteRecordedPositions();
@@ -2660,8 +2471,8 @@
   // to relocate any emitted relocation entries.
 
   // Relocate pending relocation entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
+  for (int i = 0; i < num_pending_reloc_info_; i++) {
+    RelocInfo& rinfo = pending_reloc_info_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
     if (rinfo.rmode() != RelocInfo::JS_RETURN) {
@@ -2675,7 +2486,7 @@
   // No relocation info should be pending while using db. db is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
-  ASSERT(num_prinfo_ == 0);
+  ASSERT(num_pending_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -2686,7 +2497,7 @@
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
-  ASSERT(num_prinfo_ == 0);
+  ASSERT(num_pending_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
@@ -2703,11 +2514,14 @@
            || RelocInfo::IsPosition(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
-    ASSERT(num_prinfo_ < kMaxNumPRInfo);
-    prinfo_[num_prinfo_++] = rinfo;
+    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+    if (num_pending_reloc_info_ == 0) {
+      first_const_pool_use_ = pc_offset();
+    }
+    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
-    BlockConstPoolBefore(pc_offset() + kInstrSize);
+    BlockConstPoolFor(1);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
     // Don't record external references unless the heap will be serialized.
@@ -2722,116 +2536,123 @@
       }
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
-    reloc_info_writer.Write(&rinfo);
+    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+      ClearRecordedAstId();
+      reloc_info_writer.Write(&reloc_info_with_ast_id);
+    } else {
+      reloc_info_writer.Write(&rinfo);
+    }
+  }
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+  int pc_limit = pc_offset() + instructions * kInstrSize;
+  if (no_const_pool_before_ < pc_limit) {
+    // If there are some pending entries, the constant pool cannot be blocked
+    // further than first_const_pool_use_ + kMaxDistToPool.
+    ASSERT((num_pending_reloc_info_ == 0) ||
+           (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+    no_const_pool_before_ = pc_limit;
+  }
+
+  if (next_buffer_check_ < no_const_pool_before_) {
+    next_buffer_check_ = no_const_pool_before_;
   }
 }
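BlockConstPoolFor only raises no_const_pool_before_; longer regions use the nesting counter via StartBlockConstPool/EndBlockConstPool, which BlockConstPoolScope wraps in RAII form. A self-contained toy mirror of that scope pattern (toy classes, not the real assembler):

    class ToyAssembler {
     public:
      ToyAssembler() : blocked_nesting_(0) {}
      void StartBlockConstPool() { ++blocked_nesting_; }
      void EndBlockConstPool() { --blocked_nesting_; }
      bool is_const_pool_blocked() const { return blocked_nesting_ > 0; }
     private:
      int blocked_nesting_;
    };

    class ToyBlockConstPoolScope {
     public:
      explicit ToyBlockConstPoolScope(ToyAssembler* assem) : assem_(assem) {
        assem_->StartBlockConstPool();   // blocked for the scope's lifetime
      }
      ~ToyBlockConstPoolScope() { assem_->EndBlockConstPool(); }
     private:
      ToyAssembler* assem_;
    };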
 
 
 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
-  // Calculate the offset of the next check. It will be overwritten
-  // when a const pool is generated or when const pools are being
-  // blocked for a specific range.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
-  // There is nothing to do if there are no pending relocation info entries.
-  if (num_prinfo_ == 0) return;
-
-  // We emit a constant pool at regular intervals of about kDistBetweenPools
-  // or when requested by parameter force_emit (e.g. after each function).
-  // We prefer not to emit a jump unless the max distance is reached or if we
-  // are running low on slots, which can happen if a lot of constants are being
-  // emitted (e.g. --debug-code and many static references).
-  int dist = pc_offset() - last_const_pool_end_;
-  if (!force_emit && dist < kMaxDistBetweenPools &&
-      (require_jump || dist < kDistBetweenPools) &&
-      // TODO(1236125): Cleanup the "magic" number below. We know that
-      // the code generation will test every kCheckConstIntervalInst.
-      // Thus we are safe as long as we generate less than 7 constant
-      // entries per instruction.
-      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
-    return;
-  }
-
-  // If we did not return by now, we need to emit the constant pool soon.
-
-  // However, some small sequences of instructions must not be broken up by the
-  // insertion of a constant pool; such sequences are protected by setting
-  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
-  // both checked here. Also, recursive calls to CheckConstPool are blocked by
-  // no_const_pool_before_.
-  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as
-    // possible.
-    if (const_pool_blocked_nesting_ > 0) {
-      next_buffer_check_ = pc_offset() + kInstrSize;
-    } else {
-      next_buffer_check_ = no_const_pool_before_;
-    }
-
+  // Some short sequences of instructions must not be broken up by constant
+  // pool emission; such sequences are protected by calls to
+  // BlockConstPoolFor and BlockConstPoolScope.
+  if (is_const_pool_blocked()) {
     // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
     return;
   }
 
-  int jump_instr = require_jump ? kInstrSize : 0;
+  // There is nothing to do if there are no pending constant pool entries.
+  if (num_pending_reloc_info_ == 0)  {
+    // Calculate the offset of the next check.
+    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+    return;
+  }
+
+  // We emit a constant pool when:
+  //  * requested to do so by parameter force_emit (e.g. after each function).
+  //  * the distance to the first instruction accessing the constant pool is
+  //    kAvgDistToPool or more.
+  //  * no jump is required and the distance to the first instruction accessing
+  //    the constant pool is at least kMaxDistToPool / 2.
+  ASSERT(first_const_pool_use_ >= 0);
+  int dist = pc_offset() - first_const_pool_use_;
+  if (!force_emit && dist < kAvgDistToPool &&
+      (require_jump || (dist < (kMaxDistToPool / 2)))) {
+    return;
+  }
 
   // Check that the code buffer is large enough before emitting the constant
-  // pool and relocation information (include the jump over the pool and the
-  // constant pool marker).
-  int max_needed_space =
-      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
-  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+  // pool (including the jump over the pool, the constant pool marker, and
+  // the gap to the relocation information).
+  int jump_instr = require_jump ? kInstrSize : 0;
+  int needed_space = jump_instr + kInstrSize +
+                     num_pending_reloc_info_ * kInstrSize + kGap;
+  while (buffer_space() <= needed_space) GrowBuffer();
 
-  // Block recursive calls to CheckConstPool.
-  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
-                       num_prinfo_*kInstrSize);
-  // Don't bother to check for the emit calls below.
-  next_buffer_check_ = no_const_pool_before_;
+  {
+    // Block recursive calls to CheckConstPool.
+    BlockConstPoolScope block_const_pool(this);
 
-  // Emit jump over constant pool if necessary.
-  Label after_pool;
-  if (require_jump) b(&after_pool);
-
-  RecordComment("[ Constant Pool");
-
-  // Put down constant pool marker "Undefined instruction" as specified by
-  // A5.6 (ARMv7) Instruction set encoding.
-  emit(kConstantPoolMarker | num_prinfo_);
-
-  // Emit constant pool entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION &&
-           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
-    Instr instr = instr_at(rinfo.pc());
-
-    // Instruction to patch must be a ldr/str [pc, #offset].
-    // P and U set, B and W clear, Rn == pc, offset12 still 0.
-    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
-           (2*B25 | P | U | pc.code()*B16));
-    int delta = pc_ - rinfo.pc() - 8;
-    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
-    if (delta < 0) {
-      instr &= ~U;
-      delta = -delta;
+    // Emit jump over constant pool if necessary.
+    Label after_pool;
+    if (require_jump) {
+      b(&after_pool);
     }
-    ASSERT(is_uint12(delta));
-    instr_at_put(rinfo.pc(), instr + delta);
-    emit(rinfo.data());
-  }
-  num_prinfo_ = 0;
-  last_const_pool_end_ = pc_offset();
 
-  RecordComment("]");
+    RecordComment("[ Constant Pool");
 
-  if (after_pool.is_linked()) {
-    bind(&after_pool);
+    // Put down constant pool marker "Undefined instruction" as specified by
+    // A5.6 (ARMv7) Instruction set encoding.
+    emit(kConstantPoolMarker | num_pending_reloc_info_);
+
+    // Emit constant pool entries.
+    for (int i = 0; i < num_pending_reloc_info_; i++) {
+      RelocInfo& rinfo = pending_reloc_info_[i];
+      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+             rinfo.rmode() != RelocInfo::POSITION &&
+             rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+
+      Instr instr = instr_at(rinfo.pc());
+      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+      ASSERT(IsLdrPcImmediateOffset(instr) &&
+             GetLdrRegisterImmediateOffset(instr) == 0);
+
+      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+      // 0 is the smallest delta:
+      //   ldr rd, [pc, #0]
+      //   constant pool marker
+      //   data
+      ASSERT(is_uint12(delta));
+
+      instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+      emit(rinfo.data());
+    }
+
+    num_pending_reloc_info_ = 0;
+    first_const_pool_use_ = -1;
+
+    RecordComment("]");
+
+    if (after_pool.is_linked()) {
+      bind(&after_pool);
+    }
   }
 
   // Since a constant pool was just emitted, move the check offset forward by
   // the standard interval.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 3f2daab..eeadaca 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 // A light-weight ARM Assembler
 // Generates user mode instructions for the ARM architecture up to version 5
@@ -72,6 +72,7 @@
 struct Register {
   static const int kNumRegisters = 16;
   static const int kNumAllocatableRegisters = 8;
+  static const int kSizeInBytes = 4;
 
   static int ToAllocationIndex(Register reg) {
     ASSERT(reg.code() < kNumAllocatableRegisters);
@@ -166,27 +167,26 @@
 
 // Double word VFP register.
 struct DwVfpRegister {
-  // d0 has been excluded from allocation. This is following ia32
-  // where xmm0 is excluded. This should be revisited.
-  // Currently d0 is used as a scratch register.
-  // d1 has also been excluded from allocation to be used as a scratch
-  // register as well.
   static const int kNumRegisters = 16;
-  static const int kNumAllocatableRegisters = 15;
+  // A few double registers are reserved: one as a scratch register and one to
+  // hold 0.0, which does not fit in the immediate field of vmov instructions.
+  //  d14: 0.0
+  //  d15: scratch register.
+  static const int kNumReservedRegisters = 2;
+  static const int kNumAllocatableRegisters = kNumRegisters -
+      kNumReservedRegisters;
 
-  static int ToAllocationIndex(DwVfpRegister reg) {
-    ASSERT(reg.code() != 0);
-    return reg.code() - 1;
-  }
+  inline static int ToAllocationIndex(DwVfpRegister reg);
 
   static DwVfpRegister FromAllocationIndex(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
-    return from_code(index + 1);
+    return from_code(index);
   }
 
   static const char* AllocationIndexToString(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
+      "d0",
       "d1",
       "d2",
       "d3",
@@ -199,9 +199,7 @@
       "d10",
       "d11",
       "d12",
-      "d13",
-      "d14",
-      "d15"
+      "d13"
     };
     return names[index];
   }
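Worked through: kNumRegisters is 16 and kNumReservedRegisters is 2 (d14 holds 0.0, d15 is the scratch register), so kNumAllocatableRegisters = 16 - 2 = 14 and the allocatable set is exactly d0..d13. ToAllocationIndex therefore becomes the identity mapping reg.code() (see the inline definition added to assembler-arm-inl.h earlier in this patch), replacing the old reg.code() - 1 that skipped d0.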
@@ -305,6 +303,8 @@
 // Aliases for double registers.
 const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
 const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
+const DwVfpRegister kDoubleRegZero = d14;
+const DwVfpRegister kScratchDoubleReg = d15;
 
 
 // Coprocessor register
@@ -375,8 +375,10 @@
   // immediate
   INLINE(explicit Operand(int32_t immediate,
          RelocInfo::Mode rmode = RelocInfo::NONE));
+  INLINE(static Operand Zero()) {
+    return Operand(static_cast<int32_t>(0));
+  }
   INLINE(explicit Operand(const ExternalReference& f));
-  INLINE(explicit Operand(const char* s));
   explicit Operand(Handle<Object> handle);
   INLINE(explicit Operand(Smi* value));
 
@@ -454,6 +456,7 @@
 
   Register rn() const { return rn_; }
   Register rm() const { return rm_; }
+  AddrMode am() const { return am_; }
 
   bool OffsetIsUint12Encodable() const {
     return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
@@ -503,6 +506,7 @@
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
+
    public:
     explicit Scope(CpuFeature f) {
       unsigned mask = 1u << f;
@@ -522,10 +526,12 @@
         isolate_->set_enabled_cpu_features(old_enabled_);
       }
     }
+
    private:
     Isolate* isolate_;
     unsigned old_enabled_;
 #else
+
    public:
     explicit Scope(CpuFeature f) {}
 #endif
@@ -1135,8 +1141,13 @@
   void jmp(Label* L) { b(L, al); }
 
   // Check the code size generated from label to here.
-  int InstructionsGeneratedSince(Label* l) {
-    return (pc_offset() - l->pos()) / kInstrSize;
+  int SizeOfCodeGeneratedSince(Label* label) {
+    return pc_offset() - label->pos();
+  }
+
+  // Check the number of instructions generated from label to here.
+  int InstructionsGeneratedSince(Label* label) {
+    return SizeOfCodeGeneratedSince(label) / kInstrSize;
   }
 
   // Check whether an immediate fits an addressing mode 1 instruction.
@@ -1158,10 +1169,6 @@
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
   };
 
-  // Postpone the generation of the constant pool for the specified number of
-  // instructions.
-  void BlockConstPoolFor(int instructions);
-
   // Debugging
 
   // Mark address of the ExitJSFrame code.
@@ -1170,6 +1177,20 @@
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot();
 
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void SetRecordedAstId(unsigned ast_id) {
+    ASSERT(recorded_ast_id_ == kNoASTId);
+    recorded_ast_id_ = ast_id;
+  }
+
+  unsigned RecordedAstId() {
+    ASSERT(recorded_ast_id_ != kNoASTId);
+    return recorded_ast_id_;
+  }
+
+  void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
+
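A hedged sketch of the intended call-site protocol; only SetRecordedAstId/RecordedAstId/ClearRecordedAstId come from this header, and the call-emitting helper shown is hypothetical:

    // Record the AST id just before emitting the call so that RecordRelocInfo()
    // can attach it to the CODE_TARGET_WITH_ID entry and then clear it.
    assembler->SetRecordedAstId(ast_id);   // asserts no id is already pending
    EmitCallWithRecordedId(assembler);     // hypothetical helper; internally ends
                                           // up in RecordRelocInfo(), which writes
                                           // RecordedAstId() and clears it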
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --code-comments to enable.
   void RecordComment(const char* msg);
@@ -1185,12 +1206,6 @@
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
-  bool can_peephole_optimize(int instructions) {
-    if (!allow_peephole_optimization_) return false;
-    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
-    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
-  }
-
   // Read/patch instructions
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
@@ -1223,10 +1238,25 @@
   static int GetCmpImmediateRawImmediate(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
 
-  // Check if is time to emit a constant pool for pending reloc info entries
+  // Constants in pools are accessed via pc relative addressing, which can
+  // reach +/-4KB thereby defining a maximum distance between the instruction
+  // and the accessed constant.
+  static const int kMaxDistToPool = 4*KB;
+  static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
+
+  // Postpone the generation of the constant pool for the specified number of
+  // instructions.
+  void BlockConstPoolFor(int instructions);
+
+  // Check if it is time to emit a constant pool.
   void CheckConstPool(bool force_emit, bool require_jump);
 
  protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  unsigned recorded_ast_id_;
+
   bool emit_debug_code() const { return emit_debug_code_; }
 
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1243,18 +1273,37 @@
   // Patch branch instruction at pos to branch to given branch target pos
   void target_at_put(int pos, int target_pos);
 
-  // Block the emission of the constant pool before pc_offset
-  void BlockConstPoolBefore(int pc_offset) {
-    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested, but must be followed by an equal
+  // number of calls to EndBlockConstPool.
+  void StartBlockConstPool() {
+    if (const_pool_blocked_nesting_++ == 0) {
+      // Prevent constant pool checks happening by setting the next check to
+      // the biggest possible offset.
+      next_buffer_check_ = kMaxInt;
+    }
   }
 
-  void StartBlockConstPool() {
-    const_pool_blocked_nesting_++;
-  }
+  // Resume constant pool emission. Needs to be called as many times as
+  // StartBlockConstPool to have an effect.
   void EndBlockConstPool() {
-    const_pool_blocked_nesting_--;
+    if (--const_pool_blocked_nesting_ == 0) {
+      // Check the constant pool hasn't been blocked for too long.
+      ASSERT((num_pending_reloc_info_ == 0) ||
+             (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+      // Two cases:
+      //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
+      //    still blocked
+      //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
+      //    trigger a check.
+      next_buffer_check_ = no_const_pool_before_;
+    }
   }
-  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
+
+  bool is_const_pool_blocked() const {
+    return (const_pool_blocked_nesting_ > 0) ||
+           (pc_offset() < no_const_pool_before_);
+  }
 
  private:
   // Code buffer:
@@ -1264,9 +1313,6 @@
   // True if the assembler owns the buffer, false if buffer is external.
   bool own_buffer_;
 
-  // Buffer size and constant pool distance are checked together at regular
-  // intervals of kBufferCheckInterval emitted bytes
-  static const int kBufferCheckInterval = 1*KB/2;
   int next_buffer_check_;  // pc offset of next buffer check
 
   // Code generation
@@ -1291,40 +1337,41 @@
   // expensive. By default we only check again once a number of instructions
   // has been generated. That also means that the sizing of the buffers is not
   // an exact science, and that we rely on some slop to not overrun buffers.
-  static const int kCheckConstIntervalInst = 32;
-  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+  static const int kCheckPoolIntervalInst = 32;
+  static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
 
 
-  // Pools are emitted after function return and in dead code at (more or less)
-  // regular intervals of kDistBetweenPools bytes
-  static const int kDistBetweenPools = 1*KB;
-
-  // Constants in pools are accessed via pc relative addressing, which can
-  // reach +/-4KB thereby defining a maximum distance between the instruction
-  // and the accessed constant. We satisfy this constraint by limiting the
-  // distance between pools.
-  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+  // Average distance between a constant pool and the first instruction
+  // accessing the constant pool. A longer distance should result in less
+  // I-cache pollution.
+  // In practice the distance will be smaller since constant pool emission is
+  // forced after function return and sometimes after unconditional branches.
+  static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
 
   // Emission of the constant pool may be blocked in some code sequences.
   int const_pool_blocked_nesting_;  // Block emission if this is not zero.
   int no_const_pool_before_;  // Block emission before this pc offset.
 
-  // Keep track of the last emitted pool to guarantee a maximal distance
-  int last_const_pool_end_;  // pc offset following the last constant pool
+  // Keep track of the first instruction requiring a constant pool entry
+  // since the previous constant pool was emitted.
+  int first_const_pool_use_;
 
   // Relocation info generation
   // Each relocation is encoded as a variable size value
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
   RelocInfoWriter reloc_info_writer;
+
   // Relocation info records are also used during code generation as temporary
   // containers for constants and code target addresses until they are emitted
   // to the constant pool. These pending relocation info records are temporarily
   // stored in a separate buffer until a constant pool is emitted.
   // If every instruction in a long sequence is accessing the pool, we need one
   // pending relocation entry per instruction.
-  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
-  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
-  int num_prinfo_;  // number of pending reloc info entries in the buffer
+
+  // The buffer of pending relocation info.
+  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+  // Number of pending reloc info entries in the buffer.
+  int num_pending_reloc_info_;
 
   // The bound position, before this we cannot do instruction elimination.
   int last_bound_pos_;
@@ -1356,7 +1403,6 @@
   friend class BlockConstPoolScope;
 
   PositionsRecorder positions_recorder_;
-  bool allow_peephole_optimization_;
   bool emit_debug_code_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
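Worked out with kInstrSize == 4 on ARM: kMaxDistToPool = 4 KB gives kMaxNumPendingRelocInfo = 4096 / 4 = 1024 pending entries, kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize = 32 * 4 = 128 bytes, and therefore kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval = 4096 - 128 = 3968 bytes.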
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 5235dd3..ae8cb56 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -138,7 +138,7 @@
   __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
 
   // Clear the heap tag on the elements array.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
 
   // Initialize the FixedArray and fill it with holes. FixedArray length is
@@ -207,7 +207,7 @@
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
   __ bind(&not_empty);
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ mov(elements_array_end,
          Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
   __ add(elements_array_end,
@@ -243,7 +243,7 @@
          FieldMemOperand(result, JSArray::kElementsOffset));
 
   // Clear the heap tag on the elements array.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ sub(elements_array_storage,
          elements_array_storage,
          Operand(kHeapObjectTag));
@@ -255,7 +255,7 @@
   __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
   __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ tst(array_size, array_size);
   // Length of the FixedArray is the number of pre-allocated elements if
   // the actual JSArray has length 0 and the size of the JSArray for non-empty
@@ -272,7 +272,7 @@
   // result: JSObject
   // elements_array_storage: elements array element storage
   // array_size: smi-tagged size of elements array
-  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
   __ add(elements_array_end,
          elements_array_storage,
          Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -337,14 +337,14 @@
   __ bind(&argc_one_or_more);
   __ cmp(r0, Operand(1));
   __ b(ne, &argc_two_or_more);
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
   __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
   __ b(ne, call_generic_code);
 
   // Handle construction of an empty array of a certain size. Bail out if size
   // is too large to actually allocate an elements array.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
   __ b(ge, call_generic_code);
 
@@ -571,7 +571,7 @@
   // Is it a String?
   __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  ASSERT(kNotStringTag != 0);
+  STATIC_ASSERT(kNotStringTag != 0);
   __ tst(r3, Operand(kIsNotStringMask));
   __ b(ne, &convert_argument);
   __ mov(argument, r0);
@@ -584,7 +584,7 @@
   __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
   __ EnterInternalFrame();
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
   __ LeaveInternalFrame();
   __ pop(function);
   __ mov(argument, r0);
@@ -619,8 +619,7 @@
 
   Label non_function_call;
   // Check that the function is not a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &non_function_call);
+  __ JumpIfSmi(r1, &non_function_call);
   // Check that the function is a JSFunction.
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &non_function_call);
@@ -636,6 +635,7 @@
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(r5, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
 }
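
JumpIfSmi simply folds the two-instruction tst/b.eq pattern deleted above into one MacroAssembler call. A compilable sketch against a mock assembler interface (the mock is ours; only the two emitted instructions are taken from the removed code):

// Mock of the assembler interface, just to show the shape of the fold.
struct Label {};
enum Condition { eq, ne };
const int kSmiTagMask = 1;

struct MockAssembler {
  void tst(int reg, int mask) { (void)reg; (void)mask; }      // sets flags from reg & mask
  void b(Condition cond, Label* target) { (void)cond; (void)target; }  // conditional branch

  // JumpIfSmi(value, target) == "tst value, #kSmiTagMask; b eq, target".
  void JumpIfSmi(int value_reg, Label* smi_label) {
    tst(value_reg, kSmiTagMask);
    b(eq, smi_label);
  }
};
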
@@ -674,8 +674,7 @@
     // Load the initial map and verify that it is in fact a map.
     // r1: constructor function
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ tst(r2, Operand(kSmiTagMask));
-    __ b(eq, &rt_call);
+    __ JumpIfSmi(r2, &rt_call);
     __ CompareObjectType(r2, r3, r4, MAP_TYPE);
     __ b(ne, &rt_call);
 
@@ -914,10 +913,11 @@
         masm->isolate()->builtins()->HandleApiCallConstruct();
     ParameterCount expected(0);
     __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
   } else {
     ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Pop the function from the stack.
@@ -944,12 +944,11 @@
   // sp[0]: receiver (newly allocated object)
   // sp[1]: constructor function
   // sp[2]: number of arguments (smi-tagged)
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &use_receiver);
+  __ JumpIfSmi(r0, &use_receiver);
 
   // If the type of the result (stored in its map) is less than
-  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
   __ b(ge, &exit);
 
   // Throw away the result of the constructor invocation and use the
@@ -1007,10 +1006,7 @@
   // Set up the context from the function argument.
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
-  // Set up the roots register.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
-  __ mov(r10, Operand(roots_address));
+  __ InitializeRootRegister();
 
   // Push the function and the receiver onto the stack.
   __ push(r1);
@@ -1045,11 +1041,11 @@
   // Invoke the code and pass argc as r0.
   __ mov(r0, Operand(r3));
   if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall(),
-            RelocInfo::CODE_TARGET);
+    __ Call(masm->isolate()->builtins()->JSConstructCall());
   } else {
     ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Exit the JS frame and remove the parameters (except function), and return.
@@ -1077,12 +1073,17 @@
 
   // Preserve the function.
   __ push(r1);
+  // Push call kind information.
+  __ push(r5);
 
   // Push the function on the stack as the argument to the runtime function.
   __ push(r1);
   __ CallRuntime(Runtime::kLazyCompile, 1);
   // Calculate the entry point.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore call kind information.
+  __ pop(r5);
   // Restore saved function.
   __ pop(r1);
 
@@ -1100,12 +1101,17 @@
 
   // Preserve the function.
   __ push(r1);
+  // Push call kind information.
+  __ push(r5);
 
   // Push the function on the stack as the argument to the runtime function.
   __ push(r1);
   __ CallRuntime(Runtime::kLazyRecompile, 1);
   // Calculate the entry point.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore call kind information.
+  __ pop(r5);
   // Restore saved function.
   __ pop(r1);
 
@@ -1221,48 +1227,50 @@
   // 2. Get the function to call (passed as receiver) from the stack, check
   //    if it is a function.
   // r0: actual number of arguments
-  Label non_function;
+  Label slow, non_function;
   __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &non_function);
+  __ JumpIfSmi(r1, &non_function);
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &non_function);
+  __ b(ne, &slow);
 
   // 3a. Patch the first argument if necessary when calling a function.
   // r0: actual number of arguments
   // r1: function
   Label shift_arguments;
+  __ mov(r4, Operand(0, RelocInfo::NONE));  // indicate regular JS_FUNCTION
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
     // Do not transform the receiver for strict mode functions.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+    __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                              kSmiTagSize)));
     __ b(ne, &shift_arguments);
 
+    // Do not transform the receiver for native functions (hints are in r3).
+    __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ b(ne, &shift_arguments);
+
     // Compute the receiver in non-strict mode.
     __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
     __ ldr(r2, MemOperand(r2, -kPointerSize));
     // r0: actual number of arguments
     // r1: function
     // r2: first argument
-    __ tst(r2, Operand(kSmiTagMask));
-    __ b(eq, &convert_to_object);
+    __ JumpIfSmi(r2, &convert_to_object);
 
-    __ LoadRoot(r3, Heap::kNullValueRootIndex);
-    __ cmp(r2, r3);
-    __ b(eq, &use_global_receiver);
     __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
     __ cmp(r2, r3);
     __ b(eq, &use_global_receiver);
+    __ LoadRoot(r3, Heap::kNullValueRootIndex);
+    __ cmp(r2, r3);
+    __ b(eq, &use_global_receiver);
 
-    __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
-    __ b(lt, &convert_to_object);
-    __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
-    __ b(le, &shift_arguments);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+    __ b(ge, &shift_arguments);
 
     __ bind(&convert_to_object);
     __ EnterInternalFrame();  // In order to preserve argument count.
@@ -1270,14 +1278,15 @@
     __ push(r0);
 
     __ push(r2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
     __ mov(r2, r0);
 
     __ pop(r0);
     __ mov(r0, Operand(r0, ASR, kSmiTagSize));
     __ LeaveInternalFrame();
-    // Restore the function to r1.
+    // Restore the function to r1, and the flag to r4.
     __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+    __ mov(r4, Operand(0, RelocInfo::NONE));
     __ jmp(&patch_receiver);
 
     // Use the global receiver object from the called function as the
@@ -1297,23 +1306,30 @@
     __ jmp(&shift_arguments);
   }
 
-  // 3b. Patch the first argument when calling a non-function.  The
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ mov(r4, Operand(1, RelocInfo::NONE));  // indicate function proxy
+  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(eq, &shift_arguments);
+  __ bind(&non_function);
+  __ mov(r4, Operand(2, RelocInfo::NONE));  // indicate non-function
+
+  // 3c. Patch the first argument when calling a non-function.  The
   //     CALL_NON_FUNCTION builtin expects the non-function callee as
   //     receiver, so overwrite the first argument which will ultimately
   //     become the receiver.
   // r0: actual number of arguments
   // r1: function
-  __ bind(&non_function);
+  // r4: call type (0: JS function, 1: function proxy, 2: non-function)
   __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
   __ str(r1, MemOperand(r2, -kPointerSize));
-  // Clear r1 to indicate a non-function being called.
-  __ mov(r1, Operand(0, RelocInfo::NONE));
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
   //    the original first argument the new receiver.
   // r0: actual number of arguments
   // r1: function
+  // r4: call type (0: JS function, 1: function proxy, 2: non-function)
   __ bind(&shift_arguments);
   { Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
@@ -1331,14 +1347,27 @@
     __ pop();
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
   // r0: actual number of arguments
   // r1: function
-  { Label function;
-    __ tst(r1, r1);
-    __ b(ne, &function);
+  // r4: call type (0: JS function, 1: function proxy, 2: non-function)
+  { Label function, non_proxy;
+    __ tst(r4, r4);
+    __ b(eq, &function);
     // Expected number of arguments is 0 for CALL_NON_FUNCTION.
     __ mov(r2, Operand(0, RelocInfo::NONE));
+    __ SetCallKind(r5, CALL_AS_METHOD);
+    __ cmp(r4, Operand(1));
+    __ b(ne, &non_proxy);
+
+    __ push(r1);  // re-add proxy object as additional argument
+    __ add(r0, r0, Operand(1));
+    __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
     __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
@@ -1355,13 +1384,15 @@
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
   __ mov(r2, Operand(r2, ASR, kSmiTagSize));
   __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ SetCallKind(r5, CALL_AS_METHOD);
   __ cmp(r2, r0);  // Check formal and actual parameter counts.
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET,
           ne);
 
   ParameterCount expected(0);
-  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
+  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
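
The value threaded through r4 in this builtin acts as a three-way dispatch flag; spelled out as an enum for readability (the names are ours, the numeric values come from the register comments above):

enum ArgumentsCallType {
  kJSFunction = 0,     // invoke the function directly
  kFunctionProxy = 1,  // re-push the proxy and call CALL_FUNCTION_PROXY
  kNonFunction = 2     // call CALL_NON_FUNCTION via the adaptor trampoline
};
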
 
 
@@ -1378,9 +1409,9 @@
   __ push(r0);
   __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
   __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying need to catch
+  // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
@@ -1396,7 +1427,7 @@
   __ ldr(r1, MemOperand(fp, kFunctionOffset));
   __ push(r1);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
   // End of stack check.
 
   // Push current limit and index.
@@ -1405,25 +1436,34 @@
   __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
   __ push(r1);
 
-  // Change context eagerly to get the right global object if necessary.
-  __ ldr(r0, MemOperand(fp, kFunctionOffset));
-  __ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
-  // Load the shared function info while the function is still in r0.
-  __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
-
-  // Compute the receiver.
-  Label call_to_object, use_global_receiver, push_receiver;
+  // Get the receiver.
   __ ldr(r0, MemOperand(fp, kRecvOffset));
 
+  // Check that the function is a JS function (otherwise it must be a proxy).
+  Label push_receiver;
+  __ ldr(r1, MemOperand(fp, kFunctionOffset));
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &push_receiver);
+
+  // Change context eagerly to get the right global object if necessary.
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  // Load the shared function info while the function is still in r1.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+
+  // Compute the receiver.
   // Do not transform the receiver for strict mode functions.
-  __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+  Label call_to_object, use_global_receiver;
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                            kSmiTagSize)));
   __ b(ne, &push_receiver);
 
+  // Do not transform the receiver for strict mode functions.
+  __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+  __ b(ne, &push_receiver);
+
   // Compute the receiver in non-strict mode.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &call_to_object);
+  __ JumpIfSmi(r0, &call_to_object);
   __ LoadRoot(r1, Heap::kNullValueRootIndex);
   __ cmp(r0, r1);
   __ b(eq, &use_global_receiver);
@@ -1433,16 +1473,15 @@
 
   // Check if the receiver is already a JavaScript object.
   // r0: receiver
-  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &call_to_object);
-  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
-  __ b(le, &push_receiver);
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+  __ b(ge, &push_receiver);
 
   // Convert the receiver to a regular object.
   // r0: receiver
   __ bind(&call_to_object);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ b(&push_receiver);
 
   // Use the current global receiver object as the receiver.
@@ -1489,15 +1528,33 @@
   __ b(ne, &loop);
 
   // Invoke the function.
+  Label call_proxy;
   ParameterCount actual(r0);
   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
   __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ InvokeFunction(r1, actual, CALL_FUNCTION);
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &call_proxy);
+  __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   // Tear down the internal frame and remove function, receiver and args.
   __ LeaveInternalFrame();
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Jump(lr);
+
+  // Invoke the function proxy.
+  __ bind(&call_proxy);
+  __ push(r1);  // add function proxy as last argument
+  __ add(r0, r0, Operand(1));
+  __ mov(r2, Operand(0, RelocInfo::NONE));
+  __ SetCallKind(r5, CALL_AS_METHOD);
+  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+  __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+
+  __ LeaveInternalFrame();
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Jump(lr);
 }
 
 
@@ -1529,6 +1586,7 @@
   //  -- r1 : function (passed through to callee)
   //  -- r2 : expected number of arguments
   //  -- r3 : code entry to call
+  //  -- r5 : call kind information
   // -----------------------------------
 
   Label invoke, dont_adapt_arguments;
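
Taken together, these hunks extend the builtin calling convention with a call-kind flag in r5, set via SetCallKind and consumed by InvokeCode/InvokeFunction alongside the existing register roles listed above. A one-line summary of the flag as an enum (CALL_AS_METHOD appears in the diff; CALL_AS_FUNCTION is assumed to be its counterpart):

// r0 - actual number of arguments
// r1 - function being called
// r2 - expected number of arguments
// r3 - code entry to call
// r5 - call kind flag, set via SetCallKind()
enum CallKind { CALL_AS_METHOD, CALL_AS_FUNCTION };
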
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index fad9339..36450c9 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -55,23 +55,30 @@
                                            Register rhs);
 
 
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+                                   Register scratch1, Register scratch2,
+                                   Label* not_a_heap_number) {
+  __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch1, scratch2);
+  __ b(ne, not_a_heap_number);
+}
+
+
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in r0.
   Label check_heap_number, call_builtin;
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &check_heap_number);
+  __ JumpIfNotSmi(r0, &check_heap_number);
   __ Ret();
 
   __ bind(&check_heap_number);
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(r1, ip);
-  __ b(ne, &call_builtin);
+  EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
   __ Ret();
 
   __ bind(&call_builtin);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
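
EmitCheckForHeapNumber works because every heap object stores its map pointer in its first field, so loading HeapObject::kMapOffset and comparing against the canonical heap-number map is enough to identify a HeapNumber. A standalone sketch of that check (the layout and names here are illustrative, not V8's classes):

// Every heap object begins with its map pointer (HeapObject::kMapOffset).
struct TaggedHeapObjectLayout {
  const void* map;
  // payload follows, e.g. the 8-byte double of a HeapNumber
};

inline bool LooksLikeHeapNumber(const TaggedHeapObjectLayout* object,
                                const void* heap_number_map) {
  return object->map == heap_number_map;
}
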
 
 
@@ -150,7 +157,7 @@
   __ ldr(r3, MemOperand(sp, 0));
 
   // Setup the object header.
-  __ LoadRoot(r2, Heap::kContextMapRootIndex);
+  __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ mov(r2, Operand(Smi::FromInt(length)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
@@ -158,11 +165,10 @@
   // Setup the fixed slots.
   __ mov(r1, Operand(Smi::FromInt(0)));
   __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
-  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
 
-  // Copy the global object from the surrounding context.
+  // Copy the global object from the previous context.
   __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
@@ -179,7 +185,7 @@
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
-  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
 }
 
 
@@ -298,12 +304,6 @@
   }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
-  void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
 };
 
 
@@ -364,136 +364,6 @@
 }
 
 
-class FloatingPointHelper : public AllStatic {
- public:
-
-  enum Destination {
-    kVFPRegisters,
-    kCoreRegisters
-  };
-
-
-  // Loads smis from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
-  // floating point registers VFP3 must be supported. If core registers are
-  // requested when VFP3 is supported d6 and d7 will be scratched.
-  static void LoadSmis(MacroAssembler* masm,
-                       Destination destination,
-                       Register scratch1,
-                       Register scratch2);
-
-  // Loads objects from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
-  // floating point registers VFP3 must be supported. If core registers are
-  // requested when VFP3 is supported d6 and d7 will still be scratched. If
-  // either r0 or r1 is not a number (not smi and not heap number object) the
-  // not_number label is jumped to with r0 and r1 intact.
-  static void LoadOperands(MacroAssembler* masm,
-                           FloatingPointHelper::Destination destination,
-                           Register heap_number_map,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* not_number);
-
-  // Convert the smi or heap number in object to an int32 using the rules
-  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
-  // and brought into the range -2^31 .. +2^31 - 1.
-  static void ConvertNumberToInt32(MacroAssembler* masm,
-                                   Register object,
-                                   Register dst,
-                                   Register heap_number_map,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   DwVfpRegister double_scratch,
-                                   Label* not_int32);
-
-  // Load the number from object into double_dst in the double format.
-  // Control will jump to not_int32 if the value cannot be exactly represented
-  // by a 32-bit integer.
-  // Floating point value in the 32-bit integer range that are not exact integer
-  // won't be loaded.
-  static void LoadNumberAsInt32Double(MacroAssembler* masm,
-                                      Register object,
-                                      Destination destination,
-                                      DwVfpRegister double_dst,
-                                      Register dst1,
-                                      Register dst2,
-                                      Register heap_number_map,
-                                      Register scratch1,
-                                      Register scratch2,
-                                      SwVfpRegister single_scratch,
-                                      Label* not_int32);
-
-  // Loads the number from object into dst as a 32-bit integer.
-  // Control will jump to not_int32 if the object cannot be exactly represented
-  // by a 32-bit integer.
-  // Floating point value in the 32-bit integer range that are not exact integer
-  // won't be converted.
-  // scratch3 is not used when VFP3 is supported.
-  static void LoadNumberAsInt32(MacroAssembler* masm,
-                                Register object,
-                                Register dst,
-                                Register heap_number_map,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                DwVfpRegister double_scratch,
-                                Label* not_int32);
-
-  // Generate non VFP3 code to check if a double can be exactly represented by a
-  // 32-bit integer. This does not check for 0 or -0, which need
-  // to be checked for separately.
-  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
-  // through otherwise.
-  // src1 and src2 will be cloberred.
-  //
-  // Expected input:
-  // - src1: higher (exponent) part of the double value.
-  // - src2: lower (mantissa) part of the double value.
-  // Output status:
-  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
-  // - src2: contains 1.
-  // - other registers are clobbered.
-  static void DoubleIs32BitInteger(MacroAssembler* masm,
-                                   Register src1,
-                                   Register src2,
-                                   Register dst,
-                                   Register scratch,
-                                   Label* not_int32);
-
-  // Generates code to call a C function to do a double operation using core
-  // registers. (Used when VFP3 is not supported.)
-  // This code never falls through, but returns with a heap number containing
-  // the result in r0.
-  // Register heapnumber_result must be a heap number in which the
-  // result of the operation will be stored.
-  // Requires the following layout on entry:
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
-                                          Token::Value op,
-                                          Register heap_number_result,
-                                          Register scratch);
-
- private:
-  static void LoadNumber(MacroAssembler* masm,
-                         FloatingPointHelper::Destination destination,
-                         Register object,
-                         DwVfpRegister dst,
-                         Register dst1,
-                         Register dst2,
-                         Register heap_number_map,
-                         Register scratch1,
-                         Register scratch2,
-                         Label* not_number);
-};
-
-
 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
@@ -516,11 +386,11 @@
     __ mov(scratch1, Operand(r0));
     ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
     __ push(lr);
-    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    __ Call(stub1.GetCode());
     // Write Smi from r1 to r1 and r0 in double format.
     __ mov(scratch1, Operand(r1));
     ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
-    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ Call(stub2.GetCode());
     __ pop(lr);
   }
 }
@@ -597,7 +467,7 @@
     __ mov(scratch1, Operand(object));
     ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
     __ push(lr);
-    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+    __ Call(stub.GetCode());
     __ pop(lr);
   }
 
@@ -651,6 +521,80 @@
 }
 
 
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+                                             Register int_scratch,
+                                             Destination destination,
+                                             DwVfpRegister double_dst,
+                                             Register dst1,
+                                             Register dst2,
+                                             Register scratch2,
+                                             SwVfpRegister single_scratch) {
+  ASSERT(!int_scratch.is(scratch2));
+  ASSERT(!int_scratch.is(dst1));
+  ASSERT(!int_scratch.is(dst2));
+
+  Label done;
+
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(single_scratch, int_scratch);
+    __ vcvt_f64_s32(double_dst, single_scratch);
+    if (destination == kCoreRegisters) {
+      __ vmov(dst1, dst2, double_dst);
+    }
+  } else {
+    Label fewer_than_20_useful_bits;
+    // Expected output:
+    // |         dst2            |         dst1            |
+    // | s |   exp   |              mantissa               |
+
+    // Check for zero.
+    __ cmp(int_scratch, Operand::Zero());
+    __ mov(dst2, int_scratch);
+    __ mov(dst1, int_scratch);
+    __ b(eq, &done);
+
+    // Preload the sign of the value.
+    __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
+    // Get the absolute value of the object (as an unsigned integer).
+    __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
+
+    // Get mantissa[51:20].
+
+    // Get the position of the first set bit.
+    __ CountLeadingZeros(dst1, int_scratch, scratch2);
+    __ rsb(dst1, dst1, Operand(31));
+
+    // Set the exponent.
+    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+    __ Bfi(dst2, scratch2, scratch2,
+        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+    // Clear the first non null bit.
+    __ mov(scratch2, Operand(1));
+    __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
+
+    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+    // Get the number of bits to set in the lower part of the mantissa.
+    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+    __ b(mi, &fewer_than_20_useful_bits);
+    // Set the higher 20 bits of the mantissa.
+    __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
+    __ rsb(scratch2, scratch2, Operand(32));
+    __ mov(dst1, Operand(int_scratch, LSL, scratch2));
+    __ b(&done);
+
+    __ bind(&fewer_than_20_useful_bits);
+    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
+    __ orr(dst2, dst2, scratch2);
+    // Set dst1 to 0.
+    __ mov(dst1, Operand::Zero());
+  }
+  __ bind(&done);
+}
+
+
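
The non-VFP branch of ConvertIntToDouble assembles the IEEE-754 bit pattern by hand: the sign comes from kSignMask, the exponent is the position of the leading one plus the bias (1023), and the remaining bits are shifted into the mantissa split across the two destination registers. A host-side sketch of the same construction (helper name and the main() check are ours):

#include <cstdint>
#include <cstdio>

// Build the high/low 32-bit halves of an IEEE-754 double from an int32,
// mirroring the non-VFP path above (illustration only).
static void IntToDoubleBits(int32_t value, uint32_t* hi, uint32_t* lo) {
  *hi = 0;
  *lo = 0;
  if (value == 0) return;                    // +0.0 is the all-zero pattern.
  uint32_t sign = 0;
  uint32_t abs = static_cast<uint32_t>(value);
  if (value < 0) {
    sign = 0x80000000u;                      // HeapNumber::kSignMask.
    abs = 0u - abs;
  }
  int msb = 31;
  while (((abs >> msb) & 1u) == 0u) msb--;   // Position of the leading one.
  uint32_t exponent = static_cast<uint32_t>(msb) + 1023u;  // kExponentBias.
  abs &= ~(1u << msb);                       // Drop the implicit leading 1.
  uint64_t mantissa = static_cast<uint64_t>(abs) << (52 - msb);
  *hi = sign | (exponent << 20) | static_cast<uint32_t>(mantissa >> 32);
  *lo = static_cast<uint32_t>(mantissa);
}

int main() {
  uint32_t hi, lo;
  IntToDoubleBits(-5, &hi, &lo);
  std::printf("%08x %08x\n", hi, lo);        // c0140000 00000000 == -5.0
  return 0;
}
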
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
@@ -672,63 +616,8 @@
 
   __ JumpIfNotSmi(object, &obj_is_not_smi);
   __ SmiUntag(scratch1, object);
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(single_scratch, scratch1);
-    __ vcvt_f64_s32(double_dst, single_scratch);
-    if (destination == kCoreRegisters) {
-      __ vmov(dst1, dst2, double_dst);
-    }
-  } else {
-    Label fewer_than_20_useful_bits;
-    // Expected output:
-    // |         dst2            |         dst1            |
-    // | s |   exp   |              mantissa               |
-
-    // Check for zero.
-    __ cmp(scratch1, Operand(0));
-    __ mov(dst2, scratch1);
-    __ mov(dst1, scratch1);
-    __ b(eq, &done);
-
-    // Preload the sign of the value.
-    __ and_(dst2, scratch1, Operand(HeapNumber::kSignMask), SetCC);
-    // Get the absolute value of the object (as an unsigned integer).
-    __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
-
-    // Get mantisssa[51:20].
-
-    // Get the position of the first set bit.
-    __ CountLeadingZeros(dst1, scratch1, scratch2);
-    __ rsb(dst1, dst1, Operand(31));
-
-    // Set the exponent.
-    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
-    __ Bfi(dst2, scratch2, scratch2,
-        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
-    // Clear the first non null bit.
-    __ mov(scratch2, Operand(1));
-    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst1));
-
-    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
-    // Get the number of bits to set in the lower part of the mantissa.
-    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
-    __ b(mi, &fewer_than_20_useful_bits);
-    // Set the higher 20 bits of the mantissa.
-    __ orr(dst2, dst2, Operand(scratch1, LSR, scratch2));
-    __ rsb(scratch2, scratch2, Operand(32));
-    __ mov(dst1, Operand(scratch1, LSL, scratch2));
-    __ b(&done);
-
-    __ bind(&fewer_than_20_useful_bits);
-    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
-    __ mov(scratch2, Operand(scratch1, LSL, scratch2));
-    __ orr(dst2, dst2, scratch2);
-    // Set dst1 to 0.
-    __ mov(dst1, Operand(0));
-  }
-
+  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+                     scratch2, single_scratch);
   __ b(&done);
 
   __ bind(&obj_is_not_smi);
@@ -768,7 +657,7 @@
     // Check for 0 and -0.
     __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
     __ orr(scratch1, scratch1, Operand(dst2));
-    __ cmp(scratch1, Operand(0));
+    __ cmp(scratch1, Operand::Zero());
     __ b(eq, &done);
 
     // Check that the value can be exactly represented by a 32-bit integer.
@@ -841,7 +730,7 @@
     // Check for 0 and -0.
     __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
     __ orr(dst, scratch2, Operand(dst));
-    __ cmp(dst, Operand(0));
+    __ cmp(dst, Operand::Zero());
     __ b(eq, &done);
 
     DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
@@ -858,7 +747,7 @@
     // Set the sign.
     __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
     __ tst(scratch1, Operand(HeapNumber::kSignMask));
-    __ rsb(dst, dst, Operand(0), LeaveCC, mi);
+    __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
   }
 
   __ bind(&done);
@@ -943,14 +832,25 @@
   // Push the current return address before the C call. Return will be
   // through pop(pc) below.
   __ push(lr);
-  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
+  __ PrepareCallCFunction(0, 2, scratch);
+  if (masm->use_eabi_hardfloat()) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(d0, r0, r1);
+    __ vmov(d1, r2, r3);
+  }
   // Call C routine that may not cause GC or other trouble.
   __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   4);
+                   0, 2);
   // Store answer in the overwritable heap number. Double returned in
-  // registers r0 and r1.
-  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
-                                  HeapNumber::kValueOffset));
+  // registers r0 and r1 or in d0.
+  if (masm->use_eabi_hardfloat()) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vstr(d0,
+            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  } else {
+    __ Strd(r0, r1, FieldMemOperand(heap_number_result,
+                                    HeapNumber::kValueOffset));
+  }
   // Place heap_number_result in r0 and return to the pushed return address.
   __ mov(r0, Operand(heap_number_result));
   __ pop(pc);
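
With EABI hardfloat the callee expects double arguments in d0/d1 instead of r0..r3, which is what the added vmov pairs arrange; each vmov just reinterprets two core-register words as one 64-bit double. The equivalent host-side operation, assuming the usual ARM word order (low word in r0/r2, high word in r1/r3):

#include <cstdint>
#include <cstring>

// What vmov(d0, r0, r1) moves: r0 holds the low (mantissa) word and r1 the
// high (sign/exponent) word of the same double.
static double CombineDoubleHalves(uint32_t lo, uint32_t hi) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}
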
@@ -1023,14 +923,14 @@
     // They are both equal and they are not both Smis so both of them are not
     // Smis.  If it's not a heap number, then return equal.
     if (cond == lt || cond == gt) {
-      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+      __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
       __ b(ge, slow);
     } else {
       __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
       __ b(eq, &heap_number);
       // Comparing JS objects with <=, >= is complicated.
       if (cond != eq) {
-        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+        __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
         __ b(ge, slow);
         // Normally here we fall through to return_equal, but undefined is
         // special: (undefined == undefined) == true, but
@@ -1121,8 +1021,7 @@
          (lhs.is(r1) && rhs.is(r0)));
 
   Label rhs_is_smi;
-  __ tst(rhs, Operand(kSmiTagMask));
-  __ b(eq, &rhs_is_smi);
+  __ JumpIfSmi(rhs, &rhs_is_smi);
 
   // Lhs is a Smi.  Check whether the rhs is a heap number.
   __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
@@ -1153,7 +1052,7 @@
     // Convert lhs to a double in r2, r3.
     __ mov(r7, Operand(lhs));
     ConvertToDoubleStub stub1(r3, r2, r7, r6);
-    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    __ Call(stub1.GetCode());
     // Load rhs to a double in r0, r1.
     __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
     __ pop(lr);
@@ -1195,7 +1094,7 @@
     // Convert rhs to a double in r0, r1.
     __ mov(r7, Operand(rhs));
     ConvertToDoubleStub stub2(r1, r0, r7, r6);
-    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ Call(stub2.GetCode());
     __ pop(lr);
   }
   // Fall through to both_loaded_as_doubles.
@@ -1292,8 +1191,14 @@
     // Call a native function to do a comparison between two non-NaNs.
     // Call C routine that may not cause GC or other trouble.
     __ push(lr);
-    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ PrepareCallCFunction(0, 2, r5);
+    if (masm->use_eabi_hardfloat()) {
+      CpuFeatures::Scope scope(VFP3);
+      __ vmov(d0, r0, r1);
+      __ vmov(d1, r2, r3);
+    }
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+                     0, 2);
     __ pop(pc);  // Return.
   }
 }
@@ -1306,14 +1211,14 @@
     ASSERT((lhs.is(r0) && rhs.is(r1)) ||
            (lhs.is(r1) && rhs.is(r0)));
 
-    // If either operand is a JSObject or an oddball value, then they are
+    // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
-    // FIRST_JS_OBJECT_TYPE.
-    __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
+    // FIRST_SPEC_OBJECT_TYPE.
+    __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
     __ b(lt, &first_non_object);
 
     // Return non-zero (r0 is not zero)
@@ -1326,7 +1231,7 @@
     __ cmp(r2, Operand(ODDBALL_TYPE));
     __ b(eq, &return_not_equal);
 
-    __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
+    __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
     __ b(ge, &return_not_equal);
 
     // Check for oddballs: true, false, null, undefined.
@@ -1403,9 +1308,9 @@
   __ Ret();
 
   __ bind(&object_test);
-  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
   __ b(lt, not_both_strings);
-  __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
+  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
   __ b(lt, not_both_strings);
   // If both objects are undetectable, they are equal. Otherwise, they
   // are not equal, since they are different objects and an object is not
@@ -1457,7 +1362,7 @@
                   scratch1,
                   Heap::kHeapNumberMapRootIndex,
                   not_found,
-                  true);
+                  DONT_DO_SMI_CHECK);
 
       STATIC_ASSERT(8 == kDoubleSize);
       __ add(scratch1,
@@ -1544,8 +1449,7 @@
   if (include_smi_compare_) {
     Label not_two_smis, smi_done;
     __ orr(r2, r1, r0);
-    __ tst(r2, Operand(kSmiTagMask));
-    __ b(ne, &not_two_smis);
+    __ JumpIfNotSmi(r2, &not_two_smis);
     __ mov(r1, Operand(r1, ASR, 1));
     __ sub(r0, r1, Operand(r0, ASR, 1));
     __ Ret();
@@ -1568,8 +1472,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(0, Smi::FromInt(0));
   __ and_(r2, lhs_, Operand(rhs_));
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(ne, &not_smis);
+  __ JumpIfNotSmi(r2, &not_smis);
   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
   // 1) Return the answer.
   // 2) Go to slow.
@@ -1656,13 +1559,22 @@
   __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
 
   __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
-  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+  if (cc_ == eq) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      lhs_,
                                                      rhs_,
                                                      r2,
                                                      r3,
-                                                     r4,
-                                                     r5);
+                                                     r4);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       lhs_,
+                                                       rhs_,
+                                                       r2,
+                                                       r3,
+                                                       r4,
+                                                       r5);
+  }
   // Never falls through to here.
 
   __ bind(&slow);
@@ -1687,96 +1599,411 @@
 
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
-  __ InvokeBuiltin(native, JUMP_JS);
+  __ InvokeBuiltin(native, JUMP_FUNCTION);
 }
 
 
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses VFP3 instructions.
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  CpuFeatures::Scope scope(VFP3);
 
-  Label false_result;
-  Label not_heap_number;
-  Register scratch = r9.is(tos_) ? r7 : r9;
+  Label patch;
+  const Register map = r9.is(tos_) ? r7 : r9;
 
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(tos_, ip);
-  __ b(eq, &false_result);
+  // undefined -> false.
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
 
-  // HeapNumber => false iff +0, -0, or NaN.
-  __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(scratch, ip);
-  __ b(&not_heap_number, ne);
+  // Boolean -> its value.
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
 
-  __ sub(ip, tos_, Operand(kHeapObjectTag));
-  __ vldr(d1, ip, HeapNumber::kValueOffset);
-  __ VFPCompareAndSetFlags(d1, 0.0);
-  // "tos_" is a register, and contains a non zero value by default.
-  // Hence we only need to overwrite "tos_" with zero to return false for
-  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
-  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
-  __ Ret();
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
 
-  __ bind(&not_heap_number);
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all other -> true
+    __ tst(tos_, Operand(kSmiTagMask));
+    // tos_ contains the correct return value already
+    __ Ret(eq);
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(tos_, &patch);
+  }
 
-  // Check if the value is 'null'.
-  // 'null' => false.
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(tos_, ip);
-  __ b(&false_result, eq);
+  if (types_.NeedsMap()) {
+    __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
 
-  // It can be an undetectable object.
-  // Undetectable => false.
-  __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
-  __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
-  __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-  __ b(&false_result, eq);
+    if (types_.CanBeUndetectable()) {
+      __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+      __ tst(ip, Operand(1 << Map::kIsUndetectable));
+      // Undetectable -> false.
+      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+      __ Ret(ne);
+    }
+  }
 
-  // JavaScript object => true.
-  __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(gt);
+  if (types_.Contains(SPEC_OBJECT)) {
+    // Spec object -> true.
+    __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+    // tos_ contains the correct non-zero return value already.
+    __ Ret(ge);
+  }
 
-  // Check for string
-  __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(gt);
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+    __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
+    __ Ret(lt);  // the string length is OK as the return value
+  }
 
-  // String value => false iff empty, i.e., length is zero
-  __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
-  // If length is zero, "tos_" contains zero ==> false.
-  // If length is not zero, "tos_" contains a non-zero value ==> true.
-  __ Ret();
+  if (types_.Contains(HEAP_NUMBER)) {
+    // Heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number;
+    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+    __ b(ne, &not_heap_number);
+    __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+    __ VFPCompareAndSetFlags(d1, 0.0);
+    // "tos_" is a register, and contains a non zero value by default.
+    // Hence we only need to overwrite "tos_" with zero to return false for
+    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+    __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
+    __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
+    __ Ret();
+    __ bind(&not_heap_number);
+  }
 
-  // Return 0 in "tos_" for false .
-  __ bind(&false_result);
-  __ mov(tos_, Operand(0, RelocInfo::NONE));
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value in tos_.
+    __ LoadRoot(ip, value);
+    __ cmp(tos_, ip);
+    // The value of a root is never NULL, so we can avoid loading a non-null
+    // value into tos_ when we want to return 'true'.
+    if (!result) {
+      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+    }
+    __ Ret(eq);
+  }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  if (!tos_.is(r3)) {
+    __ mov(r3, Operand(tos_));
+  }
+  __ mov(r2, Operand(Smi::FromInt(tos_.code())));
+  __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
+  __ Push(r3, r2, r1);
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
+}
+
+
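
The rewritten ToBooleanStub only emits checks for the types recorded in types_, but what it specializes is ordinary ECMAScript ToBoolean. A self-contained toy model of that truth table (the simplified value type is ours, not V8's):

#include <cmath>
#include <string>

enum class Kind { kUndefined, kNull, kBoolean, kSmi, kString, kHeapNumber, kObject };

struct ToyValue {
  Kind kind;
  bool boolean = false;       // for kBoolean
  int smi = 0;                // for kSmi
  std::string string;         // for kString
  double number = 0.0;        // for kHeapNumber
  bool undetectable = false;  // for kObject
};

bool ToBoolean(const ToyValue& v) {
  switch (v.kind) {
    case Kind::kUndefined:
    case Kind::kNull:        return false;
    case Kind::kBoolean:     return v.boolean;
    case Kind::kSmi:         return v.smi != 0;
    case Kind::kString:      return !v.string.empty();            // false iff empty
    case Kind::kHeapNumber:  return v.number != 0.0 && !std::isnan(v.number);
    case Kind::kObject:      return !v.undetectable;              // spec objects -> true
  }
  return false;
}
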
+void UnaryOpStub::PrintName(StringStream* stream) {
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+  stream->Add("UnaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              UnaryOpIC::GetName(operand_type_));
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ mov(r3, Operand(r0));  // the operand
+  __ mov(r2, Operand(Smi::FromInt(op_)));
+  __ mov(r1, Operand(Smi::FromInt(mode_)));
+  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
+  __ Push(r3, r2, r1, r0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* slow) {
+  __ JumpIfNotSmi(r0, non_smi);
+
+  // The result of negating zero or the smallest negative smi is not a smi.
+  __ bic(ip, r0, Operand(0x80000000), SetCC);
+  __ b(eq, slow);
+
+  // Return '0 - value'.
+  __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
   __ Ret();
 }
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
-  return stub.GetCode();
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                        Label* non_smi) {
+  __ JumpIfNotSmi(r0, non_smi);
+
+  // Flip bits and revert inverted smi-tag.
+  __ mvn(r0, Operand(r0));
+  __ bic(r0, r0, Operand(kSmiTagMask));
+  __ Ret();
 }
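
Both smi fast paths above operate directly on the tagged word: with a 32-bit smi being the value shifted left by one, negation and bitwise-not never need to untag. A sketch of the underlying arithmetic (helper names are ours):

#include <cstdint>

// tagged == value << 1 (32-bit smi encoding, zero tag bit).
int32_t SmiNegate(int32_t tagged) {
  // rsb r0, r0, #0: -(2*x) == 2*(-x), so the result keeps a zero tag bit.
  // The stub bails out first for 0 (the result must be -0.0, a heap number)
  // and for the most negative smi, whose negation is out of smi range.
  return static_cast<int32_t>(0u - static_cast<uint32_t>(tagged));
}

int32_t SmiBitNot(int32_t tagged) {
  // mvn + bic: ~(2*x) == 2*(~x) + 1, so clearing the low bit yields smi(~x).
  return ~tagged & ~1;
}
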
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+  // r0 is a heap number.  Get a new heap number in r1.
+  if (mode_ == UNARY_OVERWRITE) {
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+  } else {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(r0);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(r1, Operand(r0));
+    __ pop(r0);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+    __ mov(r0, Operand(r1));
+  }
+  __ Ret();
+}
+
+
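
The UNARY_OVERWRITE path above negates a heap number in place by XOR-ing kSignMask into its exponent word, i.e. by flipping the IEEE-754 sign bit. The equivalent host-side operation, for illustration:

#include <cstdint>
#include <cstring>

static double NegateViaSignBit(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  bits ^= (uint64_t{1} << 63);  // HeapNumber::kSignMask targets this bit
  std::memcpy(&value, &bits, sizeof bits);
  return value;
}
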
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+    MacroAssembler* masm, Label* slow) {
+  Label impossible;
+
+  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+  // Convert the heap number in r0 to an untagged integer in r1.
+  __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ mvn(r1, Operand(r1));
+  __ add(r2, r1, Operand(0x40000000), SetCC);
+  __ b(mi, &try_float);
+
+  // Tag the result as a smi and we're done.
+  __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+  __ Ret();
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    // Allocate a new heap number without zapping r0, which we need if it fails.
+    __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(r0);  // Push the heap number, not the untagged int32.
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(r2, r0);  // Move the new heap number into r2.
+    // Get the heap number into r0, now that the new heap number is in r2.
+    __ pop(r0);
+    __ LeaveInternalFrame();
+
+    // Convert the heap number in r0 to an untagged integer in r1.
+    // This can't go slow-case because it's the same number we already
+    // converted once.
+    __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
+    __ mvn(r1, Operand(r1));
+
+    __ bind(&heapnumber_allocated);
+    __ mov(r0, r2);  // Move newly allocated heap number to r0.
+  }
+
+  if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, r1);
+    __ vcvt_f64_s32(d0, s0);
+    __ sub(r2, r0, Operand(kHeapObjectTag));
+    __ vstr(d0, r2, HeapNumber::kValueOffset);
+    __ Ret();
+  } else {
+    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+    // have to set up a frame.
+    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&impossible);
+  if (FLAG_debug_code) {
+    __ stop("Incorrect assumption in bit-not stub");
+  }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
+  __ push(r0);
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   Label get_result;
 
   __ Push(r1, r0);
@@ -1787,40 +2014,43 @@
   __ Push(r2, r1, r0);
 
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
     MacroAssembler* masm) {
   UNIMPLEMENTED();
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
   switch (operands_type_) {
-    case TRBinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
       break;
-    case TRBinaryOpIC::SMI:
+    case BinaryOpIC::SMI:
       GenerateSmiStub(masm);
       break;
-    case TRBinaryOpIC::INT32:
+    case BinaryOpIC::INT32:
       GenerateInt32Stub(masm);
       break;
-    case TRBinaryOpIC::HEAP_NUMBER:
+    case BinaryOpIC::HEAP_NUMBER:
       GenerateHeapNumberStub(masm);
       break;
-    case TRBinaryOpIC::ODDBALL:
+    case BinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
-    case TRBinaryOpIC::STRING:
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
-    case TRBinaryOpIC::GENERIC:
+    case BinaryOpIC::GENERIC:
       GenerateGeneric(masm);
       break;
     default:
@@ -1829,12 +2059,7 @@
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name;
   switch (mode_) {
@@ -1843,18 +2068,14 @@
     case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
     default: overwrite_name = "UnknownOverwrite"; break;
   }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "TypeRecordingBinaryOpStub_%s_%s_%s",
-               op_name,
-               overwrite_name,
-               TRBinaryOpIC::GetName(operands_type_));
-  return name_;
+  stream->Add("BinaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              BinaryOpIC::GetName(operands_type_));
 }
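The resulting flat names look like "BinaryOpStub_ADD_OverwriteLeft_Int32" (the exact operand-type spelling comes from BinaryOpIC::GetName). The stand-alone sketch below mirrors the same formatting with snprintf in place of StringStream; the argument strings are illustrative.

#include <cstddef>
#include <cstdio>

// Sketch, not V8 source: same name layout as PrintName above.
void FormatBinaryOpStubName(char* buf, std::size_t len,
                            const char* op_name,
                            const char* overwrite_name,
                            const char* operands_type_name) {
  std::snprintf(buf, len, "BinaryOpStub_%s_%s_%s",
                op_name, overwrite_name, operands_type_name);
}

int main() {
  char buf[100];
  FormatBinaryOpStubName(buf, sizeof(buf), "ADD", "OverwriteLeft", "Int32");
  std::printf("%s\n", buf);  // BinaryOpStub_ADD_OverwriteLeft_Int32
  return 0;
}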
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
-    MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
@@ -1979,10 +2200,10 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
-                                                    bool smi_operands,
-                                                    Label* not_numbers,
-                                                    Label* gc_required) {
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+                                       bool smi_operands,
+                                       Label* not_numbers,
+                                       Label* gc_required) {
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
@@ -2193,7 +2414,8 @@
 // generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated the code jumps to the label gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
     Label* use_runtime,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
@@ -2202,13 +2424,11 @@
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
-  Register scratch2 = r9;
 
   // Perform combined smi check on both operands.
   __ orr(scratch1, left, Operand(right));
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(scratch1, Operand(kSmiTagMask));
-  __ b(ne, &not_smis);
+  __ JumpIfNotSmi(scratch1, &not_smis);
 
   // If the smi-smi operation results in a smi return is generated.
   GenerateSmiSmiOperation(masm);
@@ -2222,11 +2442,11 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label not_smis, call_runtime;
 
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
     // Only allow smi results.
     GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
   } else {
@@ -2247,18 +2467,48 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   // Try to add arguments as strings; otherwise, transition to the generic
-  // TRBinaryOpIC type.
+  // BinaryOpIC type.
   GenerateAddStrings(masm);
   GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
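The control flow above boils down to a simple predicate on the two operands; the sketch below restates it with hypothetical types and is not part of the patch.

// Sketch only: the decision implemented by GenerateBothStringStub. Both
// operands must be non-smi strings to reach the string-add stub; anything
// else transitions the IC to a more general state.
enum Action { kCallStringAddStub, kTypeTransition };

struct TaggedOperand {
  bool is_smi;
  bool is_string;  // i.e. instance type below FIRST_NONSTRING_TYPE
};

Action ClassifyBothStringAdd(const TaggedOperand& left,
                             const TaggedOperand& right) {
  if (left.is_smi || !left.is_string) return kTypeTransition;
  if (right.is_smi || !right.is_string) return kTypeTransition;
  return kCallStringAddStub;
}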
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::INT32);
 
   Register left = r1;
   Register right = r0;
@@ -2291,37 +2541,36 @@
     case Token::MUL:
     case Token::DIV:
     case Token::MOD: {
-    // Load both operands and check that they are 32-bit integer.
-    // Jump to type transition if they are not. The registers r0 and r1 (right
-    // and left) are preserved for the runtime call.
-    FloatingPointHelper::Destination destination =
-        CpuFeatures::IsSupported(VFP3) &&
-        op_ != Token::MOD ?
-        FloatingPointHelper::kVFPRegisters :
-        FloatingPointHelper::kCoreRegisters;
+      // Load both operands and check that they are 32-bit integer.
+      // Jump to type transition if they are not. The registers r0 and r1 (right
+      // and left) are preserved for the runtime call.
+      FloatingPointHelper::Destination destination =
+          (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
+              ? FloatingPointHelper::kVFPRegisters
+              : FloatingPointHelper::kCoreRegisters;
 
-    FloatingPointHelper::LoadNumberAsInt32Double(masm,
-                                                 right,
-                                                 destination,
-                                                 d7,
-                                                 r2,
-                                                 r3,
-                                                 heap_number_map,
-                                                 scratch1,
-                                                 scratch2,
-                                                 s0,
-                                                 &transition);
-    FloatingPointHelper::LoadNumberAsInt32Double(masm,
-                                                 left,
-                                                 destination,
-                                                 d6,
-                                                 r4,
-                                                 r5,
-                                                 heap_number_map,
-                                                 scratch1,
-                                                 scratch2,
-                                                 s0,
-                                                 &transition);
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   right,
+                                                   destination,
+                                                   d7,
+                                                   r2,
+                                                   r3,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   s0,
+                                                   &transition);
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   left,
+                                                   destination,
+                                                   d6,
+                                                   r4,
+                                                   r5,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   s0,
+                                                   &transition);
 
       if (destination == FloatingPointHelper::kVFPRegisters) {
         CpuFeatures::Scope scope(VFP3);
@@ -2355,7 +2604,7 @@
                              scratch1,
                              scratch2);
 
-          if (result_type_ <= TRBinaryOpIC::INT32) {
+          if (result_type_ <= BinaryOpIC::INT32) {
             // If the ne condition is set, result does
             // not fit in a 32-bit integer.
             __ b(ne, &transition);
@@ -2368,7 +2617,7 @@
           __ b(mi, &return_heap_number);
           // Check for minus zero. Return heap number for minus zero.
           Label not_zero;
-          __ cmp(scratch1, Operand(0));
+          __ cmp(scratch1, Operand::Zero());
           __ b(ne, &not_zero);
           __ vmov(scratch2, d5.high());
           __ tst(scratch2, Operand(HeapNumber::kSignMask));
@@ -2382,9 +2631,11 @@
           // DIV just falls through to allocating a heap number.
         }
 
-        if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
-                                                : TRBinaryOpIC::INT32) {
-          __ bind(&return_heap_number);
+        __ bind(&return_heap_number);
+        // Return a heap number, or fall through to type transition or runtime
+        // call if we can't.
+        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+                                                 : BinaryOpIC::INT32)) {
           // We are using vfp registers so r5 is available.
           heap_number_result = r5;
           GenerateHeapResultAllocation(masm,
@@ -2492,12 +2743,13 @@
           // The non vfp3 code does not support this special case, so jump to
           // runtime if we don't support it.
           if (CpuFeatures::IsSupported(VFP3)) {
-            __ b(mi,
-                 (result_type_ <= TRBinaryOpIC::INT32) ? &transition
-                                                       : &return_heap_number);
+            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+                      ? &transition
+                      : &return_heap_number);
           } else {
-            __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
-                                                           : &call_runtime);
+            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+                      ? &transition
+                      : &call_runtime);
           }
           break;
         case Token::SHL:
@@ -2557,7 +2809,11 @@
       UNREACHABLE();
   }
 
-  if (transition.is_linked()) {
+  // We never expect DIV to yield an integer result, so we always generate
+  // type transition code for DIV operations expecting an integer result: the
+  // code will fall through to this type transition.
+  if (transition.is_linked() ||
+      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
     __ bind(&transition);
     GenerateTypeTransition(masm);
   }
@@ -2567,7 +2823,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   Label call_runtime;
 
   if (op_ == Token::ADD) {
@@ -2600,7 +2856,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label call_runtime;
   GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
 
@@ -2609,7 +2865,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime, call_string_add_or_runtime;
 
   GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -2626,7 +2882,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD);
   Label left_not_string, call_runtime;
 
@@ -2657,41 +2913,41 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
   GenerateRegisterArgsPush(masm);
   switch (op_) {
     case Token::ADD:
-      __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
       break;
     case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
       break;
     case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
       break;
     case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
     case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
       break;
     case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
       break;
     case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
       break;
     case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
       break;
     case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
       break;
     case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
       break;
     case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
       break;
     default:
       UNREACHABLE();
@@ -2699,14 +2955,12 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
-    MacroAssembler* masm,
-    Register result,
-    Register heap_number_map,
-    Register scratch1,
-    Register scratch2,
-    Label* gc_required) {
-
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                Register result,
+                                                Register heap_number_map,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Label* gc_required) {
   // Code below will scratch result if allocation fails. To keep both arguments
   // intact for the runtime call result cannot be one of these.
   ASSERT(!result.is(r0) && !result.is(r1));
@@ -2733,7 +2987,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   __ Push(r1, r0);
 }
 
@@ -2771,7 +3025,7 @@
                   r1,
                   Heap::kHeapNumberMapRootIndex,
                   &calculate,
-                  true);
+                  DONT_DO_SMI_CHECK);
       // Input is a HeapNumber. Load it to a double register and store the
       // low and high words into r2, r3.
       __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
@@ -2855,7 +3109,6 @@
 
     Label no_update;
     Label skip_cache;
-    const Register heap_number_map = r5;
 
     // Call C function to calculate the result and update the cache.
     // Register r0 holds precalculated cache entry address; preserve
@@ -2914,17 +3167,24 @@
   Isolate* isolate = masm->isolate();
 
   __ push(lr);
-  __ PrepareCallCFunction(2, scratch);
-  __ vmov(r0, r1, d2);
+  __ PrepareCallCFunction(0, 1, scratch);
+  if (masm->use_eabi_hardfloat()) {
+    __ vmov(d0, d2);
+  } else {
+    __ vmov(r0, r1, d2);
+  }
   switch (type_) {
     case TranscendentalCache::SIN:
-      __ CallCFunction(ExternalReference::math_sin_double_function(isolate), 2);
+      __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::COS:
-      __ CallCFunction(ExternalReference::math_cos_double_function(isolate), 2);
+      __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::LOG:
-      __ CallCFunction(ExternalReference::math_log_double_function(isolate), 2);
+      __ CallCFunction(ExternalReference::math_log_double_function(isolate),
+          0, 1);
       break;
     default:
       UNIMPLEMENTED();
@@ -2952,141 +3212,6 @@
 }
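The PrepareCallCFunction/CallCFunction changes above move from counting a double as two core-register words to counting core and double arguments separately, so the same stub works under both ARM float ABIs. A trivial stand-alone view of the C side (sketch only):

#include <cmath>
#include <cstdio>

// Sketch only: the C functions reached through CallCFunction take one double
// and return one double. Whether the argument arrives in d0 (EABI hard-float)
// or split across r0/r1 (soft-float) is decided by the use_eabi_hardfloat()
// branch in the stub; the C signature is the same either way.
static double CallSinLikeTheStubDoes(double x) {
  return std::sin(x);  // e.g. math_sin_double_function
}

int main() {
  std::printf("%g\n", CallSinLikeTheStubDoes(0.0));  // 0
  return 0;
}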
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  Label slow, done;
-
-  Register heap_number_map = r6;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  if (op_ == Token::SUB) {
-    if (include_smi_code_) {
-      // Check whether the value is a smi.
-      Label try_float;
-      __ tst(r0, Operand(kSmiTagMask));
-      __ b(ne, &try_float);
-
-      // Go slow case if the value of the expression is zero
-      // to make sure that we switch between 0 and -0.
-      if (negative_zero_ == kStrictNegativeZero) {
-        // If we have to check for zero, then we can check for the max negative
-        // smi while we are at it.
-        __ bic(ip, r0, Operand(0x80000000), SetCC);
-        __ b(eq, &slow);
-        __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
-        __ Ret();
-      } else {
-        // The value of the expression is a smi and 0 is OK for -0.  Try
-        // optimistic subtraction '0 - value'.
-        __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
-        __ Ret(vc);
-        // We don't have to reverse the optimistic neg since the only case
-        // where we fall through is the minimum negative Smi, which is the case
-        // where the neg leaves the register unchanged.
-        __ jmp(&slow);  // Go slow on max negative Smi.
-      }
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ tst(r0, Operand(kSmiTagMask));
-      __ Assert(ne, "Unexpected smi operand.");
-    }
-
-    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-    __ cmp(r1, heap_number_map);
-    __ b(ne, &slow);
-    // r0 is a heap number.  Get a new heap number in r1.
-    if (overwrite_ == UNARY_OVERWRITE) {
-      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-      __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    } else {
-      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
-      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-      __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
-      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-      __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
-      __ mov(r0, Operand(r1));
-    }
-  } else if (op_ == Token::BIT_NOT) {
-    if (include_smi_code_) {
-      Label non_smi;
-      __ JumpIfNotSmi(r0, &non_smi);
-      __ mvn(r0, Operand(r0));
-      // Bit-clear inverted smi-tag.
-      __ bic(r0, r0, Operand(kSmiTagMask));
-      __ Ret();
-      __ bind(&non_smi);
-    } else if (FLAG_debug_code) {
-      __ tst(r0, Operand(kSmiTagMask));
-      __ Assert(ne, "Unexpected smi operand.");
-    }
-
-    // Check if the operand is a heap number.
-    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-    __ cmp(r1, heap_number_map);
-    __ b(ne, &slow);
-
-    // Convert the heap number is r0 to an untagged integer in r1.
-    __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
-
-    // Do the bitwise operation (move negated) and check if the result
-    // fits in a smi.
-    Label try_float;
-    __ mvn(r1, Operand(r1));
-    __ add(r2, r1, Operand(0x40000000), SetCC);
-    __ b(mi, &try_float);
-    __ mov(r0, Operand(r1, LSL, kSmiTagSize));
-    __ b(&done);
-
-    __ bind(&try_float);
-    if (!overwrite_ == UNARY_OVERWRITE) {
-      // Allocate a fresh heap number, but don't overwrite r0 until
-      // we're sure we can do it without going through the slow case
-      // that needs the value in r0.
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
-      __ mov(r0, Operand(r2));
-    }
-
-    if (CpuFeatures::IsSupported(VFP3)) {
-      // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
-      CpuFeatures::Scope scope(VFP3);
-      __ vmov(s0, r1);
-      __ vcvt_f64_s32(d0, s0);
-      __ sub(r2, r0, Operand(kHeapObjectTag));
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-    } else {
-      // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
-      // have to set up a frame.
-      WriteInt32ToHeapNumberStub stub(r1, r0, r2);
-      __ push(lr);
-      __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-  } else {
-    UNIMPLEMENTED();
-  }
-
-  __ bind(&done);
-  __ Ret();
-
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ bind(&slow);
-  __ push(r0);
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
 
@@ -3141,11 +3266,11 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(lr);
-    __ PrepareCallCFunction(3, scratch);
-    __ mov(r2, exponent);
-    __ vmov(r0, r1, double_base);
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(double_base, exponent);
     __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()), 3);
+        ExternalReference::power_double_int_function(masm->isolate()),
+        1, 1);
     __ pop(lr);
     __ GetCFunctionDoubleResult(double_result);
     __ vstr(double_result,
@@ -3171,11 +3296,11 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(lr);
-    __ PrepareCallCFunction(4, scratch);
-    __ vmov(r0, r1, double_base);
-    __ vmov(r2, r3, double_exponent);
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(double_base, double_exponent);
     __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()), 4);
+        ExternalReference::power_double_double_function(masm->isolate()),
+        0, 2);
     __ pop(lr);
     __ GetCFunctionDoubleResult(double_result);
     __ vstr(double_result,
@@ -3219,8 +3344,9 @@
 
   if (do_gc) {
     // Passing r0.
-    __ PrepareCallCFunction(1, r1);
-    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1);
+    __ PrepareCallCFunction(1, 0, r1);
+    __ CallCFunction(ExternalReference::perform_gc_function(isolate),
+        1, 0);
   }
 
   ExternalReference scope_depth =
@@ -3255,15 +3381,10 @@
 
   __ mov(r2, Operand(ExternalReference::isolate_address()));
 
-
-  // TODO(1242173): To let the GC traverse the return address of the exit
-  // frames, we need to know where the return address is. Right now,
-  // we store it on the stack to be able to find it again, but we never
-  // restore from it in case of changes, which makes it impossible to
-  // support moving the C entry code stub. This should be fixed, but currently
-  // this is OK because the CEntryStub gets generated so early in the V8 boot
-  // sequence that it is not moving ever.
-
+  // To let the GC traverse the return address of the exit frames, we need to
+  // know where the return address is. The CEntryStub is unmovable, so
+  // we can store the address on the stack to be able to find it again and
+  // we never have to restore it, because it will not change.
   // Compute the return address in lr to return to after the jump below. Pc is
   // already at '+ 8' from the current instruction but return is after three
   // instructions so add another 4 to pc to get the return address.
@@ -3311,7 +3432,7 @@
   // Retrieve the pending exception and clear the variable.
   __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
   __ ldr(r3, MemOperand(ip));
-  __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
+  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(ip));
   __ str(r3, MemOperand(ip));
@@ -3417,6 +3538,8 @@
     CpuFeatures::Scope scope(VFP3);
     // Save callee-saved vfp registers.
     __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+    // Set up the reserved register for 0.0.
+    __ vmov(kDoubleRegZero, 0.0);
   }
 
   // Get address of argv, see stm above.
@@ -3444,20 +3567,19 @@
   __ mov(r7, Operand(Smi::FromInt(marker)));
   __ mov(r6, Operand(Smi::FromInt(marker)));
   __ mov(r5,
-         Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
+         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
   __ ldr(r5, MemOperand(r5));
   __ Push(r8, r7, r6, r5);
 
   // Setup frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // If this is the outermost JS call, set js_entry_sp value.
   Label non_outermost_js;
-  ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   __ ldr(r6, MemOperand(r5));
-  __ cmp(r6, Operand(0));
+  __ cmp(r6, Operand::Zero());
   __ b(ne, &non_outermost_js);
   __ str(fp, MemOperand(r5));
   __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
@@ -3467,7 +3589,6 @@
   __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   __ bind(&cont);
   __ push(ip);
-#endif
 
   // Call a faked try-block that does the invoke.
   __ bl(&invoke);
@@ -3476,7 +3597,7 @@
   // exception field in the JSEnv and return a failure sentinel.
   // Coming in here the fp will be invalid because the PushTryHandler below
   // sets it to 0 to signal the existence of the JSEntry frame.
-  __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
+  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r0, MemOperand(ip));
   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
@@ -3494,7 +3615,7 @@
   // Clear any pending exceptions.
   __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
   __ ldr(r5, MemOperand(ip));
-  __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
+  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r5, MemOperand(ip));
 
@@ -3528,22 +3649,20 @@
   __ PopTryHandler();
 
   __ bind(&exit);  // r0 holds result
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(r5);
   __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   __ b(ne, &non_outermost_js_2);
-  __ mov(r6, Operand(0));
+  __ mov(r6, Operand::Zero());
   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   __ str(r6, MemOperand(r5));
   __ bind(&non_outermost_js_2);
-#endif
 
   // Restore the top frame descriptors from the stack.
   __ pop(r3);
   __ mov(ip,
-         Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
+         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
   __ str(r3, MemOperand(ip));
 
   // Reset the stack to the callee saved registers.
@@ -3707,7 +3826,7 @@
   __ b(ne, &slow);
 
   // Null is not instance of anything.
-  __ cmp(scratch, Operand(FACTORY->null_value()));
+  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
   __ b(ne, &object_not_null);
   __ mov(r0, Operand(Smi::FromInt(1)));
   __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -3730,13 +3849,13 @@
     if (HasArgsInRegisters()) {
       __ Push(r0, r1);
     }
-  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
     __ EnterInternalFrame();
     __ Push(r0, r1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
     __ LeaveInternalFrame();
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand::Zero());
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
     __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -3801,12 +3920,233 @@
 }
 
 
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
   // sp[0] : number of parameters
   // sp[4] : receiver displacement
   // sp[8] : function
 
   // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &runtime);
+
+  // Patch the arguments.length and the parameters pointer in the current frame.
+  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ str(r2, MemOperand(sp, 0 * kPointerSize));
+  __ add(r3, r3, Operand(r2, LSL, 1));
+  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
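The add of Operand(r2, LSL, 1) above works because r2 holds a smi-tagged length: one extra left shift turns the smi into length * kPointerSize on 32-bit ARM. A small check of that arithmetic (sketch, assuming 4-byte pointers):

#include <cassert>
#include <cstdint>

// Sketch: a smi-tagged length is (length << 1), so one further left shift --
// Operand(r2, LSL, 1) -- yields length * kPointerSize on 32-bit ARM.
int main() {
  const int32_t kPointerSize = 4;
  for (int32_t length = 0; length < 8; ++length) {
    int32_t smi_tagged = length << 1;       // value as held in r2
    int32_t byte_offset = smi_tagged << 1;  // add(r3, r3, Operand(r2, LSL, 1))
    assert(byte_offset == length * kPointerSize);
  }
  return 0;
}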
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+  // Stack layout:
+  //  sp[0] : number of parameters (tagged)
+  //  sp[4] : address of receiver argument
+  //  sp[8] : function
+  // Registers used over whole function:
+  //  r6 : allocated object (tagged)
+  //  r9 : mapped parameter count (tagged)
+
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+  // r1 = parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  Label adaptor_frame, try_allocate;
+  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(eq, &adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+  __ mov(r2, r1);
+  __ b(&try_allocate);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ add(r3, r3, Operand(r2, LSL, 1));
+  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+  // r1 = parameter count (tagged)
+  // r2 = argument count (tagged)
+  // Compute the mapped parameter count = min(r1, r2) in r1.
+  __ cmp(r1, Operand(r2));
+  __ mov(r1, Operand(r2), LeaveCC, gt);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  __ cmp(r1, Operand(Smi::FromInt(0)));
+  __ mov(r9, Operand::Zero(), LeaveCC, eq);
+  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
+  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
+
+  // 2. Backing store.
+  __ add(r9, r9, Operand(r2, LSL, 1));
+  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+
+  // Do the allocation of all three objects in one go.
+  __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
+
+  // r0 = address of new object(s) (tagged)
+  // r2 = argument count (tagged)
+  // Get the arguments boilerplate from the current (global) context into r4.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+  __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+  __ cmp(r1, Operand::Zero());
+  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
+  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
+
+  // r0 = address of new object (tagged)
+  // r1 = mapped parameter count (tagged)
+  // r2 = argument count (tagged)
+  // r4 = address of boilerplate object (tagged)
+  // Copy the JS object part.
+  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+    __ ldr(r3, FieldMemOperand(r4, i));
+    __ str(r3, FieldMemOperand(r0, i));
+  }
+
+  // Setup the callee in-object property.
+  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+  const int kCalleeOffset = JSObject::kHeaderSize +
+      Heap::kArgumentsCalleeIndex * kPointerSize;
+  __ str(r3, FieldMemOperand(r0, kCalleeOffset));
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  const int kLengthOffset = JSObject::kHeaderSize +
+      Heap::kArgumentsLengthIndex * kPointerSize;
+  __ str(r2, FieldMemOperand(r0, kLengthOffset));
+
+  // Setup the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, r4 will point there, otherwise
+  // it will point to the backing store.
+  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+  // r0 = address of new object (tagged)
+  // r1 = mapped parameter count (tagged)
+  // r2 = argument count (tagged)
+  // r4 = address of parameter map or backing store (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ cmp(r1, Operand(Smi::FromInt(0)));
+  // Move backing store address to r3, because it is
+  // expected there when filling in the unmapped arguments.
+  __ mov(r3, r4, LeaveCC, eq);
+  __ b(eq, &skip_parameter_map);
+
+  __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+  __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
+  __ add(r6, r1, Operand(Smi::FromInt(2)));
+  __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+  __ add(r6, r4, Operand(r1, LSL, 1));
+  __ add(r6, r6, Operand(kParameterMapHeaderSize));
+  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+  __ mov(r6, r1);
+  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
+  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ sub(r9, r9, Operand(r1));
+  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+  __ add(r3, r4, Operand(r6, LSL, 1));
+  __ add(r3, r3, Operand(kParameterMapHeaderSize));
+
+  // r6 = loop variable (tagged)
+  // r1 = mapping index (tagged)
+  // r3 = address of backing store (tagged)
+  // r4 = address of parameter map (tagged)
+  // r5 = temporary scratch (used, among other things, for address calculation)
+  // r7 = the hole value
+  __ jmp(&parameters_test);
+
+  __ bind(&parameters_loop);
+  __ sub(r6, r6, Operand(Smi::FromInt(1)));
+  __ mov(r5, Operand(r6, LSL, 1));
+  __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ str(r9, MemOperand(r4, r5));
+  __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ str(r7, MemOperand(r3, r5));
+  __ add(r9, r9, Operand(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ cmp(r6, Operand(Smi::FromInt(0)));
+  __ b(ne, &parameters_loop);
+
+  __ bind(&skip_parameter_map);
+  // r2 = argument count (tagged)
+  // r3 = address of backing store (tagged)
+  // r5 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
+  __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
+  __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
+
+  Label arguments_loop, arguments_test;
+  __ mov(r9, r1);
+  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
+  __ sub(r4, r4, Operand(r9, LSL, 1));
+  __ jmp(&arguments_test);
+
+  __ bind(&arguments_loop);
+  __ sub(r4, r4, Operand(kPointerSize));
+  __ ldr(r6, MemOperand(r4, 0));
+  __ add(r5, r3, Operand(r9, LSL, 1));
+  __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
+  __ add(r9, r9, Operand(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ cmp(r9, Operand(r2));
+  __ b(lt, &arguments_loop);
+
+  // Return and remove the on-stack parameters.
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // r2 = argument count (tagged)
+  __ bind(&runtime);
+  __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
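The size computation in steps 1-3 above accumulates parameter map, backing store and arguments object into r9 before the single AllocateInNewSpace call. The snippet below restates it in plain C++; it is a sketch with stand-in constants (32-bit pointers assumed), not the authoritative V8 values.

// Sketch only: the byte count accumulated in r9 above.
static const int kPointerSize = 4;
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
static const int kParameterMapHeaderSize =
    kFixedArrayHeaderSize + 2 * kPointerSize;  // + context + backing store
static const int kArgumentsObjectSize = 5 * kPointerSize;   // stand-in value

int NonStrictArgumentsAllocationSize(int mapped_count, int argument_count) {
  int size = 0;
  // 1. Parameter map (only needed when there are mapped parameters).
  if (mapped_count > 0) {
    size += kParameterMapHeaderSize + mapped_count * kPointerSize;
  }
  // 2. Backing store.
  size += kFixedArrayHeaderSize + argument_count * kPointerSize;
  // 3. Arguments object itself.
  size += kArgumentsObjectSize;
  return size;
}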
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+  // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
@@ -3834,40 +4174,31 @@
   __ mov(r1, Operand(r1, LSR, kSmiTagSize));
   __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
   __ bind(&add_arguments_object);
-  __ add(r1, r1, Operand(GetArgumentsObjectSize() / kPointerSize));
+  __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
 
   // Do the allocation of both objects in one go.
-  __ AllocateInNewSpace(
-      r1,
-      r0,
-      r2,
-      r3,
-      &runtime,
-      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  __ AllocateInNewSpace(r1,
+                        r0,
+                        r2,
+                        r3,
+                        &runtime,
+                        static_cast<AllocationFlags>(TAG_OBJECT |
+                                                     SIZE_IN_WORDS));
 
   // Get the arguments boilerplate from the current (global) context.
   __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
-  __ ldr(r4, MemOperand(r4,
-                        Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+  __ ldr(r4, MemOperand(r4, Context::SlotOffset(
+      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
 
   // Copy the JS object part.
   __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
 
-  if (type_ == NEW_NON_STRICT) {
-    // Setup the callee in-object property.
-    STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-    __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
-    const int kCalleeOffset = JSObject::kHeaderSize +
-                              Heap::kArgumentsCalleeIndex * kPointerSize;
-    __ str(r3, FieldMemOperand(r0, kCalleeOffset));
-  }
-
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
-                                 Heap::kArgumentsLengthIndex * kPointerSize));
+      Heap::kArgumentsLengthIndex * kPointerSize));
 
   // If there are no actual arguments, we're done.
   Label done;
@@ -3879,12 +4210,13 @@
 
   // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
-  __ add(r4, r0, Operand(GetArgumentsObjectSize()));
+  __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
   __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
   __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
   __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  __ mov(r1, Operand(r1, LSR, kSmiTagSize));  // Untag the length for the loop.
+  // Untag the length for the loop.
+  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
 
   // Copy the fixed array slots.
   Label loop;
@@ -3907,7 +4239,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
 }
 
 
@@ -3959,8 +4291,7 @@
   // Check that the first argument is a JSRegExp object.
   __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &runtime);
+  __ JumpIfSmi(r0, &runtime);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
   __ b(ne, &runtime);
 
@@ -3996,8 +4327,7 @@
   // regexp_data: RegExp data (FixedArray)
   // Check that the second argument is a string.
   __ ldr(subject, MemOperand(sp, kSubjectOffset));
-  __ tst(subject, Operand(kSmiTagMask));
-  __ b(eq, &runtime);
+  __ JumpIfSmi(subject, &runtime);
   Condition is_string = masm->IsObjectStringType(subject, r0);
   __ b(NegateCondition(is_string), &runtime);
   // Get the length of the string to r3.
@@ -4010,8 +4340,7 @@
   // Check that the third argument is a positive smi less than the subject
   // string length. A negative value will be greater (unsigned comparison).
   __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &runtime);
+  __ JumpIfNotSmi(r0, &runtime);
   __ cmp(r3, Operand(r0));
   __ b(ls, &runtime);
 
@@ -4020,8 +4349,7 @@
   // regexp_data: RegExp data (FixedArray)
   // Check that the fourth object is a JSArray object.
   __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &runtime);
+  __ JumpIfSmi(r0, &runtime);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
   __ b(ne, &runtime);
   // Check that the JSArray is in fast case.
@@ -4039,6 +4367,8 @@
   __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
   __ b(gt, &runtime);
 
+  // Reset offset for possibly sliced string.
+  __ mov(r9, Operand(0));
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
@@ -4046,33 +4376,45 @@
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   // First check for flat string.
-  __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ b(eq, &seq_string);
 
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
-  // Check for flat cons string.
+  // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  STATIC_ASSERT(kExternalStringTag !=0);
-  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
-  __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
-  __ b(ne, &runtime);
+  // In the case of a sliced string its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmp(r1, Operand(kExternalStringTag));
+  __ b(lt, &cons_string);
+  __ b(eq, &runtime);
+
+  // String is sliced.
+  __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+  __ mov(r9, Operand(r9, ASR, kSmiTagSize));
+  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+  // r9: offset of sliced string, smi-tagged.
+  __ jmp(&check_encoding);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
   __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
   __ cmp(r0, r1);
   __ b(ne, &runtime);
   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  // Is first part of cons or parent of slice a flat string?
+  __ bind(&check_encoding);
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  // Is first part a flat string?
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
   __ b(ne, &runtime);
-
   __ bind(&seq_string);
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -4087,9 +4429,8 @@
 
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
-  // the hole.
-  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
-  __ b(ne, &runtime);
+  // a smi (code flushing support).
+  __ JumpIfSmi(r7, &runtime);
 
   // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
   // r7: code
@@ -4139,15 +4480,24 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data and
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
-  __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
   STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
-  // Argument 4 (r3): End of string data
-  // Argument 3 (r2): Start of string data
+  // Load the length from the original subject string from the previous stack
+  // frame. Therefore we have to use fp, which points exactly to two pointer
+  // sizes below the previous sp. (Because creating a new stack frame pushes
+  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+  // If slice offset is not 0, load the length from the original sliced string.
+  // Argument 4, r3: End of string data
+  // Argument 3, r2: Start of string data
+  // Prepare start and end index of the input.
+  __ add(r9, r8, Operand(r9, LSL, r3));
   __ add(r2, r9, Operand(r1, LSL, r3));
-  __ add(r3, r9, Operand(r0, LSL, r3));
+
+  __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
+  __ mov(r8, Operand(r8, ASR, kSmiTagSize));
+  __ add(r3, r9, Operand(r8, LSL, r3));
 
   // Argument 2 (r1): Previous index.
   // Already there
@@ -4184,7 +4534,7 @@
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
   __ ldr(r1, MemOperand(r1, 0));
-  __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(r2, 0));
   __ cmp(r0, r1);
@@ -4193,8 +4543,8 @@
   __ str(r1, MemOperand(r2, 0));  // Clear pending exception.
 
   // Check if the exception is a termination. If so, throw as uncatchable.
-  __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
-  __ cmp(r0, ip);
+  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
+
   Label termination_exception;
   __ b(eq, &termination_exception);
 
@@ -4205,7 +4555,7 @@
 
   __ bind(&failure);
   // For failure and exception return null.
-  __ mov(r0, Operand(FACTORY->null_value()));
+  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
@@ -4276,11 +4626,12 @@
   const int kMaxInlineLength = 100;
   Label slowcase;
   Label done;
+  Factory* factory = masm->isolate()->factory();
+
   __ ldr(r1, MemOperand(sp, kPointerSize * 2));
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(ne, &slowcase);
+  __ JumpIfNotSmi(r1, &slowcase);
   __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
   __ b(hi, &slowcase);
   // Smi-tagging is equivalent to multiplying by 2.
@@ -4310,7 +4661,7 @@
   // Interleave operations for better latency.
   __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ add(r3, r0, Operand(JSRegExpResult::kSize));
-  __ mov(r4, Operand(FACTORY->empty_fixed_array()));
+  __ mov(r4, Operand(factory->empty_fixed_array()));
   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
   __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
@@ -4331,13 +4682,13 @@
   // r5: Number of elements in array, untagged.
 
   // Set map.
-  __ mov(r2, Operand(FACTORY->fixed_array_map()));
+  __ mov(r2, Operand(factory->fixed_array_map()));
   __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   // Set FixedArray length.
   __ mov(r6, Operand(r5, LSL, kSmiTagSize));
   __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
   // Fill contents of fixed-array with the-hole.
-  __ mov(r2, Operand(FACTORY->the_hole_value()));
+  __ mov(r2, Operand(factory->the_hole_value()));
   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   // Fill fixed array elements with hole.
   // r0: JSArray, tagged.
@@ -4362,32 +4713,24 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  Label slow;
+  Label slow, non_function;
 
-  // If the receiver might be a value (string, number or boolean) check for this
-  // and box it if it is.
-  if (ReceiverMightBeValue()) {
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
     // Get the receiver from the stack.
     // function, receiver [, arguments]
-    Label receiver_is_value, receiver_is_js_object;
-    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
-    // Check if receiver is a smi (which is a number value).
-    __ JumpIfSmi(r1, &receiver_is_value);
-
-    // Check if the receiver is a valid JS object.
-    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
-    __ b(ge, &receiver_is_js_object);
-
-    // Call the runtime to box the value.
-    __ bind(&receiver_is_value);
-    __ EnterInternalFrame();
-    __ push(r1);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
-    __ LeaveInternalFrame();
-    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
-    __ bind(&receiver_is_js_object);
+    __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
+    // Call as function is indicated with the hole.
+    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+    __ b(ne, &call);
+    // Patch the receiver on the stack with the global receiver object.
+    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+    __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+    __ bind(&call);
   }
 
   // Get the function to call from the stack.
@@ -4396,7 +4739,7 @@
 
   // Check that the function is really a JavaScript function.
   // r1: pushed function (to be verified)
-  __ JumpIfSmi(r1, &slow);
+  __ JumpIfSmi(r1, &non_function);
   // Get the map of the function object.
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &slow);
@@ -4404,16 +4747,48 @@
   // Fast-case: Invoke the function now.
   // r1: pushed function
   ParameterCount actual(argc_);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+    __ b(eq, &call_as_function);
+    __ InvokeFunction(r1,
+                      actual,
+                      JUMP_FUNCTION,
+                      NullCallWrapper(),
+                      CALL_AS_METHOD);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(r1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // Check for function proxy.
+  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(ne, &non_function);
+  __ push(r1);  // put proxy as additional argument
+  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
+  __ mov(r2, Operand(0, RelocInfo::NONE));
+  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+  __ SetCallKind(r5, CALL_AS_FUNCTION);
+  {
+    Handle<Code> adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  }
+
   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   // of the original receiver from the call site).
+  __ bind(&non_function);
   __ str(r1, MemOperand(sp, argc_ * kPointerSize));
   __ mov(r0, Operand(argc_));  // Setup the number of arguments.
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+  __ SetCallKind(r5, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
 }
@@ -4421,16 +4796,9 @@
 
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
   ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
          (lhs_.is(r1) && rhs_.is(r0)));
-
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-
   const char* cc_name;
   switch (cc_) {
     case lt: cc_name = "LT"; break;
@@ -4441,40 +4809,14 @@
     case ne: cc_name = "NE"; break;
     default: cc_name = "UnknownCondition"; break;
   }
-
-  const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
-  const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
-
-  const char* strict_name = "";
-  if (strict_ && (cc_ == eq || cc_ == ne)) {
-    strict_name = "_STRICT";
-  }
-
-  const char* never_nan_nan_name = "";
-  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
-    never_nan_nan_name = "_NO_NAN";
-  }
-
-  const char* include_number_compare_name = "";
-  if (!include_number_compare_) {
-    include_number_compare_name = "_NO_NUMBER";
-  }
-
-  const char* include_smi_compare_name = "";
-  if (!include_smi_compare_) {
-    include_smi_compare_name = "_NO_SMI";
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "CompareStub_%s%s%s%s%s%s",
-               cc_name,
-               lhs_name,
-               rhs_name,
-               strict_name,
-               never_nan_nan_name,
-               include_number_compare_name,
-               include_smi_compare_name);
-  return name_;
+  bool is_equality = cc_ == eq || cc_ == ne;
+  stream->Add("CompareStub_%s", cc_name);
+  stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
+  stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
+  if (strict_ && is_equality) stream->Add("_STRICT");
+  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+  if (!include_number_compare_) stream->Add("_NO_NUMBER");
+  if (!include_smi_compare_) stream->Add("_NO_SMI");
 }
 
 
@@ -4499,6 +4841,7 @@
   Label flat_string;
   Label ascii_string;
   Label got_char_code;
+  Label sliced_string;
 
   // If the receiver is a smi trigger the non-string case.
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -4528,7 +4871,11 @@
   __ b(eq, &flat_string);
 
   // Handle non-flat strings.
-  __ tst(result_, Operand(kIsConsStringMask));
+  __ and_(result_, result_, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmp(result_, Operand(kExternalStringTag));
+  __ b(gt, &sliced_string);
   __ b(eq, &call_runtime_);
 
   // ConsString.
@@ -4536,22 +4883,34 @@
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  Label assure_seq_string;
   __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
   __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
   __ cmp(result_, Operand(ip));
   __ b(ne, &call_runtime_);
   // Get the first of the two strings and load its instance type.
   __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
+  __ add(scratch_, scratch_, result_);
+  __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+
+  // Assure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+  // Check that parent is not an external string. Go to runtime otherwise.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
   __ b(ne, &call_runtime_);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
-  STATIC_ASSERT(kAsciiStringTag != 0);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ tst(result_, Operand(kStringEncodingMask));
   __ b(ne, &ascii_string);
 
@@ -4587,7 +4946,7 @@
               scratch_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
-              true);
+              DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ Push(object_, index_);
   __ push(index_);  // Consumed by runtime conversion function.
@@ -4684,70 +5043,6 @@
 }
 
 
-class StringHelper : public AllStatic {
- public:
-  // Generate code for copying characters using a simple loop. This should only
-  // be used in places where the number of characters is small and the
-  // additional setup and checking in GenerateCopyCharactersLong adds too much
-  // overhead. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     bool ascii);
-
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharactersLong(MacroAssembler* masm,
-                                         Register dest,
-                                         Register src,
-                                         Register count,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Register scratch4,
-                                         Register scratch5,
-                                         int flags);
-
-
-  // Probe the symbol table for a two character string. If the string is
-  // not found by probing a jump to the label not_found is performed. This jump
-  // does not guarantee that the string is not in the symbol table. If the
-  // string is found the code falls through with the string in register r0.
-  // Contents of both c1 and c2 registers are modified. At the exit c1 is
-  // guaranteed to contain halfword with low and high bytes equal to
-  // initial contents of c1 and c2 respectively.
-  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                                   Register c1,
-                                                   Register c2,
-                                                   Register scratch1,
-                                                   Register scratch2,
-                                                   Register scratch3,
-                                                   Register scratch4,
-                                                   Register scratch5,
-                                                   Label* not_found);
-
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character);
-
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character);
-
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                           Register dest,
                                           Register src,
@@ -5000,9 +5295,8 @@
   static const int kProbes = 4;
   Label found_in_symbol_table;
   Label next_probe[kProbes];
+  Register candidate = scratch5;  // Scratch register contains candidate.
   for (int i = 0; i < kProbes; i++) {
-    Register candidate = scratch5;  // Scratch register contains candidate.
-
     // Calculate entry in symbol table.
     if (i > 0) {
       __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
@@ -5059,7 +5353,7 @@
   __ jmp(not_found);
 
   // Scratch register contains result when we fall through to here.
-  Register result = scratch;
+  Register result = candidate;
   __ bind(&found_in_symbol_table);
   __ Move(r0, result);
 }
@@ -5069,9 +5363,13 @@
                                     Register hash,
                                     Register character) {
   // hash = character + (character << 10);
-  __ add(hash, character, Operand(character, LSL, 10));
+  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+  // Untag smi seed and add the character.
+  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
+  // hash += hash << 10;
+  __ add(hash, hash, Operand(hash, LSL, 10));
   // hash ^= hash >> 6;
-  __ eor(hash, hash, Operand(hash, ASR, 6));
+  __ eor(hash, hash, Operand(hash, LSR, 6));
 }
 
 
@@ -5083,7 +5381,7 @@
   // hash += hash << 10;
   __ add(hash, hash, Operand(hash, LSL, 10));
   // hash ^= hash >> 6;
-  __ eor(hash, hash, Operand(hash, ASR, 6));
+  __ eor(hash, hash, Operand(hash, LSR, 6));
 }
 
 
@@ -5092,12 +5390,14 @@
   // hash += hash << 3;
   __ add(hash, hash, Operand(hash, LSL, 3));
   // hash ^= hash >> 11;
-  __ eor(hash, hash, Operand(hash, ASR, 11));
+  __ eor(hash, hash, Operand(hash, LSR, 11));
   // hash += hash << 15;
-  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
+  __ add(hash, hash, Operand(hash, LSL, 15));
+
+  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
 
   // if (hash == 0) hash = 27;
-  __ mov(hash, Operand(27), LeaveCC, ne);
+  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
 }
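
The hash helpers now start from the heap's hash seed, mix with logical (LSR) shifts, mask the result with String::kHashBitMask and substitute StringHasher::kZeroHash when the masked hash is zero. A self-contained C++ sketch of the equivalent computation; the two constants below are placeholders for illustration, not V8's actual values:

#include <cstddef>
#include <cstdint>

// Placeholder constants for illustration; V8 defines its own values.
constexpr uint32_t kHashBitMask = (1u << 30) - 1;  // assumption: width of the hash field
constexpr uint32_t kZeroHash = 27;                 // assumption: value used when hash == 0

uint32_t StringHash(uint32_t seed, const uint8_t* chars, size_t length) {
  uint32_t hash = seed;                 // GenerateHashInit starts from the untagged seed
  for (size_t i = 0; i < length; ++i) {
    hash += chars[i];                   // add the character
    hash += hash << 10;
    hash ^= hash >> 6;                  // logical shift, matching the LSR change above
  }
  hash += hash << 3;                    // GenerateHashGetHash finalization
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;
  return hash == 0 ? kZeroHash : hash;
}
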
 
 
@@ -5124,10 +5424,12 @@
   // Check bounds and smi-ness.
   Register to = r6;
   Register from = r7;
+
   __ Ldrd(to, from, MemOperand(sp, kToOffset));
   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
   // I.e., arithmetic shift right by one un-smi-tags.
   __ mov(r2, Operand(to, ASR, 1), SetCC);
   __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
@@ -5136,70 +5438,83 @@
   __ b(mi, &runtime);  // From is negative.
 
   // Both to and from are smis.
-
   __ sub(r2, r2, Operand(r3), SetCC);
   __ b(mi, &runtime);  // Fail if from > to.
   // Special handling of sub-strings of length 1 and 2. One character strings
   // are handled in the runtime system (looked up in the single character
-  // cache). Two character strings are looked for in the symbol cache.
+  // cache). Two character strings are looked for in the symbol cache in
+  // generated code.
   __ cmp(r2, Operand(2));
   __ b(lt, &runtime);
 
-  // r2: length
-  // r3: from index (untaged smi)
+  // r2: result string length
+  // r3: from index (untagged smi)
   // r6 (a.k.a. to): to (smi)
   // r7 (a.k.a. from): from offset (smi)
-
   // Make sure first argument is a sequential (or flat) string.
-  __ ldr(r5, MemOperand(sp, kStringOffset));
+  __ ldr(r0, MemOperand(sp, kStringOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r5, Operand(kSmiTagMask));
-  __ b(eq, &runtime);
-  Condition is_string = masm->IsObjectStringType(r5, r1);
+  __ JumpIfSmi(r0, &runtime);
+  Condition is_string = masm->IsObjectStringType(r0, r1);
   __ b(NegateCondition(is_string), &runtime);
 
+  // Short-cut for the case of trivial substring.
+  Label return_r0;
+  // r0: original string
+  // r2: result string length
+  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
+  __ cmp(r2, Operand(r4, ASR, 1));
+  __ b(eq, &return_r0);
+
+  Label create_slice;
+  if (FLAG_string_slices) {
+    __ cmp(r2, Operand(SlicedString::kMinLength));
+    __ b(ge, &create_slice);
+  }
+
+  // r0: original string
   // r1: instance type
-  // r2: length
+  // r2: result string length
   // r3: from index (untagged smi)
-  // r5: string
   // r6 (a.k.a. to): to (smi)
   // r7 (a.k.a. from): from offset (smi)
   Label seq_string;
   __ and_(r4, r1, Operand(kStringRepresentationMask));
   STATIC_ASSERT(kSeqStringTag < kConsStringTag);
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
   __ cmp(r4, Operand(kConsStringTag));
-  __ b(gt, &runtime);  // External strings go to runtime.
+  __ b(gt, &runtime);  // Slices and external strings go to runtime.
   __ b(lt, &seq_string);  // Sequential strings are handled directly.
 
   // Cons string. Try to recurse (once) on the first substring.
   // (This adds a little more generality than necessary to handle flattened
   // cons strings, but not much).
-  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
-  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ ldr(r0, FieldMemOperand(r0, ConsString::kFirstOffset));
+  __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   __ tst(r1, Operand(kStringRepresentationMask));
   STATIC_ASSERT(kSeqStringTag == 0);
-  __ b(ne, &runtime);  // Cons and External strings go to runtime.
+  __ b(ne, &runtime);  // Cons, slices and external strings go to runtime.
 
   // Definitely a sequential string.
   __ bind(&seq_string);
 
-  // r1: instance type.
-  // r2: length
-  // r3: from index (untaged smi)
-  // r5: string
+  // r0: original string
+  // r1: instance type
+  // r2: result string length
+  // r3: from index (untagged smi)
   // r6 (a.k.a. to): to (smi)
   // r7 (a.k.a. from): from offset (smi)
-  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
+  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
   __ cmp(r4, Operand(to));
   __ b(lt, &runtime);  // Fail if to > length.
   to = no_reg;
 
-  // r1: instance type.
-  // r2: result string length.
-  // r3: from index (untaged smi)
-  // r5: string.
+  // r0: original string or left hand side of the original cons string.
+  // r1: instance type
+  // r2: result string length
+  // r3: from index (untagged smi)
   // r7 (a.k.a. from): from offset (smi)
   // Check for flat ASCII string.
   Label non_ascii_flat;
@@ -5213,82 +5528,146 @@
 
   // Sub string of length 2 requested.
   // Get the two characters forming the sub string.
-  __ add(r5, r5, Operand(r3));
-  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
-  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
+  __ add(r0, r0, Operand(r3));
+  __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+  __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));
 
   // Try to lookup two character string in symbol table.
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
       masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ jmp(&return_r0);
 
   // r2: result string length.
   // r3: two characters combined into halfword in little endian byte order.
   __ bind(&make_two_character_string);
   __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
   __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
-  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ jmp(&return_r0);
 
   __ bind(&result_longer_than_two);
 
+  // Locate 'from' character of string.
+  __ add(r5, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(from, ASR, 1));
+
   // Allocate the result.
   __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
 
-  // r0: result string.
-  // r2: result string length.
-  // r5: string.
+  // r0: result string
+  // r2: result string length
+  // r5: first character of substring to copy
   // r7 (a.k.a. from): from offset (smi)
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // Locate 'from' character of string.
-  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(from, ASR, 1));
 
-  // r0: result string.
-  // r1: first character of result string.
-  // r2: result string length.
-  // r5: first character of sub string to copy.
+  // r0: result string
+  // r1: first character of result string
+  // r2: result string length
+  // r5: first character of substring to copy
   STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                            COPY_ASCII | DEST_ALWAYS_ALIGNED);
-  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ jmp(&return_r0);
 
   __ bind(&non_ascii_flat);
-  // r2: result string length.
-  // r5: string.
+  // r0: original string
+  // r2: result string length
   // r7 (a.k.a. from): from offset (smi)
   // Check for flat two byte string.
 
+  // Locate 'from' character of string.
+  __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is twice the untagged index, which matches the size
+  // of a two-byte character.
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ add(r5, r5, Operand(from));
+
   // Allocate the result.
   __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
 
-  // r0: result string.
-  // r2: result string length.
-  // r5: string.
+  // r0: result string
+  // r2: result string length
+  // r5: first character of substring to copy
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Locate 'from' character of string.
-  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // As "from" is a smi it is 2 times the value which matches the size of a two
-  // byte character.
-  __ add(r5, r5, Operand(from));
+
   from = no_reg;
 
   // r0: result string.
   // r1: first character of result.
   // r2: result length.
-  // r5: first character of string to copy.
+  // r5: first character of substring to copy.
   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(
       masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+  __ jmp(&return_r0);
+
+  if (FLAG_string_slices) {
+    __ bind(&create_slice);
+    // r0: original string
+    // r1: instance type
+    // r2: length
+    // r3: from index (untagged smi)
+    // r6 (a.k.a. to): to (smi)
+    // r7 (a.k.a. from): from offset (smi)
+    Label allocate_slice, sliced_string, seq_string;
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ tst(r1, Operand(kStringRepresentationMask));
+    __ b(eq, &seq_string);
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ tst(r1, Operand(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ b(eq, &runtime);
+
+    __ tst(r1, Operand(kSlicedNotConsMask));
+    __ b(ne, &sliced_string);
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
+    __ LoadRoot(r9, Heap::kEmptyStringRootIndex);
+    __ cmp(r5, r9);
+    __ b(ne, &runtime);
+    __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+    __ add(r7, r7, r5);
+    __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ mov(r5, r0);
+
+    __ bind(&allocate_slice);
+    // r1: instance type of original string
+    // r2: length
+    // r5: underlying subject string
+    // r7 (a.k.a. from): from offset (smi)
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck the encoding
+    // of the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ tst(r1, Operand(kStringEncodingMask));
+    __ b(eq, &two_byte_slice);
+    __ AllocateAsciiSlicedString(r0, r2, r3, r4, &runtime);
+    __ jmp(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(r0, r2, r3, r4, &runtime);
+    __ bind(&set_slice_header);
+    __ str(r7, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+  }
+
+  __ bind(&return_r0);
   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Ret();
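
With FLAG_string_slices enabled, a substring of at least SlicedString::kMinLength characters is now represented as a parent string plus an offset instead of being copied; cons and sliced inputs are first reduced to their underlying string, accumulating offsets. A compact C++ model of that decision; the types and the kMinSliceLength constant are invented for illustration:

#include <cstdint>
#include <string>

constexpr uint32_t kMinSliceLength = 13;  // placeholder; V8 uses SlicedString::kMinLength

struct Slice { const std::string* parent; uint32_t offset, length; };

// Returns true and fills |out| when a slice may be created, mirroring the
// create_slice path; otherwise the stub copies the characters or calls the runtime.
bool TryMakeSlice(const std::string& subject, uint32_t from, uint32_t to, Slice* out) {
  uint32_t length = to - from;
  if (length < kMinSliceLength) return false;   // short results are copied instead
  // In the stub, a cons string contributes its first part (its second part must
  // be empty) and a sliced string contributes its parent while adding its own
  // offset; with std::string the subject is already "sequential".
  *out = Slice{&subject, from, length};
  return true;
}
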
@@ -5299,6 +5678,45 @@
 }
 
 
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2,
+                                                      Register scratch3) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
+  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ cmp(length, scratch2);
+  __ b(eq, &check_zero_length);
+  __ bind(&strings_not_equal);
+  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(length, Operand(length));
+  __ b(ne, &compare_chars);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, length, scratch2, scratch3,
+                                &strings_not_equal);
+
+  // Characters are equal.
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+}
+
+
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                         Register left,
                                                         Register right,
@@ -5306,7 +5724,7 @@
                                                         Register scratch2,
                                                         Register scratch3,
                                                         Register scratch4) {
-  Label compare_lengths;
+  Label result_not_equal, compare_lengths;
   // Find minimum length and length difference.
   __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
   __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
@@ -5318,46 +5736,56 @@
   __ tst(min_length, Operand(min_length));
   __ b(eq, &compare_lengths);
 
-  // Untag smi.
-  __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
+  // Compare loop.
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, min_length, scratch2, scratch4,
+                                &result_not_equal);
 
-  // Setup registers so that we only need to increment one register
-  // in the loop.
-  __ add(scratch2, min_length,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ add(left, left, Operand(scratch2));
-  __ add(right, right, Operand(scratch2));
-  // Registers left and right points to the min_length character of strings.
-  __ rsb(min_length, min_length, Operand(-1));
-  Register index = min_length;
-  // Index starts at -min_length.
-
-  {
-    // Compare loop.
-    Label loop;
-    __ bind(&loop);
-    // Compare characters.
-    __ add(index, index, Operand(1), SetCC);
-    __ ldrb(scratch2, MemOperand(left, index), ne);
-    __ ldrb(scratch4, MemOperand(right, index), ne);
-    // Skip to compare lengths with eq condition true.
-    __ b(eq, &compare_lengths);
-    __ cmp(scratch2, scratch4);
-    __ b(eq, &loop);
-    // Fallthrough with eq condition false.
-  }
-  // Compare lengths -  strings up to min-length are equal.
+  // Compare lengths - strings up to min-length are equal.
   __ bind(&compare_lengths);
   ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
-  // Use zero length_delta as result.
-  __ mov(r0, Operand(length_delta), SetCC, eq);
-  // Fall through to here if characters compare not-equal.
+  // Use length_delta as result if it's zero.
+  __ mov(r0, Operand(length_delta), SetCC);
+  __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // the last comparison performed in the loop above.
   __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
   __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
   __ Ret();
 }
 
 
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch1,
+    Register scratch2,
+    Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ add(scratch1, length,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(left, left, Operand(scratch1));
+  __ add(right, right, Operand(scratch1));
+  __ rsb(length, length, Operand::Zero());
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ ldrb(scratch1, MemOperand(left, index));
+  __ ldrb(scratch2, MemOperand(right, index));
+  __ cmp(scratch1, scratch2);
+  __ b(ne, chars_not_equal);
+  __ add(index, index, Operand(1), SetCC);
+  __ b(ne, &loop);
+}
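
GenerateAsciiCharsCompareLoop biases both string pointers by the length so that a single negative index walks both strings and the loop ends exactly when the index reaches zero, avoiding a separate bounds check. The same trick in plain C++, with pointer arithmetic standing in for MemOperand offsets:

#include <cstddef>

// Compares 'length' bytes of two character arrays the way the stub does:
// run an index from -length up to 0 against pointers advanced past the data.
bool AsciiCharsEqual(const char* left, const char* right, ptrdiff_t length) {
  left += length;                // point just past the last character
  right += length;
  for (ptrdiff_t index = -length; index != 0; ++index) {
    if (left[index] != right[index]) return false;  // chars_not_equal
  }
  return true;                   // fell out of the loop: all characters matched
}
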
+
+
 void StringCompareStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -5684,7 +6112,7 @@
 
   if (call_builtin.is_linked()) {
     __ bind(&call_builtin);
-    __ InvokeBuiltin(builtin_id, JUMP_JS);
+    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
   }
 }
 
@@ -5742,8 +6170,7 @@
   ASSERT(state_ == CompareIC::SMIS);
   Label miss;
   __ orr(r2, r1, r0);
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(ne, &miss);
+  __ JumpIfNotSmi(r2, &miss);
 
   if (GetCondition() == eq) {
     // For equality we do not care about the sign of the result.
@@ -5767,8 +6194,7 @@
   Label unordered;
   Label miss;
   __ and_(r2, r1, Operand(r0));
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, &generic_stub);
+  __ JumpIfSmi(r2, &generic_stub);
 
   __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
   __ b(ne, &miss);
@@ -5810,12 +6236,114 @@
 }
 
 
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+  Register tmp1 = r2;
+  Register tmp2 = r3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are symbols.
+  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, tmp1, Operand(tmp2));
+  __ tst(tmp1, Operand(kIsSymbolMask));
+  __ b(eq, &miss);
+
+  // Symbols are compared by identity.
+  __ cmp(left, right);
+  // Make sure r0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(r0));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+  Register tmp1 = r2;
+  Register tmp2 = r3;
+  Register tmp3 = r4;
+  Register tmp4 = r5;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ orr(tmp3, tmp1, tmp2);
+  __ tst(tmp3, Operand(kIsNotStringMask));
+  __ b(ne, &miss);
+
+  // Fast check for identical strings.
+  __ cmp(left, right);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+  __ Ret(eq);
+
+  // Handle not identical strings.
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  ASSERT(GetCondition() == eq);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp3, tmp1, Operand(tmp2));
+  __ tst(tmp3, Operand(kIsSymbolMask));
+  // Make sure r0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(r0));
+  __ Ret(ne);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+                                                  &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2, tmp3);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ Push(left, right);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
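
The new symbol and string compare handlers lean on interning: symbols compare by identity alone (two distinct symbols are unequal without looking at characters), while generic strings get an identity fast path, a flat-ASCII equality loop, and a runtime fallback. A simplified C++ model of that ordering; the Handle struct here is illustrative only:

#include <string>

struct Handle {
  const void* identity;    // object pointer stands in for the heap address
  bool is_symbol;          // interned string?
  bool is_flat_ascii;
  std::string chars;
};

enum class Result { EQUAL, NOT_EQUAL, RUNTIME };

// Mirrors the IC's ordering of checks for an equality compare.
Result CompareStrings(const Handle& left, const Handle& right) {
  if (left.identity == right.identity) return Result::EQUAL;        // fast identity check
  if (left.is_symbol && right.is_symbol) return Result::NOT_EQUAL;  // distinct symbols differ
  if (left.is_flat_ascii && right.is_flat_ascii) {
    return left.chars == right.chars ? Result::EQUAL : Result::NOT_EQUAL;
  }
  return Result::RUNTIME;   // Runtime::kStringEquals handles the rest
}
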
+
+
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
   Label miss;
   __ and_(r2, r1, Operand(r0));
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  __ JumpIfSmi(r2, &miss);
 
   __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
   __ b(ne, &miss);
@@ -5861,12 +6389,8 @@
 
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                     ExternalReference function) {
-  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
-                     RelocInfo::CODE_TARGET));
   __ mov(r2, Operand(function));
-  // Push return address (accessible to GC through exit frame pc).
-  __ str(pc, MemOperand(sp, 0));
-  __ Jump(r2);  // Call the api function.
+  GenerateCall(masm, r2);
 }
 
 
@@ -5875,8 +6399,247 @@
   __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                      RelocInfo::CODE_TARGET));
   // Push return address (accessible to GC through exit frame pc).
-  __ str(pc, MemOperand(sp, 0));
+  // Note that using pc with str is deprecated.
+  Label start;
+  __ bind(&start);
+  __ add(ip, pc, Operand(Assembler::kInstrSize));
+  __ str(ip, MemOperand(sp, 0));
   __ Jump(target);  // Call the C++ function.
+  ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
+            masm->SizeOfCodeGeneratedSince(&start));
+}
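
The replacement avoids storing pc directly (deprecated) and instead computes the return address by hand: reading pc on ARM yields the address of the current instruction plus 8 (kPcLoadDelta), so adding one more kInstrSize lands exactly past the add/str/jump sequence, which is what the ASSERT_EQ checks. The arithmetic, spelled out under those assumptions:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kInstrSize = 4;     // one ARM instruction
  const uint32_t kPcLoadDelta = 8;   // pc reads as current instruction + 8

  uint32_t start = 0x1000;                                // address of the add instruction
  uint32_t stored = (start + kPcLoadDelta) + kInstrSize;  // value written to [sp]
  uint32_t code_emitted = 3 * kInstrSize;                 // add + str + jump

  // The stored address is the instruction right after the jump, i.e. the return point.
  assert(stored == start + code_emitted);
  assert(kInstrSize + kPcLoadDelta == code_emitted);      // the ASSERT_EQ in the stub
  return 0;
}
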
+
+
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register receiver,
+    Register properties,
+    String* name,
+    Register scratch0) {
+  // If the names in the slots probed for the hash value (probes 1 to
+  // kProbes - 1) are not equal to the name, and the kProbes-th slot is unused
+  // (its name is the undefined value), then the hash table is guaranteed not
+  // to contain the property. This holds even if some slots hold deleted
+  // properties (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
+    __ sub(index, index, Operand(1));
+    __ and_(index, index, Operand(
+        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
+
+    Register entity_name = scratch0;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    Register tmp = properties;
+    __ add(tmp, properties, Operand(index, LSL, 1));
+    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    ASSERT(!tmp.is(entity_name));
+    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+    __ cmp(entity_name, tmp);
+    __ b(eq, done);
+
+    if (i != kInlinedProbes - 1) {
+      // Stop if found the property.
+      __ cmp(entity_name, Operand(Handle<String>(name)));
+      __ b(eq, miss);
+
+      // Check if the entry name is not a symbol.
+      __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+      __ ldrb(entity_name,
+              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+      __ tst(entity_name, Operand(kIsSymbolMask));
+      __ b(eq, miss);
+
+      // Restore the properties.
+      __ ldr(properties,
+             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+    }
+  }
+
+  const int spill_mask =
+      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
+       r2.bit() | r1.bit() | r0.bit());
+
+  __ stm(db_w, sp, spill_mask);
+  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ mov(r1, Operand(Handle<String>(name)));
+  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ tst(r0, Operand(r0));
+  __ ldm(ia_w, sp, spill_mask);
+
+  __ b(eq, done);
+  __ b(ne, miss);
+  return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register scratch1,
+                                                        Register scratch2) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  // Compute the capacity mask.
+  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
+  __ sub(scratch1, scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ add(scratch2, scratch2, Operand(
+          StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    }
+    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+    // Check if the key is identical to the name.
+    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
+    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ cmp(name, Operand(ip));
+    __ b(eq, done);
+  }
+
+  const int spill_mask =
+      (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
+       r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
+      ~(scratch1.bit() | scratch2.bit());
+
+  __ stm(db_w, sp, spill_mask);
+  __ Move(r0, elements);
+  __ Move(r1, name);
+  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ tst(r0, Operand(r0));
+  __ mov(scratch2, Operand(r2));
+  __ ldm(ia_w, sp, spill_mask);
+
+  __ b(ne, done);
+  __ b(eq, miss);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  result: StringDictionary to probe.
+  //  key: the key to look up.
+  //  index: will hold the index of the entry if the lookup is successful;
+  //         might alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.
+
+  Register result = r0;
+  Register dictionary = r0;
+  Register key = r1;
+  Register index = r2;
+  Register mask = r3;
+  Register hash = r4;
+  Register undefined = r5;
+  Register entry_key = r6;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ mov(mask, Operand(mask, ASR, kSmiTagSize));
+  __ sub(mask, mask, Operand(1));
+
+  __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ add(index, hash, Operand(
+          StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    } else {
+      __ mov(index, Operand(hash));
+    }
+    __ and_(index, mask, Operand(index, LSR, String::kHashShift));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
+
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ add(index, dictionary, Operand(index, LSL, 2));
+    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Having undefined at this place means the name is not contained.
+    __ cmp(entry_key, Operand(undefined));
+    __ b(eq, &not_in_dictionary);
+
+    // Stop if found the property.
+    __ cmp(entry_key, Operand(key));
+    __ b(eq, &in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a symbol.
+      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ ldrb(entry_key,
+              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ tst(entry_key, Operand(kIsSymbolMask));
+      __ b(eq, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing a negative lookup, then probing failure should be
+  // treated as a lookup success. For a positive lookup, probing failure
+  // should be treated as a lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ mov(result, Operand::Zero());
+    __ Ret();
+  }
+
+  __ bind(&in_dictionary);
+  __ mov(result, Operand(1));
+  __ Ret();
+
+  __ bind(&not_in_dictionary);
+  __ mov(result, Operand::Zero());
+  __ Ret();
 }
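
Both lookup helpers and the Generate() body above walk the dictionary with the probe sequence the comments describe, (hash + i + i*i) & mask over a power-of-two capacity, stopping early on an undefined slot (definitely absent) or a matching key. A minimal C++ sketch of that probe order over a toy table; the entry layout is simplified and the probe-offset formula is taken from the in-code comments:

#include <cstdint>
#include <string>
#include <vector>

struct Entry { std::string key; };   // toy dictionary entry (V8 stores 3 words per entry)

enum class Lookup { FOUND, ABSENT, INCONCLUSIVE };

// Probes a power-of-two sized table in the order the stub's comments describe.
Lookup Probe(const std::vector<Entry>& table, const std::string& key,
             uint32_t hash, int max_probes, int* index_out) {
  uint32_t mask = static_cast<uint32_t>(table.size()) - 1;   // capacity is 2^n
  for (int i = 0; i < max_probes; ++i) {
    uint32_t index = (hash + static_cast<uint32_t>(i + i * i)) & mask;
    const Entry& e = table[index];
    if (e.key.empty()) return Lookup::ABSENT;   // "undefined" slot: name not contained
    if (e.key == key) {
      *index_out = static_cast<int>(index);     // found the property
      return Lookup::FOUND;
    }
    // A NEGATIVE_LOOKUP additionally gives up early on non-symbol keys
    // (not modelled here).
  }
  return Lookup::INCONCLUSIVE;   // maps to maybe_in_dictionary above
}
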
 
 
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index d82afc7..cdea03e 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -58,41 +58,91 @@
 };
 
 
-class ToBooleanStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
-  void Generate(MacroAssembler* masm);
+  UnaryOpStub(Token::Value op,
+              UnaryOverwriteMode mode,
+              UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
+      : op_(op),
+        mode_(mode),
+        operand_type_(operand_type) {
+  }
 
  private:
-  Register tos_;
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return tos_.code(); }
+  Token::Value op_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
+  virtual void PrintName(StringStream* stream);
+
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
+  }
+
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switch statements more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
 };
 
 
-class TypeRecordingBinaryOpStub: public CodeStub {
+class BinaryOpStub: public CodeStub {
  public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
-        name_(NULL) {
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED) {
     use_vfp3_ = CpuFeatures::IsSupported(VFP3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_vfp3_(VFP3Bits::decode(key)),
         operands_type_(operands_type),
-        result_type_(result_type),
-        name_(NULL) { }
+        result_type_(result_type) { }
 
  private:
   enum SmiCodeGenerateHeapNumberResults {
@@ -105,32 +155,19 @@
   bool use_vfp3_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
-           "(mode %d, runtime_type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
-  }
-#endif
+  virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class VFP3Bits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -158,6 +195,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
   void GenerateCallRuntime(MacroAssembler* masm);
@@ -172,21 +210,85 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
 };
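
The BinaryOpStub minor key packs its parameters into 16 bits exactly as the RRRTTTVOOOOOOOMM comment above spells out: 2 mode bits, 7 op bits, 1 VFP3 bit, then 3 bits each of operand and result type info. A stand-alone sketch of the same packing with a simplified BitField helper (V8 supplies its own template):

#include <cstdint>

// Simplified version of V8's BitField helper, for illustration.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t key) { return static_cast<T>((key & kMask) >> shift); }
};

// Layout from the comment: RRRTTTVOOOOOOOMM (bit 0 on the right).
using ModeBits            = BitField<int, 0, 2>;   // MM
using OpBits              = BitField<int, 2, 7>;   // OOOOOOO
using VFP3Bits            = BitField<bool, 9, 1>;  // V
using OperandTypeInfoBits = BitField<int, 10, 3>;  // TTT
using ResultTypeInfoBits  = BitField<int, 13, 3>;  // RRR

uint32_t MinorKey(int op, int mode, bool vfp3, int operands_type, int result_type) {
  return OpBits::encode(op) | ModeBits::encode(mode) | VFP3Bits::encode(vfp3) |
         OperandTypeInfoBits::encode(operands_type) |
         ResultTypeInfoBits::encode(result_type);
}
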
 
 
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                         Register dest,
+                                         Register src,
+                                         Register count,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Register scratch4,
+                                         Register scratch5,
+                                         int flags);
+
+
+  // Probe the symbol table for a two character string. If the string is
+  // not found by probing, a jump to the label not_found is performed. This
+  // jump does not guarantee that the string is not in the symbol table. If
+  // the string is found, the code falls through with the string in register
+  // r0. Contents of both c1 and c2 registers are modified. At the exit, c1 is
+  // guaranteed to contain a halfword with low and high bytes equal to the
+  // initial contents of c1 and c2 respectively.
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4,
+                                                   Register scratch5,
+                                                   Label* not_found);
+
+  // Generate string hash.
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character);
+
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character);
+
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
@@ -240,8 +342,7 @@
  public:
   StringCompareStub() { }
 
-  // Compare two flat ASCII strings and returns result in r0.
-  // Does not use the stack.
+  // Compares two flat ASCII strings and returns result in r0.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -250,11 +351,27 @@
                                               Register scratch3,
                                               Register scratch4);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns result
+  // in r0.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register length,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* chars_not_equal);
 };
 
 
@@ -289,12 +406,6 @@
   }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
-  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
 };
 
 
@@ -321,8 +432,6 @@
   int MinorKey() { return 0; }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "NumberToStringStub"; }
 };
 
 
@@ -340,8 +449,6 @@
   int MinorKey() { return 0; }
 
   bool NeedsImmovableCode() { return true; }
-
-  const char* GetName() { return "RegExpCEntryStub"; }
 };
 
 
@@ -362,8 +469,197 @@
   int MinorKey() { return 0; }
 
   bool NeedsImmovableCode() { return true; }
+};
 
-  const char* GetName() { return "DirectCEntryStub"; }
+
+class FloatingPointHelper : public AllStatic {
+ public:
+  enum Destination {
+    kVFPRegisters,
+    kCoreRegisters
+  };
+
+
+  // Loads smis from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+  // destination is floating point registers, VFP3 must be supported. If core
+  // registers are requested when VFP3 is supported, d6 and d7 will be
+  // scratched.
+  static void LoadSmis(MacroAssembler* masm,
+                       Destination destination,
+                       Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+  // destination is floating point registers, VFP3 must be supported. If core
+  // registers are requested when VFP3 is supported, d6 and d7 will still be
+  // scratched. If either r0 or r1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with r0 and r1 intact.
+  static void LoadOperands(MacroAssembler* masm,
+                           FloatingPointHelper::Destination destination,
+                           Register heap_number_map,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* not_number);
+
+  // Convert the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1.
+  static void ConvertNumberToInt32(MacroAssembler* masm,
+                                   Register object,
+                                   Register dst,
+                                   Register heap_number_map,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   DwVfpRegister double_scratch,
+                                   Label* not_int32);
+
+  // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+  // the result either in |double_dst| or |dst2:dst1|, depending on
+  // |destination|.
+  // Warning: The value in |int_scratch| will be changed in the process!
+  static void ConvertIntToDouble(MacroAssembler* masm,
+                                 Register int_scratch,
+                                 Destination destination,
+                                 DwVfpRegister double_dst,
+                                 Register dst1,
+                                 Register dst2,
+                                 Register scratch2,
+                                 SwVfpRegister single_scratch);
+
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm,
+                                      Register object,
+                                      Destination destination,
+                                      DwVfpRegister double_dst,
+                                      Register dst1,
+                                      Register dst2,
+                                      Register heap_number_map,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      SwVfpRegister single_scratch,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when VFP3 is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm,
+                                Register object,
+                                Register dst,
+                                Register heap_number_map,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                DwVfpRegister double_scratch,
+                                Label* not_int32);
+
+  // Generates non-VFP3 code to check if a double can be exactly represented
+  // by a 32-bit integer. This does not check for 0 or -0, which need to be
+  // checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and
+  // falls through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: the 32 higher bits of the mantissa (mantissa[51:20]).
+  // - src2: contains 1.
+  // - other registers are clobbered.
+  static void DoubleIs32BitInteger(MacroAssembler* masm,
+                                   Register src1,
+                                   Register src2,
+                                   Register dst,
+                                   Register scratch,
+                                   Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when VFP3 is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in r0.
+  // Register heap_number_result must be a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);
+
+ private:
+  static void LoadNumber(MacroAssembler* masm,
+                         FloatingPointHelper::Destination destination,
+                         Register object,
+                         DwVfpRegister dst,
+                         Register dst1,
+                         Register dst2,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         Label* not_number);
+};
+
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register receiver,
+      Register properties,
+      String* name,
+      Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return LookupModeBits::encode(mode_);
+  }
+
+  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+  LookupMode mode_;
 };
 
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 01aa805..d27982a 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -58,9 +58,7 @@
   // Print the code after compiling it.
   static void PrintCode(Handle<Code> code, CompilationInfo* info);
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
-#endif
 
   static void SetFunctionInfo(Handle<JSFunction> fun,
                               FunctionLiteral* lit,
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 6c82c12..d4f251f 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -35,7 +35,7 @@
 namespace v8 {
 namespace internal {
 
-int Deoptimizer::table_entry_size_ = 16;
+const int Deoptimizer::table_entry_size_ = 16;
 
 
 int Deoptimizer::patch_size() {
@@ -44,12 +44,6 @@
 }
 
 
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
-  // Nothing to do. No new relocation information is written for lazy
-  // deoptimization on ARM.
-}
-
-
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   HandleScope scope;
   AssertNoAllocation no_allocation;
@@ -58,59 +52,38 @@
 
   // Get the optimized code.
   Code* code = function->code();
+  Address code_start_address = code->instruction_start();
 
   // Invalidate the relocation information, as it will become invalid by the
   // code patching below, and is not needed any more.
   code->InvalidateRelocation();
 
-  // For each return after a safepoint insert an absolute call to the
-  // corresponding deoptimization entry.
-  unsigned last_pc_offset = 0;
-  SafepointTable table(function->code());
-  for (unsigned i = 0; i < table.length(); i++) {
-    unsigned pc_offset = table.GetPcOffset(i);
-    SafepointEntry safepoint_entry = table.GetEntry(i);
-    int deoptimization_index = safepoint_entry.deoptimization_index();
-    int gap_code_size = safepoint_entry.gap_code_size();
-    // Check that we did not shoot past next safepoint.
-    CHECK(pc_offset >= last_pc_offset);
+  // For each LLazyBailout instruction insert a call to the corresponding
+  // deoptimization entry.
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
 #ifdef DEBUG
-    // Destroy the code which is not supposed to be run again.
-    int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
-    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                          instructions);
-    for (int x = 0; x < instructions; x++) {
-      destroyer.masm()->bkpt(0);
-    }
+  Address prev_call_address = NULL;
 #endif
-    last_pc_offset = pc_offset;
-    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
-          deoptimization_index, Deoptimizer::LAZY);
-      last_pc_offset += gap_code_size;
-      int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
-                                                        RelocInfo::NONE);
-      int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
-      ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
-      ASSERT(call_size_in_bytes <= patch_size());
-      CodePatcher patcher(code->instruction_start() + last_pc_offset,
-                          call_size_in_words);
-      patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
-      last_pc_offset += call_size_in_bytes;
-    }
-  }
-
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+                                                      RelocInfo::NONE);
+    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+    ASSERT(call_size_in_bytes <= patch_size());
+    CodePatcher patcher(call_address, call_size_in_words);
+    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+    ASSERT(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    ASSERT(call_address + patch_size() <= code->instruction_end());
 
 #ifdef DEBUG
-  // Destroy the code which is not supposed to be run again.
-  int instructions =
-      (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
-  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                        instructions);
-  for (int x = 0; x < instructions; x++) {
-    destroyer.masm()->bkpt(0);
-  }
+    prev_call_address = call_address;
 #endif
+  }
 
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
@@ -125,11 +98,6 @@
     PrintF("[forced deoptimization: ");
     function->PrintName();
     PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
-    if (FLAG_print_code) {
-      code->PrintLn();
-    }
-#endif
   }
 }
 
@@ -270,6 +238,9 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -385,6 +356,9 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -519,7 +493,7 @@
 
 
   // Set the continuation for the topmost frame.
-  if (is_topmost) {
+  if (is_topmost && bailout_type_ != DEBUGGER) {
     Builtins* builtins = isolate_->builtins();
     Code* continuation = (bailout_type_ == EAGER)
         ? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -527,14 +501,32 @@
     output_frame->SetContinuation(
         reinterpret_cast<uint32_t>(continuation->entry()));
   }
+}
 
-  if (output_count_ - 1 == frame_index) iterator->Done();
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  // Set the register values. The values are not important as there are no
+  // callee-saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are set to the correct values, though.
+
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+  }
 }
 
 
 #define __ masm()->
 
-
 // This code tries to be close to ia32 code so that any changes can be
 // easily ported.
 void Deoptimizer::EntryGenerator::Generate() {
@@ -552,15 +544,25 @@
   const int kDoubleRegsSize =
       kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
 
-  // Save all general purpose registers before messing with them.
-  __ sub(sp, sp, Operand(kDoubleRegsSize));
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
-    DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
-    __ vstr(vfp_reg, sp, offset);
+  // Save all VFP registers before messing with them.
+  DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+  DwVfpRegister last =
+      DwVfpRegister::FromAllocationIndex(
+          DwVfpRegister::kNumAllocatableRegisters - 1);
+  ASSERT(last.code() > first.code());
+  ASSERT((last.code() - first.code()) ==
+      (DwVfpRegister::kNumAllocatableRegisters - 1));
+#ifdef DEBUG
+  for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
+    ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+           (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
   }
+#endif
+  __ vstm(db_w, sp, first, last);
 
   // Push all 16 registers (needed to populate FrameDescription::registers_).
+  // TODO(1588): Using pc with stm is deprecated, so we should perhaps handle
+  // this a bit differently.
   __ stm(db_w, sp, restored_regs  | sp.bit() | lr.bit() | pc.bit());
 
   const int kSavedRegistersAreaSize =
@@ -702,9 +704,7 @@
   __ pop(ip);  // remove sp
   __ pop(ip);  // remove lr
 
-  // Set up the roots register.
-  ExternalReference roots_address = ExternalReference::roots_address(isolate);
-  __ mov(r10, Operand(roots_address));
+  __ InitializeRootRegister();
 
   __ pop(ip);  // remove pc
   __ pop(r7);  // get continuation, leave pc on stack
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index a3775b5..603b3cf 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -200,7 +200,7 @@
 
 // These shift names are defined in a way to match the native disassembler
 // formatting. See for example the command "objdump -d <binary file>".
-static const char* shift_names[kNumberOfShifts] = {
+static const char* const shift_names[kNumberOfShifts] = {
   "lsl", "lsr", "asr", "ror"
 };
 
@@ -502,13 +502,16 @@
         ASSERT(STRING_STARTS_WITH(format, "memop"));
         if (instr->HasL()) {
           Print("ldr");
-        } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
-          if (instr->Bits(7, 4) == 0xf) {
-            Print("strd");
-          } else {
-            Print("ldrd");
-          }
         } else {
+          if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
+              (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
+            if (instr->Bit(5) == 1) {
+              Print("strd");
+            } else {
+              Print("ldrd");
+            }
+            return 5;
+          }
           Print("str");
         }
         return 5;
@@ -1086,10 +1089,10 @@
         }
       } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
         // vabs
-        Format(instr, "vabs'cond 'Dd, 'Dm");
+        Format(instr, "vabs.f64'cond 'Dd, 'Dm");
       } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
         // vneg
-        Format(instr, "vneg'cond 'Dd, 'Dm");
+        Format(instr, "vneg.f64'cond 'Dd, 'Dm");
       } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
       } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 84e108b..26bbd82 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -93,10 +93,11 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset  = 0 * kPointerSize;
-  static const int kStateOffset = 1 * kPointerSize;
-  static const int kFPOffset    = 2 * kPointerSize;
-  static const int kPCOffset    = 3 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kStateOffset   = 1 * kPointerSize;
+  static const int kContextOffset = 2 * kPointerSize;
+  static const int kFPOffset      = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
   static const int kSize = kPCOffset + kPointerSize;
 };
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 871b453..50ed8b1 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -46,6 +46,11 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
@@ -86,17 +91,19 @@
   }
 
   void EmitPatchInfo() {
-    int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
-    Register reg;
-    reg.set_code(delta_to_patch_site / kOff12Mask);
-    __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+    if (patch_site_.is_bound()) {
+      int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+      Register reg;
+      reg.set_code(delta_to_patch_site / kOff12Mask);
+      __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
 #ifdef DEBUG
-    info_emitted_ = true;
+      info_emitted_ = true;
 #endif
+    } else {
+      __ nop();  // Signals no inlined code.
+    }
   }
 
-  bool is_bound() const { return patch_site_.is_bound(); }
-
  private:
   MacroAssembler* masm_;
   Label patch_site_;
@@ -123,6 +130,7 @@
 void FullCodeGenerator::Generate(CompilationInfo* info) {
   ASSERT(info_ == NULL);
   info_ = info;
+  scope_ = info->scope();
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -133,7 +141,21 @@
   }
 #endif
 
-  int locals_count = scope()->num_stack_slots();
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). r5 is zero for method calls and non-zero for
+  // function calls.
+  if (info->is_strict_mode() || info->is_native()) {
+    Label ok;
+    __ cmp(r5, Operand(0));
+    __ b(eq, &ok);
+    int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ str(r2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
+  int locals_count = info->scope()->num_stack_slots();
 
   __ Push(lr, fp, cp, r1);
   if (locals_count > 0) {
@@ -153,7 +175,7 @@
   bool function_in_register = true;
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is in r1.
@@ -162,23 +184,23 @@
       FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
-      __ CallRuntime(Runtime::kNewContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in both r0 and cp.  It replaces the context
     // passed to us.  It's saved in the stack and kept live in cp.
     __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     // Copy any necessary parameters into the context.
-    int num_parameters = scope()->num_parameters();
+    int num_parameters = info->scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->AsSlot();
-      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+        __ mov(r1, Operand(Context::SlotOffset(var->index())));
         __ str(r0, MemOperand(cp, r1));
         // Update the write barrier. This clobbers all involved
         // registers, so we have to use two more registers to avoid
@@ -200,28 +222,29 @@
       __ mov(r3, r1);
     }
     // Receiver is just before the parameters on the caller's stack.
-    int offset = scope()->num_parameters() * kPointerSize;
+    int num_parameters = info->scope()->num_parameters();
+    int offset = num_parameters * kPointerSize;
     __ add(r2, fp,
            Operand(StandardFrameConstants::kCallerSPOffset + offset));
-    __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
+    __ mov(r1, Operand(Smi::FromInt(num_parameters)));
     __ Push(r3, r2, r1);
 
     // Arguments to ArgumentsAccessStub:
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
-    ArgumentsAccessStub stub(
-        is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
-                         : ArgumentsAccessStub::NEW_NON_STRICT);
+    ArgumentsAccessStub::Type type;
+    if (is_strict_mode()) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+    }
+    ArgumentsAccessStub stub(type);
     __ CallStub(&stub);
 
-    Variable* arguments_shadow = scope()->arguments_shadow();
-    if (arguments_shadow != NULL) {
-      // Duplicate the value; move-to-slot operation might clobber registers.
-      __ mov(r3, r0);
-      Move(arguments_shadow->AsSlot(), r3, r1, r2);
-    }
-    Move(arguments->AsSlot(), r0, r1, r2);
+    SetVar(arguments, r0, r1, r2);
   }
 
   if (FLAG_trace) {
@@ -235,17 +258,19 @@
     scope()->VisitIllegalRedeclaration(this);
 
   } else {
+    PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
     { Comment cmnt(masm_, "[ Declarations");
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
       Label ok;
       __ LoadRoot(ip, Heap::kStackLimitRootIndex);
       __ cmp(sp, Operand(ip));
@@ -325,7 +350,7 @@
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       // Here we use masm_-> instead of the __ macro to avoid the code coverage
       // tool from instrumenting as we rely on the code size here.
-      int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+      int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
       CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
       __ RecordJSReturn();
       masm_->mov(sp, fp);
@@ -344,26 +369,30 @@
 }
 
 
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
-  codegen()->Move(result_register(), slot);
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
 }
 
 
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
-  codegen()->Move(result_register(), slot);
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
   __ push(result_register());
 }
 
 
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
   // For simplicity we always test the accumulator register.
-  codegen()->Move(result_register(), slot);
+  codegen()->GetVar(result_register(), var);
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
 }
 
 
@@ -397,7 +426,7 @@
     if (true_label_ != fall_through_) __ b(true_label_);
   } else {
     __ LoadRoot(result_register(), index);
-    codegen()->DoTest(true_label_, false_label_, fall_through_);
+    codegen()->DoTest(this);
   }
 }
 
@@ -444,7 +473,7 @@
   } else {
     // For simplicity we always test the accumulator register.
     __ mov(result_register(), Operand(lit));
-    codegen()->DoTest(true_label_, false_label_, fall_through_);
+    codegen()->DoTest(this);
   }
 }
 
@@ -480,7 +509,7 @@
   __ Drop(count);
   __ Move(result_register(), reg);
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
 }
 
 
@@ -558,27 +587,11 @@
 }
 
 
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+                               Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
   if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    // Emit the inlined tests assumed by the stub.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(result_register(), ip);
-    __ b(eq, if_false);
-    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-    __ cmp(result_register(), ip);
-    __ b(eq, if_true);
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(result_register(), ip);
-    __ b(eq, if_false);
-    STATIC_ASSERT(kSmiTag == 0);
-    __ tst(result_register(), result_register());
-    __ b(eq, if_false);
-    __ JumpIfSmi(result_register(), if_true);
-
-    // Call the ToBoolean stub for all other cases.
     ToBooleanStub stub(result_register());
     __ CallStub(&stub);
     __ tst(result_register(), result_register());
@@ -590,8 +603,6 @@
     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
     __ cmp(r0, ip);
   }
-
-  // The stub returns nonzero for true.
   Split(ne, if_true, if_false, fall_through);
 }
 
@@ -611,45 +622,54 @@
 }
 
 
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-    case Slot::LOCAL:
-      return MemOperand(fp, SlotOffset(slot));
-    case Slot::CONTEXT: {
-      int context_chain_length =
-          scope()->ContextChainLength(slot->var()->scope());
-      __ LoadContext(scratch, context_chain_length);
-      return ContextOperand(scratch, slot->index());
-    }
-    case Slot::LOOKUP:
-      UNREACHABLE();
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  ASSERT(var->IsStackAllocated());
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
   }
-  UNREACHABLE();
-  return MemOperand(r0, 0);
+  return MemOperand(fp, offset);
 }
 
 
-void FullCodeGenerator::Move(Register destination, Slot* source) {
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
   // Use destination as scratch.
-  MemOperand slot_operand = EmitSlotSearch(source, destination);
-  __ ldr(destination, slot_operand);
+  MemOperand location = VarOperand(var, dest);
+  __ ldr(dest, location);
 }
 
 
-void FullCodeGenerator::Move(Slot* dst,
-                             Register src,
-                             Register scratch1,
-                             Register scratch2) {
-  ASSERT(dst->type() != Slot::LOOKUP);  // Not yet implemented.
-  ASSERT(!scratch1.is(src) && !scratch2.is(src));
-  MemOperand location = EmitSlotSearch(dst, scratch1);
+void FullCodeGenerator::SetVar(Variable* var,
+                               Register src,
+                               Register scratch0,
+                               Register scratch1) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  ASSERT(!scratch0.is(src));
+  ASSERT(!scratch0.is(scratch1));
+  ASSERT(!scratch1.is(src));
+  MemOperand location = VarOperand(var, scratch0);
   __ str(src, location);
   // Emit the write barrier code if the location is in the heap.
-  if (dst->type() == Slot::CONTEXT) {
-    __ RecordWrite(scratch1,
-                   Operand(Context::SlotOffset(dst->index())),
-                   scratch2,
+  if (var->IsContextSlot()) {
+    __ RecordWrite(scratch0,
+                   Operand(Context::SlotOffset(var->index())),
+                   scratch1,
                    src);
   }
 }
@@ -682,127 +702,103 @@
 }
 
 
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
+void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
                                         Variable::Mode mode,
-                                        FunctionLiteral* function) {
-  Comment cmnt(masm_, "[ Declaration");
-  ASSERT(variable != NULL);  // Must have been resolved.
-  Slot* slot = variable->AsSlot();
-  Property* prop = variable->AsProperty();
+                                        FunctionLiteral* function,
+                                        int* global_count) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      ++(*global_count);
+      break;
 
-  if (slot != NULL) {
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        if (mode == Variable::CONST) {
-          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-          __ str(ip, MemOperand(fp, SlotOffset(slot)));
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
-        }
-        break;
-
-      case Slot::CONTEXT:
-        // We bypass the general EmitSlotSearch because we know more about
-        // this specific context.
-
-        // The variable in the decl always resides in the current function
-        // context.
-        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-        if (FLAG_debug_code) {
-          // Check that we're not inside a 'with'.
-          __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
-          __ cmp(r1, cp);
-          __ Check(eq, "Unexpected declaration in current context.");
-        }
-        if (mode == Variable::CONST) {
-          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-          __ str(ip, ContextOperand(cp, slot->index()));
-          // No write barrier since the_hole_value is in old space.
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ str(result_register(), ContextOperand(cp, slot->index()));
-          int offset = Context::SlotOffset(slot->index());
-          // We know that we have written a function, which is not a smi.
-          __ mov(r1, Operand(cp));
-          __ RecordWrite(r1, Operand(offset), r2, result_register());
-        }
-        break;
-
-      case Slot::LOOKUP: {
-        __ mov(r2, Operand(variable->name()));
-        // Declaration nodes are always introduced in one of two modes.
-        ASSERT(mode == Variable::VAR ||
-               mode == Variable::CONST);
-        PropertyAttributes attr =
-            (mode == Variable::VAR) ? NONE : READ_ONLY;
-        __ mov(r1, Operand(Smi::FromInt(attr)));
-        // Push initial value, if any.
-        // Note: For variables we must not push an initial value (such as
-        // 'undefined') because we may have a (legal) redeclaration and we
-        // must not destroy the current value.
-        if (mode == Variable::CONST) {
-          __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-          __ Push(cp, r2, r1, r0);
-        } else if (function != NULL) {
-          __ Push(cp, r2, r1);
-          // Push initial value for function declaration.
-          VisitForStackValue(function);
-        } else {
-          __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
-          __ Push(cp, r2, r1, r0);
-        }
-        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-        break;
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (function != NULL) {
+        Comment cmnt(masm_, "[ Declaration");
+        VisitForAccumulatorValue(function);
+        __ str(result_register(), StackOperand(variable));
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        Comment cmnt(masm_, "[ Declaration");
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ str(ip, StackOperand(variable));
       }
-    }
+      break;
 
-  } else if (prop != NULL) {
-    if (function != NULL || mode == Variable::CONST) {
-      // We are declaring a function or constant that rewrites to a
-      // property.  Use (keyed) IC to set the initial value.  We
-      // cannot visit the rewrite because it's shared and we risk
-      // recording duplicate AST IDs for bailouts from optimized code.
-      ASSERT(prop->obj()->AsVariableProxy() != NULL);
-      { AccumulatorValueContext for_object(this);
-        EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+    case Variable::CONTEXT:
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+        __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+        __ Check(ne, "Declaration in with context.");
+        __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+        __ Check(ne, "Declaration in catch context.");
       }
       if (function != NULL) {
-        __ push(r0);
+        Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
-        __ pop(r2);
-      } else {
-        __ mov(r2, r0);
-        __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+        __ str(result_register(), ContextOperand(cp, variable->index()));
+        int offset = Context::SlotOffset(variable->index());
+        // We know that we have written a function, which is not a smi.
+        __ mov(r1, Operand(cp));
+        __ RecordWrite(r1, Operand(offset), r2, result_register());
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        Comment cmnt(masm_, "[ Declaration");
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ str(ip, ContextOperand(cp, variable->index()));
+        // No write barrier since the_hole_value is in old space.
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
       }
-      ASSERT(prop->key()->AsLiteral() != NULL &&
-             prop->key()->AsLiteral()->handle()->IsSmi());
-      __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
+      break;
 
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
-      // Value in r0 is ignored (declarations are statements).
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ Declaration");
+      __ mov(r2, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ mov(r1, Operand(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (function != NULL) {
+        __ Push(cp, r2, r1);
+        // Push initial value for function declaration.
+        VisitForStackValue(function);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+        __ Push(cp, r2, r1, r0);
+      } else {
+        __ mov(r0, Operand(Smi::FromInt(0)));  // Indicates no initial value.
+        __ Push(cp, r2, r1, r0);
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
     }
   }
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
-  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
-}
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
 
 
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
-  __ mov(r2, Operand(pairs));
-  __ mov(r1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
-  __ Push(cp, r2, r1, r0);
-  __ CallRuntime(Runtime::kDeclareGlobals, 4);
+  __ mov(r1, Operand(pairs));
+  __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
+  __ Push(cp, r1, r0);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
@@ -857,7 +853,9 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    EmitCallIC(ic, &patch_site);
+    __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    patch_site.EmitPatchInfo();
+
     __ cmp(r0, Operand(0));
     __ b(ne, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -869,7 +867,7 @@
   __ bind(&next_test);
   __ Drop(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
-    __ b(nested_statement.break_target());
+    __ b(nested_statement.break_label());
   } else {
     __ b(default_clause->body_target());
   }
@@ -883,7 +881,7 @@
     VisitStatements(clause->statements());
   }
 
-  __ bind(nested_statement.break_target());
+  __ bind(nested_statement.break_label());
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
@@ -911,11 +909,11 @@
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(r0, &convert);
-  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
-  __ b(hs, &done_convert);
+  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+  __ b(ge, &done_convert);
   __ bind(&convert);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ bind(&done_convert);
   __ push(r0);
 
@@ -943,9 +941,8 @@
   // check for an enum cache.  Leave the map in r2 for the subsequent
   // prototype load.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset));
-  __ cmp(r3, empty_descriptor_array_value);
-  __ b(eq, &call_runtime);
+  __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(r3, &call_runtime);
 
   // Check that there is an enum cache in the non-empty instance
   // descriptors (r3).  This is the case if the next enumeration
@@ -990,7 +987,7 @@
 
   // We got a map in register r0. Get the enumeration cache from it.
   __ bind(&use_cache);
-  __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(r0, r1);
   __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
   __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
@@ -1015,7 +1012,7 @@
   // Load the current count to r0, load the length to r1.
   __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
   __ cmp(r0, r1);  // Compare to the array length.
-  __ b(hs, loop_statement.break_target());
+  __ b(hs, loop_statement.break_label());
 
   // Get the current entry of the array into register r3.
   __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
@@ -1039,9 +1036,9 @@
   // just skip it.
   __ push(r1);  // Enumerable.
   __ push(r3);  // Current entry.
-  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
   __ mov(r3, Operand(r0), SetCC);
-  __ b(eq, loop_statement.continue_target());
+  __ b(eq, loop_statement.continue_label());
 
   // Update the 'each' property or variable from the possibly filtered
   // entry in register r3.
@@ -1057,7 +1054,7 @@
 
   // Generate code for the going to the next element by incrementing
   // the index (smi) stored on top of the stack.
-  __ bind(loop_statement.continue_target());
+  __ bind(loop_statement.continue_label());
   __ pop(r0);
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
@@ -1066,7 +1063,7 @@
   __ b(&loop);
 
   // Remove the pointers stored on the stack.
-  __ bind(loop_statement.break_target());
+  __ bind(loop_statement.break_label());
   __ Drop(5);
 
   // Exit and decrement the loop depth.
@@ -1105,101 +1102,13 @@
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  EmitVariableLoad(expr->var());
+  EmitVariableLoad(expr);
 }
 
 
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Label* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  Register context = cp;
-  Register next = r3;
-  Register temp = r4;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
-        __ tst(temp, temp);
-        __ b(ne, slow);
-      }
-      __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
-      // Walk the rest of the chain without clobbering cp.
-      context = next;
-    }
-  }
-  // Check that last extension is NULL.
-  __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
-  __ tst(temp, temp);
-  __ b(ne, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return an cp-based operand (the write barrier cannot be allowed to
-  // destroy the cp register).
-  return ContextOperand(context, slot->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow,
-    Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    __ jmp(done);
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      __ ldr(r0, ContextSlotOperandCheckExtensions(potential_slot, slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-        __ cmp(r0, ip);
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-      }
-      __ jmp(done);
-    } else if (rewrite != NULL) {
-      // Generate fast case for calls of an argument function.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          __ ldr(r1,
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                   slow));
-          __ mov(r0, Operand(key_literal->handle()));
-          Handle<Code> ic =
-              isolate()->builtins()->KeyedLoadIC_Initialize();
-          EmitCallIC(ic, RelocInfo::CODE_TARGET);
-          __ jmp(done);
-        }
-      }
-    }
-  }
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow) {
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+                                                      TypeofState typeof_state,
+                                                      Label* slow) {
   Register current = cp;
   Register next = r1;
   Register temp = r2;
@@ -1214,8 +1123,7 @@
         __ b(ne, slow);
       }
       // Load next context in chain.
-      __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
-      __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+      __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
       // Walk the rest of the chain without clobbering cp.
       current = next;
     }
@@ -1241,97 +1149,137 @@
     __ tst(temp, temp);
     __ b(ne, slow);
     // Load next context in chain.
-    __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
-    __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+    __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
     __ b(&loop);
     __ bind(&fast);
   }
 
   __ ldr(r0, GlobalObjectOperand());
-  __ mov(r2, Operand(slot->var()->name()));
+  __ mov(r2, Operand(var->name()));
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, mode);
+  __ Call(ic, mode);
 }
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
-  // Four cases: non-this global variables, lookup slots, all other
-  // types of slots, and parameters that rewrite to explicit property
-  // accesses on the arguments object.
-  Slot* slot = var->AsSlot();
-  Property* property = var->AsProperty();
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+                                                                Label* slow) {
+  ASSERT(var->IsContextSlot());
+  Register context = cp;
+  Register next = r3;
+  Register temp = r4;
 
-  if (var->is_global() && !var->is_this()) {
-    Comment cmnt(masm_, "Global variable");
-    // Use inline caching. Variable name is passed in r2 and the global
-    // object (receiver) in r0.
-    __ ldr(r0, GlobalObjectOperand());
-    __ mov(r2, Operand(var->name()));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    context()->Plug(r0);
-
-  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    Label done, slow;
-
-    // Generate code for loading from variables potentially shadowed
-    // by eval-introduced variables.
-    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    Comment cmnt(masm_, "Lookup slot");
-    __ mov(r1, Operand(var->name()));
-    __ Push(cp, r1);  // Context and name.
-    __ CallRuntime(Runtime::kLoadContextSlot, 2);
-    __ bind(&done);
-
-    context()->Plug(r0);
-
-  } else if (slot != NULL) {
-    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
-                            ? "Context slot"
-                            : "Stack slot");
-    if (var->mode() == Variable::CONST) {
-      // Constants may be the hole value if they have not been initialized.
-      // Unhole them.
-      MemOperand slot_operand = EmitSlotSearch(slot, r0);
-      __ ldr(r0, slot_operand);
-      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(r0, ip);
-      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-      context()->Plug(r0);
-    } else {
-      context()->Plug(slot);
+  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ tst(temp, temp);
+        __ b(ne, slow);
+      }
+      __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      context = next;
     }
-  } else {
-    Comment cmnt(masm_, "Rewritten parameter");
-    ASSERT_NOT_NULL(property);
-    // Rewritten parameter accesses are of the form "slot[literal]".
+  }
+  // Check that last extension is NULL.
+  __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ tst(temp, temp);
+  __ b(ne, slow);
 
-    // Assert that the object is in a slot.
-    Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
-    ASSERT_NOT_NULL(object_var);
-    Slot* object_slot = object_var->AsSlot();
-    ASSERT_NOT_NULL(object_slot);
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+  return ContextOperand(context, var->index());
+}
 
-    // Load the object.
-    Move(r1, object_slot);
 
-    // Assert that the key is a smi.
-    Literal* key_literal = property->key()->AsLiteral();
-    ASSERT_NOT_NULL(key_literal);
-    ASSERT(key_literal->handle()->IsSmi());
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+                                                  TypeofState typeof_state,
+                                                  Label* slow,
+                                                  Label* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
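+  // For example, eval("var x = 1") can introduce a variable at runtime; the
+  // fast cases below therefore apply only while every intervening context
+  // has a NULL extension, and otherwise control branches to the slow path.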
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    __ jmp(done);
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+    Variable* local = var->local_if_not_shadowed();
+    __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
+    if (local->mode() == Variable::CONST) {
+      __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+    }
+    __ jmp(done);
+  }
+}
 
-    // Load the key.
-    __ mov(r0, Operand(key_literal->handle()));
 
-    // Call keyed load IC. It has arguments key and receiver in r0 and r1.
-    Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
-    context()->Plug(r0);
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+  // Record position before possible IC call.
+  SetSourcePosition(proxy->position());
+  Variable* var = proxy->var();
+
+  // Three cases: global variables, lookup variables, and all other types of
+  // variables.
+  switch (var->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "Global variable");
+      // Use inline caching. Variable name is passed in r2 and the global
+      // object (receiver) in r0.
+      __ ldr(r0, GlobalObjectOperand());
+      __ mov(r2, Operand(var->name()));
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      context()->Plug(r0);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, var->IsContextSlot()
+                              ? "Context variable"
+                              : "Stack variable");
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        GetVar(r0, var);
+        __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+        if (var->mode() == Variable::LET) {
+          Label done;
+          __ b(ne, &done);
+          __ mov(r0, Operand(var->name()));
+          __ push(r0);
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          __ bind(&done);
+        } else {
+          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+        }
+        context()->Plug(r0);
+      }
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Label done, slow;
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      __ bind(&slow);
+      Comment cmnt(masm_, "Lookup variable");
+      __ mov(r1, Operand(var->name()));
+      __ Push(cp, r1);  // Context and name.
+      __ CallRuntime(Runtime::kLoadContextSlot, 2);
+      __ bind(&done);
+      context()->Plug(r0);
+    }
   }
 }
 
@@ -1438,8 +1386,10 @@
             VisitForAccumulatorValue(value);
             __ mov(r2, Operand(key->handle()));
             __ ldr(r1, MemOperand(sp));
-            Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
-            EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            __ Call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1570,7 +1520,7 @@
   }
 
   // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* property = expr->target()->AsProperty();
@@ -1596,27 +1546,13 @@
       break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
-        if (property->is_arguments_access()) {
-          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-          __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
-          __ push(r0);
-          __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
-        } else {
-          VisitForStackValue(property->obj());
-          VisitForAccumulatorValue(property->key());
-        }
+        VisitForStackValue(property->obj());
+        VisitForAccumulatorValue(property->key());
         __ ldr(r1, MemOperand(sp, 0));
         __ push(r0);
       } else {
-        if (property->is_arguments_access()) {
-          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-          __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
-          __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
-          __ Push(r1, r0);
-        } else {
-          VisitForStackValue(property->obj());
-          VisitForStackValue(property->key());
-        }
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
       }
       break;
   }
@@ -1627,7 +1563,7 @@
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
-          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          EmitVariableLoad(expr->target()->AsVariableProxy());
           PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
@@ -1651,13 +1587,13 @@
     SetSourcePosition(expr->position() + 1);
     AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
-      EmitInlineSmiBinaryOp(expr,
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
                             op,
                             mode,
                             expr->target(),
                             expr->value());
     } else {
-      EmitBinaryOp(op, mode);
+      EmitBinaryOp(expr->binary_operation(), op, mode);
     }
 
     // Deoptimization point in case the binary operation may have side effects.
@@ -1693,7 +1629,7 @@
   __ mov(r2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1701,11 +1637,11 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Expression* left_expr,
@@ -1727,14 +1663,15 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
+  BinaryOpStub stub(op, mode);
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
   __ jmp(&done);
 
   __ bind(&smi_case);
   // Smi case. This code works the same way as the smi-smi case in the type
   // recording binary operation stub, see
-  // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
+  // BinaryOpStub::GenerateSmiSmiOperation for comments.
   switch (op) {
     case Token::SAR:
       __ b(&stub_call);
@@ -1804,11 +1741,14 @@
 }
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), NULL);
+  BinaryOpStub stub(op, mode);
+  JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
   context()->Plug(r0);
 }
 
@@ -1822,7 +1762,7 @@
   }
 
   // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* prop = expr->AsProperty();
@@ -1848,30 +1788,20 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ Call(ic);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(r0);  // Preserve value.
-      if (prop->is_synthetic()) {
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        { AccumulatorValueContext for_object(this);
-          EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
-        }
-        __ mov(r2, r0);
-        __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
-      } else {
-        VisitForStackValue(prop->obj());
-        VisitForAccumulatorValue(prop->key());
-        __ mov(r1, r0);
-        __ pop(r2);
-      }
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ mov(r1, r0);
+      __ pop(r2);
       __ pop(r0);  // Restore value.
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ Call(ic);
       break;
     }
   }
@@ -1882,96 +1812,93 @@
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
-  // Left-hand sides that rewrite to explicit property accesses do not reach
-  // here.
-  ASSERT(var != NULL);
-  ASSERT(var->is_global() || var->AsSlot() != NULL);
-
-  if (var->is_global()) {
-    ASSERT(!var->is_this());
-    // Assignment to a global variable.  Use inline caching for the
-    // assignment.  Right-hand-side value is passed in r0, variable name in
-    // r2, and the global object in r1.
+  if (var->IsUnallocated()) {
+    // Global var, const, or let.
     __ mov(r2, Operand(var->name()));
     __ ldr(r1, GlobalObjectOperand());
     Handle<Code> ic = is_strict_mode()
         ? isolate()->builtins()->StoreIC_Initialize_Strict()
         : isolate()->builtins()->StoreIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
-    // Like var declarations, const declarations are hoisted to function
-    // scope.  However, unlike var initializers, const initializers are able
-    // to drill a hole to that function context, even from inside a 'with'
-    // context.  We thus bypass the normal static scope lookup.
-    Slot* slot = var->AsSlot();
-    Label skip;
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-        // No const parameters.
-        UNREACHABLE();
-        break;
-      case Slot::LOCAL:
-        // Detect const reinitialization by checking for the hole value.
-        __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
-        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-        __ cmp(r1, ip);
-        __ b(ne, &skip);
-        __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
-        break;
-      case Slot::CONTEXT: {
-        __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
-        __ ldr(r2, ContextOperand(r1, slot->index()));
-        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-        __ cmp(r2, ip);
-        __ b(ne, &skip);
-        __ str(r0, ContextOperand(r1, slot->index()));
-        int offset = Context::SlotOffset(slot->index());
-        __ mov(r3, r0);  // Preserve the stored value in r0.
-        __ RecordWrite(r1, Operand(offset), r3, r2);
-        break;
-      }
-      case Slot::LOOKUP:
-        __ push(r0);
-        __ mov(r0, Operand(slot->var()->name()));
-        __ Push(cp, r0);  // Context and name.
-        __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-        break;
+    // Const initializers need a write barrier.
+    ASSERT(!var->IsParameter());  // No const parameters.
+    if (var->IsStackLocal()) {
+      Label skip;
+      __ ldr(r1, StackOperand(var));
+      __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+      __ b(ne, &skip);
+      __ str(result_register(), StackOperand(var));
+      __ bind(&skip);
+    } else {
+      ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+      // Like var declarations, const declarations are hoisted to function
+      // scope.  However, unlike var initializers, const initializers are
+      // able to drill a hole to that function context, even from inside a
+      // 'with' context.  We thus bypass the normal static scope lookup for
+      // var->IsContextSlot().
+      __ push(r0);
+      __ mov(r0, Operand(var->name()));
+      __ Push(cp, r0);  // Context and name.
+      __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
-    __ bind(&skip);
 
-  } else if (var->mode() != Variable::CONST) {
-    // Perform the assignment for non-const variables.  Const assignments
-    // are simply skipped.
-    Slot* slot = var->AsSlot();
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        // Perform the assignment.
-        __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
-        break;
-
-      case Slot::CONTEXT: {
-        MemOperand target = EmitSlotSearch(slot, r1);
-        // Perform the assignment and issue the write barrier.
-        __ str(result_register(), target);
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+    // Non-initializing assignment to let variable needs a write barrier.
+    if (var->IsLookupSlot()) {
+      __ push(r0);  // Value.
+      __ mov(r1, Operand(var->name()));
+      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ Push(cp, r1, r0);  // Context, name, strict mode.
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
+    } else {
+      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      Label assign;
+      MemOperand location = VarOperand(var, r1);
+      __ ldr(r3, location);
+      __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+      __ b(ne, &assign);
+      __ mov(r3, Operand(var->name()));
+      __ push(r3);
+      __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      // Perform the assignment.
+      __ bind(&assign);
+      __ str(result_register(), location);
+      if (var->IsContextSlot()) {
         // RecordWrite may destroy all its register arguments.
         __ mov(r3, result_register());
-        int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+        int offset = Context::SlotOffset(var->index());
         __ RecordWrite(r1, Operand(offset), r2, r3);
-        break;
       }
+    }
 
-      case Slot::LOOKUP:
-        // Call the runtime for the assignment.
-        __ push(r0);  // Value.
-        __ mov(r1, Operand(slot->var()->name()));
-        __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
-        __ Push(cp, r1, r0);  // Context, name, strict mode.
-        __ CallRuntime(Runtime::kStoreContextSlot, 4);
-        break;
+  } else if (var->mode() != Variable::CONST) {
+    // Assignment to var or initializing assignment to let.
+    if (var->IsStackAllocated() || var->IsContextSlot()) {
+      MemOperand location = VarOperand(var, r1);
+      if (FLAG_debug_code && op == Token::INIT_LET) {
+        // Check for an uninitialized let binding.
+        __ ldr(r2, location);
+        __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+        __ Check(eq, "Let binding re-initialization.");
+      }
+      // Perform the assignment.
+      __ str(r0, location);
+      if (var->IsContextSlot()) {
+        __ mov(r3, r0);
+        __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
+      }
+    } else {
+      ASSERT(var->IsLookupSlot());
+      __ push(r0);  // Value.
+      __ mov(r1, Operand(var->name()));
+      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ Push(cp, r1, r0);  // Context, name, strict mode.
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
+  // Non-initializing assignments to consts are ignored.
 }
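The rewritten EmitVariableAssignment above encodes the store-side rules: INIT_CONST only fills the slot while it still holds the-hole, a non-initializing store to a let throws a ReferenceError when the slot is still the-hole, var stores and let initialization write unconditionally, and non-initializing const stores are ignored. A standalone sketch of those rules under the same the-hole convention as the load sketch earlier (Slot, Op and StoreSlot are made-up names, not V8 API):

#include <cassert>
#include <optional>
#include <stdexcept>

enum class Mode { VAR, LET, CONST };
enum class Op { ASSIGN, INIT_LET, INIT_CONST };

struct Slot {
  Mode mode;
  std::optional<int> value;  // std::nullopt stands in for the-hole
};

void StoreSlot(Slot& slot, Op op, int v) {
  if (op == Op::INIT_CONST) {
    if (!slot.value) slot.value = v;  // only fill the hole, never overwrite
    return;
  }
  if (slot.mode == Mode::CONST) return;  // non-initializing const stores are ignored
  if (slot.mode == Mode::LET && op == Op::ASSIGN && !slot.value) {
    throw std::runtime_error("ReferenceError");  // assignment before initialization
  }
  slot.value = v;  // var store, let initialization, or store to an initialized let
}

int main() {
  Slot c{Mode::CONST, std::nullopt};
  StoreSlot(c, Op::INIT_CONST, 1);
  StoreSlot(c, Op::ASSIGN, 2);  // silently ignored
  assert(*c.value == 1);

  Slot l{Mode::LET, std::nullopt};
  bool threw = false;
  try { StoreSlot(l, Op::ASSIGN, 3); } catch (const std::runtime_error&) { threw = true; }
  assert(threw);
  StoreSlot(l, Op::INIT_LET, 3);
  StoreSlot(l, Op::ASSIGN, 4);
  assert(*l.value == 4);
}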
 
 
@@ -2006,7 +1933,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2052,7 +1979,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2101,10 +2028,9 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic =
-      isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-  EmitCallIC(ic, mode);
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+  __ Call(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2113,8 +2039,7 @@
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
+                                            Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
@@ -2135,11 +2060,10 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic =
-      isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
-  EmitCallIC(ic, mode);
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2147,7 +2071,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2158,8 +2082,7 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arg_count, flags);
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2179,10 +2102,16 @@
   __ push(r1);
 
   // Push the receiver of the enclosing function and do runtime call.
-  __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+  int receiver_offset = 2 + info_->scope()->num_parameters();
+  __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
   __ push(r1);
-  // Push the strict mode flag.
-  __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ mov(r1, Operand(Smi::FromInt(strict_mode)));
   __ push(r1);
 
   __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
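The receiver offset above is plain frame arithmetic: assuming two fp-relative slots (the caller's saved fp and the return address) sit between fp and the formal parameters, the enclosing function's receiver lives (2 + num_parameters) pointer slots above fp. A tiny sketch of that arithmetic; the layout and names are assumptions for illustration, not quoted V8 constants:

#include <cassert>

constexpr int kPointerSize = 4;  // 32-bit ARM

// Two frame slots, then the formals, then the receiver.
constexpr int ReceiverOffsetFromFp(int num_parameters) {
  return (2 + num_parameters) * kPointerSize;
}

int main() {
  assert(ReceiverOffsetFromFp(0) == 8);   // no formals
  assert(ReceiverOffsetFromFp(3) == 20);  // three formals: (2 + 3) * 4 bytes
}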
@@ -2199,10 +2128,11 @@
 #endif
 
   Comment cmnt(masm_, "[ Call");
-  Expression* fun = expr->expression();
-  Variable* var = fun->AsVariableProxy()->AsVariable();
+  Expression* callee = expr->expression();
+  VariableProxy* proxy = callee->AsVariableProxy();
+  Property* property = callee->AsProperty();
 
-  if (var != NULL && var->is_possibly_eval()) {
+  if (proxy != NULL && proxy->var()->is_possibly_eval()) {
     // In a call to eval, we first call %ResolvePossiblyDirectEval to
     // resolve the function we need to call and the receiver of the
     // call.  Then we call the resolved function using the given
@@ -2211,7 +2141,7 @@
     int arg_count = args->length();
 
     { PreservePositionScope pos_scope(masm()->positions_recorder());
-      VisitForStackValue(fun);
+      VisitForStackValue(callee);
       __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
       __ push(r2);  // Reserved receiver slot.
 
@@ -2225,11 +2155,10 @@
       // in generated code. If we succeed, there is no need to perform a
       // context lookup in the runtime system.
       Label done;
-      if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
         Label slow;
-        EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
-                                          NOT_INSIDE_TYPEOF,
-                                          &slow);
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
         // Push the function and resolve eval.
         __ push(r0);
         EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
@@ -2237,14 +2166,12 @@
         __ bind(&slow);
       }
 
-      // Push copy of the function (found below the arguments) and
+      // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(r1);
       EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      if (done.is_linked()) {
-        __ bind(&done);
-      }
+      __ bind(&done);
 
       // The runtime call returns a pair of values in r0 (function) and
       // r1 (receiver). Touch up the stack with the right values.
@@ -2254,37 +2181,32 @@
 
     // Record source position for debugger.
     SetSourcePosition(expr->position());
-    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, r0);
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
+  } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
     __ ldr(r0, GlobalObjectOperand());
     __ push(r0);
-    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
+    EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
     { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                      NOT_INSIDE_TYPEOF,
-                                      &slow,
-                                      &done);
+      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
     }
 
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in r0)
     // and the object holding it (returned in edx).
     // and the object holding it (returned in r1).
     __ push(context_register());
-    __ mov(r2, Operand(var->name()));
+    __ mov(r2, Operand(proxy->name()));
     __ push(r2);
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ Push(r0, r1);  // Function, receiver.
@@ -2298,67 +2220,39 @@
       __ bind(&done);
       // Push function.
       __ push(r0);
-      // Push global receiver.
-      __ ldr(r1, GlobalObjectOperand());
-      __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+      // The receiver is implicitly the global receiver. Indicate this
+      // by passing the hole to the call function stub.
+      __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
       __ push(r1);
       __ bind(&call);
     }
 
-    EmitCallWithStub(expr);
-  } else if (fun->AsProperty() != NULL) {
-    // Call to an object property.
-    Property* prop = fun->AsProperty();
-    Literal* key = prop->key()->AsLiteral();
-    if (key != NULL && key->handle()->IsSymbol()) {
-      // Call to a named property, use call IC.
-      { PreservePositionScope scope(masm()->positions_recorder());
-        VisitForStackValue(prop->obj());
-      }
-      EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot. That object could be the hole if the
+    // receiver is implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+  } else if (property != NULL) {
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(property->obj());
+    }
+    if (property->key()->IsPropertyName()) {
+      EmitCallWithIC(expr,
+                     property->key()->AsLiteral()->handle(),
+                     RelocInfo::CODE_TARGET);
     } else {
-      // Call to a keyed property.
-      // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use keyed CallIC.
-      if (prop->is_synthetic()) {
-        // Do not visit the object and key subexpressions (they are shared
-        // by all occurrences of the same rewritten parameter).
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
-        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
-        MemOperand operand = EmitSlotSearch(slot, r1);
-        __ ldr(r1, operand);
-
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
-        __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
-
-        // Record source code position for IC call.
-        SetSourcePosition(prop->position());
-
-        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        EmitCallIC(ic, RelocInfo::CODE_TARGET);
-        __ ldr(r1, GlobalObjectOperand());
-        __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
-        __ Push(r0, r1);  // Function, receiver.
-        EmitCallWithStub(expr);
-      } else {
-        { PreservePositionScope scope(masm()->positions_recorder());
-          VisitForStackValue(prop->obj());
-        }
-        EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
-      }
+      EmitKeyedCallWithIC(expr, property->key());
     }
   } else {
+    // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(fun);
+      VisitForStackValue(callee);
     }
     // Load global receiver object.
     __ ldr(r1, GlobalObjectOperand());
     __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
     __ push(r1);
     // Emit function call.
-    EmitCallWithStub(expr);
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
 
 #ifdef DEBUG
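One detail worth calling out from the call rewrite above: for lookup-slot callees the receiver pushed on the stack may be the-hole, and RECEIVER_MIGHT_BE_IMPLICIT tells the call stub to substitute the global receiver when it sees that sentinel. A minimal sketch of the convention (Object, kTheHole and ResolveReceiver are illustrative, not V8 API):

#include <cassert>

struct Object { int id; };

static Object kTheHole{-1};          // sentinel: receiver not known statically
static Object kGlobalReceiver{0};

Object* ResolveReceiver(Object* pushed) {
  // RECEIVER_MIGHT_BE_IMPLICIT: the hole means "use the global receiver".
  return (pushed == &kTheHole) ? &kGlobalReceiver : pushed;
}

int main() {
  Object some{42};
  assert(ResolveReceiver(&kTheHole) == &kGlobalReceiver);
  assert(ResolveReceiver(&some) == &some);
}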
@@ -2463,9 +2357,9 @@
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
   __ b(ne, if_false);
   __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ b(lt, if_false);
-  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(le, if_true, if_false, fall_through);
 
@@ -2486,7 +2380,7 @@
                          &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(r0, if_false);
-  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
 
@@ -2548,7 +2442,7 @@
   // Look for valueOf symbol in the descriptor array, and indicate false if
   // found. The type is not checked, so if it is a transition it is a false
   // negative.
-  __ ldr(r4, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(r1, r4);
   __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
   // r4: descriptor array
   // r3: length of descriptor array
@@ -2583,8 +2477,7 @@
   // If a valueOf property is not found on the object, check that its
   // prototype is the unmodified String prototype. If not, the result is false.

   __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, if_false);
+  __ JumpIfSmi(r2, if_false);
   __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
   __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
@@ -2617,7 +2510,7 @@
                          &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(r0, if_false);
-  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
@@ -2729,7 +2622,7 @@
   // parameter count in r0.
   VisitForAccumulatorValue(args->at(0));
   __ mov(r1, r0);
-  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
   context()->Plug(r0);
@@ -2741,7 +2634,7 @@
 
   Label exit;
   // Get the number of formal parameters.
-  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
 
   // Check if the calling frame is an arguments adaptor frame.
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2769,16 +2662,18 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);  // Map is now in r0.
+  __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+  // Map is now in r0.
   __ b(lt, &null);
 
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
-  __ b(eq, &function);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+  __ b(ge, &function);
 
   // Check if the constructor in the map is a function.
   __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
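The STATIC_ASSERTs above exist so that a single comparison can classify an instance type: if the callable spec-object types form a contiguous range at the top of the enum, "type >= FIRST_CALLABLE_SPEC_OBJECT_TYPE" is sufficient and no upper-bound check is needed. A standalone sketch with made-up enum values (only the ordering matters):

#include <cassert>

enum InstanceType {
  STRING_TYPE,
  HEAP_NUMBER_TYPE,
  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE + 3,
  FIRST_CALLABLE_SPEC_OBJECT_TYPE,
  JS_FUNCTION_TYPE,
  LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
  LAST_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
};

// The same invariants the STATIC_ASSERTs in the hunk rely on.
static_assert(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE, "callable types are last");
static_assert(FIRST_CALLABLE_SPEC_OBJECT_TYPE == LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1,
              "no gap between the two ranges");

bool IsCallableSpecObject(InstanceType t) {
  return t >= FIRST_CALLABLE_SPEC_OBJECT_TYPE;  // a single comparison suffices
}

int main() {
  assert(IsCallableSpecObject(JS_FUNCTION_TYPE));
  assert(!IsCallableSpecObject(HEAP_NUMBER_TYPE));
}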
@@ -2798,7 +2693,7 @@
 
   // Objects with a non-function constructor have class 'Object'.
   __ bind(&non_function_constructor);
-  __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+  __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
   __ jmp(&done);
 
   // Non-JS objects have class null.
@@ -2821,13 +2716,12 @@
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
   ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
     VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
   }
-#endif
+
   // Finally, we're expected to leave a value on the top of the stack.
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   context()->Plug(r0);
@@ -3161,17 +3055,17 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in r1. Move it in there.
-  if (!result_register().is(r1)) __ mov(r1, result_register());
+  // InvokeFunction requires the function in r1. Move it in there.
+  __ mov(r1, result_register());
   ParameterCount count(arg_count);
-  __ InvokeFunction(r1, count, CALL_FUNCTION);
+  __ InvokeFunction(r1, count, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->Plug(r0);
 }
@@ -3298,7 +3192,7 @@
 
   Label done, not_found;
   // tmp now holds finger offset as a smi.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
   // r2 now holds finger offset as a smi.
   __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -3338,8 +3232,7 @@
   __ b(eq, &ok);
   // Fail if either is a non-HeapObject.
   __ and_(tmp, left, Operand(right));
-  __ tst(tmp, Operand(kSmiTagMask));
-  __ b(eq, &fail);
+  __ JumpIfSmi(tmp, &fail);
   __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
   __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
   __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
@@ -3429,9 +3322,7 @@
   __ b(ne, &bailout);
 
   // Check that the array has fast elements.
-  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
-  __ tst(scratch2, Operand(1 << Map::kHasFastElements));
-  __ b(eq, &bailout);
+  __ CheckFastElements(scratch1, scratch2, &bailout);
 
   // If the array has length zero, return the empty string.
   __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -3656,9 +3547,10 @@
   if (expr->is_jsruntime()) {
     // Call the JS runtime function.
     __ mov(r2, Operand(expr->name()));
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
-        isolate()->stub_cache()->ComputeCallInitialize(arg_count, NOT_IN_LOOP);
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+    __ Call(ic, mode, expr->id());
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3673,38 +3565,32 @@
   switch (expr->op()) {
     case Token::DELETE: {
       Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
-      Property* prop = expr->expression()->AsProperty();
-      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
 
-      if (prop != NULL) {
-        if (prop->is_synthetic()) {
-          // Result of deleting parameters is false, even when they rewrite
-          // to accesses on the arguments object.
-          context()->Plug(false);
-        } else {
-          VisitForStackValue(prop->obj());
-          VisitForStackValue(prop->key());
-          __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
-          __ push(r1);
-          __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
-          context()->Plug(r0);
-        }
-      } else if (var != NULL) {
+      if (property != NULL) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+        __ push(r1);
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(r0);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
-        // but "delete this" is.
+        // but "delete this" is allowed.
         ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
-        if (var->is_global()) {
+        if (var->IsUnallocated()) {
           __ ldr(r2, GlobalObjectOperand());
           __ mov(r1, Operand(var->name()));
           __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
           __ Push(r2, r1, r0);
-          __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
           context()->Plug(r0);
-        } else if (var->AsSlot() != NULL &&
-                   var->AsSlot()->type() != Slot::LOOKUP) {
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
           // Result of deleting non-global, non-dynamic variables is false.
           // The subexpression does not have side effects.
-          context()->Plug(false);
+          context()->Plug(var->is_this());
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
@@ -3766,8 +3652,7 @@
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
       VisitForAccumulatorValue(expr->expression());
       Label no_conversion;
-      __ tst(result_register(), Operand(kSmiTagMask));
-      __ b(eq, &no_conversion);
+      __ JumpIfSmi(result_register(), &no_conversion);
       ToNumberStub convert_stub;
       __ CallStub(&convert_stub);
       __ bind(&no_conversion);
@@ -3775,48 +3660,13 @@
       break;
     }
 
-    case Token::SUB: {
-      Comment cmt(masm_, "[ UnaryOperation (SUB)");
-      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
-      // GenericUnaryOpStub expects the argument to be in the
-      // accumulator register r0.
-      VisitForAccumulatorValue(expr->expression());
-      __ CallStub(&stub);
-      context()->Plug(r0);
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
       break;
-    }
 
-    case Token::BIT_NOT: {
-      Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
-      // The generic unary operation stub expects the argument to be
-      // in the accumulator register r0.
-      VisitForAccumulatorValue(expr->expression());
-      Label done;
-      bool inline_smi_code = ShouldInlineSmiCase(expr->op());
-      if (inline_smi_code) {
-        Label call_stub;
-        __ JumpIfNotSmi(r0, &call_stub);
-        __ mvn(r0, Operand(r0));
-        // Bit-clear inverted smi-tag.
-        __ bic(r0, r0, Operand(kSmiTagMask));
-        __ b(&done);
-        __ bind(&call_stub);
-      }
-      bool overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOpFlags flags = inline_smi_code
-          ? NO_UNARY_SMI_CODE_IN_STUB
-          : NO_UNARY_FLAGS;
-      UnaryOverwriteMode mode =
-          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
-      __ CallStub(&stub);
-      __ bind(&done);
-      context()->Plug(r0);
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
       break;
-    }
 
     default:
       UNREACHABLE();
@@ -3824,6 +3674,23 @@
 }
 
 
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in the
+  // accumulator register r0.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  context()->Plug(r0);
+}
+
+
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
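The new EmitUnaryOperation above passes an overwrite mode to UnaryOpStub; conceptually, when the operand is a temporary whose result nothing else can observe, the stub may reuse the operand's heap box for the result instead of allocating a fresh one. A rough standalone illustration of that idea (HeapNumber, Negate and the enum are invented for the sketch, not the stub's real interface):

#include <cassert>
#include <memory>

struct HeapNumber { double value; };

enum class OverwriteMode { kOverwrite, kNoOverwrite };

std::shared_ptr<HeapNumber> Negate(std::shared_ptr<HeapNumber> operand,
                                   OverwriteMode mode) {
  if (mode == OverwriteMode::kOverwrite) {
    operand->value = -operand->value;  // reuse the existing box
    return operand;
  }
  return std::make_shared<HeapNumber>(HeapNumber{-operand->value});  // fresh box
}

int main() {
  auto temp = std::make_shared<HeapNumber>(HeapNumber{3.0});
  auto r1 = Negate(temp, OverwriteMode::kOverwrite);
  assert(r1 == temp && r1->value == -3.0);  // same object, mutated in place

  auto named = std::make_shared<HeapNumber>(HeapNumber{3.0});
  auto r2 = Negate(named, OverwriteMode::kNoOverwrite);
  assert(r2 != named && named->value == 3.0);  // original left intact
}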
@@ -3836,7 +3703,7 @@
   }
 
   // Expression can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* prop = expr->expression()->AsProperty();
@@ -3851,7 +3718,7 @@
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
-    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3864,15 +3731,8 @@
       __ push(r0);
       EmitNamedPropertyLoad(prop);
     } else {
-      if (prop->is_arguments_access()) {
-        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
-        __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
-        __ push(r0);
-        __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
-      } else {
-        VisitForStackValue(prop->obj());
-        VisitForAccumulatorValue(prop->key());
-      }
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ ldr(r1, MemOperand(sp, 0));
       __ push(r0);
       EmitKeyedPropertyLoad(prop);
@@ -3936,8 +3796,9 @@
   // Record position before stub call.
   SetSourcePosition(expr->position());
 
-  TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
-  EmitCallIC(stub.GetCode(), &patch_site);
+  BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  patch_site.EmitPatchInfo();
   __ bind(&done);
 
   // Store the value returned in r0.
@@ -3968,7 +3829,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3985,7 +3846,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4004,25 +3865,22 @@
   ASSERT(!context()->IsEffect());
   ASSERT(!context()->IsTest());
   VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
     __ ldr(r0, GlobalObjectOperand());
     __ mov(r2, Operand(proxy->name()));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    __ Call(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(r0);
-  } else if (proxy != NULL &&
-             proxy->var()->AsSlot() != NULL &&
-             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     Label done, slow;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    Slot* slot = proxy->var()->AsSlot();
-    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ mov(r0, Operand(proxy->name()));
@@ -4034,30 +3892,18 @@
     context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    context()->HandleExpression(expr);
+    VisitInCurrentContext(expr);
   }
 }
 
 
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
-                                          Expression* left,
-                                          Expression* right,
-                                          Label* if_true,
-                                          Label* if_false,
-                                          Label* fall_through) {
-  if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
-  // Check for the pattern: typeof <expression> == <string literal>.
-  Literal* right_literal = right->AsLiteral();
-  if (right_literal == NULL) return false;
-  Handle<Object> right_literal_value = right_literal->handle();
-  if (!right_literal_value->IsString()) return false;
-  UnaryOperation* left_unary = left->AsUnaryOperation();
-  if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
-  Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(left_unary->expression());
+    VisitForTypeofValue(expr);
   }
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
@@ -4080,6 +3926,10 @@
     __ b(eq, if_true);
     __ CompareRoot(r0, Heap::kFalseValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
+  } else if (FLAG_harmony_typeof &&
+             check->Equals(isolate()->heap()->null_symbol())) {
+    __ CompareRoot(r0, Heap::kNullValueRootIndex);
+    Split(eq, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
     __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
     __ b(eq, if_true);
@@ -4092,18 +3942,20 @@
 
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(r0, if_false);
-    __ CompareObjectType(r0, r1, r0, FIRST_FUNCTION_CLASS_TYPE);
+    __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
     Split(ge, if_true, if_false, fall_through);
 
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(r0, if_false);
-    __ CompareRoot(r0, Heap::kNullValueRootIndex);
-    __ b(eq, if_true);
+    if (!FLAG_harmony_typeof) {
+      __ CompareRoot(r0, Heap::kNullValueRootIndex);
+      __ b(eq, if_true);
+    }
     // Check for JS objects => true.
-    __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
-    __ b(lo, if_false);
-    __ CompareInstanceType(r0, r1, FIRST_FUNCTION_CLASS_TYPE);
-    __ b(hs, if_false);
+    __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ b(lt, if_false);
+    __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ b(gt, if_false);
     // Check for undetectable objects => false.
     __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
     __ tst(r1, Operand(1 << Map::kIsUndetectable));
@@ -4111,8 +3963,18 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
+}
 
-  return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+  Split(eq, if_true, if_false, fall_through);
 }
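The typeof comparisons above are easier to read against a plain model of what typeof returns, including the two quirks visible in this hunk: under FLAG_harmony_typeof, typeof null is "null" instead of "object", and undetectable objects report "undefined". A standalone sketch (Value and TypeOf are illustrative, not V8 types):

#include <cassert>
#include <string>

struct Value {
  enum Kind { kNumber, kString, kBoolean, kUndefined, kNull, kFunction, kObject } kind;
  bool undetectable;  // e.g. document.all-style host objects
};

std::string TypeOf(const Value& v, bool harmony_typeof) {
  switch (v.kind) {
    case Value::kNumber:    return "number";
    case Value::kString:    return "string";
    case Value::kBoolean:   return "boolean";
    case Value::kUndefined: return "undefined";
    case Value::kFunction:  return "function";
    case Value::kNull:      return harmony_typeof ? "null" : "object";
    case Value::kObject:    return v.undetectable ? "undefined" : "object";
  }
  return "";
}

int main() {
  assert(TypeOf({Value::kNull, false}, false) == "object");
  assert(TypeOf({Value::kNull, false}, true) == "null");
  assert(TypeOf({Value::kObject, true}, false) == "undefined");
}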
 
 
@@ -4132,19 +3994,17 @@
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
-  Token::Value op = expr->op();
-  Expression* left = expr->left();
-  Expression* right = expr->right();
-  if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
     context()->Plug(if_true, if_false);
     return;
   }
 
+  Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ InvokeBuiltin(Builtins::IN, CALL_JS);
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
       PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
@@ -4165,11 +4025,8 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cond = eq;
-      bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
-          strict = true;
-          // Fall through
         case Token::EQ:
           cond = eq;
           __ pop(r1);
@@ -4214,7 +4071,8 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      EmitCallIC(ic, &patch_site);
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0));
       Split(cond, if_true, if_false, fall_through);
@@ -4247,8 +4105,7 @@
     __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
     __ cmp(r0, r1);
     __ b(eq, if_true);
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, if_false);
+    __ JumpIfSmi(r0, if_false);
     // It can be an undetectable object.
     __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
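For reference, the loose null comparison being compiled here treats null and undefined as equal to null, rejects smis outright, and accepts undetectable host objects. A small sketch of just that predicate (made-up types, not V8 code), covering only the non-strict case visible in this hunk:

#include <cassert>

struct Value {
  enum Kind { kSmi, kNull, kUndefined, kObject } kind;
  bool undetectable;
};

bool LooseEqualsNull(const Value& v) {
  if (v.kind == Value::kNull || v.kind == Value::kUndefined) return true;
  if (v.kind == Value::kSmi) return false;  // small integers never match
  return v.undetectable;                    // e.g. document.all
}

int main() {
  assert(LooseEqualsNull({Value::kNull, false}));
  assert(LooseEqualsNull({Value::kUndefined, false}));
  assert(!LooseEqualsNull({Value::kSmi, false}));
  assert(LooseEqualsNull({Value::kObject, true}));
  assert(!LooseEqualsNull({Value::kObject, false}));
}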
@@ -4276,57 +4133,6 @@
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
-  ASSERT(mode == RelocInfo::CODE_TARGET ||
-         mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Counters* counters = isolate()->counters();
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-      __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
-      break;
-    case Code::KEYED_LOAD_IC:
-      __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
-      break;
-    case Code::STORE_IC:
-      __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
-      break;
-    case Code::KEYED_STORE_IC:
-      __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
-    default:
-      break;
-  }
-
-  __ Call(ic, mode);
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
-  Counters* counters = isolate()->counters();
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-      __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
-      break;
-    case Code::KEYED_LOAD_IC:
-      __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
-      break;
-    case Code::STORE_IC:
-      __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
-      break;
-    case Code::KEYED_STORE_IC:
-      __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
-    default:
-      break;
-  }
-
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  if (patch_site != NULL && patch_site->is_bound()) {
-    patch_site->EmitPatchInfo();
-  } else {
-    __ nop();  // Signals no inlined code.
-  }
-}
-
-
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ str(value, MemOperand(fp, frame_offset));
@@ -4338,6 +4144,27 @@
 }
 
 
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* declaration_scope = scope()->DeclarationScope();
+  if (declaration_scope->is_global_scope()) {
+    // Contexts nested in the global context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code.  Pass a smi sentinel and let the runtime look up the empty
+    // function.
+    __ mov(ip, Operand(Smi::FromInt(0)));
+  } else if (declaration_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code.  Fetch it from the context.
+    __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+  } else {
+    ASSERT(declaration_scope->is_function_scope());
+    __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+  __ push(ip);
+}
+
+
 // ----------------------------------------------------------------------------
 // Non-local control flow support.
 
@@ -4348,7 +4175,7 @@
   // Cook return address in link register to stack (smi encoded Code* delta)
   __ sub(r1, lr, Operand(masm_->CodeObject()));
   ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ add(r1, r1, Operand(r1));  // Convert to smi.
   __ push(r1);
 }
@@ -4368,6 +4195,34 @@
 
 #undef __
 
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+    int* stack_depth,
+    int* context_length) {
+  // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally
+  // code, we can restore it directly from there for the finally code
+  // rather than iteratively unwinding contexts via their previous
+  // links.
+  __ Drop(*stack_depth);  // Down to the handler block.
+  if (*context_length > 0) {
+    // Restore the context to its dedicated register and the stack.
+    __ ldr(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ PopTryHandler();
+  __ bl(finally_entry_);
+
+  *stack_depth = 0;
+  *context_length = 0;
+  return previous_;
+}
+
+
+#undef __
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 8acf7c2..2e49cae 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -79,15 +79,14 @@
   //       elements map.
 
   // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the receiver is a valid JS object.
-  __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
+  __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
   __ b(lt, miss);
 
   // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
 
   GenerateGlobalInstanceTypeCheck(masm, t1, miss);
 
@@ -105,65 +104,6 @@
 }
 
 
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
-                                           Label* miss,
-                                           Label* done,
-                                           Register elements,
-                                           Register name,
-                                           Register scratch1,
-                                           Register scratch2) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
-  // Compute the capacity mask.
-  const int kCapacityOffset = StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
-  __ sub(scratch1, scratch1, Operand(1));
-
-  const int kElementsStartOffset = StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  static const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is right
-      // shifted in the following and instruction.
-      ASSERT(StringDictionary::GetProbeOffset(i) <
-             1 << (32 - String::kHashFieldOffset));
-      __ add(scratch2, scratch2, Operand(
-          StringDictionary::GetProbeOffset(i) << String::kHashShift));
-    }
-    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
-
-    // Scale the index by multiplying by the element size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    // scratch2 = scratch2 * 3.
-    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
-    // Check if the key is identical to the name.
-    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
-    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ cmp(name, Operand(ip));
-    if (i != kProbes - 1) {
-      __ b(eq, done);
-    } else {
-      __ b(ne, miss);
-    }
-  }
-}
-
-
 // Helper function used from LoadIC/CallIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
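The probing scheme that the deleted helper implemented (and that StringDictionaryLookupStub::GeneratePositiveLookup now provides) is an open-addressed lookup where probe i inspects (hash + i + i*i) & mask and gives up after a handful of probes. A self-contained sketch of that scheme; the dictionary class, hash function and slot layout here are invented for illustration (the real dictionary spreads each entry over three consecutive array slots: key, value, details):

// Sketch of quadratic-style probing with a bounded number of probes.
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct Entry { std::string key; int value; };

class StringDictionary {
 public:
  explicit StringDictionary(size_t capacity_log2)
      : mask_((1u << capacity_log2) - 1), slots_(mask_ + 1) {}

  void Insert(const std::string& key, int value) {
    for (uint32_t i = 0;; i++) {
      uint32_t index = (Hash(key) + i + i * i) & mask_;
      if (slots_[index].key.empty() || slots_[index].key == key) {
        slots_[index] = {key, value};
        return;
      }
    }
  }

  bool Lookup(const std::string& key, int* out) const {
    static const uint32_t kProbes = 4;  // unrolled in the generated code
    for (uint32_t i = 0; i < kProbes; i++) {
      uint32_t index = (Hash(key) + i + i * i) & mask_;
      if (slots_[index].key == key) { *out = slots_[index].value; return true; }
      if (slots_[index].key.empty()) return false;  // hit a hole: key is absent
    }
    return false;  // give up after a few probes (the stub falls back to a miss)
  }

 private:
  static uint32_t Hash(const std::string& s) {
    uint32_t h = 0;
    for (char c : s) h = h * 31 + static_cast<unsigned char>(c);
    return h;
  }
  uint32_t mask_;
  std::vector<Entry> slots_;
};

int main() {
  StringDictionary dict(4);  // 16 buckets
  dict.Insert("length", 1);
  dict.Insert("valueOf", 2);
  int v = 0;
  assert(dict.Lookup("valueOf", &v) && v == 2);
  assert(!dict.Lookup("missing", &v));
}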
@@ -191,13 +131,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss,
-                                 &done,
-                                 elements,
-                                 name,
-                                 scratch1,
-                                 scratch2);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
 
   // If probing finds an entry check that the value is a normal
   // property.
@@ -206,7 +146,7 @@
       StringDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
   __ b(ne, miss);
 
   // Get the value at the masked, scaled index and return.
@@ -240,13 +180,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss,
-                                 &done,
-                                 elements,
-                                 name,
-                                 scratch1,
-                                 scratch2);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
 
   // If probing finds an entry in the dictionary check that the value
   // is a normal property that is not read only.
@@ -254,9 +194,9 @@
   const int kElementsStartOffset = StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask
-      = (PropertyDetails::TypeField::mask() |
-         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
   __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
   __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
   __ b(ne, miss);
@@ -272,101 +212,6 @@
 }
 
 
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
-                                         Label* miss,
-                                         Register elements,
-                                         Register key,
-                                         Register result,
-                                         Register t0,
-                                         Register t1,
-                                         Register t2) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'key' or 'result'.
-  //            Unchanged on bailout so 'key' or 'result' can be used
-  //            in further computation.
-  //
-  // Scratch registers:
-  //
-  // t0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // t1 - used to hold the capacity mask of the dictionary
-  //
-  // t2 - used for the index into the dictionary.
-  Label done;
-
-  // Compute the hash code from the untagged key.  This must be kept in sync
-  // with ComputeIntegerHash in utils.h.
-  //
-  // hash = ~hash + (hash << 15);
-  __ mvn(t1, Operand(t0));
-  __ add(t0, t1, Operand(t0, LSL, 15));
-  // hash = hash ^ (hash >> 12);
-  __ eor(t0, t0, Operand(t0, LSR, 12));
-  // hash = hash + (hash << 2);
-  __ add(t0, t0, Operand(t0, LSL, 2));
-  // hash = hash ^ (hash >> 4);
-  __ eor(t0, t0, Operand(t0, LSR, 4));
-  // hash = hash * 2057;
-  __ mov(t1, Operand(2057));
-  __ mul(t0, t0, t1);
-  // hash = hash ^ (hash >> 16);
-  __ eor(t0, t0, Operand(t0, LSR, 16));
-
-  // Compute the capacity mask.
-  __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
-  __ mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
-  __ sub(t1, t1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  static const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
-    // Use t2 for index calculations and keep the hash intact in t0.
-    __ mov(t2, t0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
-    }
-    __ and_(t2, t2, Operand(t1));
-
-    // Scale the index by multiplying by the element size.
-    ASSERT(NumberDictionary::kEntrySize == 3);
-    __ add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
-
-    // Check if the key is identical to the name.
-    __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
-    __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
-    __ cmp(key, Operand(ip));
-    if (i != kProbes - 1) {
-      __ b(eq, &done);
-    } else {
-      __ b(ne, miss);
-    }
-  }
-
-  __ bind(&done);
-  // Check that the value is a normal property.
-  // t2: elements + (index * kPointerSize)
-  const int kDetailsOffset =
-      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
-  __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
-  __ b(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  const int kValueOffset =
-      NumberDictionary::kElementsStartOffset + kPointerSize;
-  __ ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
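For reference, the instruction-by-instruction comments in the removed load
correspond to the integer hash that must stay in sync with ComputeIntegerHash
in utils.h; a sketch of that sequence, assuming 32-bit unsigned arithmetic as
the comments state:

#include <stdint.h>

// Mirrors the comment sequence in the removed assembly; this is what the
// mvn/eor/add/mul instructions compute on the untagged key.
inline uint32_t ComputeIntegerHashSketch(uint32_t hash) {
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}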
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -492,7 +337,7 @@
   // Fast case: Do the load.
   __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   // The key is a smi.
-  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
   __ ldr(scratch2,
          MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -525,7 +370,7 @@
   // Is the string a symbol?
   // map: key map
   __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(kSymbolTag != 0);
   __ tst(hash, Operand(kIsSymbolMask));
   __ b(eq, not_symbol);
 }
@@ -538,7 +383,8 @@
 // The generated code falls through if both probes miss.
 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                           int argc,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r1    : receiver
   //  -- r2    : name
@@ -547,9 +393,8 @@
 
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
@@ -561,8 +406,7 @@
   // to probe.
   //
   // Check for number.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &number);
+  __ JumpIfSmi(r1, &number);
   __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
   __ b(ne, &non_number);
   __ bind(&number);
@@ -606,8 +450,7 @@
   // r1: function
 
   // Check that the value isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, miss);
+  __ JumpIfSmi(r1, miss);
 
   // Check that the value is a JSFunction.
   __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
@@ -615,7 +458,8 @@
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+  __ InvokeFunction(r1, actual, JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -641,7 +485,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -678,8 +525,7 @@
   if (id == IC::kCallIC_Miss) {
     Label invoke, global;
     __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
-    __ tst(r2, Operand(kSmiTagMask));
-    __ b(eq, &invoke);
+    __ JumpIfSmi(r2, &invoke);
     __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
     __ b(eq, &global);
     __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -693,22 +539,33 @@
   }
 
   // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   ParameterCount actual(argc);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+  __ InvokeFunction(r1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
 }
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -716,8 +573,8 @@
 
   // Get the receiver of the function from the stack into r1.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
-  GenerateMiss(masm, argc);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
@@ -728,7 +585,7 @@
   // -----------------------------------
 
   GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
@@ -738,7 +595,7 @@
   //  -- lr    : return address
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
@@ -785,7 +642,7 @@
   __ b(ne, &slow_load);
   __ mov(r0, Operand(r2, ASR, kSmiTagSize));
   // r0: untagged index
-  GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
+  __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
   __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
   __ jmp(&do_call);
 
@@ -824,7 +681,10 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
-  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -852,8 +712,7 @@
 
   // Check if the name is a string.
   Label miss;
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  __ JumpIfSmi(r2, &miss);
   __ IsObjectJSStringType(r2, r0, &miss);
 
   GenerateCallNormal(masm, argc);
@@ -874,9 +733,8 @@
   // -----------------------------------
 
   // Probe the stub cache.
-  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
-                                         NOT_IN_LOOP,
-                                         MONOMORPHIC);
+  Code::Flags flags =
+      Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
   Isolate::Current()->stub_cache()->GenerateProbe(
       masm, flags, r0, r2, r3, r4, r5);
 
@@ -926,222 +784,176 @@
   __ TailCallExternalReference(ref, 2, 1);
 }
 
-// Returns the code marker, or the 0 if the code is not marked.
-static inline int InlinedICSiteMarker(Address address,
-                                      Address* inline_end_address) {
-  if (V8::UseCrankshaft()) return false;
 
-  // If the instruction after the call site is not the pseudo instruction nop1
-  // then this is not related to an inlined in-object property load. The nop1
-  // instruction is located just after the call to the IC in the deferred code
-  // handling the miss in the inlined code. After the nop1 instruction there is
-  // a branch instruction for jumping back from the deferred code.
-  Address address_after_call = address + Assembler::kCallTargetAddressOffset;
-  Instr instr_after_call = Assembler::instr_at(address_after_call);
-  int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+                                                Register object,
+                                                Register key,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                Label* unmapped_case,
+                                                Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
 
-  // A negative result means the code is not marked.
-  if (code_marker <= 0) return 0;
+  // Check that the receiver is a JSObject. Because of the map check
+  // later, we do not need to check for interceptors or whether it
+  // requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+  __ b(lt, slow_case);
 
-  Address address_after_nop = address_after_call + Assembler::kInstrSize;
-  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
-  // There may be some reg-reg move and frame merging code to skip over before
-  // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
-  // code.
-  while (!Assembler::IsBranch(instr_after_nop)) {
-    address_after_nop += Assembler::kInstrSize;
-    instr_after_nop = Assembler::instr_at(address_after_nop);
-  }
+  // Check that the key is a positive smi.
+  __ tst(key, Operand(0x80000001));
+  __ b(ne, slow_case);
 
-  // Find the end of the inlined code for handling the load.
-  int b_offset =
-      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
-  ASSERT(b_offset < 0);  // Jumping back from deferred code.
-  *inline_end_address = address_after_nop + b_offset;
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
 
-  return code_marker;
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
+  __ cmp(key, Operand(scratch2));
+  __ b(cs, unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  const int kOffset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+  __ mov(scratch3, Operand(kPointerSize >> 1));
+  __ mul(scratch3, key, scratch3);
+  __ add(scratch3, scratch3, Operand(kOffset));
+
+  __ ldr(scratch2, MemOperand(scratch1, scratch3));
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch2, scratch3);
+  __ b(eq, unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  __ mov(scratch3, Operand(kPointerSize >> 1));
+  __ mul(scratch3, scratch2, scratch3);
+  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+  return MemOperand(scratch1, scratch3);
 }
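A note on the index arithmetic in GenerateMappedArgumentsLookup above: on
32-bit ARM a smi stores its value shifted left by one (kSmiTagSize == 1, tag
bit 0), so multiplying the tagged key by kPointerSize >> 1 yields
untagged_index * kPointerSize without an explicit untag. A minimal sketch
under that assumption; the header-size constant below is illustrative:

#include <stdint.h>

// Sketch of the offset computation used above, assuming 32-bit smis
// (value stored shifted left by one, tag bit 0) and 4-byte pointers.
static const int kPointerSize = 4;
static const int kHeapObjectTag = 1;
static const int kFixedArrayHeaderSize = 8;  // illustrative header size

inline int32_t MappedElementOffset(int32_t tagged_key) {
  // tagged_key == untagged_index << 1, so scaling by kPointerSize >> 1
  // is the same as untagged_index * kPointerSize.
  int32_t scaled = tagged_key * (kPointerSize >> 1);
  // Skip the parameter map header plus the two leading slots
  // (context and backing store), minus the heap-object tag.
  return scaled + kFixedArrayHeaderSize + 2 * kPointerSize - kHeapObjectTag;
}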
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for handling the load if this is an
-  // inlined IC call site.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
-  // The immediate must be representable in 12 bits.
-  ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
-  Address ldr_property_instr_address =
-      inline_end_address - Assembler::kInstrSize;
-  ASSERT(Assembler::IsLdrRegisterImmediate(
-      Assembler::instr_at(ldr_property_instr_address)));
-  Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
-  ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
-      ldr_property_instr, offset - kHeapObjectTag);
-  Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
-
-  // Indicate that code has changed.
-  CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
-
-  // Patch the map check.
-  // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
-  // 4 instructions before the end of the inlined code.
-  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
-  int ldr_map_offset = -4;
-  Address ldr_map_instr_address =
-      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+              DONT_DO_SMI_CHECK);
+  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmp(key, Operand(scratch));
+  __ b(cs, slow_case);
+  __ mov(scratch, Operand(kPointerSize >> 1));
+  __ mul(scratch, key, scratch);
+  __ add(scratch,
+         scratch,
+         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  return MemOperand(backing_store, scratch);
 }
 
 
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  // Find the end of the inlined code for handling the contextual load if
-  // this is inlined IC call site.
-  Address inline_end_address = 0;
-  int marker = InlinedICSiteMarker(address, &inline_end_address);
-  if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
-        (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
-    return false;
-  }
-  // On ARM we don't rely on the is_dont_delete argument as the hint is already
-  // embedded in the code marker.
-  bool marker_is_dont_delete =
-      marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
-
-  // These are the offsets from the end of the inlined code.
-  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
-  int ldr_map_offset = marker_is_dont_delete ? -5: -8;
-  int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
-  if (FLAG_debug_code && marker_is_dont_delete) {
-    // Three extra instructions were generated to check for the_hole_value.
-    ldr_map_offset -= 3;
-    ldr_cell_offset -= 3;
-  }
-  Address ldr_map_instr_address =
-      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
-  Address ldr_cell_instr_address =
-      inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
-
-  // Patch the map check.
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  // Patch the cell address.
-  Assembler::set_target_address_at(ldr_cell_instr_address,
-                                   reinterpret_cast<Address>(cell));
-
-  return true;
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Label slow, notin;
+  MemOperand mapped_location =
+      GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
+  __ ldr(r0, mapped_location);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in r2.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
+  __ ldr(r2, unmapped_location);
+  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+  __ cmp(r2, r3);
+  __ b(eq, &slow);
+  __ mov(r0, r2);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for the store if there is an
-  // inlined version of the store.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Compute the address of the map load instruction.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
-       Assembler::kInstrSize);
-
-  // Update the offsets if initializing the inlined store. No reason
-  // to update the offsets when clearing the inlined version because
-  // it will bail out in the map check.
-  if (map != HEAP->null_value()) {
-    // Patch the offset in the actual store instruction.
-    Address str_property_instr_address =
-        ldr_map_instr_address + 3 * Assembler::kInstrSize;
-    Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
-    ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
-    str_property_instr = Assembler::SetStrRegisterImmediateOffset(
-        str_property_instr, offset - kHeapObjectTag);
-    Assembler::instr_at_put(str_property_instr_address, str_property_instr);
-
-    // Patch the offset in the add instruction that is part of the
-    // write barrier.
-    Address add_offset_instr_address =
-        str_property_instr_address + Assembler::kInstrSize;
-    Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
-    ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
-    add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
-        add_offset_instr, offset - kHeapObjectTag);
-    Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
-
-    // Indicate that code has changed.
-    CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
-  }
-
-  // Patch the map check.
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-
-  return true;
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
+  //  -- lr     : return address
+  // -----------------------------------
+  Label slow, notin;
+  MemOperand mapped_location =
+      GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
+  __ str(r0, mapped_location);
+  __ add(r6, r3, r5);
+  __ RecordWrite(r3, r6, r9);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in r3.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
+  __ str(r0, unmapped_location);
+  __ add(r6, r3, r4);
+  __ RecordWrite(r3, r6, r9);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the map check.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
-      Assembler::kInstrSize);
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for handling the store if this is an
-  // inlined IC call site.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the map check.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
-      Assembler::kInstrSize);
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+                                             int argc) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  Label slow, notin;
+  // Load receiver.
+  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+  MemOperand mapped_location =
+      GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
+  __ ldr(r1, mapped_location);
+  GenerateFunctionTailCall(masm, argc, &slow, r3);
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in r3.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
+  __ ldr(r1, unmapped_location);
+  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+  __ cmp(r1, r3);
+  __ b(eq, &slow);
+  GenerateFunctionTailCall(masm, argc, &slow, r3);
+  __ bind(&slow);
+  GenerateMiss(masm, argc);
 }
 
 
 Object* KeyedLoadIC_Miss(Arguments args);
 
 
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
   //  -- r0     : key
@@ -1153,8 +965,11 @@
 
   __ Push(r1, r0);
 
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+  // Perform tail call to the entry.
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1195,11 +1010,8 @@
   GenerateKeyedLoadReceiverCheck(
       masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
 
-  // Check the "has fast elements" bit in the receiver's map which is
-  // now in r2.
-  __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
-  __ tst(r3, Operand(1 << Map::kHasFastElements));
-  __ b(eq, &check_number_dictionary);
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(r2, r3, &check_number_dictionary);
 
   GenerateFastArrayLoad(
       masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
@@ -1218,7 +1030,7 @@
   __ cmp(r3, ip);
   __ b(ne, &slow);
   __ mov(r2, Operand(r0, ASR, kSmiTagSize));
-  GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
+  __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
   __ Ret();
 
   // Slow case, key and receiver still in r0 and r1.
@@ -1345,7 +1157,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -1385,11 +1197,11 @@
       1);
 
   __ bind(&slow);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ---------- S t a t e --------------
   //  -- r0     : value
   //  -- r1     : key
@@ -1400,8 +1212,29 @@
   // Push receiver, key and value for runtime call.
   __ Push(r2, r1, r0);
 
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
+  //  -- lr     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(r2, r1, r0);
+
+  // The slow case calls into the runtime to complete the store without
+  // triggering an IC miss that would otherwise cause a transition to the
+  // generic stub.
   ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
@@ -1444,11 +1277,9 @@
   // r4 and r5 are used as general scratch registers.
 
   // Check that the key is a smi.
-  __ tst(key, Operand(kSmiTagMask));
-  __ b(ne, &slow);
+  __ JumpIfNotSmi(key, &slow);
   // Check that the object isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, &slow);
+  __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
   __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
@@ -1460,9 +1291,13 @@
   __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   __ cmp(r4, Operand(JS_ARRAY_TYPE));
   __ b(eq, &array);
-  // Check that the object is some kind of JS object.
-  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+  // Check that the object is some kind of JSObject.
+  __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
   __ b(lt, &slow);
+  __ cmp(r4, Operand(JS_PROXY_TYPE));
+  __ b(eq, &slow);
+  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(eq, &slow);
 
   // Object case: Check key against length in the elements array.
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -1496,7 +1331,7 @@
   __ cmp(key, Operand(ip));
   __ b(hs, &slow);
   // Calculate key + 1 as smi.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ add(r4, key, Operand(Smi::FromInt(1)));
   __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
   __ b(&fast);
@@ -1543,10 +1378,8 @@
   // -----------------------------------
 
   // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
-                                         NOT_IN_LOOP,
-                                         MONOMORPHIC,
-                                         strict_mode);
+  Code::Flags flags =
+      Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
 
   Isolate::Current()->stub_cache()->GenerateProbe(
       masm, flags, r1, r2, r3, r4, r5);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index efd226e..30ccd05 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -61,22 +61,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
-  for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
-  for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -112,21 +111,18 @@
 template<int R, int I, int T>
 void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  inputs_.PrintOperandsTo(stream);
+  for (int i = 0; i < inputs_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    inputs_[i]->PrintTo(stream);
+  }
 }
 
 
 template<int R, int I, int T>
 void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
-  for (int i = 0; i < N; i++) {
+  for (int i = 0; i < results_.length(); i++) {
     if (i > 0) stream->Add(" ");
-    elems_[i]->PrintTo(stream);
+    results_[i]->PrintTo(stream);
   }
 }
 
@@ -151,7 +147,7 @@
 }
 
 
-void LGap::PrintDataTo(StringStream* stream) const {
+void LGap::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < 4; i++) {
     stream->Add("(");
     if (parallel_moves_[i] != NULL) {
@@ -238,6 +234,13 @@
 }
 
 
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -262,12 +265,6 @@
 }
 
 
-void LTypeofIs::PrintDataTo(StringStream* stream) {
-  InputAt(0)->PrintTo(stream);
-  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
 void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if typeof ");
   InputAt(0)->PrintTo(stream);
@@ -301,19 +298,26 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[r2] #%d / ", arity());
 }
 
 
 void LCallNamed::PrintDataTo(StringStream* stream) {
-  SmartPointer<char> name_string = name()->ToCString();
+  SmartArrayPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
 
 void LCallGlobal::PrintDataTo(StringStream* stream) {
-  SmartPointer<char> name_string = name()->ToCString();
+  SmartArrayPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
@@ -330,13 +334,6 @@
 }
 
 
-void LClassOfTest::PrintDataTo(StringStream* stream) {
-  stream->Add("= class_of_test(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
 void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
   arguments()->PrintTo(stream);
 
@@ -375,6 +372,15 @@
 }
 
 
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
 void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add("[");
@@ -423,8 +429,7 @@
     LLabel* label = LLabel::cast(first_instr);
     if (last_instr->IsGoto()) {
       LGoto* goto_instr = LGoto::cast(last_instr);
-      if (!goto_instr->include_stack_check() &&
-          label->IsRedundant() &&
+      if (label->IsRedundant() &&
           !label->is_loop_header()) {
         bool can_eliminate = true;
         for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -449,7 +454,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LGap* gap = new LGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -541,7 +546,8 @@
 
 void LChunkBuilder::Abort(const char* format, ...) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
     PrintF("Aborting LChunk building in @\"%s\": ", *name);
     va_list arguments;
     va_start(arguments, format);
@@ -795,6 +801,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new LDeoptimize);
 }
@@ -808,7 +819,7 @@
 
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new LBitI(op, left, right));
+    return DefineAsRegister(new LBitI(op, left, right));
   } else {
     ASSERT(instr->representation().IsTagged());
     ASSERT(instr->left()->representation().IsTagged());
@@ -835,11 +846,11 @@
   }
 
   ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
-  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
-  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->left());
 
-  HValue* right_value = instr->OperandAt(1);
+  HValue* right_value = instr->right();
   LOperand* right = NULL;
   int constant_value = 0;
   if (right_value->IsConstant()) {
@@ -847,27 +858,25 @@
     right = chunk_->DefineConstantOperand(constant);
     constant_value = constant->Integer32Value() & 0x1f;
   } else {
-    right = UseRegister(right_value);
+    right = UseRegisterAtStart(right_value);
   }
 
   // Shift operations can only deoptimize if we do a logical shift
   // by 0 and the result cannot be truncated to int32.
-  bool can_deopt = (op == Token::SHR && constant_value == 0);
-  if (can_deopt) {
-    bool can_truncate = true;
-    for (int i = 0; i < instr->uses()->length(); i++) {
-      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
-        can_truncate = false;
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
         break;
       }
     }
-    can_deopt = !can_truncate;
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
-  if (can_deopt) AssignEnvironment(result);
-  return result;
+      DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
 }
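The does_deopt computation above hinges on the case the comment names: a
logical shift right by zero yields an unsigned 32-bit value that may not fit
in a signed int32 unless every use truncates back to int32. A small,
self-contained C++ illustration of the range problem (the JavaScript analogue
is x >>> 0 on a negative x):

#include <stdint.h>
#include <stdio.h>

int main() {
  int32_t x = -1;
  // A logical (unsigned) shift right by 0 just reinterprets the bits.
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;  // 4294967295
  // The result is above INT32_MAX, so it cannot be represented as an
  // int32; unless every use truncates back to int32, the instruction
  // must be able to deoptimize and box the value as a heap number.
  printf("shifted = %u, fits in int32: %s\n",
         static_cast<unsigned>(shifted),
         shifted <= 2147483647u ? "yes" : "no");
  return 0;
}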
 
 
@@ -880,7 +889,7 @@
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
   LArithmeticD* result = new LArithmeticD(op, left, right);
-  return DefineSameAsFirst(result);
+  return DefineAsRegister(result);
 }
 
 
@@ -978,18 +987,7 @@
     if (FLAG_stress_environments && !instr->HasEnvironment()) {
       instr = AssignEnvironment(instr);
     }
-    if (current->IsTest() && !instr->IsGoto()) {
-      ASSERT(instr->IsControl());
-      HTest* test = HTest::cast(current);
-      instr->set_hydrogen_value(test->value());
-      HBasicBlock* first = test->FirstSuccessor();
-      HBasicBlock* second = test->SecondSuccessor();
-      ASSERT(first != NULL && second != NULL);
-      instr->SetBranchTargets(first->block_id(), second->block_id());
-    } else {
-      instr->set_hydrogen_value(current);
-    }
-
+    instr->set_hydrogen_value(current);
     chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
@@ -1011,6 +1009,8 @@
                                           outer);
   int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
     HValue* value = hydrogen_env->values()->at(i);
     LOperand* op = NULL;
     if (value->IsArgumentsObject()) {
@@ -1028,106 +1028,23 @@
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
-                                   instr->include_stack_check());
-  if (instr->include_stack_check())  result = AssignPointerMap(result);
-  return result;
+  return new LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
   HValue* v = instr->value();
   if (v->EmitAtUses()) {
-    if (v->IsClassOfTest()) {
-      HClassOfTest* compare = HClassOfTest::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
-                                       TempRegister());
-    } else if (v->IsCompare()) {
-      HCompare* compare = HCompare::cast(v);
-      Token::Value op = compare->token();
-      HValue* left = compare->left();
-      HValue* right = compare->right();
-      Representation r = compare->GetInputRepresentation();
-      if (r.IsInteger32()) {
-        ASSERT(left->representation().IsInteger32());
-        ASSERT(right->representation().IsInteger32());
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else if (r.IsDouble()) {
-        ASSERT(left->representation().IsDouble());
-        ASSERT(right->representation().IsDouble());
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else {
-        ASSERT(left->representation().IsTagged());
-        ASSERT(right->representation().IsTagged());
-        bool reversed = op == Token::GT || op == Token::LTE;
-        LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
-        LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
-        LInstruction* result = new LCmpTAndBranch(left_operand,
-                                                  right_operand);
-        return MarkAsCall(result, instr);
-      }
-    } else if (v->IsIsSmi()) {
-      HIsSmi* compare = HIsSmi::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsSmiAndBranch(Use(compare->value()));
-    } else if (v->IsHasInstanceType()) {
-      HHasInstanceType* compare = HHasInstanceType::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-      return new LHasInstanceTypeAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsHasCachedArrayIndex()) {
-      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasCachedArrayIndexAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsNull()) {
-      HIsNull* compare = HIsNull::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsObject()) {
-      HIsObject* compare = HIsObject::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      LOperand* temp = TempRegister();
-      return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
-    } else if (v->IsCompareJSObjectEq()) {
-      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
-      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
-                                         UseRegisterAtStart(compare->right()));
-    } else if (v->IsInstanceOf()) {
-      HInstanceOf* instance_of = HInstanceOf::cast(v);
-      LInstruction* result =
-          new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
-                                   UseFixed(instance_of->right(), r1));
-      return MarkAsCall(result, instr);
-    } else if (v->IsTypeofIs()) {
-      HTypeofIs* typeof_is = HTypeofIs::cast(v);
-      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
-    } else if (v->IsIsConstructCall()) {
-      return new LIsConstructCallAndBranch(TempRegister());
-    } else {
-      if (v->IsConstant()) {
-        if (HConstant::cast(v)->ToBoolean()) {
-          return new LGoto(instr->FirstSuccessor()->block_id());
-        } else {
-          return new LGoto(instr->SecondSuccessor()->block_id());
-        }
-      }
-      Abort("Undefined compare before branch");
-      return NULL;
-    }
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
   }
-  return new LBranch(UseRegisterAtStart(v));
+  return AssignEnvironment(new LBranch(UseRegister(v)));
 }
 
 
+
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1157,7 +1074,7 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
+      new LInstanceOfKnownGlobal(UseFixed(instr->left(), r0), FixedTemp(r4));
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -1182,8 +1099,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
@@ -1212,6 +1134,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1224,15 +1154,15 @@
     LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
     switch (op) {
       case kMathAbs:
-        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
       case kMathFloor:
         return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
       case kMathSqrt:
-        return DefineSameAsFirst(result);
+        return DefineAsRegister(result);
       case kMathRound:
         return AssignEnvironment(DefineAsRegister(result));
       case kMathPowHalf:
-        return DefineSameAsFirst(result);
+        return DefineAsRegister(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1310,7 +1240,7 @@
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
-  return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+  return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
 }
 
 
@@ -1355,15 +1285,20 @@
       mod = new LModI(value, UseOrConstant(instr->right()));
     } else {
       LOperand* dividend = UseRegister(instr->left());
-      LOperand* divisor = UseRegisterAtStart(instr->right());
+      LOperand* divisor = UseRegister(instr->right());
       mod = new LModI(dividend,
                       divisor,
                       TempRegister(),
-                      FixedTemp(d1),
-                      FixedTemp(d2));
+                      FixedTemp(d10),
+                      FixedTemp(d11));
     }
 
-    return AssignEnvironment(DefineSameAsFirst(mod));
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      return AssignEnvironment(DefineAsRegister(mod));
+    } else {
+      return DefineAsRegister(mod);
+    }
   } else if (instr->representation().IsTagged()) {
     return DoArithmeticT(Token::MOD, instr);
   } else {
@@ -1383,16 +1318,22 @@
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* left;
     LOperand* right = UseOrConstant(instr->MostConstantOperand());
     LOperand* temp = NULL;
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+        (instr->CheckFlag(HValue::kCanOverflow) ||
+        !right->IsConstantOperand())) {
+      left = UseRegister(instr->LeastConstantOperand());
       temp = TempRegister();
+    } else {
+      left = UseRegisterAtStart(instr->LeastConstantOperand());
     }
-    LMulI* mul = new LMulI(left, right, temp);
-    return AssignEnvironment(DefineSameAsFirst(mul));
+    return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
+
   } else {
     return DoArithmeticT(Token::MUL, instr);
   }
@@ -1406,7 +1347,7 @@
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new LSubI(left, right);
-    LInstruction* result = DefineSameAsFirst(sub);
+    LInstruction* result = DefineAsRegister(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
     }
@@ -1426,7 +1367,7 @@
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
     LAddI* add = new LAddI(left, right);
-    LInstruction* result = DefineSameAsFirst(add);
+    LInstruction* result = DefineAsRegister(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
     }
@@ -1457,71 +1398,83 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
   Token::Value op = instr->token();
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  bool reversed = (op == Token::GT || op == Token::LTE);
+  LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
+  LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+  LCmpT* result = new LCmpT(left, right);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+    HCompareIDAndBranch* instr) {
   Representation r = instr->GetInputRepresentation();
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(left, right));
-  } else if (r.IsDouble()) {
+    return new LCmpIDAndBranch(left, right);
+  } else {
+    ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(left, right));
-  } else {
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-    bool reversed = (op == Token::GT || op == Token::LTE);
-    LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
-    LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
-    LCmpT* result = new LCmpT(left, right);
-    return MarkAsCall(DefineFixed(result, r0), instr);
+    return new LCmpIDAndBranch(left, right);
   }
 }
 
 
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
-    HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
-  return DefineAsRegister(result);
+  return new LCmpObjectEqAndBranch(left, right);
 }
 
 
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new LIsNull(value));
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+    HCompareConstantEqAndBranch* instr) {
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new LIsObject(value));
+  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseAtStart(instr->value());
-
-  return DefineAsRegister(new LIsSmi(value));
+  LOperand* temp = TempRegister();
+  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
+  return new LIsSmiAndBranch(Use(instr->value()));
+}
 
-  return DefineAsRegister(new LHasInstanceType(value));
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
@@ -1534,19 +1487,19 @@
 }
 
 
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
-    HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegister(instr->value());
-
-  return DefineAsRegister(new LHasCachedArrayIndex(value));
+  return new LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseTempRegister(instr->value());
-  return DefineSameAsFirst(new LClassOfTest(value));
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister());
 }
 
 
@@ -1556,23 +1509,23 @@
 }
 
 
-LInstruction* LChunkBuilder::DoExternalArrayLength(
-    HExternalArrayLength* instr) {
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+    HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LExternalArrayLength(array));
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
 }
 
 
-LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayLength(array));
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+  LOperand* object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
   LValueOf* result = new LValueOf(object, TempRegister());
-  return AssignEnvironment(DefineSameAsFirst(result));
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
@@ -1595,6 +1548,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
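+  // No lithium instruction is needed here; returning NULL emits nothing
+  // for the HUseConst.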
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
@@ -1614,7 +1580,7 @@
         LOperand* temp1 = TempRegister();
         LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
                                                       : NULL;
-        LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d3)
+        LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
                                                       : NULL;
         res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
         res = AssignEnvironment(res);
@@ -1703,6 +1669,49 @@
 }
 
 
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11)));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve d11 explicitly.
+    LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11));
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+    return AssignEnvironment(DefineAsRegister(res));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a noop.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LOperand* temp3 = FixedTemp(d11);
+    LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+    return AssignEnvironment(DefineSameAsFirst(res));
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   return new LReturn(UseFixed(instr->value(), r0));
 }
@@ -1834,25 +1843,42 @@
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterAtStart(instr->key());
   LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
-  return AssignEnvironment(DefineSameAsFirst(result));
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+    HLoadKeyedFastDoubleElement* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* elements = UseTempRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LLoadKeyedFastDoubleElement* result =
+      new LLoadKeyedFastDoubleElement(elements, key);
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
-  ExternalArrayType array_type = instr->array_type();
+  ElementsKind elements_kind = instr->elements_kind();
   Representation representation(instr->representation());
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
       new LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
-  return (array_type == kExternalUnsignedIntArray) ?
+  return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
       AssignEnvironment(load_instr) : load_instr;
 }
 
@@ -1886,22 +1912,42 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+    HStoreKeyedFastDoubleElement* instr) {
+  ASSERT(instr->value()->representation().IsDouble());
+  ASSERT(instr->elements()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* val = UseTempRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
   Representation representation(instr->value()->representation());
-  ExternalArrayType array_type = instr->array_type();
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ElementsKind elements_kind = instr->elements_kind();
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  bool val_is_temp_register = array_type == kExternalPixelArray ||
-      array_type == kExternalFloatArray;
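+  // For pixel and float external arrays the stored value may be clobbered
+  // by the store, so request a temp use of it.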
+  bool val_is_temp_register =
+      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
   LOperand* val = val_is_temp_register
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
@@ -1946,9 +1992,16 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
   LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
@@ -2055,13 +2108,14 @@
 }
 
 
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
-  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
-  return DefineAsRegister(new LIsConstructCall());
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2097,7 +2151,12 @@
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
-  return MarkAsCall(new LStackCheck, instr);
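+  // Stack checks at function entry are emitted as calls; checks on backward
+  // branches carry an environment and pointer map instead.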
+  if (instr->is_function_entry()) {
+    return MarkAsCall(new LStackCheck, instr);
+  } else {
+    ASSERT(instr->is_backwards_branch());
+    return AssignEnvironment(AssignPointerMap(new LStackCheck));
+  }
 }
 
 
@@ -2106,8 +2165,8 @@
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->function(),
-                                               false,
-                                               undefined);
+                                               undefined,
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2121,4 +2180,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LIn* result = new LIn(key, object);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index e1c65d2..8c18760 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -32,6 +32,7 @@
 #include "lithium-allocator.h"
 #include "lithium.h"
 #include "safepoint-table.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
@@ -73,15 +74,15 @@
   V(CheckMap)                                   \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
-  V(ClassOfTest)                                \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
   V(ClassOfTestAndBranch)                       \
-  V(CmpID)                                      \
+  V(CmpConstantEqAndBranch)                     \
   V(CmpIDAndBranch)                             \
-  V(CmpJSObjectEq)                              \
-  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpObjectEqAndBranch)                       \
   V(CmpMapAndBranch)                            \
   V(CmpT)                                       \
-  V(CmpTAndBranch)                              \
   V(ConstantD)                                  \
   V(ConstantI)                                  \
   V(ConstantT)                                  \
@@ -90,28 +91,26 @@
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
-  V(ExternalArrayLength)                        \
-  V(FixedArrayLength)                           \
+  V(ElementsKind)                               \
+  V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
-  V(Gap)                                        \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
-  V(HasCachedArrayIndex)                        \
   V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceType)                            \
   V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
   V(InstanceOf)                                 \
-  V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
-  V(IsNull)                                     \
+  V(InvokeFunction)                             \
+  V(IsConstructCallAndBranch)                   \
   V(IsNullAndBranch)                            \
-  V(IsObject)                                   \
   V(IsObjectAndBranch)                          \
-  V(IsSmi)                                      \
   V(IsSmiAndBranch)                             \
+  V(IsUndetectableAndBranch)                    \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -121,6 +120,7 @@
   V(LoadFunctionPrototype)                      \
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
+  V(LoadKeyedFastDoubleElement)                 \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -147,42 +147,38 @@
   V(StoreContextSlot)                           \
   V(StoreGlobalCell)                            \
   V(StoreGlobalGeneric)                         \
+  V(StoreKeyedFastDoubleElement)                \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
   V(SubI)                                       \
   V(TaggedToI)                                  \
+  V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
   V(Typeof)                                     \
-  V(TypeofIs)                                   \
   V(TypeofIsAndBranch)                          \
-  V(IsConstructCall)                            \
-  V(IsConstructCallAndBranch)                   \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
   V(ValueOf)
 
 
-#define DECLARE_INSTRUCTION(type)                \
-  virtual bool Is##type() const { return true; } \
-  static L##type* cast(LInstruction* instr) {    \
-    ASSERT(instr->Is##type());                   \
-    return reinterpret_cast<L##type*>(instr);    \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
   }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
-  virtual void CompileToNative(LCodeGen* generator);        \
-  virtual const char* Mnemonic() const { return mnemonic; } \
-  DECLARE_INSTRUCTION(type)
-
-
 #define DECLARE_HYDROGEN_ACCESSOR(type)     \
   H##type* hydrogen() const {               \
     return H##type::cast(hydrogen_value()); \
@@ -204,13 +200,27 @@
   virtual void PrintDataTo(StringStream* stream) = 0;
   virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
 
   virtual bool IsControl() const { return false; }
-  virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
 
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
@@ -265,37 +275,6 @@
 };
 
 
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
-  OperandContainer() {
-    for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
-  }
-  int length() { return NumElements; }
-  ElementType& operator[](int i) {
-    ASSERT(i < length());
-    return elems_[i];
-  }
-  void PrintOperandsTo(StringStream* stream);
-
- private:
-  ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
-  int length() { return 0; }
-  void PrintOperandsTo(StringStream* stream) { }
-  ElementType& operator[](int i) {
-    UNREACHABLE();
-    static ElementType t = 0;
-    return t;
-  }
-};
-
-
 // R = number of result operands (0 or 1).
 // I = number of input operands.
 // T = number of temporary operands.
@@ -318,9 +297,9 @@
   virtual void PrintOutputOperandTo(StringStream* stream);
 
  protected:
-  OperandContainer<LOperand*, R> results_;
-  OperandContainer<LOperand*, I> inputs_;
-  OperandContainer<LOperand*, T> temps_;
+  EmbeddedContainer<LOperand*, R> results_;
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
 };
 
 
@@ -334,8 +313,13 @@
     parallel_moves_[AFTER] = NULL;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
-  virtual void PrintDataTo(StringStream* stream) const;
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because LGap
+  // has sub-classes.
+  virtual bool IsGap() const { return true; }
+  virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
 
   bool IsRedundant() const;
 
@@ -365,21 +349,26 @@
 };
 
 
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
 class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
-  LGoto(int block_id, bool include_stack_check = false)
-    : block_id_(block_id), include_stack_check_(include_stack_check) { }
+  explicit LGoto(int block_id) : block_id_(block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
   virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int block_id() const { return block_id_; }
-  bool include_stack_check() const { return include_stack_check_; }
 
  private:
   int block_id_;
-  bool include_stack_check_;
 };
 
 
@@ -453,19 +442,17 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
-  DECLARE_INSTRUCTION(ControlInstruction)
   virtual bool IsControl() const { return true; }
 
-  int true_block_id() const { return true_block_id_; }
-  int false_block_id() const { return false_block_id_; }
-  void SetBranchTargets(int true_block_id, int false_block_id) {
-    true_block_id_ = true_block_id;
-    false_block_id_ = false_block_id;
-  }
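+  // Branch targets are no longer stored on the instruction; they are read
+  // from the successors of the corresponding hydrogen control instruction.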
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
 
  private:
-  int true_block_id_;
-  int false_block_id_;
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
 };
 
 
@@ -581,23 +568,6 @@
 };
 
 
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
-  LCmpID(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
-
-  Token::Value op() const { return hydrogen()->token(); }
-  bool is_double() const {
-    return hydrogen()->GetInputRepresentation().IsDouble();
-  }
-};
-
-
 class LCmpIDAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -606,7 +576,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
+  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
 
   Token::Value op() const { return hydrogen()->token(); }
   bool is_double() const {
@@ -632,41 +602,31 @@
 };
 
 
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
  public:
-  LCmpJSObjectEq(LOperand* left, LOperand* right) {
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+                               "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
 };
 
 
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
  public:
-  LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+  explicit LCmpConstantEqAndBranch(LOperand* left) {
     inputs_[0] = left;
-    inputs_[1] = right;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
-                               "cmp-jsobject-eq-and-branch")
+  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+                               "cmp-constant-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
 };
 
 
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LIsNull(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
-  DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
-  bool is_strict() const { return hydrogen()->is_strict(); }
-};
-
 class LIsNullAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsNullAndBranch(LOperand* value) {
@@ -674,7 +634,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNull)
+  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
 
   bool is_strict() const { return hydrogen()->is_strict(); }
 
@@ -682,17 +642,7 @@
 };
 
 
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
- public:
-  explicit LIsObject(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -700,22 +650,12 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LIsSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
-  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -723,19 +663,24 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
  public:
-  explicit LHasInstanceType(LOperand* value) {
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
 };
 
 
@@ -747,7 +692,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
                                "has-instance-type-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -764,17 +709,6 @@
 };
 
 
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LHasCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
 class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -783,18 +717,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                                "has-cached-array-index-and-branch")
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LClassOfTest(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -809,7 +732,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
                                "class-of-test-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -823,21 +746,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
-
-  Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
-  LCmpTAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -854,17 +763,6 @@
 };
 
 
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
-  LInstanceOfAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
 class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
  public:
   LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@@ -977,7 +875,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-  DECLARE_HYDROGEN_ACCESSOR(Value)
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1016,25 +914,26 @@
 };
 
 
-class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LExternalArrayLength(LOperand* value) {
+  explicit LFixedArrayBaseLength(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+                               "fixed-array-base-length")
+  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
 };
 
 
-class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LFixedArrayLength(LOperand* value) {
+  explicit LElementsKind(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
+  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
 };
 
 
@@ -1104,6 +1003,7 @@
 
   Token::Value op() const { return op_; }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1120,6 +1020,7 @@
     inputs_[1] = right;
   }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1227,6 +1128,22 @@
 };
 
 
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+                               "load-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1241,8 +1158,8 @@
 
   LOperand* external_pointer() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  ExternalArrayType array_type() const {
-    return hydrogen()->array_type();
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
   }
 };
 
@@ -1358,6 +1275,11 @@
 };
 
 
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
 class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1412,6 +1334,23 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallKeyed(LOperand* key) {
@@ -1540,7 +1479,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
 
   bool truncating() { return hydrogen()->CanTruncateToInt32(); }
 };
@@ -1560,7 +1499,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
 
   bool truncating() { return hydrogen()->CanTruncateToInt32(); }
 };
@@ -1665,6 +1604,28 @@
 };
 
 
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastDoubleElement(LOperand* elements,
+                               LOperand* key,
+                               LOperand* val) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+                               "store-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
 class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
@@ -1701,12 +1662,28 @@
   LOperand* external_pointer() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  ExternalArrayType array_type() const {
-    return hydrogen()->array_type();
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
   }
 };
 
 
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
 class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharCodeAt(LOperand* string, LOperand* index) {
@@ -1816,6 +1793,44 @@
 };
 
 
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
 class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -1867,21 +1882,6 @@
 };
 
 
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LTypeofIs(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
-  Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
@@ -1889,7 +1889,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
@@ -1897,13 +1897,6 @@
 };
 
 
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
-  DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
 class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -1955,6 +1948,26 @@
 class LStackCheck: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
+  }
+
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
 };
 
 
@@ -2175,7 +2188,6 @@
 };
 
 #undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 3dcd427..4a201ab 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -40,37 +40,22 @@
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
-                     int deoptimization_index)
+                     Safepoint::DeoptMode mode)
       : codegen_(codegen),
         pointers_(pointers),
-        deoptimization_index_(deoptimization_index) { }
+        deopt_mode_(mode) { }
   virtual ~SafepointGenerator() { }
 
-  virtual void BeforeCall(int call_size) {
-    ASSERT(call_size >= 0);
-    // Ensure that we have enough space after the previous safepoint position
-    // for the generated code there.
-    int call_end = codegen_->masm()->pc_offset() + call_size;
-    int prev_jump_end =
-        codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
-    if (call_end < prev_jump_end) {
-      int padding_size = prev_jump_end - call_end;
-      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
-      while (padding_size > 0) {
-        codegen_->masm()->nop();
-        padding_size -= Assembler::kInstrSize;
-      }
-    }
-  }
+  virtual void BeforeCall(int call_size) const { }
 
-  virtual void AfterCall() {
-    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
  private:
   LCodeGen* codegen_;
   LPointerMap* pointers_;
-  int deoptimization_index_;
+  Safepoint::DeoptMode deopt_mode_;
 };
 
 
@@ -85,22 +70,23 @@
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
+      GenerateDeoptJumpTable() &&
       GenerateSafepointTable();
 }
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
-  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
 }
 
 
 void LCodeGen::Abort(const char* format, ...) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
     PrintF("Aborting LCodeGen in @\"%s\": ", *name);
     va_list arguments;
     va_start(arguments, format);
@@ -145,11 +131,25 @@
   // fp: Caller's frame pointer.
   // lr: Caller's pc.
 
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). r5 is zero for method calls and non-zero for
+  // function calls.
+  if (info_->is_strict_mode() || info_->is_native()) {
+    Label ok;
+    __ cmp(r5, Operand(0));
+    __ b(eq, &ok);
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ str(r2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
   __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
   __ add(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ mov(r0, Operand(slots));
@@ -174,23 +174,23 @@
       FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
-      __ CallRuntime(Runtime::kNewContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
-    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in both r0 and cp.  It replaces the context
     // passed to us.  It's saved in the stack and kept live in cp.
     __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     // Copy any necessary parameters into the context.
     int num_parameters = scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->AsSlot();
-      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+        __ mov(r1, Operand(Context::SlotOffset(var->index())));
         __ str(r0, MemOperand(cp, r1));
         // Update the write barrier. This clobbers all involved
         // registers, so we have to use two more registers to avoid
@@ -227,35 +227,59 @@
       instr->CompileToNative(this);
     }
   }
+  EnsureSpaceForLazyDeopt();
   return !is_aborted();
 }
 
 
-LInstruction* LCodeGen::GetNextInstruction() {
-  if (current_instruction_ < instructions_->length() - 1) {
-    return instructions_->at(current_instruction_ + 1);
-  } else {
-    return NULL;
-  }
-}
-
-
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
-  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
-    LDeferredCode* code = deferred_[i];
-    __ bind(code->entry());
-    code->Generate();
-    __ jmp(code->exit());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+      __ bind(code->entry());
+      code->Generate();
+      __ jmp(code->exit());
+    }
   }
 
-  // Force constant pool emission at the end of deferred code to make
-  // sure that no constant pools are emitted after the official end of
-  // the instruction sequence.
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted after it.
   masm()->CheckConstPool(true, false);
 
-  // Deferred code is the last part of the instruction sequence. Mark
-  // the generated code as done unless we bailed out.
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+  // immediate of a branch instruction.
+  // To simplify, we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table generates one instruction and inlines one
+  // 32-bit data word after it.
+  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+      deopt_jump_table_.length() * 2)) {
+    Abort("Generated code is too large");
+  }
+
+  // Block the constant pool emission during the jump table emission.
+  __ BlockConstPoolFor(deopt_jump_table_.length());
+  __ RecordComment("[ Deoptimisation jump table");
+  Label table_start;
+  __ bind(&table_start);
+  for (int i = 0; i < deopt_jump_table_.length(); i++) {
+    __ bind(&deopt_jump_table_[i].label);
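+    // Each entry is a single ldr that loads pc from the 32-bit deoptimization
+    // entry address emitted immediately after it by dd().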
+    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
+    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+  }
+  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
+      deopt_jump_table_.length() * 2);
+  __ RecordComment("]");
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
   if (!is_aborted()) status_ = DONE;
   return !is_aborted();
 }
@@ -263,7 +287,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -459,7 +483,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -496,11 +520,11 @@
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ Call(code, mode);
-  RegisterLazyDeoptimization(instr, safepoint_mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
-  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+  if (code->kind() == Code::BINARY_OP_IC ||
       code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
@@ -516,7 +540,7 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
@@ -525,37 +549,12 @@
                                        LInstruction* instr) {
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
-      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
-                                          SafepointMode safepoint_mode) {
-  // Create the environment to bailout to. If the call has side effects
-  // execution has to continue after the call otherwise execution can continue
-  // from a previous bailout point repeating the call.
-  LEnvironment* deoptimization_environment;
-  if (instr->HasDeoptimizationEnvironment()) {
-    deoptimization_environment = instr->deoptimization_environment();
-  } else {
-    deoptimization_environment = instr->environment();
-  }
-
-  RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
-    RecordSafepoint(instr->pointer_map(),
-                    deoptimization_environment->deoptimization_index());
-  } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(),
-        0,
-        deoptimization_environment->deoptimization_index());
-  }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
   if (!environment->HasBeenRegistered()) {
     // Physical stack frame layout:
     // -x ............. -4  0 ..................................... y
@@ -577,14 +576,17 @@
     Translation translation(&translations_, frame_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
-    environment->Register(deoptimization_index, translation.index());
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
     deoptimizations_.Add(environment);
   }
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -602,19 +604,18 @@
     return;
   }
 
+  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+
   if (cc == al) {
-    if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    if (FLAG_trap_on_deopt) {
-      Label done;
-      __ b(&done, NegateCondition(cc));
-      __ stop("trap_on_deopt");
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-      __ bind(&done);
-    } else {
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
+    // We often have several deopts to the same entry; reuse the last
+    // jump table entry if this is the case.
+    if (deopt_jump_table_.is_empty() ||
+        (deopt_jump_table_.last().address != entry)) {
+      deopt_jump_table_.Add(JumpTableEntry(entry));
     }
+    __ b(cc, &deopt_jump_table_.last().label);
   }
 }
 
@@ -647,6 +648,7 @@
     data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
     data->SetArgumentsStackHeight(i,
                                   Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
   }
   code->set_deoptimization_data(*data);
 }
@@ -678,16 +680,28 @@
 }
 
 
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
 void LCodeGen::RecordSafepoint(
     LPointerMap* pointers,
     Safepoint::Kind kind,
     int arguments,
-    int deoptimization_index) {
+    Safepoint::DeoptMode deopt_mode) {
   ASSERT(expected_safepoint_kind_ == kind);
 
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-      kind, arguments, deoptimization_index);
+      kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
@@ -704,36 +718,36 @@
 
 
 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
 }
 
 
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
   LPointerMap empty_pointers(RelocInfo::kNoPosition);
-  RecordSafepoint(&empty_pointers, deoptimization_index);
+  RecordSafepoint(&empty_pointers, deopt_mode);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
-                                            int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
-      deoptimization_index);
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
 }
 
 
 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
     LPointerMap* pointers,
     int arguments,
-    int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
-      deoptimization_index);
+    Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
 }
 
 
 void LCodeGen::RecordPosition(int position) {
-  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
 }
 
@@ -746,7 +760,7 @@
   }
   __ bind(label->label());
   current_block_ = label->block_id();
-  LCodeGen::DoGap(label);
+  DoGap(label);
 }
 
 
@@ -763,12 +777,11 @@
     LParallelMove* move = gap->GetParallelMove(inner_pos);
     if (move != NULL) DoParallelMove(move);
   }
+}
 
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
 }
 
 
@@ -831,6 +844,7 @@
 void LCodeGen::DoModI(LModI* instr) {
   if (instr->hydrogen()->HasPowerOf2Divisor()) {
     Register dividend = ToRegister(instr->InputAt(0));
+    Register result = ToRegister(instr->result());
 
     int32_t divisor =
         HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -840,17 +854,15 @@
     Label positive_dividend, done;
     __ cmp(dividend, Operand(0));
     __ b(pl, &positive_dividend);
-    __ rsb(dividend, dividend, Operand(0));
-    __ and_(dividend, dividend, Operand(divisor - 1));
-    __ rsb(dividend, dividend, Operand(0), SetCC);
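+    // dividend < 0: compute -((-dividend) & (divisor - 1)). A zero result
+    // here stands for -0, so deoptimize when kBailoutOnMinusZero is set.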
+    __ rsb(result, dividend, Operand(0));
+    __ and_(result, result, Operand(divisor - 1), SetCC);
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ b(ne, &done);
-      DeoptimizeIf(al, instr->environment());
-    } else {
-      __ b(&done);
+      DeoptimizeIf(eq, instr->environment());
     }
+    __ rsb(result, result, Operand(0));
+    __ b(&done);
     __ bind(&positive_dividend);
-    __ and_(dividend, dividend, Operand(divisor - 1));
+    __ and_(result, dividend, Operand(divisor - 1));
     __ bind(&done);
     return;
   }
@@ -866,8 +878,6 @@
   DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
   DwVfpRegister quotient = double_scratch0();
 
-  ASSERT(result.is(left));
-
   ASSERT(!dividend.is(divisor));
   ASSERT(!dividend.is(quotient));
   ASSERT(!divisor.is(quotient));
@@ -883,6 +893,8 @@
     DeoptimizeIf(eq, instr->environment());
   }
 
+  __ Move(result, left);
+
   // (0 % x) must yield 0 (if x is finite, which is the case here).
   __ cmp(left, Operand(0));
   __ b(eq, &done);
@@ -1067,11 +1079,11 @@
     __ mov(r0, right);
     __ mov(r1, left);
   }
-  TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
+  BinaryOpStub stub(op, OVERWRITE_LEFT);
   __ CallStub(&stub);
   RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
                                          0,
-                                         Safepoint::kNoDeoptimizationIndex);
+                                         Safepoint::kNoLazyDeopt);
   // Overwrite the stored value of r0 with the result of the stub.
   __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
 }
@@ -1079,68 +1091,125 @@
 
 void LCodeGen::DoMulI(LMulI* instr) {
   Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
   Register left = ToRegister(instr->InputAt(0));
-  Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+  LOperand* right_op = instr->InputAt(1);
 
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
-      !instr->InputAt(1)->IsConstantOperand()) {
-    __ orr(ToRegister(instr->TempAt(0)), left, right);
-  }
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
 
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    // scratch:left = left * right.
-    __ smull(left, scratch, left, right);
-    __ mov(ip, Operand(left, ASR, 31));
-    __ cmp(ip, Operand(scratch));
-    DeoptimizeIf(ne, instr->environment());
+  if (right_op->IsConstantOperand() && !can_overflow) {
+    // Use optimized code for specific constants.
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant will be handled separately.
+      // If the constant is negative and left is zero, the result should be -0.
+      __ cmp(left, Operand(0));
+      DeoptimizeIf(eq, instr->environment());
+    }
+
+    switch (constant) {
+      case -1:
+        __ rsb(result, left, Operand(0));
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required; otherwise return 0.
+          __ cmp(left, Operand(0));
+          DeoptimizeIf(mi, instr->environment());
+        }
+        __ mov(result, Operand(0));
+        break;
+      case 1:
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
+
+        if (IsPowerOf2(constant_abs) ||
+            IsPowerOf2(constant_abs - 1) ||
+            IsPowerOf2(constant_abs + 1)) {
+          if (IsPowerOf2(constant_abs)) {
+            int32_t shift = WhichPowerOf2(constant_abs);
+            __ mov(result, Operand(left, LSL, shift));
+          } else if (IsPowerOf2(constant_abs - 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs - 1);
+            __ add(result, left, Operand(left, LSL, shift));
+          } else if (IsPowerOf2(constant_abs + 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs + 1);
+            __ rsb(result, left, Operand(left, LSL, shift));
+          }
+
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ rsb(result, result, Operand(0));
+
+        } else {
+          // Generate standard code.
+          __ mov(ip, Operand(constant));
+          __ mul(result, left, ip);
+        }
+    }
+
   } else {
-    __ mul(left, left, right);
-  }
+    Register right = EmitLoadRegister(right_op, scratch);
+    if (bailout_on_minus_zero) {
+      __ orr(ToRegister(instr->TempAt(0)), left, right);
+    }
 
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    // Bail out if the result is supposed to be negative zero.
-    Label done;
-    __ cmp(left, Operand(0));
-    __ b(ne, &done);
-    if (instr->InputAt(1)->IsConstantOperand()) {
-      if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
-        DeoptimizeIf(al, instr->environment());
-      }
+    if (can_overflow) {
+      // scratch:result = left * right.
+      __ smull(result, scratch, left, right);
+      __ cmp(scratch, Operand(result, ASR, 31));
+      DeoptimizeIf(ne, instr->environment());
     } else {
-      // Test the non-zero operand for negative sign.
+      __ mul(result, left, right);
+    }
+
+    if (bailout_on_minus_zero) {
+      // Bail out if the result is supposed to be negative zero.
+      Label done;
+      __ cmp(result, Operand(0));
+      __ b(ne, &done);
       __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
       DeoptimizeIf(mi, instr->environment());
+      __ bind(&done);
     }
-    __ bind(&done);
   }
 }
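As a reference for the two tricks used in DoMulI above, here is a small C++ sketch (illustrative only, not part of the patch): the branchless absolute value applied to the constant, and the overflow test that compares the high word of the 64-bit product with the sign extension of the low word, which is what the smull/cmp pair encodes.

#include <cstdint>

// |c| without a branch: mask is 0 for non-negative c and -1 for negative c.
uint32_t BranchlessAbs(int32_t c) {
  int32_t mask = c >> 31;                       // assumes arithmetic shift
  return static_cast<uint32_t>((c + mask) ^ mask);
}

// True iff a * b does not fit in 32 bits, mirroring smull + cmp(hi, lo ASR 31).
bool MulOverflows32(int32_t a, int32_t b) {
  int64_t product = static_cast<int64_t>(a) * b;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  return hi != (lo >> 31);
}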
 
 
 void LCodeGen::DoBitI(LBitI* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
-  Register result = ToRegister(left);
-  Operand right_operand(no_reg);
+  LOperand* left_op = instr->InputAt(0);
+  LOperand* right_op = instr->InputAt(1);
+  ASSERT(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
 
-  if (right->IsStackSlot() || right->IsArgument()) {
-    Register right_reg = EmitLoadRegister(right, ip);
-    right_operand = Operand(right_reg);
+  if (right_op->IsStackSlot() || right_op->IsArgument()) {
+    right = Operand(EmitLoadRegister(right_op, ip));
   } else {
-    ASSERT(right->IsRegister() || right->IsConstantOperand());
-    right_operand = ToOperand(right);
+    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
   }
 
   switch (instr->op()) {
     case Token::BIT_AND:
-      __ and_(result, ToRegister(left), right_operand);
+      __ and_(result, left, right);
       break;
     case Token::BIT_OR:
-      __ orr(result, ToRegister(left), right_operand);
+      __ orr(result, left, right);
       break;
     case Token::BIT_XOR:
-      __ eor(result, ToRegister(left), right_operand);
+      __ eor(result, left, right);
       break;
     default:
       UNREACHABLE();
@@ -1150,54 +1219,62 @@
 
 
 void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->InputAt(1);
+  Register left = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
   Register scratch = scratch0();
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
-  Register result = ToRegister(left);
-  if (right->IsRegister()) {
-    // Mask the right operand.
-    __ and_(scratch, ToRegister(right), Operand(0x1F));
+  if (right_op->IsRegister()) {
+    // Mask the shift amount held in right_op.
+    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
     switch (instr->op()) {
       case Token::SAR:
-        __ mov(result, Operand(result, ASR, scratch));
+        __ mov(result, Operand(left, ASR, scratch));
         break;
       case Token::SHR:
         if (instr->can_deopt()) {
-          __ mov(result, Operand(result, LSR, scratch), SetCC);
+          __ mov(result, Operand(left, LSR, scratch), SetCC);
           DeoptimizeIf(mi, instr->environment());
         } else {
-          __ mov(result, Operand(result, LSR, scratch));
+          __ mov(result, Operand(left, LSR, scratch));
         }
         break;
       case Token::SHL:
-        __ mov(result, Operand(result, LSL, scratch));
+        __ mov(result, Operand(left, LSL, scratch));
         break;
       default:
         UNREACHABLE();
         break;
     }
   } else {
-    int value = ToInteger32(LConstantOperand::cast(right));
+    // Mask the constant shift amount.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
     switch (instr->op()) {
       case Token::SAR:
         if (shift_count != 0) {
-          __ mov(result, Operand(result, ASR, shift_count));
+          __ mov(result, Operand(left, ASR, shift_count));
+        } else {
+          __ Move(result, left);
         }
         break;
       case Token::SHR:
-        if (shift_count == 0 && instr->can_deopt()) {
-          __ tst(result, Operand(0x80000000));
-          DeoptimizeIf(ne, instr->environment());
+        if (shift_count != 0) {
+          __ mov(result, Operand(left, LSR, shift_count));
         } else {
-          __ mov(result, Operand(result, LSR, shift_count));
+          if (instr->can_deopt()) {
+            __ tst(left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr->environment());
+          }
+          __ Move(result, left);
         }
         break;
       case Token::SHL:
         if (shift_count != 0) {
-          __ mov(result, Operand(result, LSL, shift_count));
+          __ mov(result, Operand(left, LSL, shift_count));
+        } else {
+          __ Move(result, left);
         }
         break;
       default:
@@ -1211,16 +1288,16 @@
 void LCodeGen::DoSubI(LSubI* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
-  ASSERT(left->Equals(instr->result()));
+  LOperand* result = instr->result();
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   SBit set_cond = can_overflow ? SetCC : LeaveCC;
 
   if (right->IsStackSlot() || right->IsArgument()) {
     Register right_reg = EmitLoadRegister(right, ip);
-    __ sub(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
+    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   } else {
     ASSERT(right->IsRegister() || right->IsConstantOperand());
-    __ sub(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
+    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   }
 
   if (can_overflow) {
@@ -1239,7 +1316,7 @@
   ASSERT(instr->result()->IsDoubleRegister());
   DwVfpRegister result = ToDoubleRegister(instr->result());
   double v = instr->value();
-  __ vmov(result, v);
+  __ Vmov(result, v);
 }
 
 
@@ -1256,17 +1333,24 @@
 }
 
 
-void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
+void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
   Register result = ToRegister(instr->result());
   Register array = ToRegister(instr->InputAt(0));
-  __ ldr(result, FieldMemOperand(array, ExternalArray::kLengthOffset));
+  __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
 }
 
 
-void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
   Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->InputAt(0));
-  __ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
+  Register input = ToRegister(instr->InputAt(0));
+
+  // Load map into |result|.
+  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Load the map's "bit field 2" into |result|. We only need the first byte,
+  // but the following bit field extraction takes care of that anyway.
+  __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
 }
 
 
@@ -1274,15 +1358,16 @@
   Register input = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
   Register map = ToRegister(instr->TempAt(0));
-  ASSERT(input.is(result));
   Label done;
 
   // If the object is a smi return the object.
   __ tst(input, Operand(kSmiTagMask));
+  __ Move(result, input, eq);
   __ b(eq, &done);
 
   // If the object is not a value type, return the object.
   __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
+  __ Move(result, input, ne);
   __ b(ne, &done);
   __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
 
@@ -1291,9 +1376,9 @@
 
 
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->Equals(instr->result()));
-  __ mvn(ToRegister(input), Operand(ToRegister(input)));
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  __ mvn(result, Operand(input));
 }
 
 
@@ -1311,16 +1396,16 @@
 void LCodeGen::DoAddI(LAddI* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
-  ASSERT(left->Equals(instr->result()));
+  LOperand* result = instr->result();
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   SBit set_cond = can_overflow ? SetCC : LeaveCC;
 
   if (right->IsStackSlot() || right->IsArgument()) {
     Register right_reg = EmitLoadRegister(right, ip);
-    __ add(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
+    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   } else {
     ASSERT(right->IsRegister() || right->IsConstantOperand());
-    __ add(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
+    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   }
 
   if (can_overflow) {
@@ -1332,30 +1417,31 @@
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
   DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+  DoubleRegister result = ToDoubleRegister(instr->result());
   switch (instr->op()) {
     case Token::ADD:
-      __ vadd(left, left, right);
+      __ vadd(result, left, right);
       break;
     case Token::SUB:
-      __ vsub(left, left, right);
+      __ vsub(result, left, right);
       break;
     case Token::MUL:
-      __ vmul(left, left, right);
+      __ vmul(result, left, right);
       break;
     case Token::DIV:
-      __ vdiv(left, left, right);
+      __ vdiv(result, left, right);
       break;
     case Token::MOD: {
       // Save r0-r3 on the stack.
       __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
 
-      __ PrepareCallCFunction(4, scratch0());
-      __ vmov(r0, r1, left);
-      __ vmov(r2, r3, right);
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ SetCallCDoubleArguments(left, right);
       __ CallCFunction(
-          ExternalReference::double_fp_operation(Token::MOD, isolate()), 4);
+          ExternalReference::double_fp_operation(Token::MOD, isolate()),
+          0, 2);
       // Move the result in the double result register.
-      __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
+      __ GetCFunctionDoubleResult(result);
 
       // Restore r0-r3.
       __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
@@ -1373,7 +1459,7 @@
   ASSERT(ToRegister(instr->InputAt(1)).is(r0));
   ASSERT(ToRegister(instr->result()).is(r0));
 
-  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
@@ -1410,7 +1496,7 @@
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  Representation r = instr->hydrogen()->representation();
+  Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
     Register reg = ToRegister(instr->InputAt(0));
     __ cmp(reg, Operand(0));
@@ -1426,97 +1512,112 @@
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->InputAt(0));
-    if (instr->hydrogen()->type().IsBoolean()) {
-      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-      __ cmp(reg, ip);
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
       EmitBranch(true_block, false_block, eq);
+    } else if (type.IsSmi()) {
+      __ cmp(reg, Operand(0));
+      EmitBranch(true_block, false_block, ne);
     } else {
       Label* true_label = chunk_->GetAssemblyLabel(true_block);
       Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      __ cmp(reg, ip);
-      __ b(eq, false_label);
-      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-      __ cmp(reg, ip);
-      __ b(eq, true_label);
-      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-      __ cmp(reg, ip);
-      __ b(eq, false_label);
-      __ cmp(reg, Operand(0));
-      __ b(eq, false_label);
-      __ tst(reg, Operand(kSmiTagMask));
-      __ b(eq, true_label);
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
 
-      // Test double values. Zero and NaN are false.
-      Label call_stub;
-      DoubleRegister dbl_scratch = d0;
-      Register scratch = scratch0();
-      __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-      __ cmp(scratch, Operand(ip));
-      __ b(ne, &call_stub);
-      __ sub(ip, reg, Operand(kHeapObjectTag));
-      __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
-      __ VFPCompareAndLoadFlags(dbl_scratch, 0.0, scratch);
-      __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
-      __ b(ne, false_label);
-      __ b(true_label);
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+        __ b(eq, false_label);
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+        __ b(eq, true_label);
+        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+        __ b(eq, false_label);
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ CompareRoot(reg, Heap::kNullValueRootIndex);
+        __ b(eq, false_label);
+      }
 
-      // The conversion stub doesn't cause garbage collections so it's
-      // safe to not record a safepoint after the call.
-      __ bind(&call_stub);
-      ToBooleanStub stub(reg);
-      RegList saved_regs = kJSCallerSaved | kCalleeSaved;
-      __ stm(db_w, sp, saved_regs);
-      __ CallStub(&stub);
-      __ cmp(reg, Operand(0));
-      __ ldm(ia_w, sp, saved_regs);
-      EmitBranch(true_block, false_block, ne);
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all others -> true.
+        __ cmp(reg, Operand(0));
+        __ b(eq, false_label);
+        __ JumpIfSmi(reg, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ tst(reg, Operand(kSmiTagMask));
+        DeoptimizeIf(eq, instr->environment());
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ tst(ip, Operand(1 << Map::kIsUndetectable));
+          __ b(ne, false_label);
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+        __ b(ge, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+        __ b(ge, &not_string);
+        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
+        __ cmp(ip, Operand(0));
+        __ b(ne, true_label);
+        __ b(false_label);
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DoubleRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+        __ b(ne, &not_heap_number);
+        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
+        __ b(vs, false_label);  // NaN -> false.
+        __ b(eq, false_label);  // +0, -0 -> false.
+        __ b(true_label);
+        __ bind(&not_heap_number);
+      }
+
+      // We've seen something for the first time -> deopt.
+      DeoptimizeIf(al, instr->environment());
     }
   }
 }
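The branch above open-codes JavaScript's ToBoolean for the input types the ToBooleanStub has observed so far; a rough C++ sketch of those per-type rules (illustrative only, not part of the patch; the undetectable-object case corresponds to the Map::kIsUndetectable bit test):

#include <cstdint>
#include <string>

// One helper per expected.Contains(...) case in the branch above.
bool ToBooleanSmi(int32_t value)           { return value != 0; }     // 0 -> false
bool ToBooleanOddball(bool is_true)        { return is_true; }        // undefined/null/false -> false
bool ToBooleanString(const std::string& s) { return !s.empty(); }     // "" -> false
bool ToBooleanHeapNumber(double value) {
  return value == value && value != 0.0;   // NaN, +0 and -0 -> false
}
bool ToBooleanSpecObject(bool is_undetectable) { return !is_undetectable; }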
 
 
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
   block = chunk_->LookupDestination(block);
   int next_block = GetNextEmittedBlock(current_block_);
   if (block != next_block) {
-    // Perform stack overflow check if this goto needs it before jumping.
-    if (deferred_stack_check != NULL) {
-      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-      __ cmp(sp, Operand(ip));
-      __ b(hs, chunk_->GetAssemblyLabel(block));
-      __ jmp(deferred_stack_check->entry());
-      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
-    } else {
-      __ jmp(chunk_->GetAssemblyLabel(block));
-    }
+    __ jmp(chunk_->GetAssemblyLabel(block));
   }
 }
 
 
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
 void LCodeGen::DoGoto(LGoto* instr) {
-  class DeferredStackCheck: public LDeferredCode {
-   public:
-    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
-   private:
-    LGoto* instr_;
-  };
-
-  DeferredStackCheck* deferred = NULL;
-  if (instr->include_stack_check()) {
-    deferred = new DeferredStackCheck(this, instr);
-  }
-  EmitGoto(instr->block_id(), deferred);
+  EmitGoto(instr->block_id());
 }
 
 
@@ -1553,34 +1654,6 @@
 }
 
 
-void LCodeGen::DoCmpID(LCmpID* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  LOperand* result = instr->result();
-  Register scratch = scratch0();
-
-  Label unordered, done;
-  if (instr->is_double()) {
-    // Compare left and right as doubles and load the
-    // resulting flags into the normal status register.
-    __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
-    // If a NaN is involved, i.e. the result is unordered (V set),
-    // jump to unordered to return false.
-    __ b(vs, &unordered);
-  } else {
-    EmitCmpI(left, right);
-  }
-
-  Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
-  __ b(cc, &done);
-
-  __ bind(&unordered);
-  __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
@@ -1603,18 +1676,7 @@
 }
 
 
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
-  Register left = ToRegister(instr->InputAt(0));
-  Register right = ToRegister(instr->InputAt(1));
-  Register result = ToRegister(instr->result());
-
-  __ cmp(left, Operand(right));
-  __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   Register left = ToRegister(instr->InputAt(0));
   Register right = ToRegister(instr->InputAt(1));
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1625,37 +1687,13 @@
 }
 
 
-void LCodeGen::DoIsNull(LIsNull* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(reg, ip);
-  if (instr->is_strict()) {
-    __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
-    __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-  } else {
-    Label true_value, false_value, done;
-    __ b(eq, &true_value);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(ip, reg);
-    __ b(eq, &true_value);
-    __ tst(reg, Operand(kSmiTagMask));
-    __ b(eq, &false_value);
-    // Check for undetectable objects by looking in the bit field in
-    // the map. The object has already been smi checked.
-    Register scratch = result;
-    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
-    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
-    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
-    __ b(ne, &true_value);
-    __ bind(&false_value);
-    __ LoadRoot(result, Heap::kFalseValueRootIndex);
-    __ jmp(&done);
-    __ bind(&true_value);
-    __ LoadRoot(result, Heap::kTrueValueRootIndex);
-    __ bind(&done);
-  }
+  __ cmp(left, Operand(instr->hydrogen()->right()));
+  EmitBranch(true_block, false_block, eq);
 }
 
 
@@ -1680,8 +1718,7 @@
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     __ cmp(reg, ip);
     __ b(eq, true_label);
-    __ tst(reg, Operand(kSmiTagMask));
-    __ b(eq, false_label);
+    __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
     // the map. The object has already been smi checked.
     __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
@@ -1694,13 +1731,13 @@
 
 Condition LCodeGen::EmitIsObject(Register input,
                                  Register temp1,
-                                 Register temp2,
                                  Label* is_not_object,
                                  Label* is_object) {
+  Register temp2 = scratch0();
   __ JumpIfSmi(input, is_not_object);
 
-  __ LoadRoot(temp1, Heap::kNullValueRootIndex);
-  __ cmp(input, temp1);
+  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+  __ cmp(input, temp2);
   __ b(eq, is_object);
 
   // Load map.
@@ -1712,37 +1749,16 @@
 
   // Load instance type and check that it is in object type range.
   __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
-  __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ b(lt, is_not_object);
-  __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
+  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   return le;
 }
 
 
-void LCodeGen::DoIsObject(LIsObject* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register temp = scratch0();
-  Label is_false, is_true, done;
-
-  Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
-  __ b(true_cond, &is_true);
-
-  __ bind(&is_false);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ b(&done);
-
-  __ bind(&is_true);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
   Register temp1 = ToRegister(instr->TempAt(0));
-  Register temp2 = scratch0();
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1750,25 +1766,12 @@
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
   Condition true_cond =
-      EmitIsObject(reg, temp1, temp2, false_label, true_label);
+      EmitIsObject(reg, temp1, false_label, true_label);
 
   EmitBranch(true_block, false_block, true_cond);
 }
 
 
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  Register result = ToRegister(instr->result());
-  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
-  __ tst(input_reg, Operand(kSmiTagMask));
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  Label done;
-  __ b(eq, &done);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1779,7 +1782,22 @@
 }
 
 
-static InstanceType TestType(HHasInstanceType* instr) {
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ tst(temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(true_block, false_block, ne);
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
@@ -1788,7 +1806,7 @@
 }
 
 
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == to) return eq;
@@ -1799,23 +1817,6 @@
 }
 
 
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  Label done;
-  __ tst(input, Operand(kSmiTagMask));
-  __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
-  __ b(eq, &done);
-  __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
-  Condition cond = BranchCondition(instr->hydrogen());
-  __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   Register scratch = scratch0();
   Register input = ToRegister(instr->InputAt(0));
@@ -1825,8 +1826,7 @@
 
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  __ tst(input, Operand(kSmiTagMask));
-  __ b(eq, false_label);
+  __ JumpIfSmi(input, false_label);
 
   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
   EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
@@ -1846,20 +1846,6 @@
 }
 
 
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  __ ldr(scratch,
-         FieldMemOperand(input, String::kHashFieldOffset));
-  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
-  __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
 void LCodeGen::DoHasCachedArrayIndexAndBranch(
     LHasCachedArrayIndexAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
@@ -1885,28 +1871,28 @@
                                Register temp2) {
   ASSERT(!input.is(temp));
   ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
-  __ tst(input, Operand(kSmiTagMask));
-  __ b(eq, is_false);
-  __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
+  __ JumpIfSmi(input, is_false);
+  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
   __ b(lt, is_false);
 
   // Map is now in temp.
   // Functions have class 'Function'.
-  __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
+  __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ b(eq, is_true);
+    __ b(ge, is_true);
   } else {
-    __ b(eq, is_false);
+    __ b(ge, is_false);
   }
 
   // Check if the constructor in the map is a function.
   __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
 
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
 
   // Objects with a non-function constructor have class 'Object'.
   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
@@ -1932,27 +1918,6 @@
 }
 
 
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  ASSERT(input.is(result));
-  Handle<String> class_name = instr->hydrogen()->class_name();
-
-  Label done, is_true, is_false;
-
-  EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
-  __ b(ne, &is_false);
-
-  __ bind(&is_true);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&is_false);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = scratch0();
@@ -1996,20 +1961,6 @@
 }
 
 
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
-  ASSERT(ToRegister(instr->InputAt(0)).is(r0));  // Object is in r0.
-  ASSERT(ToRegister(instr->InputAt(1)).is(r1));  // Function is in r1.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ cmp(r0, Operand(0));
-  EmitBranch(true_block, false_block, eq);
-}
-
-
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   class DeferredInstanceOfKnownGlobal: public LDeferredCode {
    public:
@@ -2017,7 +1968,7 @@
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() {
-      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
 
     Label* map_check() { return &map_check_; }
@@ -2085,8 +2036,8 @@
 }
 
 
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                                Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
   Register result = ToRegister(instr->result());
   ASSERT(result.is(r0));
 
@@ -2118,6 +2069,9 @@
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LEnvironment* env = instr->deoptimization_environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value into the result register slot and
   // restore all registers.
   __ StoreToSafepointRegisterSlot(result, result);
@@ -2164,25 +2118,6 @@
 }
 
 
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
-  Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  // The compare stub expects compare condition and the input operands
-  // reversed for GT and LTE.
-  Condition condition = ComputeCompareCondition(op);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
-  __ cmp(r0, Operand(0));
-  EmitBranch(true_block, false_block, condition);
-}
-
-
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace) {
     // Push the return value on the stack as the parameter.
@@ -2190,7 +2125,7 @@
     __ push(r0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
+  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
   __ add(sp, sp, Operand(sp_delta));
@@ -2289,23 +2224,29 @@
 }
 
 
-void LCodeGen::EmitLoadField(Register result,
-                             Register object,
-                             Handle<Map> type,
-                             Handle<String> name) {
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
   LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
-  int index = lookup.GetLocalFieldIndexFromMap(*type);
-  int offset = index * kPointerSize;
-  if (index < 0) {
-    // Negative property indices are in-object properties, indexed
-    // from the end of the fixed part of the object.
-    __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+      __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+    }
   } else {
-    // Non-negative property indices are in the properties array.
-    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2329,7 +2270,7 @@
       Label next;
       __ cmp(scratch, Operand(map));
       __ b(ne, &next);
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ b(&done);
       __ bind(&next);
     }
@@ -2338,7 +2279,7 @@
     if (instr->hydrogen()->need_generic()) {
       Label generic;
       __ b(ne, &generic);
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ b(&done);
       __ bind(&generic);
       __ mov(r2, Operand(name));
@@ -2346,7 +2287,7 @@
       CallCode(ic, RelocInfo::CODE_TARGET, instr);
     } else {
       DeoptimizeIf(ne, instr->environment());
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
     }
     __ bind(&done);
   }
@@ -2415,7 +2356,7 @@
 
   __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
   if (FLAG_debug_code) {
-    Label done;
+    Label done, fail;
     __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
     __ cmp(scratch, ip);
@@ -2423,11 +2364,18 @@
     __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
     __ cmp(scratch, ip);
     __ b(eq, &done);
-    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
-    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-    __ sub(scratch, scratch, Operand(FIRST_EXTERNAL_ARRAY_TYPE));
-    __ cmp(scratch, Operand(kExternalArrayTypeCount));
-    __ Check(cc, "Check for fast elements failed.");
+    // |scratch| still contains the map of the elements object in |result|.
+    __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+    __ ubfx(scratch, scratch, Map::kElementsKindShift,
+            Map::kElementsKindBitCount);
+    __ cmp(scratch, Operand(FAST_ELEMENTS));
+    __ b(eq, &done);
+    __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ b(lt, &fail);
+    __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ b(le, &done);
+    __ bind(&fail);
+    __ Abort("Check for fast or external elements failed.");
     __ bind(&done);
   }
 }
@@ -2465,58 +2413,129 @@
   Register key = EmitLoadRegister(instr->key(), scratch0());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
-  ASSERT(result.is(elements));
 
   // Load the result.
   __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
   __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
 
   // Check for the hole value.
-  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-  __ cmp(result, scratch);
-  DeoptimizeIf(eq, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, scratch);
+    DeoptimizeIf(eq, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+    LLoadKeyedFastDoubleElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int shift_size =
+      ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+
+  Operand operand = key_is_constant
+      ? Operand(constant_key * (1 << shift_size) +
+                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+      : Operand(key, LSL, shift_size);
+  __ add(elements, elements, operand);
+  if (!key_is_constant) {
+    __ add(elements, elements,
+           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    // TODO(danno): If no hole check is required, there is no need to allocate
+    // a temporary register for the elements; scratch can be used instead.
+    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+    __ cmp(scratch, Operand(kHoleNanUpper32));
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  __ vldr(result, elements, 0);
 }
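For reference, the address arithmetic in the fast-double load above reduces to the following C++ sketch (illustrative only, not part of the patch; the shift size is assumed to be 3 for 8-byte doubles, and the header size and heap-object tag are passed in rather than hard-coded):

#include <cstdint>

// Untagged address of element |key| in a FixedDoubleArray-like backing store.
uintptr_t DoubleElementAddress(uintptr_t tagged_elements, uint32_t key,
                               uint32_t header_size, uint32_t heap_object_tag) {
  const uint32_t kShiftSize = 3;  // log2(sizeof(double))
  return tagged_elements + (static_cast<uintptr_t>(key) << kShiftSize) +
         header_size - heap_object_tag;
}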
 
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
   Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
-  ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
-    CpuFeatures::Scope scope(VFP3);
-    DwVfpRegister result(ToDoubleRegister(instr->result()));
-    __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
-    __ vldr(result.low(), scratch0(), 0);
-    __ vcvt_f64_f32(result, result.low());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
   } else {
-    Register result(ToRegister(instr->result()));
-    switch (array_type) {
-      case kExternalByteArray:
-        __ ldrsb(result, MemOperand(external_pointer, key));
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    CpuFeatures::Scope scope(VFP3);
+    DwVfpRegister result = ToDoubleRegister(instr->result());
+    Operand operand = key_is_constant
+        ? Operand(constant_key * (1 << shift_size))
+        : Operand(key, LSL, shift_size);
+    __ add(scratch0(), external_pointer, operand);
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ vldr(result.low(), scratch0(), 0);
+      __ vcvt_f64_f32(result, result.low());
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+      __ vldr(result, scratch0(), 0);
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand(key_is_constant
+        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+        : MemOperand(external_pointer, key, LSL, shift_size));
+    switch (elements_kind) {
+      case EXTERNAL_BYTE_ELEMENTS:
+        __ ldrsb(result, mem_operand);
         break;
-      case kExternalUnsignedByteArray:
-      case kExternalPixelArray:
-        __ ldrb(result, MemOperand(external_pointer, key));
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ ldrb(result, mem_operand);
         break;
-      case kExternalShortArray:
-        __ ldrsh(result, MemOperand(external_pointer, key, LSL, 1));
+      case EXTERNAL_SHORT_ELEMENTS:
+        __ ldrsh(result, mem_operand);
         break;
-      case kExternalUnsignedShortArray:
-        __ ldrh(result, MemOperand(external_pointer, key, LSL, 1));
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ ldrh(result, mem_operand);
         break;
-      case kExternalIntArray:
-        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+      case EXTERNAL_INT_ELEMENTS:
+        __ ldr(result, mem_operand);
         break;
-      case kExternalUnsignedIntArray:
-        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ ldr(result, mem_operand);
         __ cmp(result, Operand(0x80000000));
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
         // happens, and generate code that returns a double rather than int.
         DeoptimizeIf(cs, instr->environment());
         break;
-      case kExternalFloatArray:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -2582,9 +2601,26 @@
   ASSERT(function.is(r1));  // Required by InvokeFunction.
   ASSERT(ToRegister(instr->result()).is(r0));
 
-  // If the receiver is null or undefined, we have to pass the global object
-  // as a receiver.
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
   Label global_object, receiver_ok;
+
+  // Do not transform the receiver to object for strict mode
+  // functions.
+  __ ldr(scratch,
+         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(scratch,
+         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+  __ tst(scratch,
+         Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
+  __ b(ne, &receiver_ok);
+
+  // Do not transform the receiver to object for builtins.
+  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+  __ b(ne, &receiver_ok);
+
+  // Normal function. Replace undefined or null with global receiver.
   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   __ cmp(receiver, scratch);
   __ b(eq, &global_object);
@@ -2595,12 +2631,14 @@
   // Deoptimize if the receiver is not a JS object.
   __ tst(receiver, Operand(kSmiTagMask));
   DeoptimizeIf(eq, instr->environment());
-  __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
-  DeoptimizeIf(lo, instr->environment());
+  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+  DeoptimizeIf(lt, instr->environment());
   __ jmp(&receiver_ok);
 
   __ bind(&global_object);
   __ ldr(receiver, GlobalObjectOperand());
+  __ ldr(receiver,
+         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
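The receiver handling above boils down to a small decision, sketched here in C++ (illustrative only, not part of the patch): strict-mode and native functions see the receiver unchanged, ordinary functions get the global receiver in place of null or undefined, and a non-object receiver deoptimizes.

enum ReceiverAction { kKeepReceiver, kUseGlobalReceiver, kDeoptimize };

ReceiverAction ClassifyReceiver(bool is_strict_mode, bool is_native,
                                bool is_null_or_undefined,
                                bool is_spec_object) {
  if (is_strict_mode || is_native) return kKeepReceiver;
  if (is_null_or_undefined) return kUseGlobalReceiver;
  return is_spec_object ? kKeepReceiver : kDeoptimize;
}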
 
   // Copy the arguments to this function possibly from the
@@ -2631,16 +2669,14 @@
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is r0, as expected
   // by InvokeFunction.
   v8::internal::ParameterCount actual(receiver);
-  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
@@ -2656,6 +2692,12 @@
 }
 
 
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
 void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, cp);
@@ -2666,13 +2708,11 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ ldr(result,
-         MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
-  __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
 }
 
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
-  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
 }
@@ -2687,7 +2727,8 @@
 
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
-                                 LInstruction* instr) {
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
   // Change context if needed.
   bool change_context =
       (info()->closure()->context() != function->context()) ||
@@ -2707,11 +2748,12 @@
   RecordPosition(pointers->position());
 
   // Invoke function.
+  __ SetCallKind(r5, call_kind);
   __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   __ Call(ip);
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 
   // Restore context.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2721,13 +2763,16 @@
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
   __ mov(r1, Operand(instr->function()));
-  CallKnownFunction(instr->function(), instr->arity(), instr);
+  CallKnownFunction(instr->function(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_METHOD);
 }
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
-  ASSERT(instr->InputAt(0)->Equals(instr->result()));
   Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
   Register scratch = scratch0();
 
   // Deoptimize if not a heap number.
@@ -2741,10 +2786,10 @@
   scratch = no_reg;
   __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   // Check the sign of the argument. If the argument is positive, just
-  // return it. We do not need to patch the stack since |input| and
-  // |result| are the same register and |input| would be restored
-  // unchanged by popping safepoint registers.
+  // return it.
   __ tst(exponent, Operand(HeapNumber::kSignMask));
+  // Move the input to the result if necessary.
+  __ Move(result, input);
   __ b(eq, &done);
 
   // Input is negative. Reverse its sign.
@@ -2784,7 +2829,7 @@
     __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
     __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
 
-    __ StoreToSafepointRegisterSlot(tmp1, input);
+    __ StoreToSafepointRegisterSlot(tmp1, result);
   }
 
   __ bind(&done);
@@ -2793,11 +2838,13 @@
 
 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
   Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
   __ cmp(input, Operand(0));
+  __ Move(result, input, pl);
   // We can make rsb conditional because the previous cmp instruction
   // will clear the V (overflow) flag and rsb won't set this flag
   // if input is positive.
-  __ rsb(input, input, Operand(0), SetCC, mi);
+  __ rsb(result, input, Operand(0), SetCC, mi);
   // Deoptimize on overflow.
   DeoptimizeIf(vs, instr->environment());
 }
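A short C++ illustration (not part of the patch) of the overflow case the conditional rsb above has to guard against: the most negative 32-bit integer has no positive counterpart, so negating it overflows and the code deoptimizes.

#include <cstdint>
#include <limits>

// Math.abs on an int32 input overflows for exactly one value.
bool IntegerAbsOverflows(int32_t x) {
  return x == std::numeric_limits<int32_t>::min();  // -(-2^31) == 2^31 > INT32_MAX
}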
@@ -2817,11 +2864,11 @@
     LUnaryMathOperation* instr_;
   };
 
-  ASSERT(instr->InputAt(0)->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsDouble()) {
     DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
-    __ vabs(input, input);
+    DwVfpRegister result = ToDoubleRegister(instr->result());
+    __ vabs(result, input);
   } else if (r.IsInteger32()) {
     EmitIntegerMathAbs(instr);
   } else {
@@ -2871,49 +2918,81 @@
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
-  Register scratch1 = scratch0();
-  Register scratch2 = result;
-  __ EmitVFPTruncate(kRoundToNearest,
+  Register scratch = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ vmov(result, input.high());
+  __ ubfx(scratch,
+          result,
+          HeapNumber::kExponentShift,
+          HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, Operand(0), LeaveCC, le);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ b(le, &check_sign_on_zero);
+  } else {
+    __ b(le, &done);
+  }
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
+  DeoptimizeIf(ge, instr->environment());
+
+  // Save the original sign for later comparison.
+  __ and_(scratch, result, Operand(HeapNumber::kSignMask));
+
+  __ Vmov(double_scratch0(), 0.5);
+  __ vadd(double_scratch0(), input, double_scratch0());
+
+  // Check the sign of the result: if the sign changed, the input
+  // value was in [-0.5, 0[ and the result should be -0.
+  __ vmov(result, double_scratch0().high());
+  __ eor(result, result, Operand(scratch), SetCC);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(mi, instr->environment());
+  } else {
+    __ mov(result, Operand(0), LeaveCC, mi);
+    __ b(mi, &done);
+  }
+
+  __ EmitVFPTruncate(kRoundToMinusInf,
                      double_scratch0().low(),
-                     input,
-                     scratch1,
-                     scratch2);
+                     double_scratch0(),
+                     result,
+                     scratch);
   DeoptimizeIf(ne, instr->environment());
   __ vmov(result, double_scratch0().low());
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Test for -0.
-    Label done;
     __ cmp(result, Operand(0));
     __ b(ne, &done);
-    __ vmov(scratch1, input.high());
-    __ tst(scratch1, Operand(HeapNumber::kSignMask));
+    __ bind(&check_sign_on_zero);
+    __ vmov(scratch, input.high());
+    __ tst(scratch, Operand(HeapNumber::kSignMask));
     DeoptimizeIf(ne, instr->environment());
-    __ bind(&done);
   }
+  __ bind(&done);
 }
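DoMathRound rounds half towards +infinity by adding 0.5 and truncating towards -infinity, but it first screens the input by its biased exponent: at most kExponentBias - 2 means |x| < 0.5 and the answer is +/- 0, while kExponentBias + 32 or more means the value lies outside ]-2^32, 2^32[ and cannot be converted. A C++ sketch of the fast path only (standard IEEE-754 layout, kExponentBias = 1023); the generated code additionally tracks the sign so that -0 cases can deoptimize when required:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Sketch only: the exponent screen used by DoMathRound.
    int BiasedExponent(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      return static_cast<int>((bits >> 52) & 0x7FF);   // ubfx of the exponent bits
    }

    // True when the lithium code would deoptimize instead of rounding.
    bool RoundNeedsDeopt(double x) {
      return BiasedExponent(x) >= 1023 + 32;   // |x| >= 2^32, also NaN/Infinity
    }

    double MathRound(double x) {
      if (BiasedExponent(x) <= 1023 - 2) {
        return std::copysign(0.0, x);          // |x| < 0.5  ->  +/- 0
      }
      return std::floor(x + 0.5);              // add 0.5, truncate towards -inf
    }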
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
-  ASSERT(ToDoubleRegister(instr->result()).is(input));
-  __ vsqrt(input, input);
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ vsqrt(result, input);
 }
 
 
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
-  Register scratch = scratch0();
-  SwVfpRegister single_scratch = double_scratch0().low();
-  DoubleRegister double_scratch = double_scratch0();
-  ASSERT(ToDoubleRegister(instr->result()).is(input));
-
+  DoubleRegister result = ToDoubleRegister(instr->result());
   // Add +0 to convert -0 to +0.
-  __ mov(scratch, Operand(0));
-  __ vmov(single_scratch, scratch);
-  __ vcvt_f64_s32(double_scratch, single_scratch);
-  __ vadd(input, input, double_scratch);
-  __ vsqrt(input, input);
+  __ vadd(result, input, kDoubleRegZero);
+  __ vsqrt(result, result);
 }
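The vadd with kDoubleRegZero in DoMathPowHalf is not redundant: IEEE sqrt(-0) is -0, but pow(x, 0.5) must return +0 for x = -0, and adding +0 turns -0 into +0 while leaving every other finite input unchanged. A small sketch:

    #include <cmath>

    // Sketch only: why DoMathPowHalf adds +0 before taking the square root.
    double PowHalf(double x) {
      // -0.0 + 0.0 == +0.0; all other values are unchanged by adding +0.
      return std::sqrt(x + 0.0);
    }
    // PowHalf(-0.0) == +0.0, whereas std::sqrt(-0.0) == -0.0.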
 
 
@@ -2925,19 +3004,18 @@
   Representation exponent_type = instr->hydrogen()->right()->representation();
   if (exponent_type.IsDouble()) {
     // Prepare arguments and call C function.
-    __ PrepareCallCFunction(4, scratch);
-    __ vmov(r0, r1, ToDoubleRegister(left));
-    __ vmov(r2, r3, ToDoubleRegister(right));
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left),
+                               ToDoubleRegister(right));
     __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 4);
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
   } else if (exponent_type.IsInteger32()) {
     ASSERT(ToRegister(right).is(r0));
     // Prepare arguments and call C function.
-    __ PrepareCallCFunction(4, scratch);
-    __ mov(r2, ToRegister(right));
-    __ vmov(r0, r1, ToDoubleRegister(left));
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
     __ CallCFunction(
-        ExternalReference::power_double_int_function(isolate()), 4);
+        ExternalReference::power_double_int_function(isolate()), 1, 1);
   } else {
     ASSERT(exponent_type.IsTagged());
     ASSERT(instr->hydrogen()->left()->representation().IsDouble());
@@ -2967,11 +3045,10 @@
 
     // Prepare arguments and call C function.
     __ bind(&call);
-    __ PrepareCallCFunction(4, scratch);
-    __ vmov(r0, r1, ToDoubleRegister(left));
-    __ vmov(r2, r3, result_reg);
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
     __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 4);
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
   }
   // Store the result in the result register.
   __ GetCFunctionDoubleResult(result_reg);
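SetCallCDoubleArguments hides the calling-convention difference: under the hard-float AAPCS the doubles stay in VFP registers (d0, d1), while under the default soft-float convention each double travels in a core-register pair, which the removed code did by hand with vmov r0, r1, ... . A hedged sketch of the soft-float split, assuming little-endian word order:

    #include <cstdint>
    #include <cstring>

    // Sketch only: how a double is split across two 32-bit core registers
    // for a soft-float C call (vmov r0, r1, dN). Little-endian: r0 gets the
    // low word, r1 the high word.
    void SplitDoubleForSoftFloatCall(double value, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      *lo = static_cast<uint32_t>(bits);         // -> r0 (or r2 for a second arg)
      *hi = static_cast<uint32_t>(bits >> 32);   // -> r1 (or r3)
    }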
@@ -3035,12 +3112,25 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(r1));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
   Handle<Code> ic =
-      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
@@ -3050,10 +3140,11 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ mov(r2, Operand(instr->name()));
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, mode, instr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
@@ -3063,7 +3154,7 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ Drop(1);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3074,10 +3165,11 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic =
-      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP);
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ mov(r2, Operand(instr->name()));
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  CallCode(ic, mode, instr);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
@@ -3085,7 +3177,7 @@
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
   __ mov(r1, Operand(instr->target()));
-  CallKnownFunction(instr->target(), instr->arity(), instr);
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
 
@@ -3181,39 +3273,104 @@
 }
 
 
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+    LStoreKeyedFastDoubleElement* instr) {
+  DwVfpRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  Label not_nan;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  Operand operand = key_is_constant
+      ? Operand(constant_key * (1 << shift_size) +
+                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+      : Operand(key, LSL, shift_size);
+  __ add(scratch, elements, operand);
+  if (!key_is_constant) {
+    __ add(scratch, scratch,
+           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
+  // Check for NaN. All NaNs must be canonicalized.
+  __ VFPCompareAndSetFlags(value, value);
+
+  // Only load the canonical NaN if the comparison above set the overflow flag.
+  __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
+
+  __ bind(&not_nan);
+  __ vstr(value, scratch, 0);
+}
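FixedDoubleArray reserves one specific NaN bit pattern ("the hole") to mark missing elements, so any NaN produced by user code has to be replaced by a canonical NaN before it is stored; the VFPCompareAndSetFlags/Vmov(..., vs) pair above does this without a branch, since a VFP compare of a value with itself signals unordered (overflow flag set) only for NaN. A C++ sketch of the same canonicalization, with an assumed canonical bit pattern:

    #include <cstdint>
    #include <cstring>

    // Sketch only. The exact canonical/hole bit patterns are V8 internals;
    // kCanonicalNaN below is just "some quiet NaN that is not the hole".
    double CanonicalizeBeforeStore(double value) {
      if (value != value) {                      // NaN compares unequal to itself
        const uint64_t kCanonicalNaN = 0x7FF8000000000000ull;  // assumed pattern
        double canonical;
        std::memcpy(&canonical, &kCanonicalNaN, sizeof(canonical));
        return canonical;
      }
      return value;
    }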
+
+
 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
 
   Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
-  ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister value(ToDoubleRegister(instr->value()));
-    __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
-    __ vcvt_f32_f64(double_scratch0().low(), value);
-    __ vstr(double_scratch0().low(), scratch0(), 0);
+    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
+                                    : Operand(key, LSL, shift_size));
+    __ add(scratch0(), external_pointer, operand);
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ vcvt_f32_f64(double_scratch0().low(), value);
+      __ vstr(double_scratch0().low(), scratch0(), 0);
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+      __ vstr(value, scratch0(), 0);
+    }
   } else {
     Register value(ToRegister(instr->value()));
-    switch (array_type) {
-      case kExternalPixelArray:
-        // Clamp the value to [0..255].
-        __ Usat(value, 8, Operand(value));
-        __ strb(value, MemOperand(external_pointer, key));
+    MemOperand mem_operand(key_is_constant
+        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+        : MemOperand(external_pointer, key, LSL, shift_size));
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ strb(value, mem_operand);
         break;
-      case kExternalByteArray:
-      case kExternalUnsignedByteArray:
-        __ strb(value, MemOperand(external_pointer, key));
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ strh(value, mem_operand);
         break;
-      case kExternalShortArray:
-      case kExternalUnsignedShortArray:
-        __ strh(value, MemOperand(external_pointer, key, LSL, 1));
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ str(value, mem_operand);
         break;
-      case kExternalIntArray:
-      case kExternalUnsignedIntArray:
-        __ str(value, MemOperand(external_pointer, key, LSL, 2));
-        break;
-      case kExternalFloatArray:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3233,6 +3390,14 @@
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  __ push(ToRegister(instr->left()));
+  __ push(ToRegister(instr->right()));
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
@@ -3243,97 +3408,82 @@
     LStringCharCodeAt* instr_;
   };
 
-  Register scratch = scratch0();
   Register string = ToRegister(instr->string());
-  Register index = no_reg;
-  int const_index = -1;
-  if (instr->index()->IsConstantOperand()) {
-    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
-    if (!Smi::IsValid(const_index)) {
-      // Guaranteed to be out of bounds because of the assert above.
-      // So the bounds check that must dominate this instruction must
-      // have deoptimized already.
-      if (FLAG_debug_code) {
-        __ Abort("StringCharCodeAt: out of bounds index.");
-      }
-      // No code needs to be generated.
-      return;
-    }
-  } else {
-    index = ToRegister(instr->index());
-  }
+  Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
 
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  Label flat_string, ascii_string, done;
-
   // Fetch the instance type of the receiver into result register.
   __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result, Operand(kStringRepresentationMask));
-  __ b(eq, &flat_string);
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ tst(result, Operand(kIsIndirectStringMask));
+  __ b(eq, &check_sequential);
 
-  // Handle non-flat strings.
-  __ tst(result, Operand(kIsConsStringMask));
-  __ b(eq, deferred->entry());
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ tst(result, Operand(kSlicedNotConsMask));
+  __ b(eq, &cons_string);
 
-  // ConsString.
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ add(index, index, Operand(result, ASR, kSmiTagSize));
+  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle conses.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
-  __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ bind(&cons_string);
+  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
   __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(scratch, ip);
+  __ cmp(result, ip);
   __ b(ne, deferred->entry());
   // Get the first of the two strings and load its instance type.
   __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
   __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result, Operand(kStringRepresentationMask));
   __ b(ne, deferred->entry());
 
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT(kAsciiStringTag != 0);
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ tst(result, Operand(kStringEncodingMask));
   __ b(ne, &ascii_string);
 
-  // 2-byte string.
-  // Load the 2-byte character code into the result register.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  if (instr->index()->IsConstantOperand()) {
-    __ ldrh(result,
-            FieldMemOperand(string,
-                            SeqTwoByteString::kHeaderSize + 2 * const_index));
-  } else {
-    __ add(scratch,
-           string,
-           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-    __ ldrh(result, MemOperand(scratch, index, LSL, 1));
-  }
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  Label done;
+  __ add(result,
+         string,
+         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ ldrh(result, MemOperand(result, index, LSL, 1));
   __ jmp(&done);
 
   // ASCII string.
   // Load the byte into the result register.
   __ bind(&ascii_string);
-  if (instr->index()->IsConstantOperand()) {
-    __ ldrb(result, FieldMemOperand(string,
-                                    SeqAsciiString::kHeaderSize + const_index));
-  } else {
-    __ add(scratch,
-           string,
-           Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-    __ ldrb(result, MemOperand(scratch, index));
-  }
+  __ add(result,
+         string,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ ldrb(result, MemOperand(result, index));
+
   __ bind(&done);
   __ bind(deferred->exit());
 }
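The rewritten fast path only has to unwrap one level of indirection: a sliced string contributes an offset and points at its parent, a cons string is accepted only when its second half is the empty string (otherwise the code falls back to the runtime, which flattens it), and after unwrapping the string must be sequential, with the encoding bit selecting byte or half-word loads. A simplified C++ sketch of that dispatch, using a made-up string representation rather than V8's instance-type bits:

    #include <cstdint>

    // Sketch only: hypothetical tags and fields standing in for V8's layouts.
    enum class Shape { kSequential, kCons, kSliced };
    enum class Encoding { kAscii, kTwoByte };

    struct Str {
      Shape shape;
      Encoding encoding;
      const Str* parent;   // sliced: parent string, cons: first component
      const Str* second;   // cons only; nullptr models the empty string
      int offset;          // sliced only
      const void* chars;   // sequential only
    };

    // Returns 0 and sets *go_slow where the generated code would jump to the
    // deferred (runtime) path instead.
    uint16_t CharCodeAt(const Str* s, int index, bool* go_slow) {
      *go_slow = false;
      if (s->shape == Shape::kSliced) {
        index += s->offset;           // fold the slice offset into the index
        s = s->parent;
      } else if (s->shape == Shape::kCons) {
        if (s->second != nullptr) {   // only flat cons strings stay on the fast path
          *go_slow = true;
          return 0;
        }
        s = s->parent;
      }
      if (s->shape != Shape::kSequential) { *go_slow = true; return 0; }
      if (s->encoding == Encoding::kAscii) {
        return static_cast<const uint8_t*>(s->chars)[index];
      }
      return static_cast<const uint16_t*>(s->chars)[index];
    }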
@@ -3466,8 +3616,8 @@
 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
   Label slow;
   Register reg = ToRegister(instr->InputAt(0));
-  DoubleRegister dbl_scratch = d0;
-  SwVfpRegister flt_scratch = s0;
+  DoubleRegister dbl_scratch = double_scratch0();
+  SwVfpRegister flt_scratch = dbl_scratch.low();
 
   // Preserve the value of all registers.
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -3561,10 +3711,13 @@
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   if (instr->needs_check()) {
-    __ tst(ToRegister(input), Operand(kSmiTagMask));
-    DeoptimizeIf(ne, instr->environment());
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, SmiUntag will set the carry flag.
+    __ SmiUntag(ToRegister(input), SetCC);
+    DeoptimizeIf(cs, instr->environment());
+  } else {
+    __ SmiUntag(ToRegister(input));
   }
-  __ SmiUntag(ToRegister(input));
 }
 
 
@@ -3573,14 +3726,13 @@
                                 bool deoptimize_on_undefined,
                                 LEnvironment* env) {
   Register scratch = scratch0();
-  SwVfpRegister flt_scratch = s0;
-  ASSERT(!result_reg.is(d0));
+  SwVfpRegister flt_scratch = double_scratch0().low();
+  ASSERT(!result_reg.is(double_scratch0()));
 
   Label load_smi, heap_number, done;
 
   // Smi check.
-  __ tst(input_reg, Operand(kSmiTagMask));
-  __ b(eq, &load_smi);
+  __ JumpIfSmi(input_reg, &load_smi);
 
   // Heap number map check.
   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3641,6 +3793,12 @@
 
   Label done;
 
+  // The input was optimistically untagged; revert it.
+  // The carry flag is set when we reach this deferred code as we just executed
+  // SmiUntag(heap_object, SetCC)
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ adc(input_reg, input_reg, Operand(input_reg));
+
   // Heap number map check.
   __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@@ -3713,13 +3871,12 @@
 
   DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
 
-  // Smi check.
-  __ tst(input_reg, Operand(kSmiTagMask));
-  __ b(ne, deferred->entry());
-
-  // Smi to int32 conversion
-  __ SmiUntag(input_reg);  // Untag smi.
-
+  // Optimistically untag the input.
+  // If the input is a HeapObject, SmiUntag will set the carry flag.
+  __ SmiUntag(input_reg, SetCC);
+  // Branch to deferred code if the input was tagged.
+  // The deferred code will take care of restoring the tag.
+  __ b(cs, deferred->entry());
   __ bind(deferred->exit());
 }
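With kSmiTag == 0, kSmiTagSize == 1 and kHeapObjectTag == 1, a smi is its value shifted left by one and a heap pointer always has bit 0 set. SmiUntag(reg, SetCC) is an arithmetic shift right by one that leaves the shifted-out bit in the carry flag, so carry set means "this was a pointer, not a smi"; the deferred code then undoes the shift with adc reg, reg, reg, which computes reg + reg + carry = (reg << 1) | 1 and restores the original tagged pointer. A C++ sketch of the bit trick, modeling the carry flag explicitly:

    #include <cstdint>

    // Sketch only. Tagged word: smi = value << 1 (bit 0 clear),
    // heap object = pointer | 1. Arithmetic shift assumed, as on ARM.
    uint32_t SmiUntagSetCC(uint32_t tagged, bool* carry) {
      *carry = (tagged & 1) != 0;   // the bit shifted out by "asr #1"
      return static_cast<uint32_t>(static_cast<int32_t>(tagged) >> 1);
    }

    uint32_t RestoreTag(uint32_t untagged, bool carry) {
      // adc r, r, r  ==  r + r + C  ==  (r << 1) | C; with the carry set this
      // re-creates the original heap-object pointer.
      return untagged + untagged + (carry ? 1u : 0u);
    }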
 
@@ -3744,7 +3901,6 @@
   Register scratch1 = scratch0();
   Register scratch2 = ToRegister(instr->TempAt(0));
   DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
-  DwVfpRegister double_scratch = double_scratch0();
   SwVfpRegister single_scratch = double_scratch0().low();
 
   Label done;
@@ -3792,22 +3948,41 @@
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register scratch = scratch0();
-  InstanceType first = instr->hydrogen()->first();
-  InstanceType last = instr->hydrogen()->last();
 
   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ cmp(scratch, Operand(first));
 
-  // If there is only one type in the interval check for equality.
-  if (first == last) {
-    DeoptimizeIf(ne, instr->environment());
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmp(scratch, Operand(first));
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr->environment());
+    } else {
+      DeoptimizeIf(lo, instr->environment());
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmp(scratch, Operand(last));
+        DeoptimizeIf(hi, instr->environment());
+      }
+    }
   } else {
-    DeoptimizeIf(lo, instr->environment());
-    // Omit check for the last type.
-    if (last != LAST_TYPE) {
-      __ cmp(scratch, Operand(last));
-      DeoptimizeIf(hi, instr->environment());
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ tst(scratch, Operand(mask));
+      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+    } else {
+      __ and_(scratch, scratch, Operand(mask));
+      __ cmp(scratch, Operand(tag));
+      DeoptimizeIf(ne, instr->environment());
     }
   }
 }
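The non-interval form of the check above evaluates (instance_type & mask) == tag; when the mask is a single bit and the tag is either zero or that same bit, the and/cmp pair collapses into one tst, with only the deopt condition flipping. A small sketch of both variants:

    #include <cstdint>

    // Sketch only: the two ways DoCheckInstanceType evaluates a mask/tag check.
    bool CheckMaskAndTag(uint8_t instance_type, uint8_t mask, uint8_t tag) {
      return (instance_type & mask) == tag;      // general and_ + cmp form
    }

    bool CheckSingleBit(uint8_t instance_type, uint8_t mask, uint8_t tag) {
      // Valid when mask is a power of two and tag is 0 or equal to mask:
      // a single tst answers the question, only the branch condition differs.
      bool bit_set = (instance_type & mask) != 0;   // tst
      return tag == 0 ? !bit_set : bit_set;         // ne-deopt vs eq-deopt
    }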
@@ -3832,6 +4007,59 @@
 }
 
 
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number
+  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(factory()->heap_number_map()));
+  __ b(eq, &heap_number);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ cmp(input_reg, Operand(factory()->undefined_value()));
+  DeoptimizeIf(ne, instr->environment());
+  __ mov(result_reg, Operand(0));
+  __ jmp(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ vldr(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  // smi
+  __ bind(&is_smi);
+  __ SmiUntag(result_reg, input_reg);
+  __ ClampUint8(result_reg, result_reg);
+
+  __ bind(&done);
+}
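DoClampTToUint8 accepts three tagged inputs: a smi is untagged and clamped as an integer, a heap number is loaded and clamped as a double, and undefined clamps to 0; anything else deoptimizes. A hedged C++ sketch of the value-level semantics used for pixel-style stores (the exact rounding inside ClampDoubleToUint8 is a macro-assembler detail; round-to-nearest is assumed here):

    #include <cmath>
    #include <cstdint>

    // Sketch only: clamping to the byte range [0, 255].
    uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;        // NaN and non-positive values -> 0
      if (value >= 255.0) return 255;
      return static_cast<uint8_t>(std::lround(value));  // assumed rounding mode
    }

    uint8_t ClampInt32ToUint8(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }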
+
+
 void LCodeGen::LoadHeapObject(Register result,
                               Handle<HeapObject> object) {
   if (heap()->InNewSpace(*object)) {
@@ -4009,29 +4237,6 @@
 }
 
 
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Label true_label;
-  Label false_label;
-  Label done;
-
-  Condition final_branch_condition = EmitTypeofIs(&true_label,
-                                                  &false_label,
-                                                  input,
-                                                  instr->type_literal());
-  __ b(final_branch_condition, &true_label);
-  __ bind(&false_label);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ b(&done);
-
-  __ bind(&true_label);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4075,6 +4280,10 @@
     __ CompareRoot(input, Heap::kFalseValueRootIndex);
     final_branch_condition = eq;
 
+  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    final_branch_condition = eq;
+
   } else if (type_name->Equals(heap()->undefined_symbol())) {
     __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
     __ b(eq, true_label);
@@ -4087,17 +4296,21 @@
 
   } else if (type_name->Equals(heap()->function_symbol())) {
     __ JumpIfSmi(input, false_label);
-    __ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE);
+    __ CompareObjectType(input, input, scratch,
+                         FIRST_CALLABLE_SPEC_OBJECT_TYPE);
     final_branch_condition = ge;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
-    __ CompareRoot(input, Heap::kNullValueRootIndex);
-    __ b(eq, true_label);
-    __ CompareObjectType(input, input, scratch, FIRST_JS_OBJECT_TYPE);
-    __ b(lo, false_label);
-    __ CompareInstanceType(input, scratch, FIRST_FUNCTION_CLASS_TYPE);
-    __ b(hs, false_label);
+    if (!FLAG_harmony_typeof) {
+      __ CompareRoot(input, Heap::kNullValueRootIndex);
+      __ b(eq, true_label);
+    }
+    __ CompareObjectType(input, input, scratch,
+                         FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ b(lt, false_label);
+    __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ b(gt, false_label);
     // Check for undetectable objects => false.
     __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
     __ tst(ip, Operand(1 << Map::kIsUndetectable));
@@ -4113,26 +4326,6 @@
 }
 
 
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
-  Register result = ToRegister(instr->result());
-  Label true_label;
-  Label false_label;
-  Label done;
-
-  EmitIsConstructCall(result, scratch0());
-  __ b(eq, &true_label);
-
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ b(&done);
-
-
-  __ bind(&true_label);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp1 = ToRegister(instr->TempAt(0));
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4162,9 +4355,29 @@
 }
 
 
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  int patch_size = Deoptimizer::patch_size();
+  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+    while (padding_size > 0) {
+      __ nop();
+      padding_size -= Assembler::kInstrSize;
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  // No code for lazy bailout instruction. Used to capture environment after a
-  // call for populating the safepoint data with deoptimization data.
+  EnsureSpaceForLazyDeopt();
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
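Lazy deoptimization patches the call site after the fact, so two patchable points must be at least Deoptimizer::patch_size() bytes apart; EnsureSpaceForLazyDeopt pads with nops, in whole kInstrSize steps on ARM, whenever the current offset is still too close to the previous lazy-deopt pc. A sketch of the padding arithmetic:

    // Sketch only: how many nops EnsureSpaceForLazyDeopt has to emit.
    // kInstrSize is 4 on ARM; patch_size is a Deoptimizer constant.
    int LazyDeoptPaddingInstructions(int current_pc, int last_lazy_deopt_pc,
                                     int patch_size, int instr_size) {
      int gap = current_pc - last_lazy_deopt_pc;
      if (gap >= patch_size) return 0;     // already far enough apart
      int padding = patch_size - gap;      // bytes still missing
      return padding / instr_size;         // the ASSERT guarantees divisibility
    }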
 
 
@@ -4181,25 +4394,78 @@
   __ Push(object, key, strict);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
-  __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+  Register obj = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  __ Push(key, obj);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  // Perform stack overflow check.
-  Label ok;
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  __ cmp(sp, Operand(ip));
-  __ b(hs, &ok);
-  StackCheckStub stub;
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ bind(&ok);
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LStackCheck* instr_;
+  };
+
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(hs, &done);
+    StackCheckStub stub;
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    EnsureSpaceForLazyDeopt();
+    __ bind(&done);
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  } else {
+    ASSERT(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new DeferredStackCheck(this, instr);
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(lo, deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt();
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and the safepoint in
+    // the deferred code.
+  }
 }
 
 
@@ -4214,12 +4480,14 @@
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(osr_pc_offset_ == -1);
   osr_pc_offset_ = masm()->pc_offset();
 }
 
 
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 092e7b7..0e34c9f 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -51,12 +51,14 @@
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4),
+        deopt_jump_table_(4),
         deoptimization_literals_(8),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
+        last_lazy_deopt_pc_(0),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -107,14 +109,15 @@
   void DoDeferredNumberTagI(LNumberTagI* instr);
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
-  void DoDeferredStackCheck(LGoto* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                        Label* map_check);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
 
   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -146,7 +149,7 @@
   HGraph* graph() const { return chunk_->graph(); }
 
   Register scratch0() { return r9; }
-  DwVfpRegister double_scratch0() { return d0; }
+  DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
 
   int GetNextEmittedBlock(int block);
   LInstruction* GetNextInstruction();
@@ -158,8 +161,8 @@
                        Register temporary,
                        Register temporary2);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -171,6 +174,7 @@
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
+  bool GenerateDeoptJumpTable();
   bool GenerateSafepointTable();
 
   enum SafepointMode {
@@ -206,14 +210,16 @@
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
-                         LInstruction* instr);
+                         LInstruction* instr,
+                         CallKind call_kind);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr,
-                                  SafepointMode safepoint_mode);
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
 
-  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
   void AddToTranslation(Translation* translation,
@@ -242,22 +248,19 @@
   void RecordSafepoint(LPointerMap* pointers,
                        Safepoint::Kind kind,
                        int arguments,
-                       int deoptimization_index);
-  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
-  void RecordSafepoint(int deoptimization_index);
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
-                                    int deoptimization_index);
+                                    Safepoint::DeoptMode mode);
   void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                               int arguments,
-                                              int deoptimization_index);
+                                              Safepoint::DeoptMode mode);
   void RecordPosition(int position);
-  int LastSafepointEnd() {
-    return static_cast<int>(safepoints_.GetPcAfterGap());
-  }
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
-  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
@@ -276,7 +279,6 @@
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsObject(Register input,
                          Register temp1,
-                         Register temp2,
                          Label* is_not_object,
                          Label* is_object);
 
@@ -284,10 +286,20 @@
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);
 
-  void EmitLoadField(Register result,
-                     Register object,
-                     Handle<Map> type,
-                     Handle<String> name);
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
+
+  struct JumpTableEntry {
+    explicit inline JumpTableEntry(Address entry)
+        : label(),
+          address(entry) { }
+    Label label;
+    Address address;
+  };
+
+  void EnsureSpaceForLazyDeopt();
 
   LChunk* const chunk_;
   MacroAssembler* const masm_;
@@ -297,6 +309,7 @@
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
@@ -304,6 +317,7 @@
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
+  int last_lazy_deopt_pc_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index 02608a6..1cfdc79 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,7 +34,6 @@
 namespace internal {
 
 static const Register kSavedValueRegister = { 9 };
-static const DoubleRegister kSavedDoubleValueRegister = { 0 };
 
 LGapResolver::LGapResolver(LCodeGen* owner)
     : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
@@ -172,9 +171,9 @@
   } else if (source->IsStackSlot()) {
     __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
-    __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
-    __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
   }
@@ -193,11 +192,9 @@
   } else if (saved_destination_->IsStackSlot()) {
     __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
-    __ vmov(cgen_->ToDoubleRegister(saved_destination_),
-            kSavedDoubleValueRegister);
+    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
   } else if (saved_destination_->IsDoubleStackSlot()) {
-    __ vstr(kSavedDoubleValueRegister,
-            cgen_->ToMemOperand(saved_destination_));
+    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
   }
@@ -235,8 +232,8 @@
           // ip is overwritten while saving the value to the destination.
           // Therefore we can't use ip.  It is OK if the read from the source
           // destroys ip, since that happens before the value is read.
-          __ vldr(kSavedDoubleValueRegister.low(), source_operand);
-          __ vstr(kSavedDoubleValueRegister.low(), destination_operand);
+          __ vldr(kScratchDoubleReg.low(), source_operand);
+          __ vstr(kScratchDoubleReg.low(), destination_operand);
         } else {
           __ ldr(ip, source_operand);
           __ str(ip, destination_operand);
@@ -254,7 +251,6 @@
     } else {
       ASSERT(destination->IsStackSlot());
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
-      MemOperand destination_operand = cgen_->ToMemOperand(destination);
       __ mov(kSavedValueRegister, source_operand);
       __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
     }
@@ -265,8 +261,7 @@
       __ vmov(cgen_->ToDoubleRegister(destination), source_register);
     } else {
       ASSERT(destination->IsDoubleStackSlot());
-      MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      __ vstr(source_register, destination_operand);
+      __ vstr(source_register, cgen_->ToMemOperand(destination));
     }
 
   } else if (source->IsDoubleStackSlot()) {
@@ -288,8 +283,8 @@
         __ ldr(kSavedValueRegister, source_high_operand);
         __ str(kSavedValueRegister, destination_high_operand);
       } else {
-        __ vldr(kSavedDoubleValueRegister, source_operand);
-        __ vstr(kSavedDoubleValueRegister, destination_operand);
+        __ vldr(kScratchDoubleReg, source_operand);
+        __ vstr(kScratchDoubleReg, destination_operand);
       }
     }
   } else {
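The gap resolver breaks cycles in a parallel move (for example a swap of two locations) by parking one source value in a dedicated scratch register, now the shared kScratchDoubleReg instead of a resolver-private alias, emitting the rest of the cycle, and finally restoring the parked value into its destination. A C++ sketch of the idea on plain values:

    // Sketch only: resolving the 2-cycle {a <- b, b <- a} with one scratch
    // slot, the same save / emit / restore shape the resolver uses.
    void ResolveSwap(int* a, int* b) {
      int scratch = *a;   // save the blocked source into the scratch
      *a = *b;            // emit the remaining move of the cycle
      *b = scratch;       // complete the cycle from the scratch
    }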
diff --git a/src/arm/lithium-gap-resolver-arm.h b/src/arm/lithium-gap-resolver-arm.h
index 334d292..9dd09c8 100644
--- a/src/arm/lithium-gap-resolver-arm.h
+++ b/src/arm/lithium-gap-resolver-arm.h
@@ -40,7 +40,6 @@
 
 class LGapResolver BASE_EMBEDDED {
  public:
-
   explicit LGapResolver(LCodeGen* owner);
 
   // Resolve a set of parallel moves, emitting assembler instructions.
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6a095d3..7a1f802 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -83,7 +83,7 @@
 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond) {
 #if USE_BX
-  mov(ip, Operand(target, rmode), LeaveCC, cond);
+  mov(ip, Operand(target, rmode));
   bx(ip, cond);
 #else
   mov(pc, Operand(target, rmode), LeaveCC, cond);
@@ -91,7 +91,7 @@
 }
 
 
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                           Condition cond) {
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
   Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
@@ -118,10 +118,8 @@
 void MacroAssembler::Call(Register target, Condition cond) {
   // Block constant pool for the call instruction sequence.
   BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
-  int pre_position = pc_offset();
-#endif
-
+  Label start;
+  bind(&start);
 #if USE_BLX
   blx(target, cond);
 #else
@@ -129,33 +127,29 @@
   mov(lr, Operand(pc), LeaveCC, cond);
   mov(pc, Operand(target), LeaveCC, cond);
 #endif
-
-#ifdef DEBUG
-  int post_position = pc_offset();
-  CHECK_EQ(pre_position + CallSize(target, cond), post_position);
-#endif
+  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
 }
 
 
 int MacroAssembler::CallSize(
-    intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+    Address target, RelocInfo::Mode rmode, Condition cond) {
   int size = 2 * kInstrSize;
   Instr mov_instr = cond | MOV | LeaveCC;
-  if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+  intptr_t immediate = reinterpret_cast<intptr_t>(target);
+  if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
     size += kInstrSize;
   }
   return size;
 }
 
 
-void MacroAssembler::Call(
-    intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+void MacroAssembler::Call(Address target,
+                          RelocInfo::Mode rmode,
+                          Condition cond) {
   // Block constant pool for the call instruction sequence.
   BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
-  int pre_position = pc_offset();
-#endif
-
+  Label start;
+  bind(&start);
 #if USE_BLX
   // On ARMv5 and after the recommended call sequence is:
   //  ldr ip, [pc, #...]
@@ -167,7 +161,7 @@
   // we have to do it explicitly.
   positions_recorder()->WriteRecordedPositions();
 
-  mov(ip, Operand(target, rmode), LeaveCC, cond);
+  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
   blx(ip, cond);
 
   ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
@@ -175,59 +169,36 @@
   // Set lr for return at current pc + 8.
   mov(lr, Operand(pc), LeaveCC, cond);
   // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
-  mov(pc, Operand(target, rmode), LeaveCC, cond);
+  mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
   ASSERT(kCallTargetAddressOffset == kInstrSize);
 #endif
-
-#ifdef DEBUG
-  int post_position = pc_offset();
-  CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
+  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
 }
 
 
-int MacroAssembler::CallSize(
-    byte* target, RelocInfo::Mode rmode, Condition cond) {
-  return CallSize(reinterpret_cast<intptr_t>(target), rmode);
+int MacroAssembler::CallSize(Handle<Code> code,
+                             RelocInfo::Mode rmode,
+                             unsigned ast_id,
+                             Condition cond) {
+  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
 }
 
 
-void MacroAssembler::Call(
-    byte* target, RelocInfo::Mode rmode, Condition cond) {
-#ifdef DEBUG
-  int pre_position = pc_offset();
-#endif
-
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
-  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
-
-#ifdef DEBUG
-  int post_position = pc_offset();
-  CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
-}
-
-
-int MacroAssembler::CallSize(
-    Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
-  return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-void MacroAssembler::Call(
-    Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
-#ifdef DEBUG
-  int pre_position = pc_offset();
-#endif
-
+void MacroAssembler::Call(Handle<Code> code,
+                          RelocInfo::Mode rmode,
+                          unsigned ast_id,
+                          Condition cond) {
+  Label start;
+  bind(&start);
   ASSERT(RelocInfo::IsCodeTarget(rmode));
+  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+    SetRecordedAstId(ast_id);
+    rmode = RelocInfo::CODE_TARGET_WITH_ID;
+  }
   // 'code' is always generated ARM code, never THUMB code
-  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
-  int post_position = pc_offset();
-  CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
+  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
+  ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
+            SizeOfCodeGeneratedSince(&start));
 }
 
 
@@ -274,14 +245,29 @@
 }
 
 
+void MacroAssembler::Push(Handle<Object> handle) {
+  mov(ip, Operand(handle));
+  push(ip);
+}
+
+
 void MacroAssembler::Move(Register dst, Handle<Object> value) {
   mov(dst, Operand(value));
 }
 
 
-void MacroAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
   if (!dst.is(src)) {
-    mov(dst, src);
+    mov(dst, src, LeaveCC, cond);
+  }
+}
+
+
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+  ASSERT(CpuFeatures::IsSupported(VFP3));
+  CpuFeatures::Scope scope(VFP3);
+  if (!dst.is(src)) {
+    vmov(dst, src);
   }
 }
 
@@ -297,7 +283,8 @@
              !src2.must_use_constant_pool() &&
              CpuFeatures::IsSupported(ARMv7) &&
              IsPowerOf2(src2.immediate() + 1)) {
-    ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
+    ubfx(dst, src1, 0,
+        WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
 
   } else {
     and_(dst, src1, src2, LeaveCC, cond);
@@ -405,31 +392,17 @@
 }
 
 
-void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
-  // Empty the const pool.
-  CheckConstPool(true, true);
-  add(pc, pc, Operand(index,
-                      LSL,
-                      Instruction::kInstrSizeLog2 - kSmiTagSize));
-  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
-  nop();  // Jump table alignment.
-  for (int i = 0; i < targets.length(); i++) {
-    b(targets[i]);
-  }
-}
-
-
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
-  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
+  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
 }
 
 
 void MacroAssembler::StoreRoot(Register source,
                                Heap::RootListIndex index,
                                Condition cond) {
-  str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
+  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
 }
 
 
@@ -621,19 +594,36 @@
   ASSERT_EQ(0, dst1.code() % 2);
   ASSERT_EQ(dst1.code() + 1, dst2.code());
 
+  // V8 does not use this addressing mode, so the fallback code
+  // below doesn't support it yet.
+  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
+
   // Generate two ldr instructions if ldrd is not available.
   if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     ldrd(dst1, dst2, src, cond);
   } else {
-    MemOperand src2(src);
-    src2.set_offset(src2.offset() + 4);
-    if (dst1.is(src.rn())) {
-      ldr(dst2, src2, cond);
-      ldr(dst1, src, cond);
-    } else {
-      ldr(dst1, src, cond);
-      ldr(dst2, src2, cond);
+    if ((src.am() == Offset) || (src.am() == NegOffset)) {
+      MemOperand src2(src);
+      src2.set_offset(src2.offset() + 4);
+      if (dst1.is(src.rn())) {
+        ldr(dst2, src2, cond);
+        ldr(dst1, src, cond);
+      } else {
+        ldr(dst1, src, cond);
+        ldr(dst2, src2, cond);
+      }
+    } else {  // PostIndex or NegPostIndex.
+      ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
+      if (dst1.is(src.rn())) {
+        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
+        ldr(dst1, src, cond);
+      } else {
+        MemOperand src2(src);
+        src2.set_offset(src2.offset() - 4);
+        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
+        ldr(dst2, src2, cond);
+      }
     }
   }
 }
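When ldrd is unavailable the pair load is split into two ldr instructions, and the order matters: if the low destination register is also the base register, loading it first would clobber the address before the second load, so the high word is loaded first; the rewritten fallback also covers the post-indexed addressing modes. A C++ sketch of the aliasing rule for the plain offset case, with registers modeled as variables and addresses as word indices:

    #include <cstdint>

    // Sketch only: why the two-ldr fallback orders its loads by aliasing.
    // "base" plays the role of src.rn(); dst1/dst2 receive the low/high words.
    void LoadPairOffsetMode(const uint32_t* memory, uint32_t* base,
                            uint32_t* dst1, uint32_t* dst2) {
      uint32_t addr = *base;               // word index in this sketch
      if (dst1 == base) {
        *dst2 = memory[addr + 1];          // load the high word first...
        *dst1 = memory[addr];              // ...so clobbering the base is safe
      } else {
        *dst1 = memory[addr];
        *dst2 = memory[addr + 1];
      }
    }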
@@ -646,15 +636,26 @@
   ASSERT_EQ(0, src1.code() % 2);
   ASSERT_EQ(src1.code() + 1, src2.code());
 
+  // V8 does not use this addressing mode, so the fallback code
+  // below doesn't support it yet.
+  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
+
   // Generate two str instructions if strd is not available.
   if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     strd(src1, src2, dst, cond);
   } else {
     MemOperand dst2(dst);
-    dst2.set_offset(dst2.offset() + 4);
-    str(src1, dst, cond);
-    str(src2, dst2, cond);
+    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
+      dst2.set_offset(dst2.offset() + 4);
+      str(src1, dst, cond);
+      str(src2, dst2, cond);
+    } else {  // PostIndex or NegPostIndex.
+      ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
+      dst2.set_offset(dst2.offset() - 4);
+      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
+      str(src2, dst2, cond);
+    }
   }
 }
 
@@ -701,6 +702,23 @@
   vmrs(fpscr_flags, cond);
 }
 
+void MacroAssembler::Vmov(const DwVfpRegister dst,
+                          const double imm,
+                          const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  static const DoubleRepresentation minus_zero(-0.0);
+  static const DoubleRepresentation zero(0.0);
+  DoubleRepresentation value(imm);
+  // Handle special values first.
+  if (value.bits == zero.bits) {
+    vmov(dst, kDoubleRegZero, cond);
+  } else if (value.bits == minus_zero.bits) {
+    vneg(dst, kDoubleRegZero, cond);
+  } else {
+    vmov(dst, imm, cond);
+  }
+}
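Vmov special-cases the two zero bit patterns because they are cheap to materialize from kDoubleRegZero: +0.0 is a plain register copy and -0.0 a vneg, avoiding a constant load; every other immediate falls through to the generic vmov. Since 0.0 == -0.0 as doubles, the dispatch has to compare bit patterns, as in this C++ sketch:

    #include <cstdint>
    #include <cstring>

    // Sketch only: the dispatch inside MacroAssembler::Vmov on the
    // immediate's bit pattern.
    enum class VmovPath { kCopyZeroReg, kNegateZeroReg, kGenericImmediate };

    VmovPath ClassifyVmovImmediate(double imm) {
      uint64_t bits, plus_zero, minus_zero;
      const double pz = 0.0, mz = -0.0;
      std::memcpy(&bits, &imm, sizeof(bits));
      std::memcpy(&plus_zero, &pz, sizeof(plus_zero));
      std::memcpy(&minus_zero, &mz, sizeof(minus_zero));
      if (bits == plus_zero)  return VmovPath::kCopyZeroReg;    // vmov dst, zero reg
      if (bits == minus_zero) return VmovPath::kNegateZeroReg;  // vneg dst, zero reg
      return VmovPath::kGenericImmediate;                       // vmov dst, #imm
    }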
+
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   // r0-r3: preserved
@@ -742,9 +760,9 @@
   str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
-  mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   str(fp, MemOperand(ip));
-  mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   str(cp, MemOperand(ip));
 
   // Optionally save all double registers.
@@ -820,11 +838,11 @@
 
   // Clear top frame.
   mov(r3, Operand(0, RelocInfo::NONE));
-  mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   str(r3, MemOperand(ip));
 
   // Restore current context from top and clear it in debug mode.
-  mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   ldr(cp, MemOperand(ip));
 #ifdef DEBUG
   str(r3, MemOperand(ip));
@@ -839,7 +857,25 @@
 }
 
 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-  vmov(dst, r0, r1);
+  if (use_eabi_hardfloat()) {
+    Move(dst, d0);
+  } else {
+    vmov(dst, r0, r1);
+  }
+}
+
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be r5 to
+  // follow the calling convention which requires the call type to be
+  // in r5.
+  ASSERT(dst.is(r5));
+  if (call_kind == CALL_AS_FUNCTION) {
+    mov(dst, Operand(Smi::FromInt(1)));
+  } else {
+    mov(dst, Operand(Smi::FromInt(0)));
+  }
 }
 
 
@@ -849,7 +885,8 @@
                                     Register code_reg,
                                     Label* done,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -904,13 +941,13 @@
     Handle<Code> adaptor =
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (flag == CALL_FUNCTION) {
-      if (call_wrapper != NULL) {
-        call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
-      }
-      Call(adaptor, RelocInfo::CODE_TARGET);
-      if (call_wrapper != NULL) call_wrapper->AfterCall();
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      SetCallKind(r5, call_kind);
+      Call(adaptor);
+      call_wrapper.AfterCall();
       b(done);
     } else {
+      SetCallKind(r5, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&regular_invoke);
@@ -922,17 +959,20 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
-                 call_wrapper);
+                 call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(r5, call_kind);
     Call(code);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(r5, call_kind);
     Jump(code);
   }
 
@@ -946,13 +986,17 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                CallKind call_kind) {
   Label done;
 
-  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+                 NullCallWrapper(), call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(r5, call_kind);
     Call(code, rmode);
   } else {
+    SetCallKind(r5, call_kind);
     Jump(code, rmode);
   }
 
@@ -965,7 +1009,8 @@
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   // Contract with called JS functions requires that function is passed in r1.
   ASSERT(fun.is(r1));
 
@@ -982,13 +1027,14 @@
       FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
@@ -1003,9 +1049,9 @@
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    InvokeCode(r3, expected, actual, flag);
+    InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
   } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
   }
 }
 
@@ -1023,9 +1069,9 @@
                                             Register scratch,
                                             Label* fail) {
   ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
+  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   b(lt, fail);
-  cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
+  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   b(gt, fail);
 }
 
@@ -1056,7 +1102,13 @@
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+
   // The pc (return address) is passed in register lr.
   if (try_location == IN_JAVASCRIPT) {
     if (type == TRY_CATCH_HANDLER) {
@@ -1064,14 +1116,10 @@
     } else {
       mov(r3, Operand(StackHandler::TRY_FINALLY));
     }
-    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
-           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
-           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
-    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
+    stm(db_w, sp, r3.bit() | cp.bit() | fp.bit() | lr.bit());
     // Save the current handler as the next handler.
-    mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+    mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
     ldr(r1, MemOperand(r3));
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
     push(r1);
     // Link this handler as the new current one.
     str(sp, MemOperand(r3));
@@ -1081,16 +1129,13 @@
     // The frame pointer does not point to a JS frame so we save NULL
     // for fp. We expect the code throwing an exception to check fp
     // before dereferencing it to restore the context.
-    mov(ip, Operand(0, RelocInfo::NONE));  // To save a NULL frame pointer.
-    mov(r6, Operand(StackHandler::ENTRY));
-    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
-           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
-           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
-    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
+    mov(r5, Operand(StackHandler::ENTRY));  // State.
+    mov(r6, Operand(Smi::FromInt(0)));  // Indicates no context.
+    mov(r7, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
+    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | lr.bit());
     // Save the current handler as the next handler.
-    mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+    mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
     ldr(r6, MemOperand(r7));
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
     push(r6);
     // Link this handler as the new current one.
     str(sp, MemOperand(r7));
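The STATIC_ASSERTs above pin down the new five-word handler frame that PushTryHandler builds and that Throw later unwinds. As a reading aid only, here is a sketch of that layout on 32-bit ARM (kPointerSize == 4), lowest address first; the struct name is illustrative and does not appear in the patch.

#include <cstdint>

// Word-by-word picture of a pushed try-handler, mirroring the
// StackHandlerConstants offsets asserted above.
struct StackHandlerFrame {
  uint32_t next;     // kNextOffset:    link to the previous handler (push(r1))
  uint32_t state;    // kStateOffset:   TRY_CATCH, TRY_FINALLY or ENTRY
  uint32_t context;  // kContextOffset: cp, or Smi 0 for a JS entry handler
  uint32_t fp;       // kFPOffset:      frame pointer, NULL for a JS entry
  uint32_t pc;       // kPCOffset:      return address (lr)
};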
@@ -1099,48 +1144,49 @@
 
 
 void MacroAssembler::PopTryHandler() {
-  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   pop(r1);
-  mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
   str(r1, MemOperand(ip));
 }
 
 
 void MacroAssembler::Throw(Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
   // r0 is expected to hold the exception.
   if (!value.is(r0)) {
     mov(r0, value);
   }
 
-  // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
   // Drop the sp to the top of the handler.
-  mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   ldr(sp, MemOperand(r3));
 
-  // Restore the next handler and frame pointer, discard handler state.
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  // Restore the next handler.
   pop(r2);
   str(r2, MemOperand(r3));
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
 
-  // Before returning we restore the context from the frame pointer if
-  // not NULL.  The frame pointer is NULL in the exception handler of a
-  // JS entry frame.
-  cmp(fp, Operand(0, RelocInfo::NONE));
-  // Set cp to NULL if fp is NULL.
-  mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
-  // Restore cp otherwise.
-  ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+  // Restore context and frame pointer, discard state (r3).
+  ldm(ia_w, sp, r3.bit() | cp.bit() | fp.bit());
+
+  // If the handler is a JS frame, restore the context to the frame.
+  // (r3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
+  // of them.
+  cmp(r3, Operand(StackHandler::ENTRY));
+  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+
 #ifdef DEBUG
   if (emit_debug_code()) {
     mov(lr, Operand(pc));
   }
 #endif
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   pop(pc);
 }
 
@@ -1148,15 +1194,19 @@
 void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                       Register value) {
   // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
   // r0 is expected to hold the exception.
   if (!value.is(r0)) {
     mov(r0, value);
   }
 
   // Drop sp to the top stack handler.
-  mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   ldr(sp, MemOperand(r3));
 
   // Unwind the handlers until the ENTRY handler is found.
@@ -1174,14 +1224,13 @@
   bind(&done);
 
   // Set the top handler address to next handler past the current ENTRY handler.
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   pop(r2);
   str(r2, MemOperand(r3));
 
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
     ExternalReference external_caught(
-        Isolate::k_external_caught_exception_address, isolate());
+        Isolate::kExternalCaughtExceptionAddress, isolate());
     mov(r0, Operand(false, RelocInfo::NONE));
     mov(r2, Operand(external_caught));
     str(r0, MemOperand(r2));
@@ -1189,33 +1238,24 @@
     // Set pending exception and r0 to out of memory exception.
     Failure* out_of_memory = Failure::OutOfMemoryException();
     mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-    mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+    mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));
     str(r0, MemOperand(r2));
   }
 
   // Stack layout at this point. See also StackHandlerConstants.
   // sp ->   state (ENTRY)
+  //         cp
   //         fp
   //         lr
 
-  // Discard handler state (r2 is not used) and restore frame pointer.
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
-  // Before returning we restore the context from the frame pointer if
-  // not NULL.  The frame pointer is NULL in the exception handler of a
-  // JS entry frame.
-  cmp(fp, Operand(0, RelocInfo::NONE));
-  // Set cp to NULL if fp is NULL.
-  mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
-  // Restore cp otherwise.
-  ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+  // Restore context and frame pointer, discard state (r2).
+  ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
 #ifdef DEBUG
   if (emit_debug_code()) {
     mov(lr, Operand(pc));
   }
 #endif
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   pop(pc);
 }
 
@@ -1297,6 +1337,112 @@
 }
 
 
+void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
+  // First of all we assign the hash seed to scratch.
+  LoadRoot(scratch, Heap::kHashSeedRootIndex);
+  SmiUntag(scratch);
+
+  // Xor original key with a seed.
+  eor(t0, t0, Operand(scratch));
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  mvn(scratch, Operand(t0));
+  add(t0, scratch, Operand(t0, LSL, 15));
+  // hash = hash ^ (hash >> 12);
+  eor(t0, t0, Operand(t0, LSR, 12));
+  // hash = hash + (hash << 2);
+  add(t0, t0, Operand(t0, LSL, 2));
+  // hash = hash ^ (hash >> 4);
+  eor(t0, t0, Operand(t0, LSR, 4));
+  // hash = hash * 2057;
+  mov(scratch, Operand(2057));
+  mul(t0, t0, scratch);
+  // hash = hash ^ (hash >> 16);
+  eor(t0, t0, Operand(t0, LSR, 16));
+}
+
+
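GetNumberHash is required to stay in sync with ComputeIntegerHash in utils.h. A host-side C++ restatement of the same mixing steps, offered only as a sketch; the function name here is illustrative.

#include <cstdint>

// Same shift/xor/multiply sequence as the assembly above.
uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // eor(t0, t0, Operand(scratch))
  hash = ~hash + (hash << 15);  // mvn + add(t0, scratch, Operand(t0, LSL, 15))
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // mov(scratch, 2057); mul(t0, t0, scratch)
  hash = hash ^ (hash >> 16);
  return hash;
}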
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+                                              Register elements,
+                                              Register key,
+                                              Register result,
+                                              Register t0,
+                                              Register t1,
+                                              Register t2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'key' or 'elements'.
+  //            Unchanged on bailout so 'key' or 'elements' can be used
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // t0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // t1 - used to hold the capacity mask of the dictionary
+  //
+  // t2 - used for the index into the dictionary.
+  Label done;
+
+  GetNumberHash(t0, t1);
+
+  // Compute the capacity mask.
+  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
+  sub(t1, t1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use t2 for index calculations and keep the hash intact in t0.
+    mov(t2, t0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+    }
+    and_(t2, t2, Operand(t1));
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
+
+    // Check if the key is identical to the name.
+    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
+    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
+    cmp(key, Operand(ip));
+    if (i != kProbes - 1) {
+      b(eq, &done);
+    } else {
+      b(ne, miss);
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a normal property.
+  // t2: elements + (index * kPointerSize)
+  const int kDetailsOffset =
+      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+  b(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+  ldr(result, FieldMemOperand(t2, kValueOffset));
+}
+
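For orientation, the unrolled loop in LoadFromNumberDictionary probes entries of three words each (key, value, details) at the masked index (hash + GetProbeOffset(i)) & mask. A rough sketch of that addressing follows, with GetProbeOffset taken as given rather than restated.

#include <cstdint>

// Illustrative entry shape; the real constants are kEntrySize == 3 and the
// kElementsStartOffset / kValueOffset / kDetailsOffset words used above.
struct NumberDictionaryEntrySketch {
  uint32_t key;      // compared against the smi key (cmp(key, ip))
  uint32_t value;    // loaded into 'result' on a hit
  uint32_t details;  // must describe a normal property, otherwise miss
};

// Entry index for probe i, mirroring the masked-index step; probe_offset
// is SeededNumberDictionary::GetProbeOffset(i), which is 0 for i == 0.
uint32_t ProbeIndex(uint32_t hash, uint32_t probe_offset, uint32_t mask) {
  return (hash + probe_offset) & mask;
}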
+
 void MacroAssembler::AllocateInNewSpace(int object_size,
                                         Register result,
                                         Register scratch1,
@@ -1591,6 +1737,46 @@
 }
 
 
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kSlicedStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kSlicedAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
 void MacroAssembler::CompareObjectType(Register object,
                                        Register map,
                                        Register type_reg,
@@ -1616,12 +1802,22 @@
 }
 
 
+void MacroAssembler::CheckFastElements(Register map,
+                                       Register scratch,
+                                       Label* fail) {
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+  b(hi, fail);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -1635,8 +1831,8 @@
                               Register scratch,
                               Heap::RootListIndex index,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -1646,6 +1842,23 @@
 }
 
 
+void MacroAssembler::DispatchMap(Register obj,
+                                 Register scratch,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  mov(ip, Operand(map));
+  cmp(scratch, ip);
+  Jump(success, RelocInfo::CODE_TARGET, eq);
+  bind(&fail);
+}
+
+
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
@@ -1695,7 +1908,19 @@
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
   ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
+}
+
+
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Handle<Code> code(Code::cast(result));
+  Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
+  return result;
 }
 
 
@@ -1711,7 +1936,7 @@
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
   return result;
 }
 
@@ -2276,15 +2501,17 @@
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeJSFlags flags,
-                                   CallWrapper* call_wrapper) {
+                                   InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
   GetBuiltinEntry(r2, id);
-  if (flags == CALL_JS) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(r2));
+    SetCallKind(r5, CALL_AS_METHOD);
     Call(r2);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
-    ASSERT(flags == JUMP_JS);
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(r5, CALL_AS_METHOD);
     Jump(r2);
   }
 }
@@ -2368,6 +2595,9 @@
     LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
     cmp(elements, ip);
     b(eq, &ok);
+    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
+    cmp(elements, ip);
+    b(eq, &ok);
     LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
     cmp(elements, ip);
     b(eq, &ok);
@@ -2430,12 +2660,9 @@
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
-    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
-    // Load the function context (which is the incoming, outer context).
-    ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     for (int i = 1; i < context_chain_length; i++) {
-      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
-      ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     }
   } else {
     // Slot is in the current function context.  Move it into the
@@ -2443,17 +2670,6 @@
     // cannot be allowed to destroy the context in esi).
     mov(dst, cp);
   }
-
-  // We should not have found a 'with' context by walking the context chain
-  // (i.e., the static scope chain and runtime context chain do not agree).
-  // A variable occurring in such a scope should have slot type LOOKUP and
-  // not CONTEXT.
-  if (emit_debug_code()) {
-    ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-    cmp(dst, ip);
-    Check(eq, "Yo dawg, I heard you liked function contexts "
-              "so I put function contexts in all your contexts");
-  }
 }
 
 
@@ -2475,7 +2691,7 @@
   ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
     b(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -2601,8 +2817,7 @@
   // Check that neither is a smi.
   STATIC_ASSERT(kSmiTag == 0);
   and_(scratch1, first, Operand(second));
-  tst(scratch1, Operand(kSmiTagMask));
-  b(eq, failure);
+  JumpIfSmi(scratch1, failure);
   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                              second,
                                              scratch1,
@@ -2794,12 +3009,36 @@
 
 static const int kRegisterPassedArguments = 4;
 
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
-  int frame_alignment = ActivationFrameAlignment();
 
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
+  if (use_eabi_hardfloat()) {
+    // In the hard floating point calling convention, we can use
+    // all double registers to pass doubles.
+    if (num_double_arguments > DoubleRegister::kNumRegisters) {
+      stack_passed_words +=
+          2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+    }
+  } else {
+    // In the soft floating point calling convention, every double
+    // argument is passed using two registers.
+    num_reg_arguments += 2 * num_double_arguments;
+  }
   // Up to four simple arguments are passed in registers r0..r3.
-  int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
-                               0 : num_arguments - kRegisterPassedArguments;
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  return stack_passed_words;
+}
+
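A host-side mirror of the stack-word accounting is useful for sanity-checking call sites. The constants below, 4 core argument registers and 16 VFP double registers, are assumptions of this sketch rather than values quoted from the patch.

#include <cassert>

int StackPassedWords(int reg_args, int double_args, bool hardfloat) {
  const int kMaxRegArgs = 4;      // r0-r3 (assumed)
  const int kMaxDoubleRegs = 16;  // d0-d15 (assumed)
  int words = 0;
  if (hardfloat) {
    // Doubles beyond the register file spill to the stack, two words each.
    if (double_args > kMaxDoubleRegs)
      words += 2 * (double_args - kMaxDoubleRegs);
  } else {
    // Soft-float: every double occupies a pair of core registers.
    reg_args += 2 * double_args;
  }
  if (reg_args > kMaxRegArgs) words += reg_args - kMaxRegArgs;
  return words;
}

int main() {
  assert(StackPassedWords(1, 1, true) == 0);   // double in d0, int in a core reg
  assert(StackPassedWords(1, 1, false) == 0);  // three core registers suffice
  assert(StackPassedWords(2, 2, false) == 2);  // six words wanted, four registers
  return 0;
}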
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
@@ -2814,25 +3053,92 @@
 }
 
 
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+  if (use_eabi_hardfloat()) {
+    Move(d0, dreg);
+  } else {
+    vmov(r0, r1, dreg);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+                                             DoubleRegister dreg2) {
+  if (use_eabi_hardfloat()) {
+    if (dreg2.is(d0)) {
+      ASSERT(!dreg1.is(d1));
+      Move(d1, dreg2);
+      Move(d0, dreg1);
+    } else {
+      Move(d0, dreg1);
+      Move(d1, dreg2);
+    }
+  } else {
+    vmov(r0, r1, dreg1);
+    vmov(r2, r3, dreg2);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+                                             Register reg) {
+  if (use_eabi_hardfloat()) {
+    Move(d0, dreg);
+    Move(r0, reg);
+  } else {
+    Move(r2, reg);
+    vmov(r0, r1, dreg);
+  }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(no_reg,
+                      function,
+                      ip,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   Register scratch,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, ip, num_arguments);
+  CallCFunction(function, num_arguments, 0);
 }
 
+
 void MacroAssembler::CallCFunction(Register function,
                                    Register scratch,
                                    int num_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_arguments);
+  CallCFunction(function, scratch, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
                                          ExternalReference function_reference,
                                          Register scratch,
-                                         int num_arguments) {
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -2861,9 +3167,9 @@
     function = scratch;
   }
   Call(function);
-  int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
-                               0 : num_arguments - kRegisterPassedArguments;
-  if (OS::ActivationFrameAlignment() > kPointerSize) {
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
+  if (ActivationFrameAlignment() > kPointerSize) {
     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
     add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
@@ -2891,6 +3197,55 @@
 }
 
 
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  Usat(output_reg, 8, Operand(input_reg));
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  Vmov(temp_double_reg, 0.0);
+  VFPCompareAndSetFlags(input_reg, temp_double_reg);
+  b(gt, &above_zero);
+
+  // Double value is not greater than zero (covers NaN): return 0.
+  mov(result_reg, Operand(0));
+  b(al, &done);
+
+  // Double value is >= 255, return 255.
+  bind(&above_zero);
+  Vmov(temp_double_reg, 255.0);
+  VFPCompareAndSetFlags(input_reg, temp_double_reg);
+  b(le, &in_bounds);
+  mov(result_reg, Operand(255));
+  b(al, &done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  Vmov(temp_double_reg, 0.5);
+  vadd(temp_double_reg, input_reg, temp_double_reg);
+  vcvt_u32_f64(s0, temp_double_reg);
+  vmov(result_reg, s0);
+  bind(&done);
+}
+
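Behaviourally, ClampDoubleToUint8 maps NaN and non-positive inputs to 0, inputs above 255 to 255, and everything else to round-half-up (add 0.5, then the truncating vcvt_u32_f64). A plain C++ sketch of that contract, using host arithmetic rather than the VFP sequence.

#include <cstdint>

uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;     // also catches NaN
  if (value > 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);  // truncate after rounding up
}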
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  ldr(descriptors,
+      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi);
+  mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
+  bind(&not_smi);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8d817c0..0546e6a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,13 +29,11 @@
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
 
 #include "assembler.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
-// Forward declaration.
-class CallWrapper;
-
 // ----------------------------------------------------------------------------
 // Static helper functions
 
@@ -53,13 +51,7 @@
 
 // Give alias names to registers
 const Register cp = { 8 };  // JavaScript context pointer
-const Register roots = { 10 };  // Roots array pointer.
-
-enum InvokeJSFlags {
-  CALL_JS,
-  JUMP_JS
-};
-
+const Register kRootRegister = { 10 };  // Roots array pointer.
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -98,16 +90,22 @@
 
   // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
-  void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
   static int CallSize(Register target, Condition cond = al);
   void Call(Register target, Condition cond = al);
-  static int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
-  void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
-  static int CallSize(Handle<Code> code,
+  static int CallSize(Address target,
                       RelocInfo::Mode rmode,
                       Condition cond = al);
-  void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  static int CallSize(Handle<Code> code,
+                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+                      unsigned ast_id = kNoASTId,
+                      Condition cond = al);
+  void Call(Handle<Code> code,
+            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            unsigned ast_id = kNoASTId,
+            Condition cond = al);
   void Ret(Condition cond = al);
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -144,11 +142,12 @@
             Condition cond = al);
 
   void Call(Label* target);
+
+  // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
-  // May do nothing if the registers are identical.
-  void Move(Register dst, Register src);
-  // Jumps to the label at the index given by the Smi in "index".
-  void SmiJumpTable(Register index, Vector<Label*> targets);
+  void Move(Register dst, Register src, Condition cond = al);
+  void Move(DoubleRegister dst, DoubleRegister src);
+
   // Load an object from the root table.
   void LoadRoot(Register destination,
                 Heap::RootListIndex index,
@@ -193,6 +192,9 @@
                    Register address,
                    Register scratch);
 
+  // Push a handle.
+  void Push(Handle<Object> handle);
+
   // Push two registers.  Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {
     ASSERT(!src1.is(src2));
@@ -312,6 +314,10 @@
                               const Register fpscr_flags,
                               const Condition cond = al);
 
+  void Vmov(const DwVfpRegister dst,
+            const double imm,
+            const Condition cond = al);
+
 
   // ---------------------------------------------------------------------------
   // Activation frames
@@ -344,32 +350,47 @@
                                     Register map,
                                     Register scratch);
 
+  void InitializeRootRegister() {
+    ExternalReference roots_address =
+        ExternalReference::roots_address(isolate());
+    mov(kRootRegister, Operand(roots_address));
+  }
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Set up call kind marking in r5. The method takes the register as an
+  // explicit first parameter to make the code more readable at the call
+  // sites; the calling convention requires the call kind to be in r5.
+  void SetCallKind(Register dst, CallKind kind);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  CallWrapper* call_wrapper = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  CallKind call_kind);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      CallKind call_kind);
 
   void IsObjectJSObjectType(Register heap_object,
                             Register map,
@@ -420,6 +441,17 @@
                               Register scratch,
                               Label* miss);
 
+  void GetNumberHash(Register t0, Register scratch);
+
+  void LoadFromNumberDictionary(Label* miss,
+                                Register elements,
+                                Register key,
+                                Register result,
+                                Register t0,
+                                Register t1,
+                                Register t2);
+
+
   inline void MarkCode(NopMarkerTypes type) {
     nop(type);
   }
@@ -507,6 +539,16 @@
                                Register scratch1,
                                Register scratch2,
                                Label* gc_required);
+  void AllocateTwoByteSlicedString(Register result,
+                                   Register length,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* gc_required);
+  void AllocateAsciiSlicedString(Register result,
+                                 Register length,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
 
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are clobbered also
@@ -561,14 +603,18 @@
 
   // Compare instance type in a map.  map contains a valid map object whose
   // object type should be compared with the given type.  This both
-  // sets the flags and leaves the object type in the type_reg register.  It
-  // leaves the heap object in the heap_object register unless the heap_object
-  // register is the same register as type_reg.
+  // sets the flags and leaves the object type in the type_reg register.
   void CompareInstanceType(Register map,
                            Register type_reg,
                            InstanceType type);
 
 
+  // Check if a map for a JSObject indicates that the object has fast elements.
+  // Jump to the specified label if it does not.
+  void CheckFastElements(Register map,
+                         Register scratch,
+                         Label* fail);
+
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
@@ -577,13 +623,24 @@
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
 
   void CheckMap(Register obj,
                 Register scratch,
                 Heap::RootListIndex index,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object)
+  void DispatchMap(Register obj,
+                   Register scratch,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
 
   // Compare the object in a register to a value from the root list.
@@ -704,6 +761,11 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
+
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
@@ -742,15 +804,32 @@
                        int num_arguments,
                        int result_size);
 
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, non-register arguments must be stored in
   // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
-  // are word sized.
+  // are word sized. If double arguments are used, this function assumes that
+  // all double arguments are stored before core registers; otherwise the
+  // correct alignment of the double values is not guaranteed.
   // Some compilers/platforms require the stack to be aligned when calling
   // C++ code.
   // Needs a scratch register to do some arithmetic. This register will be
   // trashed.
-  void PrepareCallCFunction(int num_arguments, Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            int num_double_arguments,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            Register scratch);
+
+  // There are two ways of passing double arguments on ARM, depending on
+  // whether soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different ways we call
+  // C functions from generated code.
+  void SetCallCDoubleArguments(DoubleRegister dreg);
+  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
 
   // Calls a C function and cleans up the space for arguments allocated
   // by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -759,6 +838,12 @@
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(ExternalReference function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function, Register scratch,
+                     int num_reg_arguments,
+                     int num_double_arguments);
 
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
@@ -777,8 +862,8 @@
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flags,
-                     CallWrapper* call_wrapper = NULL);
+                     InvokeFlag flag,
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the code object for the given builtin in the target register and
   // setup the function in r1.
@@ -825,6 +910,15 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  // EABI variant for double arguments in use.
+  bool use_eabi_hardfloat() {
+#if USE_EABI_HARDFLOAT
+    return true;
+#else
+    return false;
+#endif
+  }
+
   // ---------------------------------------------------------------------------
   // Number utilities
 
@@ -952,17 +1046,23 @@
                                  Register result);
 
 
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  void ClampDoubleToUint8(Register result_reg,
+                          DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+
  private:
   void CallCFunctionHelper(Register function,
                            ExternalReference function_reference,
                            Register scratch,
-                           int num_arguments);
+                           int num_reg_arguments,
+                           int num_double_arguments);
 
   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-  static int CallSize(intptr_t target,
-                      RelocInfo::Mode rmode,
-                      Condition cond = al);
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -971,7 +1071,8 @@
                       Register code_reg,
                       Label* done,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1032,21 +1133,6 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
-  CallWrapper() { }
-  virtual ~CallWrapper() { }
-  // Called just before emitting a call. Argument is the size of the generated
-  // call code.
-  virtual void BeforeCall(int call_size) = 0;
-  // Called just after emitting a call, i.e., at the return site for the call.
-  virtual void AfterCall() = 0;
-};
-
-
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 1c59823..cd76edb 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -899,13 +899,12 @@
       constant_offset - offset_of_pc_register_read;
     ASSERT(pc_offset_of_constant < 0);
     if (is_valid_memory_offset(pc_offset_of_constant)) {
-      masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
+      Assembler::BlockConstPoolScope block_const_pool(masm_);
       __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
     } else {
       // Not a 12-bit offset, so it needs to be loaded from the constant
       // pool.
-      masm_->BlockConstPoolBefore(
-          masm_->pc_offset() + 2 * Assembler::kInstrSize);
+      Assembler::BlockConstPoolScope block_const_pool(masm_);
       __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
       __ ldr(r0, MemOperand(pc, r0));
     }
@@ -1035,12 +1034,13 @@
   }
 
   // Prepare for possible GC.
-  HandleScope handles;
+  HandleScope handles(isolate);
   Handle<Code> code_handle(re_code);
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1049,7 +1049,7 @@
   MaybeObject* result = Execution::HandleStackGuardInterrupt();
 
   if (*code_handle != re_code) {  // Return address no longer valid
-    int delta = *code_handle - re_code;
+    int delta = code_handle->address() - re_code->address();
     // Overwrite the return address on the stack.
     *return_address += delta;
   }
@@ -1058,8 +1058,20 @@
     return EXCEPTION;
   }
 
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
   // String might have changed.
-  if (subject->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1070,8 +1082,8 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject).IsSequential() ||
-      StringShape(*subject).IsExternal());
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
   const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
@@ -1079,13 +1091,14 @@
   // Find the current start address of the same character at the current string
   // position.
   int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject, start_index);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
 
   if (start_address != new_address) {
     // If there is a difference, update the object pointer and start and end
     // addresses in the RegExp stack frame to match the new value.
     const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
-    int byte_length = end_address - start_address;
+    int byte_length = static_cast<int>(end_address - start_address);
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
@@ -1185,8 +1198,7 @@
 
 void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
   __ CheckConstPool(false, false);
-  __ BlockConstPoolBefore(
-      masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
+  Assembler::BlockConstPoolScope block_const_pool(masm_);
   backtrack_constant_pool_offset_ = masm_->pc_offset();
   for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
     __ emit(0);
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index d771e40..5c8ed06 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -28,6 +28,9 @@
 #ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
 #define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
 
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -113,6 +116,7 @@
   static int CheckStackGuardState(Address* return_address,
                                   Code* re_code,
                                   Address re_frame);
+
  private:
   // Offsets from frame_pointer() of function parameters and stored registers.
   static const int kFramePointer = 0;
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index da554c2..6af5355 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -719,20 +719,21 @@
 }
 
 
-void Simulator::Initialize() {
-  if (Isolate::Current()->simulator_initialized()) return;
-  Isolate::Current()->set_simulator_initialized(true);
-  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
 }
 
 
-Simulator::Simulator() : isolate_(Isolate::Current()) {
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
     i_cache_ = new v8::internal::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
-  Initialize();
+  Initialize(isolate);
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
@@ -848,17 +849,13 @@
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current(Isolate* isolate) {
   v8::internal::Isolate::PerIsolateThreadData* isolate_data =
-      Isolate::CurrentPerIsolateThreadData();
-  if (isolate_data == NULL) {
-    Isolate::EnterDefaultIsolate();
-    isolate_data = Isolate::CurrentPerIsolateThreadData();
-  }
+      isolate->FindOrAllocatePerThreadDataForThisThread();
   ASSERT(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
     // TODO(146): delete the simulator object when a thread/isolate goes away.
-    sim = new Simulator();
+    sim = new Simulator(isolate);
     isolate_data->set_simulator(sim);
   }
   return sim;
@@ -1009,26 +1006,74 @@
 }
 
 
-// For use in calls that take two double values, constructed from r0, r1, r2
-// and r3.
+// For use in calls that take two double values, constructed either
+// from r0-r3 or d0 and d1.
 void Simulator::GetFpArgs(double* x, double* y) {
-  // We use a char buffer to get around the strict-aliasing rules which
-  // otherwise allow the compiler to optimize away the copy.
-  char buffer[2 * sizeof(registers_[0])];
-  // Registers 0 and 1 -> x.
-  memcpy(buffer, registers_, sizeof(buffer));
-  memcpy(x, buffer, sizeof(buffer));
-  // Registers 2 and 3 -> y.
-  memcpy(buffer, registers_ + 2, sizeof(buffer));
-  memcpy(y, buffer, sizeof(buffer));
+  if (use_eabi_hardfloat()) {
+    *x = vfp_register[0];
+    *y = vfp_register[1];
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    // Registers 0 and 1 -> x.
+    memcpy(buffer, registers_, sizeof(*x));
+    memcpy(x, buffer, sizeof(*x));
+    // Registers 2 and 3 -> y.
+    memcpy(buffer, registers_ + 2, sizeof(*y));
+    memcpy(y, buffer, sizeof(*y));
+  }
+}
+
+// For use in calls that take one double value, constructed either
+// from r0 and r1 or d0.
+void Simulator::GetFpArgs(double* x) {
+  if (use_eabi_hardfloat()) {
+    *x = vfp_register[0];
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    // Registers 0 and 1 -> x.
+    memcpy(buffer, registers_, sizeof(*x));
+    memcpy(x, buffer, sizeof(*x));
+  }
 }
 
 
+// For use in calls that take one double value, constructed either
+// from r0 and r1 or from d0, plus one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+  if (use_eabi_hardfloat()) {
+    *x = vfp_register[0];
+    *y = registers_[1];
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    // Registers 0 and 1 -> x.
+    memcpy(buffer, registers_, sizeof(*x));
+    memcpy(x, buffer, sizeof(*x));
+    // Register 2 -> y.
+    memcpy(buffer, registers_ + 2, sizeof(*y));
+    memcpy(y, buffer, sizeof(*y));
+  }
+}
+
+
+// The return value is either in r0/r1 or d0.
 void Simulator::SetFpResult(const double& result) {
-  char buffer[2 * sizeof(registers_[0])];
-  memcpy(buffer, &result, sizeof(buffer));
-  // result -> registers 0 and 1.
-  memcpy(registers_, buffer, sizeof(buffer));
+  if (use_eabi_hardfloat()) {
+    char buffer[2 * sizeof(vfp_register[0])];
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to d0.
+    memcpy(vfp_register, buffer, sizeof(buffer));
+  } else {
+    char buffer[2 * sizeof(registers_[0])];
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to r0 and r1.
+    memcpy(registers_, buffer, sizeof(buffer));
+  }
 }
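The char-buffer copies in GetFpArgs and SetFpResult exist only to move raw bits between the 32-bit register array and a double without violating strict-aliasing rules. A minimal standalone version of the same idiom, with stand-in arrays rather than the simulator's members.

#include <cstdint>
#include <cstring>

// Reinterpret two 32-bit register slots as one double, as the soft-float
// branch of GetFpArgs does; memcpy keeps the compiler honest about aliasing.
double ReadDoubleFromRegisterPair(const int32_t regs[2]) {
  double result;
  std::memcpy(&result, regs, sizeof(result));
  return result;
}

// The reverse direction, matching the r0/r1 path of SetFpResult.
void WriteDoubleToRegisterPair(double value, int32_t regs[2]) {
  std::memcpy(regs, &value, sizeof(value));
}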
 
 
@@ -1282,12 +1327,13 @@
 
 
 // Calculate C flag value for additions.
-bool Simulator::CarryFrom(int32_t left, int32_t right) {
+bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
   uint32_t uleft = static_cast<uint32_t>(left);
   uint32_t uright = static_cast<uint32_t>(right);
   uint32_t urest  = 0xffffffffU - uleft;
 
-  return (uright > urest);
+  return (uright > urest) ||
+         (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
 }
 
 
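The widened CarryFrom above is what the new ADC case further down relies on: it now reports the carry out of left + right + carry_in rather than just left + right. A standalone sanity check against the 64-bit definition of the ARM carry flag (assumption: ordinary C++, not simulator code) looks like:

#include <assert.h>
#include <stdint.h>

// Carry flag for ADC: does left + right + carry_in overflow 32 bits?
static bool CarryFromWide(int32_t left, int32_t right, int32_t carry) {
  uint64_t sum = static_cast<uint64_t>(static_cast<uint32_t>(left)) +
                 static_cast<uint32_t>(right) +
                 static_cast<uint32_t>(carry);
  return sum > 0xffffffffULL;
}

int main() {
  assert(CarryFromWide(-1, 1, 0));   // 0xffffffff + 1 carries
  assert(!CarryFromWide(1, 2, 0));   // small sum, no carry
  assert(CarryFromWide(-1, 0, 1));   // carry-in alone can tip it over
  return 0;
}
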
@@ -1684,27 +1730,92 @@
       int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
       int32_t arg4 = stack_pointer[0];
       int32_t arg5 = stack_pointer[1];
+      bool fp_call =
+         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+      if (use_eabi_hardfloat()) {
+        // With the hard floating point calling convention, double
+        // arguments are passed in VFP registers. Fetch the arguments
+        // from there and call the builtin using the soft floating point
+        // convention.
+        switch (redirection->type()) {
+        case ExternalReference::BUILTIN_FP_FP_CALL:
+        case ExternalReference::BUILTIN_COMPARE_CALL:
+          arg0 = vfp_register[0];
+          arg1 = vfp_register[1];
+          arg2 = vfp_register[2];
+          arg3 = vfp_register[3];
+          break;
+        case ExternalReference::BUILTIN_FP_CALL:
+          arg0 = vfp_register[0];
+          arg1 = vfp_register[1];
+          break;
+        case ExternalReference::BUILTIN_FP_INT_CALL:
+          arg0 = vfp_register[0];
+          arg1 = vfp_register[1];
+          arg2 = get_register(0);
+          break;
+        default:
+          break;
+        }
+      }
       // This is dodgy but it works because the C entry stubs are never moved.
       // See comment in codegen-arm.cc and bug 1242173.
       int32_t saved_lr = get_register(lr);
       intptr_t external =
           reinterpret_cast<intptr_t>(redirection->external_function());
-      if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
-        SimulatorRuntimeFPCall target =
-            reinterpret_cast<SimulatorRuntimeFPCall>(external);
+      if (fp_call) {
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-          double x, y;
-          GetFpArgs(&x, &y);
-          PrintF("Call to host function at %p with args %f, %f",
-                 FUNCTION_ADDR(target), x, y);
+          SimulatorRuntimeFPCall target =
+              reinterpret_cast<SimulatorRuntimeFPCall>(external);
+          double dval0, dval1;
+          int32_t ival;
+          switch (redirection->type()) {
+          case ExternalReference::BUILTIN_FP_FP_CALL:
+          case ExternalReference::BUILTIN_COMPARE_CALL:
+            GetFpArgs(&dval0, &dval1);
+            PrintF("Call to host function at %p with args %f, %f",
+                FUNCTION_ADDR(target), dval0, dval1);
+            break;
+          case ExternalReference::BUILTIN_FP_CALL:
+            GetFpArgs(&dval0);
+            PrintF("Call to host function at %p with arg %f",
+                FUNCTION_ADDR(target), dval0);
+            break;
+          case ExternalReference::BUILTIN_FP_INT_CALL:
+            GetFpArgs(&dval0, &ival);
+            PrintF("Call to host function at %p with args %f, %d",
+                FUNCTION_ADDR(target), dval0, ival);
+            break;
+          default:
+            UNREACHABLE();
+            break;
+          }
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
           PrintF("\n");
         }
         CHECK(stack_aligned);
-        double result = target(arg0, arg1, arg2, arg3);
-        SetFpResult(result);
+        if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+          SimulatorRuntimeFPCall target =
+              reinterpret_cast<SimulatorRuntimeFPCall>(external);
+          double result = target(arg0, arg1, arg2, arg3);
+          SetFpResult(result);
+        } else {
+          SimulatorRuntimeCall target =
+              reinterpret_cast<SimulatorRuntimeCall>(external);
+          int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+          int32_t lo_res = static_cast<int32_t>(result);
+          int32_t hi_res = static_cast<int32_t>(result >> 32);
+          if (::v8::internal::FLAG_trace_sim) {
+            PrintF("Returned %08x\n", lo_res);
+          }
+          set_register(r0, lo_res);
+          set_register(r1, hi_res);
+        }
       } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
         SimulatorRuntimeDirectApiCall target =
             reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
@@ -2209,8 +2320,15 @@
       }
 
       case ADC: {
-        Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
-        Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+        // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val + shifter_operand + GetCarry();
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry()));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
+        }
         break;
       }
 
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index a16cae5..391ef69 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -68,7 +68,9 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    USE(isolate);
     return c_limit;
   }
 
@@ -143,7 +145,7 @@
     num_d_registers = 16
   };
 
-  Simulator();
+  explicit Simulator(Isolate* isolate);
   ~Simulator();
 
   // The currently executing Simulator instance. Potentially there can be one
@@ -179,7 +181,7 @@
   void Execute();
 
   // Call on program start.
-  static void Initialize();
+  static void Initialize(Isolate* isolate);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -200,6 +202,15 @@
   // below (bad_lr, end_sim_pc).
   bool has_bad_pc() const;
 
+  // EABI variant for double arguments in use.
+  bool use_eabi_hardfloat() {
+#if USE_EABI_HARDFLOAT
+    return true;
+#else
+    return false;
+#endif
+  }
+
  private:
   enum special_values {
     // Known bad pc value to ensure that the simulator does not execute
@@ -223,13 +234,17 @@
   void SetNZFlags(int32_t val);
   void SetCFlag(bool val);
   void SetVFlag(bool val);
-  bool CarryFrom(int32_t left, int32_t right);
+  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
   bool BorrowFrom(int32_t left, int32_t right);
   bool OverflowFrom(int32_t alu_out,
                     int32_t left,
                     int32_t right,
                     bool addition);
 
+  inline int GetCarry() {
+    return c_flag_ ? 1 : 0;
+  };
+
   // Support for VFP.
   void Compute_FPSCR_Flags(double val1, double val2);
   void Copy_FPSCR_to_APSR();
@@ -306,9 +321,10 @@
       void* external_function,
       v8::internal::ExternalReference::Type type);
 
-  // For use in calls that take two double values, constructed from r0, r1, r2
-  // and r3.
+  // For use in calls that take double value arguments.
   void GetFpArgs(double* x, double* y);
+  void GetFpArgs(double* x);
+  void GetFpArgs(double* x, int32_t* y);
   void SetFpResult(const double& result);
   void TrashCallerSaveRegisters();
 
@@ -394,8 +410,9 @@
 // trouble down the line.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return Simulator::current(Isolate::Current())->StackLimit();
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
   }
 
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 47d675b..f856592 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -95,12 +95,13 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             String* name,
-                                             Register scratch0,
-                                             Register scratch1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register scratch0,
+    Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -120,7 +121,7 @@
 
   // Check that receiver is a JSObject.
   __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+  __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
   __ b(lt, miss_label);
 
   // Load properties array.
@@ -136,71 +137,21 @@
   // Restore the temporarily used register.
   __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
 
-  // Generate an unrolled loop that performs a few probes before
-  // giving up.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      receiver,
+      properties,
+      name,
+      scratch1);
+  if (result->IsFailure()) return result;
 
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kProbes; i++) {
-    // scratch0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = scratch1;
-    // Capacity is smi 2^n.
-    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
-    __ sub(index, index, Operand(1));
-    __ and_(index, index, Operand(
-        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
-
-    Register entity_name = scratch1;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    Register tmp = properties;
-    __ add(tmp, properties, Operand(index, LSL, 1));
-    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
-    ASSERT(!tmp.is(entity_name));
-    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
-    __ cmp(entity_name, tmp);
-    if (i != kProbes - 1) {
-      __ b(eq, &done);
-
-      // Stop if found the property.
-      __ cmp(entity_name, Operand(Handle<String>(name)));
-      __ b(eq, miss_label);
-
-      // Check if the entry name is not a symbol.
-      __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
-      __ ldrb(entity_name,
-              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-      __ tst(entity_name, Operand(kIsSymbolMask));
-      __ b(eq, miss_label);
-
-      // Restore the properties.
-      __ ldr(properties,
-             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-    } else {
-      // Give up probing if still not found the undefined value.
-      __ b(ne, miss_label);
-    }
-  }
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  return result;
 }
 
 
@@ -238,8 +189,7 @@
   ASSERT(!extra2.is(no_reg));
 
   // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Get the map of the receiver and compute the hash.
   __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
@@ -331,8 +281,7 @@
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss_label);
+  __ JumpIfSmi(receiver, miss_label);
 
   // Check that the object is a JS array.
   __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -354,8 +303,7 @@
                                 Label* smi,
                                 Label* non_string_object) {
   // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, smi);
+  __ JumpIfSmi(receiver, smi);
 
   // Check that the object is a string.
   __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -430,8 +378,7 @@
   Label exit;
 
   // Check that the receiver isn't a smi.
-  __ tst(receiver_reg, Operand(kSmiTagMask));
-  __ b(eq, miss_label);
+  __ JumpIfSmi(receiver_reg, miss_label);
 
   // Check that the map of the receiver hasn't changed.
   __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
@@ -480,8 +427,7 @@
     __ str(r0, FieldMemOperand(receiver_reg, offset));
 
     // Skip updating write barrier if storing a smi.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &exit);
+    __ JumpIfSmi(r0, &exit);
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
@@ -494,8 +440,7 @@
     __ str(r0, FieldMemOperand(scratch, offset));
 
     // Skip updating write barrier if storing a smi.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &exit);
+    __ JumpIfSmi(r0, &exit);
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
@@ -525,7 +470,8 @@
 static void GenerateCallFunction(MacroAssembler* masm,
                                  Object* object,
                                  const ParameterCount& arguments,
-                                 Label* miss) {
+                                 Label* miss,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r0: receiver
   //  -- r1: function to call
@@ -544,7 +490,10 @@
   }
 
   // Invoke the function.
-  __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
 }
 
 
@@ -674,10 +623,12 @@
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name)
+                          Register name,
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name) {}
+        name_(name),
+        extra_ic_state_(extra_ic_state) {}
 
   MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
@@ -805,8 +756,11 @@
                                                       arguments_.immediate());
       if (result->IsFailure()) return result;
     } else {
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+          ? CALL_AS_FUNCTION
+          : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION);
+                        JUMP_FUNCTION, call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -888,6 +842,7 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
@@ -1102,12 +1057,17 @@
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(),
-                                       miss,
-                                       reg,
-                                       name,
-                                       scratch1,
-                                       scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
@@ -1199,8 +1159,7 @@
                                      String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   Register reg =
@@ -1221,13 +1180,11 @@
                                         String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder,
-                      scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name,
+                  miss);
 
   // Return the constant value.
   __ mov(r0, Operand(Handle<Object>(value)));
@@ -1246,8 +1203,7 @@
                                                 String* name,
                                                 Label* miss) {
   // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   Register reg =
@@ -1460,8 +1416,7 @@
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, miss);
+    __ JumpIfSmi(r0, miss);
   }
 
   // Check that the maps haven't changed.
@@ -1483,8 +1438,7 @@
     // the nice side effect that multiple closures based on the same
     // function can all use this call IC. Before we load through the
     // function, we have to verify that it still is a function.
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, miss);
+    __ JumpIfSmi(r1, miss);
     __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
     __ b(ne, miss);
 
@@ -1501,8 +1455,10 @@
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj = masm()->isolate()->stub_cache()->ComputeCallMiss(
-      arguments().immediate(), kind_);
+  MaybeObject* maybe_obj =
+      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+                                               kind_,
+                                               extra_ic_state_);
   Object* obj;
   if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1527,14 +1483,13 @@
   // Get the receiver of the function from the stack into r0.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
   // Check that the receiver isn't a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  __ JumpIfSmi(r0, &miss);
 
   // Do the right check and compute the holder register.
   Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -1594,8 +1549,11 @@
     __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
     // Check that the elements are in fast mode and writable.
-    __ CheckMap(elements, r0,
-                Heap::kFixedArrayMapRootIndex, &call_builtin, true);
+    __ CheckMap(elements,
+                r0,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
 
     if (argc == 1) {  // Otherwise fall through to call the builtin.
       Label exit, with_write_barrier, attempt_to_grow_elements;
@@ -1744,7 +1702,11 @@
   __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
   // Check that the elements are in fast mode and writable.
-  __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true);
+  __ CheckMap(elements,
+              r0,
+              Heap::kFixedArrayMapRootIndex,
+              &call_builtin,
+              DONT_DO_SMI_CHECK);
 
   // Get the array's length into r4 and calculate new length.
   __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1815,7 +1777,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1899,7 +1863,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1988,8 +1954,7 @@
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &miss);
+    __ JumpIfSmi(r1, &miss);
 
     CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
                     &miss);
@@ -2006,8 +1971,7 @@
   // Check the code is a smi.
   Label slow;
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(code, Operand(kSmiTagMask));
-  __ b(ne, &slow);
+  __ JumpIfNotSmi(code, &slow);
 
   // Convert the smi code to uint16.
   __ and_(code, code, Operand(Smi::FromInt(0xffff)));
@@ -2023,7 +1987,7 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2086,7 +2050,7 @@
   __ Drop(argc + 1, eq);
   __ Ret(eq);
 
-  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
 
   Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
 
@@ -2171,7 +2135,7 @@
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2209,8 +2173,7 @@
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &miss);
+    __ JumpIfSmi(r1, &miss);
 
     CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
                     &miss);
@@ -2247,7 +2210,7 @@
   // Check if the argument is a heap number and load its exponent and
   // sign.
   __ bind(&not_smi);
-  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
   __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
 
   // Check the sign of the argument. If the argument is positive,
@@ -2273,7 +2236,7 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2299,6 +2262,7 @@
   // repatch it to global receiver.
   if (object->IsGlobalObject()) return heap()->undefined_value();
   if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
             JSObject::cast(object), holder);
   if (depth == kInvalidProtoDepth) return heap()->undefined_value();
@@ -2312,8 +2276,7 @@
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &miss_before_stack_reserved);
+  __ JumpIfSmi(r1, &miss_before_stack_reserved);
 
   __ IncrementCounter(counters->call_const(), 1, r0, r3);
   __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
@@ -2367,8 +2330,7 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &miss);
+    __ JumpIfSmi(r1, &miss);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
@@ -2401,7 +2363,7 @@
       } else {
         // Check that the object is a two-byte string or a symbol.
         __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
-        __ b(hs, &miss);
+        __ b(ge, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
@@ -2418,8 +2380,7 @@
       } else {
         Label fast;
         // Check that the object is a smi or a heap number.
-        __ tst(r1, Operand(kSmiTagMask));
-        __ b(eq, &fast);
+        __ JumpIfSmi(r1, &fast);
         __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
         __ b(ne, &miss);
         __ bind(&fast);
@@ -2460,7 +2421,10 @@
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2493,7 +2457,7 @@
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), r2);
+  CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
   MaybeObject* result = compiler.Compile(masm(),
                                          object,
                                          holder,
@@ -2513,7 +2477,7 @@
   // Restore receiver.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2571,15 +2535,19 @@
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
     // TODO(kasperl): For now, we always call indirectly through the
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION);
+    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
-    __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+                  JUMP_FUNCTION, call_kind);
   }
 
   // Handle call cache miss.
@@ -2632,8 +2600,7 @@
   Label miss;
 
   // Check that the object isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  __ JumpIfSmi(r1, &miss);
 
   // Check that the map of the object hasn't changed.
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2680,8 +2647,7 @@
   Label miss;
 
   // Check that the object isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  __ JumpIfSmi(r1, &miss);
 
   // Check that the map of the object hasn't changed.
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2772,8 +2738,7 @@
   Label miss;
 
   // Check that receiver is not a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  __ JumpIfSmi(r0, &miss);
 
   // Check the maps of the full prototype chain.
   CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
@@ -2917,8 +2882,7 @@
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &miss);
+    __ JumpIfSmi(r0, &miss);
   }
 
   // Check that the map of the global has not changed.
@@ -3128,52 +3092,57 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r1,
+                 r2,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
   //  -- r1    : receiver
   // -----------------------------------
   Label miss;
+  __ JumpIfSmi(r1, &miss);
 
-  // Check that the receiver isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &miss);
-
-  // Check that the map matches.
+  int receiver_count = receiver_maps->length();
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(Handle<Map>(receiver->map())));
-  __ b(ne, &miss);
-
-  // Check that the key is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &miss);
-
-  // Get the elements array.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ AssertFastElements(r2);
-
-  // Check that the key is within bounds.
-  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
-  __ cmp(r0, Operand(r3));
-  __ b(hs, &miss);
-
-  // Load the result and make sure it's not the hole.
-  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ ldr(r4,
-         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(r4, ip);
-  __ b(eq, &miss);
-  __ mov(r0, r4);
-  __ Ret();
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ mov(ip, Operand(map));
+    __ cmp(r2, ip);
+    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+  }
 
   __ bind(&miss);
-  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
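CompileLoadMegamorphic above (and CompileStoreMegamorphic below) emit a linear map dispatch: load the receiver's map, compare it against each map in receiver_maps, and tail-jump to the matching handler IC, falling through to the generic miss builtin otherwise. The shape of that dispatch, written as an ordinary C++ sketch with hypothetical types rather than the V8 API, is roughly:

#include <stddef.h>
#include <vector>

struct Map {};              // stand-in for a map object
typedef void (*Handler)();  // stand-in for a handler IC entry point

void Dispatch(const Map* receiver_map,
              const std::vector<const Map*>& maps,
              const std::vector<Handler>& handlers,
              Handler miss) {
  for (size_t i = 0; i < maps.size(); ++i) {
    if (receiver_map == maps[i]) {  // map identity check, like cmp r2, ip
      handlers[i]();                // jump to the per-map handler
      return;
    }
  }
  miss();  // no map matched: hit the miss builtin
}
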
@@ -3215,69 +3184,27 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
   //  -- r2    : receiver
   //  -- lr    : return address
   //  -- r3    : scratch
-  //  -- r4    : scratch (elements)
   // -----------------------------------
-  Label miss;
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r2,
+                 r3,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
-  Register value_reg = r0;
-  Register key_reg = r1;
-  Register receiver_reg = r2;
-  Register scratch = r3;
-  Register elements_reg = r4;
-
-  // Check that the receiver isn't a smi.
-  __ tst(receiver_reg, Operand(kSmiTagMask));
-  __ b(eq, &miss);
-
-  // Check that the map matches.
-  __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-  __ cmp(scratch, Operand(Handle<Map>(receiver->map())));
-  __ b(ne, &miss);
-
-  // Check that the key is a smi.
-  __ tst(key_reg, Operand(kSmiTagMask));
-  __ b(ne, &miss);
-
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ ldr(elements_reg,
-         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-  __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
-  __ cmp(scratch, Operand(Handle<Map>(factory()->fixed_array_map())));
-  __ b(ne, &miss);
-
-  // Check that the key is within bounds.
-  if (receiver->IsJSArray()) {
-    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-  } else {
-    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-  }
-  // Compare smis.
-  __ cmp(key_reg, scratch);
-  __ b(hs, &miss);
-
-  __ add(scratch,
-         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ str(value_reg,
-         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ RecordWrite(scratch,
-                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
-                 receiver_reg , elements_reg);
-
-  // value_reg (r0) is preserved.
-  // Done.
-  __ Ret();
-
-  __ bind(&miss);
-  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
@@ -3285,6 +3212,38 @@
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(r2, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ mov(ip, Operand(map));
+    __ cmp(r3, ip);
+    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+  }
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+}
+
+
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
@@ -3311,8 +3270,7 @@
   // r1: constructor function
   // r7: undefined
   __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, &generic_stub_call);
+  __ JumpIfSmi(r2, &generic_stub_call);
   __ CompareObjectType(r2, r3, r4, MAP_TYPE);
   __ b(ne, &generic_stub_call);
 
@@ -3429,86 +3387,138 @@
 }
 
 
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
-  switch (array_type) {
-    case kExternalByteArray:
-    case kExternalShortArray:
-    case kExternalIntArray:
-      return true;
-
-    case kExternalUnsignedByteArray:
-    case kExternalUnsignedShortArray:
-    case kExternalUnsignedIntArray:
-      return false;
-
-    default:
-      UNREACHABLE();
-      return false;
-  }
-}
+#undef __
+#define __ ACCESS_MASM(masm)
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+    MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
   //  -- r0     : key
   //  -- r1     : receiver
   // -----------------------------------
-  Label slow, failed_allocation;
+  Label slow, miss_force_generic;
 
   Register key = r0;
   Register receiver = r1;
 
-  // Check that the object isn't a smi
-  __ JumpIfSmi(receiver, &slow);
+  __ JumpIfNotSmi(key, &miss_force_generic);
+  __ mov(r2, Operand(key, ASR, kSmiTagSize));
+  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
+  __ Ret();
+
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, r2, r3);
+
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case, call the runtime.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+static bool IsElementTypeSigned(ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+      return true;
+
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      return false;
+
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      return false;
+  }
+  return false;
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow, failed_allocation;
+
+  Register key = r0;
+  Register receiver = r1;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &slow);
-
-  // Make sure that we've got the right map.
-  __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(Handle<Map>(receiver_object->map())));
-  __ b(ne, &slow);
+  __ JumpIfNotSmi(key, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // r3: elements array
 
   // Check that the index is in range.
   __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
-  __ cmp(ip, Operand(key, ASR, kSmiTagSize));
+  __ cmp(key, ip);
   // Unsigned comparison catches both negative and too-large values.
-  __ b(lo, &slow);
+  __ b(hs, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
   // r3: base pointer of external storage
 
   // We are not untagging smi key and instead work with it
   // as if it was premultiplied by 2.
-  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
 
   Register value = r2;
-  switch (array_type) {
-    case kExternalByteArray:
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
       __ ldrsb(value, MemOperand(r3, key, LSR, 1));
       break;
-    case kExternalPixelArray:
-    case kExternalUnsignedByteArray:
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
       __ ldrb(value, MemOperand(r3, key, LSR, 1));
       break;
-    case kExternalShortArray:
+    case EXTERNAL_SHORT_ELEMENTS:
       __ ldrsh(value, MemOperand(r3, key, LSL, 0));
       break;
-    case kExternalUnsignedShortArray:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
       __ ldrh(value, MemOperand(r3, key, LSL, 0));
       break;
-    case kExternalIntArray:
-    case kExternalUnsignedIntArray:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       __ ldr(value, MemOperand(r3, key, LSL, 1));
       break;
-    case kExternalFloatArray:
+    case EXTERNAL_FLOAT_ELEMENTS:
       if (CpuFeatures::IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);
         __ add(r2, r3, Operand(key, LSL, 1));
@@ -3517,18 +3527,36 @@
         __ ldr(value, MemOperand(r3, key, LSL, 1));
       }
       break;
-    default:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      if (CpuFeatures::IsSupported(VFP3)) {
+        CpuFeatures::Scope scope(VFP3);
+        __ add(r2, r3, Operand(key, LSL, 2));
+        __ vldr(d0, r2, 0);
+      } else {
+        __ add(r4, r3, Operand(key, LSL, 2));
+        // r4: pointer to the beginning of the double we want to load.
+        __ ldr(r2, MemOperand(r4, 0));
+        __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
+      }
+      break;
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
       break;
   }
 
   // For integer array types:
   // r2: value
-  // For floating-point array type
+  // For float array type:
   // s0: value (if VFP3 is supported)
   // r2: value (if VFP3 is not supported)
+  // For double array type:
+  // d0: value (if VFP3 is supported)
+  // r2/r3: value (if VFP3 is not supported)
 
-  if (array_type == kExternalIntArray) {
+  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
     // For the Int and UnsignedInt array types, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
@@ -3556,10 +3584,23 @@
       __ vstr(d0, r3, HeapNumber::kValueOffset);
       __ Ret();
     } else {
-      WriteInt32ToHeapNumberStub stub(value, r0, r3);
-      __ TailCallStub(&stub);
+      Register dst1 = r1;
+      Register dst2 = r3;
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm,
+                                              value,
+                                              dest,
+                                              d0,
+                                              dst1,
+                                              dst2,
+                                              r9,
+                                              s0);
+      __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+      __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+      __ Ret();
     }
-  } else if (array_type == kExternalUnsignedIntArray) {
+  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
     // The test is different for unsigned int values. Since we need
     // the value to be in the range of a positive smi, we can't
     // handle either of the top two bits being set in the value.
@@ -3602,12 +3643,12 @@
 
       __ bind(&box_int_0);
       // Integer does not have leading zeros.
-      GenerateUInt2Double(masm(), hiword, loword, r4, 0);
+      GenerateUInt2Double(masm, hiword, loword, r4, 0);
       __ b(&done);
 
       __ bind(&box_int_1);
       // Integer has one leading zero.
-      GenerateUInt2Double(masm(), hiword, loword, r4, 1);
+      GenerateUInt2Double(masm, hiword, loword, r4, 1);
 
 
       __ bind(&done);
@@ -3624,7 +3665,7 @@
       __ mov(r0, r4);
       __ Ret();
     }
-  } else if (array_type == kExternalFloatArray) {
+  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
     if (CpuFeatures::IsSupported(VFP3)) {
@@ -3694,6 +3735,31 @@
       __ mov(r0, r3);
       __ Ret();
     }
+  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+      __ sub(r1, r2, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+
+      __ mov(r0, r2);
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+
+      __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
+      __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
+      __ mov(r0, r4);
+      __ Ret();
+    }
 
   } else {
     // Tag integer as smi and return it.
@@ -3704,7 +3770,7 @@
   // Slow case, key and receiver still in r0 and r1.
   __ bind(&slow);
   __ IncrementCounter(
-      masm()->isolate()->counters()->keyed_load_external_array_slow(),
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
       1, r2, r3);
 
   // ---------- S t a t e --------------
@@ -3717,21 +3783,23 @@
 
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
-  return GetCode(flags);
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
   // ---------- S t a t e --------------
   //  -- r0     : value
   //  -- r1     : key
   //  -- r2     : receiver
   //  -- lr     : return address
   // -----------------------------------
-  Label slow, check_heap_number;
+  Label slow, check_heap_number, miss_force_generic;
 
   // Register usage.
   Register value = r0;
@@ -3739,31 +3807,24 @@
   Register receiver = r2;
   // r3 mostly holds the elements array or the destination external array.
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
 
-  // Make sure that we've got the right map.
-  __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Handle<Map>(receiver_object->map())));
-  __ b(ne, &slow);
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &slow);
-
   // Check that the index is in range
-  __ SmiUntag(r4, key);
   __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
-  __ cmp(r4, ip);
+  __ cmp(key, ip);
   // Unsigned comparison catches both negative and too-large values.
-  __ b(hs, &slow);
+  __ b(hs, &miss_force_generic);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // r3: external array.
-  // r4: key (integer).
-  if (array_type == kExternalPixelArray) {
+  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Double to pixel conversion is only implemented in the runtime for now.
     __ JumpIfNotSmi(value, &slow);
   } else {
@@ -3773,31 +3834,55 @@
   __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
 
   // r3: base pointer of external storage.
-  // r4: key (integer).
   // r5: value (integer).
-  switch (array_type) {
-    case kExternalPixelArray:
+  switch (elements_kind) {
+    case EXTERNAL_PIXEL_ELEMENTS:
       // Clamp the value to [0..255].
       __ Usat(r5, 8, Operand(r5));
-      __ strb(r5, MemOperand(r3, r4, LSL, 0));
+      __ strb(r5, MemOperand(r3, key, LSR, 1));
       break;
-    case kExternalByteArray:
-    case kExternalUnsignedByteArray:
-      __ strb(r5, MemOperand(r3, r4, LSL, 0));
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      __ strb(r5, MemOperand(r3, key, LSR, 1));
       break;
-    case kExternalShortArray:
-    case kExternalUnsignedShortArray:
-      __ strh(r5, MemOperand(r3, r4, LSL, 1));
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ strh(r5, MemOperand(r3, key, LSL, 0));
       break;
-    case kExternalIntArray:
-    case kExternalUnsignedIntArray:
-      __ str(r5, MemOperand(r3, r4, LSL, 2));
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      __ str(r5, MemOperand(r3, key, LSL, 1));
       break;
-    case kExternalFloatArray:
+    case EXTERNAL_FLOAT_ELEMENTS:
       // Perform int-to-float conversion and store to memory.
-      StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
+      __ SmiUntag(r4, key);
+      StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
       break;
-    default:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      __ add(r3, r3, Operand(key, LSL, 2));
+      // r3: effective address of the double element
+      FloatingPointHelper::Destination destination;
+      if (CpuFeatures::IsSupported(VFP3)) {
+        destination = FloatingPointHelper::kVFPRegisters;
+      } else {
+        destination = FloatingPointHelper::kCoreRegisters;
+      }
+      FloatingPointHelper::ConvertIntToDouble(
+          masm, r5, destination,
+          d0, r6, r7,  // These are: double_dst, dst1, dst2.
+          r4, s2);  // These are: scratch2, single_scratch.
+      if (destination == FloatingPointHelper::kVFPRegisters) {
+        CpuFeatures::Scope scope(VFP3);
+        __ vstr(d0, r3, 0);
+      } else {
+        __ str(r6, MemOperand(r3, 0));
+        __ str(r7, MemOperand(r3, Register::kSizeInBytes));
+      }
+      break;
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
       break;
   }
@@ -3805,9 +3890,8 @@
   // Entry registers are intact, r0 holds the value which is the return value.
   __ Ret();
 
-  if (array_type != kExternalPixelArray) {
+  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // r3: external array.
-    // r4: index (integer).
     __ bind(&check_heap_number);
     __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
     __ b(ne, &slow);
@@ -3815,7 +3899,6 @@
     __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
 
     // r3: base pointer of external storage.
-    // r4: key (integer).
 
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
@@ -3823,51 +3906,46 @@
     if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
 
-      if (array_type == kExternalFloatArray) {
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
         // vldr requires offset to be a multiple of 4 so we can not
         // include -kHeapObjectTag into it.
         __ sub(r5, r0, Operand(kHeapObjectTag));
         __ vldr(d0, r5, HeapNumber::kValueOffset);
-        __ add(r5, r3, Operand(r4, LSL, 2));
+        __ add(r5, r3, Operand(key, LSL, 1));
         __ vcvt_f32_f64(s0, d0);
         __ vstr(s0, r5, 0);
+      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+        __ sub(r5, r0, Operand(kHeapObjectTag));
+        __ vldr(d0, r5, HeapNumber::kValueOffset);
+        __ add(r5, r3, Operand(key, LSL, 2));
+        __ vstr(d0, r5, 0);
       } else {
-        // Need to perform float-to-int conversion.
-        // Test for NaN or infinity (both give zero).
-        __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
-
         // Hoisted load.  vldr requires offset to be a multiple of 4 so we can
         // not include -kHeapObjectTag into it.
         __ sub(r5, value, Operand(kHeapObjectTag));
         __ vldr(d0, r5, HeapNumber::kValueOffset);
+        __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
 
-        __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-        // NaNs and Infinities have all-one exponents so they sign extend to -1.
-        __ cmp(r6, Operand(-1));
-        __ mov(r5, Operand(0), LeaveCC, eq);
-
-        // Not infinity or NaN simply convert to int.
-        if (IsElementTypeSigned(array_type)) {
-          __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
-        } else {
-          __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
-        }
-        __ vmov(r5, s0, ne);
-
-        switch (array_type) {
-          case kExternalByteArray:
-          case kExternalUnsignedByteArray:
-            __ strb(r5, MemOperand(r3, r4, LSL, 0));
+        switch (elements_kind) {
+          case EXTERNAL_BYTE_ELEMENTS:
+          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+            __ strb(r5, MemOperand(r3, key, LSR, 1));
             break;
-          case kExternalShortArray:
-          case kExternalUnsignedShortArray:
-            __ strh(r5, MemOperand(r3, r4, LSL, 1));
+          case EXTERNAL_SHORT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+            __ strh(r5, MemOperand(r3, key, LSL, 0));
             break;
-          case kExternalIntArray:
-          case kExternalUnsignedIntArray:
-            __ str(r5, MemOperand(r3, r4, LSL, 2));
+          case EXTERNAL_INT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+            __ str(r5, MemOperand(r3, key, LSL, 1));
             break;
-          default:
+          case EXTERNAL_PIXEL_ELEMENTS:
+          case EXTERNAL_FLOAT_ELEMENTS:
+          case EXTERNAL_DOUBLE_ELEMENTS:
+          case FAST_ELEMENTS:
+          case FAST_DOUBLE_ELEMENTS:
+          case DICTIONARY_ELEMENTS:
+          case NON_STRICT_ARGUMENTS_ELEMENTS:
             UNREACHABLE();
             break;
         }
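Note on the conversion above: the VFP path now delegates the float-to-int step
to EmitECMATruncate instead of open-coding the NaN/Infinity test. In scalar
terms that helper behaves like ECMA-262 ToInt32: NaN and the infinities become
0, everything else is truncated toward zero and wrapped modulo 2^32. A hedged
C++ sketch of that semantics (illustrative only, not the assembler helper):

    #include <cmath>
    #include <cstdint>

    // ToInt32-style truncation: NaN and +/-Infinity map to 0; finite values
    // are truncated toward zero and reduced modulo 2^32, then reinterpreted
    // as a signed 32-bit value.
    static int32_t ECMAToInt32(double value) {
      if (!std::isfinite(value)) return 0;
      double t = std::trunc(value);           // truncate toward zero
      double m = std::fmod(t, 4294967296.0);  // reduce modulo 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }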
@@ -3881,7 +3959,7 @@
       __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
       __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
 
-      if (array_type == kExternalFloatArray) {
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
         Label done, nan_or_infinity_or_zero;
         static const int kMantissaInHiWordShift =
             kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
@@ -3921,7 +3999,7 @@
         __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
 
         __ bind(&done);
-        __ str(r5, MemOperand(r3, r4, LSL, 2));
+        __ str(r5, MemOperand(r3, key, LSL, 1));
         // Entry registers are intact, r0 holds the value which is the return
         // value.
         __ Ret();
@@ -3933,8 +4011,14 @@
         __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
         __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
         __ b(&done);
+      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+        __ add(r7, r3, Operand(key, LSL, 2));
+        // r7: effective address of destination element.
+        __ str(r6, MemOperand(r7, 0));
+        __ str(r5, MemOperand(r7, Register::kSizeInBytes));
+        __ Ret();
       } else {
-        bool is_signed_type = IsElementTypeSigned(array_type);
+        bool is_signed_type = IsElementTypeSigned(elements_kind);
         int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
         int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
 
@@ -3981,20 +4065,26 @@
         __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
 
         __ bind(&done);
-        switch (array_type) {
-          case kExternalByteArray:
-          case kExternalUnsignedByteArray:
-            __ strb(r5, MemOperand(r3, r4, LSL, 0));
+        switch (elements_kind) {
+          case EXTERNAL_BYTE_ELEMENTS:
+          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+            __ strb(r5, MemOperand(r3, key, LSR, 1));
             break;
-          case kExternalShortArray:
-          case kExternalUnsignedShortArray:
-            __ strh(r5, MemOperand(r3, r4, LSL, 1));
+          case EXTERNAL_SHORT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+            __ strh(r5, MemOperand(r3, key, LSL, 0));
             break;
-          case kExternalIntArray:
-          case kExternalUnsignedIntArray:
-            __ str(r5, MemOperand(r3, r4, LSL, 2));
+          case EXTERNAL_INT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+            __ str(r5, MemOperand(r3, key, LSL, 1));
             break;
-          default:
+          case EXTERNAL_PIXEL_ELEMENTS:
+          case EXTERNAL_FLOAT_ELEMENTS:
+          case EXTERNAL_DOUBLE_ELEMENTS:
+          case FAST_ELEMENTS:
+          case FAST_DOUBLE_ELEMENTS:
+          case DICTIONARY_ELEMENTS:
+          case NON_STRICT_ARGUMENTS_ELEMENTS:
             UNREACHABLE();
             break;
         }
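The shift amounts in both switches above changed because the key now arrives
as a smi (the index shifted left by one with a zero tag bit), so the
scaled-register operand folds the untag into the element scaling: LSR 1 for
byte elements, LSL 0 for halfwords, LSL 1 for words, LSL 2 for doubles. A
minimal C++ sketch of the equivalent arithmetic, assuming 32-bit smis with
kSmiTag == 0 and kSmiTagSize == 1:

    #include <cstdint>

    // smi_key == index << 1, so untag and rescale in one expression.
    static inline int32_t ByteOffsetFromSmiKey(int32_t smi_key,
                                               int element_size_log2) {
      // element_size_log2: 0 -> LSR 1, 1 -> LSL 0, 2 -> LSL 1, 3 -> LSL 2.
      return (smi_key >> 1) << element_size_log2;
    }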
@@ -4002,28 +4092,334 @@
     }
   }
 
-  // Slow case: call runtime.
+  // Slow case, key and receiver still in r0 and r1.
   __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, r2, r3);
 
-  // Entry registers are intact.
   // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
   //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case, call the runtime.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
   // -----------------------------------
 
-  // Push receiver, key and value for runtime call.
-  __ Push(r2, r1, r0);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
 
-  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
-  __ mov(r0, Operand(Smi::FromInt(
-      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
-  __ Push(r1, r0);
 
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss_force_generic;
 
-  return GetCode(flags);
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(r0, &miss_force_generic);
+
+  // Get the elements array.
+  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ AssertFastElements(r2);
+
+  // Check that the key is within bounds.
+  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ cmp(r0, Operand(r3));
+  __ b(hs, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ ldr(r4,
+         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r4, ip);
+  __ b(eq, &miss_force_generic);
+  __ mov(r0, r4);
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  Register key_reg = r0;
+  Register receiver_reg = r1;
+  Register elements_reg = r2;
+  Register heap_number_reg = r2;
+  Register indexed_double_offset = r3;
+  Register scratch = r4;
+  Register scratch2 = r5;
+  Register scratch3 = r6;
+  Register heap_number_map = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  __ cmp(key_reg, Operand(scratch));
+  __ b(hs, &miss_force_generic);
+
+  // Load the upper word of the double in the fixed array and test for NaN.
+  __ add(indexed_double_offset, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  __ b(&miss_force_generic, eq);
+
+  // Non-NaN. Allocate a new heap number and copy the double value into it.
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+                        heap_number_map, &slow_allocate_heapnumber);
+
+  // Don't need to reload the upper 32 bits of the double; it's already in
+  // scratch.
+  __ str(scratch, FieldMemOperand(heap_number_reg,
+                                  HeapNumber::kExponentOffset));
+  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
+                                  FixedArray::kHeaderSize));
+  __ str(scratch, FieldMemOperand(heap_number_reg,
+                                  HeapNumber::kMantissaOffset));
+
+  __ mov(r0, heap_number_reg);
+  __ Ret();
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label miss_force_generic;
+
+  Register value_reg = r0;
+  Register key_reg = r1;
+  Register receiver_reg = r2;
+  Register scratch = r3;
+  Register elements_reg = r4;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis.
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss_force_generic);
+
+  __ add(scratch,
+         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ str(value_reg,
+         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ RecordWrite(scratch,
+                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+                 receiver_reg, elements_reg);
+
+  // value_reg (r0) is preserved.
+  // Done.
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+    MacroAssembler* masm,
+    bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  //  -- r4    : scratch
+  //  -- r5    : scratch
+  // -----------------------------------
+  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+
+  Register value_reg = r0;
+  Register key_reg = r1;
+  Register receiver_reg = r2;
+  Register scratch = r3;
+  Register elements_reg = r4;
+  Register mantissa_reg = r5;
+  Register exponent_reg = r6;
+  Register scratch4 = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ ldr(scratch,
+           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis; an unsigned compare catches both negative and
+  // out-of-bounds indexes.
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss_force_generic);
+
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have a value greater (signed) than
+  // 0x7ff00000 in the exponent word.
+  __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ cmp(exponent_reg, scratch);
+  __ b(ge, &maybe_nan);
+
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  __ add(scratch, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ str(exponent_reg, FieldMemOperand(scratch, offset));
+  __ Ret();
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN; otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  __ b(gt, &is_nan);
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  __ cmp(mantissa_reg, Operand(0));
+  __ b(eq, &have_double_value);
+  __ bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
+  __ add(scratch, elements_reg,
+         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ add(scratch, scratch,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch is now the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(VFP3)) {
+    destination = FloatingPointHelper::kVFPRegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+
+  Register untagged_value = receiver_reg;
+  __ SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm,
+      untagged_value,
+      destination,
+      d0,
+      mantissa_reg,
+      exponent_reg,
+      scratch4,
+      s2);
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vstr(d0, scratch, 0);
+  } else {
+    __ str(mantissa_reg, MemOperand(scratch, 0));
+    __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+  }
+  __ Ret();
+
+  // Handle store cache miss, replacing the ic with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
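GenerateStoreFastDoubleElement above never writes an arbitrary NaN payload
into a FixedDoubleArray: any NaN is replaced by the canonical not-the-hole NaN
so the reserved hole bit pattern cannot be stored by accident. A standalone
C++ sketch of that policy (the helper name is illustrative, not patch code):

    #include <cmath>

    // Replace every NaN with one canonical bit pattern before it reaches the
    // backing store; infinities and ordinary doubles pass through unchanged.
    static double CanonicalizeForDoubleArray(double value,
                                             double canonical_nan) {
      return std::isnan(value) ? canonical_nan : value;
    }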
 
 
diff --git a/src/array.js b/src/array.js
index 6ed1476..98fe3ac 100644
--- a/src/array.js
+++ b/src/array.js
@@ -67,6 +67,25 @@
 }
 
 
+function SparseJoinWithSeparator(array, len, convert, separator) {
+  var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+  var totalLength = 0;
+  var elements = new InternalArray(keys.length * 2);
+  var previousKey = -1;
+  for (var i = 0; i < keys.length; i++) {
+    var key = keys[i];
+    if (key != previousKey) {  // keys may contain duplicates.
+      var e = array[key];
+      if (!IS_STRING(e)) e = convert(e);
+      elements[i * 2] = key;
+      elements[i * 2 + 1] = e;
+      previousKey = key;
+    }
+  }
+  return %SparseJoinWithSeparator(elements, len, separator);
+}
+
+
 // Optimized for sparse arrays if separator is ''.
 function SparseJoin(array, len, convert) {
   var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
@@ -110,8 +129,12 @@
 
   // Attempt to convert the elements.
   try {
-    if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
-      return SparseJoin(array, length, convert);
+    if (UseSparseVariant(array, length, is_array)) {
+      if (separator.length == 0) {
+        return SparseJoin(array, length, convert);
+      } else {
+        return SparseJoinWithSeparator(array, length, convert, separator);
+      }
     }
 
     // Fast case for one-element arrays.
@@ -129,10 +152,8 @@
       var elements_length = 0;
       for (var i = 0; i < length; i++) {
         var e = array[i];
-        if (!IS_UNDEFINED(e)) {
-          if (!IS_STRING(e)) e = convert(e);
-          elements[elements_length++] = e;
-        }
+        if (!IS_STRING(e)) e = convert(e);
+        elements[elements_length++] = e;
       }
       elements.length = elements_length;
       var result = %_FastAsciiArrayJoin(elements, '');
@@ -151,11 +172,12 @@
     } else {
       for (var i = 0; i < length; i++) {
         var e = array[i];
-        if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
-        else {
-          if (!IS_STRING(e)) e = convert(e);
-          elements[i] = e;
+        if (IS_NUMBER(e)) {
+          e = %_NumberToString(e);
+        } else if (!IS_STRING(e)) {
+          e = convert(e);
         }
+        elements[i] = e;
       }
     }
     var result = %_FastAsciiArrayJoin(elements, separator);
@@ -186,7 +208,7 @@
     // Call ToString if toLocaleString is not a function.
     // See issue 877615.
     var e_obj = ToObject(e);
-    if (IS_FUNCTION(e_obj.toLocaleString))
+    if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
       return ToString(e_obj.toLocaleString());
     else
       return ToString(e);
@@ -375,6 +397,11 @@
 
 
 function ArrayJoin(separator) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.join"]);
+  }
+
   if (IS_UNDEFINED(separator)) {
     separator = ',';
   } else if (!IS_STRING(separator)) {
@@ -391,6 +418,11 @@
 // Removes the last element from the array and returns it. See
 // ECMA-262, section 15.4.4.6.
 function ArrayPop() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.pop"]);
+  }
+
   var n = TO_UINT32(this.length);
   if (n == 0) {
     this.length = n;
@@ -407,6 +439,11 @@
 // Appends the arguments to the end of the array and returns the new
 // length of the array. See ECMA-262, section 15.4.4.7.
 function ArrayPush() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.push"]);
+  }
+
   var n = TO_UINT32(this.length);
   var m = %_ArgumentsLength();
   for (var i = 0; i < m; i++) {
@@ -418,6 +455,11 @@
 
 
 function ArrayConcat(arg1) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.concat"]);
+  }
+
   var arg_count = %_ArgumentsLength();
   var arrays = new InternalArray(1 + arg_count);
   arrays[0] = this;
@@ -474,6 +516,11 @@
 
 
 function ArrayReverse() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.reverse"]);
+  }
+
   var j = TO_UINT32(this.length) - 1;
 
   if (UseSparseVariant(this, j, IS_ARRAY(this))) {
@@ -505,6 +552,11 @@
 
 
 function ArrayShift() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.shift"]);
+  }
+
   var len = TO_UINT32(this.length);
 
   if (len === 0) {
@@ -526,6 +578,11 @@
 
 
 function ArrayUnshift(arg1) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.unshift"]);
+  }
+
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
 
@@ -545,6 +602,11 @@
 
 
 function ArraySlice(start, end) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.slice"]);
+  }
+
   var len = TO_UINT32(this.length);
   var start_i = TO_INTEGER(start);
   var end_i = len;
@@ -569,7 +631,9 @@
 
   if (end_i < start_i) return result;
 
-  if (IS_ARRAY(this)) {
+  if (IS_ARRAY(this) &&
+      (end_i > 1000) &&
+      (%EstimateNumberOfElements(this) < end_i)) {
     SmartSlice(this, start_i, end_i - start_i, len, result);
   } else {
     SimpleSlice(this, start_i, end_i - start_i, len, result);
@@ -582,6 +646,11 @@
 
 
 function ArraySplice(start, delete_count) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.splice"]);
+  }
+
   var num_arguments = %_ArgumentsLength();
 
   var len = TO_UINT32(this.length);
@@ -653,10 +722,15 @@
 
 
 function ArraySort(comparefn) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.sort"]);
+  }
+
   // In-place QuickSort algorithm.
   // For short (length <= 22) arrays, insertion sort is used for efficiency.
 
-  if (!IS_FUNCTION(comparefn)) {
+  if (!IS_SPEC_FUNCTION(comparefn)) {
     comparefn = function (x, y) {
       if (x === y) return 0;
       if (%_IsSmi(x) && %_IsSmi(y)) {
@@ -668,14 +742,14 @@
       else return x < y ? -1 : 1;
     };
   }
-  var global_receiver = %GetGlobalReceiver();
+  var receiver = %GetDefaultReceiver(comparefn);
 
   function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
       var element = a[i];
       for (var j = i - 1; j >= from; j--) {
         var tmp = a[j];
-        var order = %_CallFunction(global_receiver, tmp, element, comparefn);
+        var order = %_CallFunction(receiver, tmp, element, comparefn);
         if (order > 0) {
           a[j + 1] = tmp;
         } else {
@@ -697,14 +771,14 @@
     var v1 = a[to - 1];
     var middle_index = from + ((to - from) >> 1);
     var v2 = a[middle_index];
-    var c01 = %_CallFunction(global_receiver, v0, v1, comparefn);
+    var c01 = %_CallFunction(receiver, v0, v1, comparefn);
     if (c01 > 0) {
       // v1 < v0, so swap them.
       var tmp = v0;
       v0 = v1;
       v1 = tmp;
     } // v0 <= v1.
-    var c02 = %_CallFunction(global_receiver, v0, v2, comparefn);
+    var c02 = %_CallFunction(receiver, v0, v2, comparefn);
     if (c02 >= 0) {
       // v2 <= v0 <= v1.
       var tmp = v0;
@@ -713,7 +787,7 @@
       v1 = tmp;
     } else {
       // v0 <= v1 && v0 < v2
-      var c12 = %_CallFunction(global_receiver, v1, v2, comparefn);
+      var c12 = %_CallFunction(receiver, v1, v2, comparefn);
       if (c12 > 0) {
         // v0 <= v2 < v1
         var tmp = v1;
@@ -734,7 +808,7 @@
     // From i to high_start are elements that haven't been compared yet.
     partition: for (var i = low_end + 1; i < high_start; i++) {
       var element = a[i];
-      var order = %_CallFunction(global_receiver, element, pivot, comparefn);
+      var order = %_CallFunction(receiver, element, pivot, comparefn);
       if (order < 0) {
         %_SwapElements(a, i, low_end);
         low_end++;
@@ -743,7 +817,7 @@
           high_start--;
           if (high_start == i) break partition;
           var top_elem = a[high_start];
-          order = %_CallFunction(global_receiver, top_elem, pivot, comparefn);
+          order = %_CallFunction(receiver, top_elem, pivot, comparefn);
         } while (order > 0);
         %_SwapElements(a, i, high_start);
         if (order < 0) {
@@ -914,18 +988,26 @@
 // preserving the semantics, since the calls to the receiver function can add
 // or delete elements from the array.
 function ArrayFilter(f, receiver) {
-  if (!IS_FUNCTION(f)) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.filter"]);
+  }
+
+  if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
+  if (IS_NULL_OR_UNDEFINED(receiver)) {
+    receiver = %GetDefaultReceiver(f) || receiver;
+  }
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping.
-  var length = this.length;
+  var length = ToUint32(this.length);
   var result = [];
   var result_length = 0;
   for (var i = 0; i < length; i++) {
     var current = this[i];
     if (!IS_UNDEFINED(current) || i in this) {
-      if (f.call(receiver, current, i, this)) {
+      if (%_CallFunction(receiver, current, i, this, f)) {
         result[result_length++] = current;
       }
     }
@@ -935,16 +1017,24 @@
 
 
 function ArrayForEach(f, receiver) {
-  if (!IS_FUNCTION(f)) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.forEach"]);
+  }
+
+  if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
+  if (IS_NULL_OR_UNDEFINED(receiver)) {
+    receiver = %GetDefaultReceiver(f) || receiver;
+  }
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping.
   var length =  TO_UINT32(this.length);
   for (var i = 0; i < length; i++) {
     var current = this[i];
     if (!IS_UNDEFINED(current) || i in this) {
-      f.call(receiver, current, i, this);
+      %_CallFunction(receiver, current, i, this, f);
     }
   }
 }
@@ -953,16 +1043,24 @@
 // Executes the function once for each element present in the
 // array until it finds one where callback returns true.
 function ArraySome(f, receiver) {
-  if (!IS_FUNCTION(f)) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.some"]);
+  }
+
+  if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
+  if (IS_NULL_OR_UNDEFINED(receiver)) {
+    receiver = %GetDefaultReceiver(f) || receiver;
+  }
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping.
   var length = TO_UINT32(this.length);
   for (var i = 0; i < length; i++) {
     var current = this[i];
     if (!IS_UNDEFINED(current) || i in this) {
-      if (f.call(receiver, current, i, this)) return true;
+      if (%_CallFunction(receiver, current, i, this, f)) return true;
     }
   }
   return false;
@@ -970,25 +1068,41 @@
 
 
 function ArrayEvery(f, receiver) {
-  if (!IS_FUNCTION(f)) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.every"]);
+  }
+
+  if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
+  if (IS_NULL_OR_UNDEFINED(receiver)) {
+    receiver = %GetDefaultReceiver(f) || receiver;
+  }
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping.
   var length = TO_UINT32(this.length);
   for (var i = 0; i < length; i++) {
     var current = this[i];
     if (!IS_UNDEFINED(current) || i in this) {
-      if (!f.call(receiver, current, i, this)) return false;
+      if (!%_CallFunction(receiver, current, i, this, f)) return false;
     }
   }
   return true;
 }
 
 function ArrayMap(f, receiver) {
-  if (!IS_FUNCTION(f)) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.map"]);
+  }
+
+  if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
+  if (IS_NULL_OR_UNDEFINED(receiver)) {
+    receiver = %GetDefaultReceiver(f) || receiver;
+  }
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping.
   var length = TO_UINT32(this.length);
@@ -997,7 +1111,7 @@
   for (var i = 0; i < length; i++) {
     var current = this[i];
     if (!IS_UNDEFINED(current) || i in this) {
-      accumulator[i] = f.call(receiver, current, i, this);
+      accumulator[i] = %_CallFunction(receiver, current, i, this, f);
     }
   }
   %MoveArrayContents(accumulator, result);
@@ -1006,6 +1120,11 @@
 
 
 function ArrayIndexOf(element, index) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.indexOf"]);
+  }
+
   var length = TO_UINT32(this.length);
   if (length == 0) return -1;
   if (IS_UNDEFINED(index)) {
@@ -1063,6 +1182,11 @@
 
 
 function ArrayLastIndexOf(element, index) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.lastIndexOf"]);
+  }
+
   var length = TO_UINT32(this.length);
   if (length == 0) return -1;
   if (%_ArgumentsLength() < 2) {
@@ -1116,12 +1240,18 @@
 
 
 function ArrayReduce(callback, current) {
-  if (!IS_FUNCTION(callback)) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.reduce"]);
+  }
+
+  if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
+
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping.
-  var length = this.length;
+  var length = ToUint32(this.length);
   var i = 0;
 
   find_initial: if (%_ArgumentsLength() < 2) {
@@ -1135,20 +1265,26 @@
     throw MakeTypeError('reduce_no_initial', []);
   }
 
+  var receiver = %GetDefaultReceiver(callback);
   for (; i < length; i++) {
     var element = this[i];
     if (!IS_UNDEFINED(element) || i in this) {
-      current = callback.call(null, current, element, i, this);
+      current = %_CallFunction(receiver, current, element, i, this, callback);
     }
   }
   return current;
 }
 
 function ArrayReduceRight(callback, current) {
-  if (!IS_FUNCTION(callback)) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.reduceRight"]);
+  }
+
+  if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
-  var i = this.length - 1;
+  var i = ToUint32(this.length) - 1;
 
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i >= 0; i--) {
@@ -1161,10 +1297,11 @@
     throw MakeTypeError('reduce_no_initial', []);
   }
 
+  var receiver = %GetDefaultReceiver(callback);
   for (; i >= 0; i--) {
     var element = this[i];
     if (!IS_UNDEFINED(element) || i in this) {
-      current = callback.call(null, current, element, i, this);
+      current = %_CallFunction(receiver, current, element, i, this, callback);
     }
   }
   return current;
@@ -1177,12 +1314,13 @@
 
 
 // -------------------------------------------------------------------
-function SetupArray() {
-  // Setup non-enumerable constructor property on the Array.prototype
+function SetUpArray() {
+  %CheckIsBootstrapping();
+  // Set up non-enumerable constructor property on the Array.prototype
   // object.
   %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
 
-  // Setup non-enumerable functions on the Array object.
+  // Set up non-enumerable functions on the Array object.
   InstallFunctions($Array, DONT_ENUM, $Array(
     "isArray", ArrayIsArray
   ));
@@ -1200,7 +1338,7 @@
     return f;
   }
 
-  // Setup non-enumerable functions of the Array.prototype object and
+  // Set up non-enumerable functions of the Array.prototype object and
   // set their names.
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
@@ -1231,19 +1369,13 @@
   %FinishArrayPrototypeSetup($Array.prototype);
 
   // The internal Array prototype doesn't need to be fancy, since it's never
-  // exposed to user code, so no hidden prototypes or DONT_ENUM attributes
-  // are necessary.
-  // The null __proto__ ensures that we never inherit any user created
-  // getters or setters from, e.g., Object.prototype.
-  InternalArray.prototype.__proto__ = null;
-  // Adding only the functions that are actually used, and a toString.
-  InternalArray.prototype.join = getFunction("join", ArrayJoin);
-  InternalArray.prototype.pop = getFunction("pop", ArrayPop);
-  InternalArray.prototype.push = getFunction("push", ArrayPush);
-  InternalArray.prototype.toString = function() {
-    return "Internal Array, length " + this.length;
-  };
+  // exposed to user code.
+  // Adding only the functions that are actually used.
+  SetUpLockedPrototype(InternalArray, $Array(), $Array(
+    "join", getFunction("join", ArrayJoin),
+    "pop", getFunction("pop", ArrayPop),
+    "push", getFunction("push", ArrayPush)
+  ));
 }
 
-
-SetupArray();
+SetUpArray();
diff --git a/src/assembler.cc b/src/assembler.cc
index bfecc77..ad5f350 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// Copyright (c) 2011 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -69,9 +69,12 @@
 const double DoubleConstant::min_int = kMinInt;
 const double DoubleConstant::one_half = 0.5;
 const double DoubleConstant::minus_zero = -0.0;
-const double DoubleConstant::nan = OS::nan_value();
+const double DoubleConstant::uint8_max_value = 255;
+const double DoubleConstant::zero = 0.0;
+const double DoubleConstant::canonical_non_hole_nan = OS::nan_value();
+const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
 const double DoubleConstant::negative_infinity = -V8_INFINITY;
-const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
+const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 
 // -----------------------------------------------------------------------------
 // Implementation of AssemblerBase
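The new DoubleConstant entries give generated code addressable copies of both
the canonical NaN and the hole NaN. Because the hole marker is just a reserved
NaN bit pattern, a reader such as the fast-double load stub earlier in this
patch only has to compare the upper 32 bits (kHoleNanUpper32). A hedged C++
sketch of that test (the helper is illustrative):

    #include <cstdint>
    #include <cstring>

    // True if 'value' carries the reserved hole-NaN bit pattern; only the
    // upper word needs to be inspected.
    static bool IsTheHoleNan(double value, uint64_t hole_nan_int64) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // portable BitCast
      return (bits >> 32) == (hole_nan_int64 >> 32);
    }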
@@ -99,58 +102,85 @@
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfoWriter and RelocIterator
 //
+// Relocation information is written backwards in memory, from high addresses
+// towards low addresses, byte by byte.  Therefore, in the encodings listed
+// below, the first byte listed is at the highest address, and successive
+// bytes in the record are at progressively lower addresses.
+//
 // Encoding
 //
 // The most common modes are given single-byte encodings.  Also, it is
 // easy to identify the type of reloc info and skip unwanted modes in
 // an iteration.
 //
-// The encoding relies on the fact that there are less than 14
-// different relocation modes.
+// The encoding relies on the fact that there are fewer than 14
+// different non-compactly encoded relocation modes.
 //
-// embedded_object:    [6 bits pc delta] 00
+// The first byte of a relocation record has a tag in its low 2 bits:
+// Here are the record schemes, depending on the low tag and optional higher
+// tags.
 //
-// code_taget:         [6 bits pc delta] 01
+// Low tag:
+//   00: embedded_object:      [6-bit pc delta] 00
 //
-// position:           [6 bits pc delta] 10,
-//                     [7 bits signed data delta] 0
+//   01: code_target:          [6-bit pc delta] 01
 //
-// statement_position: [6 bits pc delta] 10,
-//                     [7 bits signed data delta] 1
+//   10: short_data_record:    [6-bit pc delta] 10 followed by
+//                             [6-bit data delta] [2-bit data type tag]
 //
-// any nondata mode:   00 [4 bits rmode] 11,  // rmode: 0..13 only
-//                     00 [6 bits pc delta]
+//   11: long_record           [2-bit high tag][4-bit middle_tag] 11
+//                             followed by variable data depending on type.
 //
-// pc-jump:            00 1111 11,
-//                     00 [6 bits pc delta]
+//  2-bit data type tags, used in short_data_record and data_jump long_record:
+//   code_target_with_id: 00
+//   position:            01
+//   statement_position:  10
+//   comment:             11 (not used in short_data_record)
 //
-// pc-jump:            01 1111 11,
-// (variable length)   7 - 26 bit pc delta, written in chunks of 7
-//                     bits, the lowest 7 bits written first.
+//  Long record format:
+//    4-bit middle_tag:
+//      0000 - 1100: Short record for RelocInfo::Mode middle_tag + 2
+//         (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
+//          and is between 0000 and 1100)
+//        The format is:
+//                              00 [4-bit middle_tag] 11 followed by
+//                              00 [6-bit pc delta]
 //
-// data-jump + pos:    00 1110 11,
-//                     signed intptr_t, lowest byte written first
+//      1101: not used (would allow one more relocation mode to be added)
+//      1110: long_data_record
+//        The format is:       [2-bit data_type_tag] 1110 11
+//                             signed intptr_t, lowest byte written first
+//                             (except data_type code_target_with_id, which
+//                             is followed by a signed int, not intptr_t.)
 //
-// data-jump + st.pos: 01 1110 11,
-//                     signed intptr_t, lowest byte written first
-//
-// data-jump + comm.:  10 1110 11,
-//                     signed intptr_t, lowest byte written first
-//
+//      1111: long_pc_jump
+//        The format is:
+//          pc-jump:             00 1111 11,
+//                               00 [6 bits pc delta]
+//        or
+//          pc-jump (variable length):
+//                               01 1111 11,
+//                               [7 bits data] 0
+//                                  ...
+//                               [7 bits data] 1
+//               (Bits 6..31 of pc delta, with leading zeroes
+//                dropped, and last non-zero chunk tagged with 1.)
+
+
 const int kMaxRelocModes = 14;
 
 const int kTagBits = 2;
 const int kTagMask = (1 << kTagBits) - 1;
 const int kExtraTagBits = 4;
-const int kPositionTypeTagBits = 1;
-const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
+const int kLocatableTypeTagBits = 2;
+const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
 
 const int kEmbeddedObjectTag = 0;
 const int kCodeTargetTag = 1;
-const int kPositionTag = 2;
+const int kLocatableTag = 2;
 const int kDefaultTag = 3;
 
-const int kPCJumpTag = (1 << kExtraTagBits) - 1;
+const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
 
 const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
 const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
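In code, the scheme documented above amounts to peeling a 2-bit tag off the
low end of the first record byte and treating the remaining six bits as the
pc delta (tag 3 escapes to the extra-tag records). A small C++ sketch of
decoding that first byte, mirroring the constants above rather than the real
RelocIterator:

    #include <cstdint>

    struct FirstRelocByte {
      int low_tag;   // 0: embedded object, 1: code target,
                     // 2: locatable, 3: long record
      int pc_delta;  // the 6-bit unsigned pc delta carried in the same byte
    };

    static FirstRelocByte DecodeFirstByte(uint8_t b) {
      const int kTagBits = 2;
      const int kTagMask = (1 << kTagBits) - 1;
      FirstRelocByte r;
      r.low_tag = b & kTagMask;
      r.pc_delta = b >> kTagBits;
      return r;
    }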
@@ -164,11 +194,12 @@
 const int kLastChunkTag = 1;
 
 
-const int kDataJumpTag = kPCJumpTag - 1;
+const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
 
-const int kNonstatementPositionTag = 0;
-const int kStatementPositionTag = 1;
-const int kCommentTag = 2;
+const int kCodeWithIdTag = 0;
+const int kNonstatementPositionTag = 1;
+const int kStatementPositionTag = 2;
+const int kCommentTag = 3;
 
 
 uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@@ -176,7 +207,7 @@
   // Otherwise write a variable length PC jump for the bits that do
   // not fit in the kSmallPCDeltaBits bits.
   if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
-  WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
+  WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
   uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
   ASSERT(pc_jump > 0);
   // Write kChunkBits size chunks of the pc_jump.
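WriteVariableLengthPCJump stores only bits 6..31 of the delta, split into
7-bit chunks whose low bit is a continuation tag, exactly as the long_pc_jump
format above describes. A hedged C++ sketch of the matching reader, with the
chunks given in the order the iterator consumes them (not the real
AdvanceReadVariableLengthPCJump):

    #include <cstdint>

    // Reassemble bits 6..31 of a pc delta from 7-bit chunks; intermediate
    // chunks carry 0 in the low bit, the final chunk is tagged with 1.
    static uint32_t DecodePCJumpChunks(const uint8_t* chunks) {
      uint32_t pc_jump = 0;
      for (int i = 0; ; i++) {
        uint8_t part = chunks[i];
        pc_jump |= static_cast<uint32_t>(part >> 1) << (i * 7);
        if (part & 1) break;
      }
      return pc_jump;  // caller shifts this left by kSmallPCDeltaBits (6)
    }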
@@ -199,7 +230,7 @@
 
 
 void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
-  *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
+  *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
 }
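WriteTaggedData packs a signed 6-bit delta above a 2-bit type tag, and the
readers (ReadTaggedId and ReadTaggedPosition further down) recover the delta
with an arithmetic right shift. A minimal C++ sketch of that round trip
(illustrative helpers, not patch code):

    #include <cstdint>

    // Pack: the caller guarantees -32 <= delta < 32 and 0 <= type_tag < 4.
    static uint8_t PackShortData(int delta, int type_tag) {
      return static_cast<uint8_t>((delta << 2) | type_tag);
    }

    // Unpack the signed delta; casting to int8_t first makes the right shift
    // arithmetic, so the sign of the 6-bit field survives.
    static int UnpackShortDataDelta(uint8_t b) {
      return static_cast<int8_t>(b) >> 2;
    }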
 
 
@@ -218,11 +249,20 @@
 }
 
 
+void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
+  WriteExtraTag(kDataJumpExtraTag, top_tag);
+  for (int i = 0; i < kIntSize; i++) {
+    *--pos_ = static_cast<byte>(data_delta);
+    // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+    data_delta = data_delta >> kBitsPerByte;
+  }
+}
+
 void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
-  WriteExtraTag(kDataJumpTag, top_tag);
+  WriteExtraTag(kDataJumpExtraTag, top_tag);
   for (int i = 0; i < kIntptrSize; i++) {
     *--pos_ = static_cast<byte>(data_delta);
-  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+    // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
     data_delta = data_delta >> kBitsPerByte;
   }
 }
@@ -233,7 +273,8 @@
   byte* begin_pos = pos_;
 #endif
   ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
+  ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
+         kMaxRelocModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
   RelocInfo::Mode rmode = rinfo->rmode();
@@ -244,35 +285,48 @@
   } else if (rmode == RelocInfo::CODE_TARGET) {
     WriteTaggedPC(pc_delta, kCodeTargetTag);
     ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
-  } else if (RelocInfo::IsPosition(rmode)) {
-    // Use signed delta-encoding for data.
-    intptr_t data_delta = rinfo->data() - last_data_;
-    int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
-                                                    : kStatementPositionTag;
-    // Check if data is small enough to fit in a tagged byte.
-    // We cannot use is_intn because data_delta is not an int32_t.
-    if (data_delta >= -(1 << (kSmallDataBits-1)) &&
-        data_delta < 1 << (kSmallDataBits-1)) {
-      WriteTaggedPC(pc_delta, kPositionTag);
-      WriteTaggedData(data_delta, pos_type_tag);
-      last_data_ = rinfo->data();
+  } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+    // Use signed delta-encoding for id.
+    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+    int id_delta = static_cast<int>(rinfo->data()) - last_id_;
+    // Check if delta is small enough to fit in a tagged byte.
+    if (is_intn(id_delta, kSmallDataBits)) {
+      WriteTaggedPC(pc_delta, kLocatableTag);
+      WriteTaggedData(id_delta, kCodeWithIdTag);
     } else {
       // Otherwise, use costly encoding.
-      WriteExtraTaggedPC(pc_delta, kPCJumpTag);
-      WriteExtraTaggedData(data_delta, pos_type_tag);
-      last_data_ = rinfo->data();
+      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+      WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
     }
+    last_id_ = static_cast<int>(rinfo->data());
+  } else if (RelocInfo::IsPosition(rmode)) {
+    // Use signed delta-encoding for position.
+    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+    int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
+    int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
+                                                      : kStatementPositionTag;
+    // Check if delta is small enough to fit in a tagged byte.
+    if (is_intn(pos_delta, kSmallDataBits)) {
+      WriteTaggedPC(pc_delta, kLocatableTag);
+      WriteTaggedData(pos_delta, pos_type_tag);
+    } else {
+      // Otherwise, use costly encoding.
+      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+      WriteExtraTaggedIntData(pos_delta, pos_type_tag);
+    }
+    last_position_ = static_cast<int>(rinfo->data());
   } else if (RelocInfo::IsComment(rmode)) {
     // Comments are normally not generated, so we use the costly encoding.
-    WriteExtraTaggedPC(pc_delta, kPCJumpTag);
-    WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
-    last_data_ = rinfo->data();
+    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+    WriteExtraTaggedData(rinfo->data(), kCommentTag);
     ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
   } else {
+    ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
+    int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
     // For all other modes we simply use the mode as the extra tag.
     // None of these modes need a data component.
-    ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
-    WriteExtraTaggedPC(pc_delta, rmode);
+    ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
+    WriteExtraTaggedPC(pc_delta, saved_mode);
   }
   last_pc_ = rinfo->pc();
 #ifdef DEBUG
@@ -306,12 +360,32 @@
 }
 
 
+void RelocIterator::AdvanceReadId() {
+  int x = 0;
+  for (int i = 0; i < kIntSize; i++) {
+    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+  }
+  last_id_ += x;
+  rinfo_.data_ = last_id_;
+}
+
+
+void RelocIterator::AdvanceReadPosition() {
+  int x = 0;
+  for (int i = 0; i < kIntSize; i++) {
+    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+  }
+  last_position_ += x;
+  rinfo_.data_ = last_position_;
+}
+
+
 void RelocIterator::AdvanceReadData() {
   intptr_t x = 0;
   for (int i = 0; i < kIntptrSize; i++) {
     x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
   }
-  rinfo_.data_ += x;
+  rinfo_.data_ = x;
 }
 
 
@@ -331,27 +405,33 @@
 }
 
 
-inline int RelocIterator::GetPositionTypeTag() {
-  return *pos_ & ((1 << kPositionTypeTagBits) - 1);
+inline int RelocIterator::GetLocatableTypeTag() {
+  return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
 }
 
 
-inline void RelocIterator::ReadTaggedData() {
+inline void RelocIterator::ReadTaggedId() {
   int8_t signed_b = *pos_;
   // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
-  rinfo_.data_ += signed_b >> kPositionTypeTagBits;
+  last_id_ += signed_b >> kLocatableTypeTagBits;
+  rinfo_.data_ = last_id_;
 }
 
 
-inline RelocInfo::Mode RelocIterator::DebugInfoModeFromTag(int tag) {
-  if (tag == kStatementPositionTag) {
-    return RelocInfo::STATEMENT_POSITION;
-  } else if (tag == kNonstatementPositionTag) {
-    return RelocInfo::POSITION;
-  } else {
-    ASSERT(tag == kCommentTag);
-    return RelocInfo::COMMENT;
-  }
+inline void RelocIterator::ReadTaggedPosition() {
+  int8_t signed_b = *pos_;
+  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+  last_position_ += signed_b >> kLocatableTypeTagBits;
+  rinfo_.data_ = last_position_;
+}
+
+
+static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
+  ASSERT(tag == kNonstatementPositionTag ||
+         tag == kStatementPositionTag);
+  return (tag == kNonstatementPositionTag) ?
+         RelocInfo::POSITION :
+         RelocInfo::STATEMENT_POSITION;
 }
 
 
@@ -370,37 +450,64 @@
     } else if (tag == kCodeTargetTag) {
       ReadTaggedPC();
       if (SetMode(RelocInfo::CODE_TARGET)) return;
-    } else if (tag == kPositionTag) {
+    } else if (tag == kLocatableTag) {
       ReadTaggedPC();
       Advance();
-      // Check if we want source positions.
-      if (mode_mask_ & RelocInfo::kPositionMask) {
-        ReadTaggedData();
-        if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) return;
+      int locatable_tag = GetLocatableTypeTag();
+      if (locatable_tag == kCodeWithIdTag) {
+        if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
+          ReadTaggedId();
+          return;
+        }
+      } else {
+        // Compact encoding is never used for comments,
+        // so it must be a position.
+        ASSERT(locatable_tag == kNonstatementPositionTag ||
+               locatable_tag == kStatementPositionTag);
+        if (mode_mask_ & RelocInfo::kPositionMask) {
+          ReadTaggedPosition();
+          if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+        }
       }
     } else {
       ASSERT(tag == kDefaultTag);
       int extra_tag = GetExtraTag();
-      if (extra_tag == kPCJumpTag) {
+      if (extra_tag == kPCJumpExtraTag) {
         int top_tag = GetTopTag();
         if (top_tag == kVariableLengthPCJumpTopTag) {
           AdvanceReadVariableLengthPCJump();
         } else {
           AdvanceReadPC();
         }
-      } else if (extra_tag == kDataJumpTag) {
-        // Check if we want debug modes (the only ones with data).
-        if (mode_mask_ & RelocInfo::kDebugMask) {
-          int top_tag = GetTopTag();
-          AdvanceReadData();
-          if (SetMode(DebugInfoModeFromTag(top_tag))) return;
+      } else if (extra_tag == kDataJumpExtraTag) {
+        int locatable_tag = GetTopTag();
+        if (locatable_tag == kCodeWithIdTag) {
+          if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
+            AdvanceReadId();
+            return;
+          }
+          Advance(kIntSize);
+        } else if (locatable_tag != kCommentTag) {
+          ASSERT(locatable_tag == kNonstatementPositionTag ||
+                 locatable_tag == kStatementPositionTag);
+          if (mode_mask_ & RelocInfo::kPositionMask) {
+            AdvanceReadPosition();
+            if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+          } else {
+            Advance(kIntSize);
+          }
         } else {
-          // Otherwise, just skip over the data.
+          ASSERT(locatable_tag == kCommentTag);
+          if (SetMode(RelocInfo::COMMENT)) {
+            AdvanceReadData();
+            return;
+          }
           Advance(kIntptrSize);
         }
       } else {
         AdvanceReadPC();
-        if (SetMode(static_cast<RelocInfo::Mode>(extra_tag))) return;
+        int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
+        if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
       }
     }
   }
@@ -416,6 +523,8 @@
   end_ = code->relocation_start();
   done_ = false;
   mode_mask_ = mode_mask;
+  last_id_ = 0;
+  last_position_ = 0;
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -429,6 +538,8 @@
   end_ = pos_ - desc.reloc_size;
   done_ = false;
   mode_mask_ = mode_mask;
+  last_id_ = 0;
+  last_position_ = 0;
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -456,6 +567,8 @@
       return "debug break";
     case RelocInfo::CODE_TARGET:
       return "code target";
+    case RelocInfo::CODE_TARGET_WITH_ID:
+      return "code target with id";
     case RelocInfo::GLOBAL_PROPERTY_CELL:
       return "global property cell";
     case RelocInfo::RUNTIME_ENTRY:
@@ -502,6 +615,9 @@
     Code* code = Code::GetCodeFromTargetAddress(target_address());
     PrintF(out, " (%s)  (%p)", Code::Kind2String(code->kind()),
            target_address());
+    if (rmode_ == CODE_TARGET_WITH_ID) {
+      PrintF(out, " (id=%d)", static_cast<int>(data_));
+    }
   } else if (IsPosition(rmode_)) {
     PrintF(out, "  (%" V8_PTR_PREFIX "d)", data());
   } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
@@ -535,6 +651,7 @@
 #endif
     case CONSTRUCT_CALL:
     case CODE_TARGET_CONTEXT:
+    case CODE_TARGET_WITH_ID:
     case CODE_TARGET: {
       // convert inline target address to code object
       Address addr = target_address();
@@ -787,15 +904,33 @@
 }
 
 
+ExternalReference ExternalReference::address_of_zero() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::zero)));
+}
+
+
+ExternalReference ExternalReference::address_of_uint8_max_value() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::uint8_max_value)));
+}
+
+
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(reinterpret_cast<void*>(
       const_cast<double*>(&DoubleConstant::negative_infinity)));
 }
 
 
-ExternalReference ExternalReference::address_of_nan() {
+ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::nan)));
+      const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
+}
+
+
+ExternalReference ExternalReference::address_of_the_hole_nan() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::the_hole_nan)));
 }
 
 
@@ -899,7 +1034,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(math_sin_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_CALL));
 }
 
 
@@ -907,7 +1042,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(math_cos_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_CALL));
 }
 
 
@@ -915,7 +1050,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(math_log_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_CALL));
 }
 
 
@@ -958,7 +1093,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(power_double_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_FP_CALL));
 }
 
 
@@ -966,7 +1101,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(power_double_int),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_INT_CALL));
 }
 
 
@@ -999,17 +1134,16 @@
     default:
       UNREACHABLE();
   }
-  // Passing true as 2nd parameter indicates that they return an fp value.
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(function),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_FP_CALL));
 }
 
 
 ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(native_compare_doubles),
-                                    BUILTIN_CALL));
+                                    BUILTIN_COMPARE_CALL));
 }
 
 
diff --git a/src/assembler.h b/src/assembler.h
index 395bbd5..d58034d 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -35,6 +35,7 @@
 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_
 
+#include "allocation.h"
 #include "gdb-jit.h"
 #include "runtime.h"
 #include "token.h"
@@ -42,7 +43,7 @@
 namespace v8 {
 namespace internal {
 
-
+const unsigned kNoASTId = -1;
 // -----------------------------------------------------------------------------
 // Platform independent assembler base class.
 
@@ -66,8 +67,11 @@
   static const double min_int;
   static const double one_half;
   static const double minus_zero;
+  static const double zero;
+  static const double uint8_max_value;
   static const double negative_infinity;
-  static const double nan;
+  static const double canonical_non_hole_nan;
+  static const double the_hole_nan;
 };
 
 
@@ -79,18 +83,32 @@
 
 class Label BASE_EMBEDDED {
  public:
-  INLINE(Label())                 { Unuse(); }
-  INLINE(~Label())                { ASSERT(!is_linked()); }
+  enum Distance {
+    kNear, kFar
+  };
 
-  INLINE(void Unuse())            { pos_ = 0; }
+  INLINE(Label()) {
+    Unuse();
+    UnuseNear();
+  }
 
-  INLINE(bool is_bound() const)  { return pos_ <  0; }
-  INLINE(bool is_unused() const)  { return pos_ == 0; }
-  INLINE(bool is_linked() const)  { return pos_ >  0; }
+  INLINE(~Label()) {
+    ASSERT(!is_linked());
+    ASSERT(!is_near_linked());
+  }
+
+  INLINE(void Unuse()) { pos_ = 0; }
+  INLINE(void UnuseNear()) { near_link_pos_ = 0; }
+
+  INLINE(bool is_bound() const) { return pos_ <  0; }
+  INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
+  INLINE(bool is_linked() const) { return pos_ >  0; }
+  INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
 
   // Returns the position of bound or linked labels. Cannot be used
   // for unused labels.
   int pos() const;
+  int near_link_pos() const { return near_link_pos_ - 1; }
 
  private:
   // pos_ encodes both the binding state (via its sign)
@@ -101,13 +119,21 @@
   // pos_ >  0  linked label, pos() returns the last reference position
   int pos_;
 
+  // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
+  int near_link_pos_;
+
   void bind_to(int pos)  {
     pos_ = -pos - 1;
     ASSERT(is_bound());
   }
-  void link_to(int pos)  {
-    pos_ =  pos + 1;
-    ASSERT(is_linked());
+  void link_to(int pos, Distance distance = kFar) {
+    if (distance == kNear) {
+      near_link_pos_ = pos + 1;
+      ASSERT(is_near_linked());
+    } else {
+      pos_ = pos + 1;
+      ASSERT(is_linked());
+    }
   }
 
   friend class Assembler;
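
The Distance enum and the extra near_link_pos_ field fold the separate NearLabel class (removed in the next hunk) into Label itself: the same label can now be the target of both near (8-bit displacement) and far jumps, with the two link chains tracked independently. A standalone sketch of that state encoding, using a stripped-down stand-in rather than V8's class:

    #include <cassert>

    // Stand-in mirroring the encoding above: both fields are biased by +1 so
    // that 0 means "unused"; pos_ < 0 means bound, pos_ > 0 means far-linked,
    // and near_link_pos_ > 0 means near-linked.
    struct MiniLabel {
      int pos_ = 0;
      int near_link_pos_ = 0;
      bool is_bound() const { return pos_ < 0; }
      bool is_linked() const { return pos_ > 0; }
      bool is_near_linked() const { return near_link_pos_ > 0; }
      bool is_unused() const { return pos_ == 0 && near_link_pos_ == 0; }
      void link_far(int pos) { pos_ = pos + 1; }
      void link_near(int pos) { near_link_pos_ = pos + 1; }
      void bind(int pos) { pos_ = -pos - 1; }
    };

    int main() {
      MiniLabel label;
      assert(label.is_unused());
      label.link_near(12);   // a short forward jump references offset 12
      label.link_far(40);    // a long jump can reference the same label
      assert(label.is_near_linked() && label.is_linked());
      label.bind(64);        // binding records the (biased, negated) position
      assert(label.is_bound());
      return 0;
    }
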
@@ -118,57 +144,6 @@
 
 
 // -----------------------------------------------------------------------------
-// NearLabels are labels used for short jumps (in Intel jargon).
-// NearLabels should be used if it can be guaranteed that the jump range is
-// within -128 to +127. We already use short jumps when jumping backwards,
-// so using a NearLabel will only have performance impact if used for forward
-// jumps.
-class NearLabel BASE_EMBEDDED {
- public:
-  NearLabel() { Unuse(); }
-  ~NearLabel() { ASSERT(!is_linked()); }
-
-  void Unuse() {
-    pos_ = -1;
-    unresolved_branches_ = 0;
-#ifdef DEBUG
-    for (int i = 0; i < kMaxUnresolvedBranches; i++) {
-      unresolved_positions_[i] = -1;
-    }
-#endif
-  }
-
-  int pos() {
-    ASSERT(is_bound());
-    return pos_;
-  }
-
-  bool is_bound() { return pos_ >= 0; }
-  bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
-  bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
-
-  void bind_to(int position) {
-    ASSERT(!is_bound());
-    pos_ = position;
-  }
-
-  void link_to(int position) {
-    ASSERT(!is_bound());
-    ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
-    unresolved_positions_[unresolved_branches_++] = position;
-  }
-
- private:
-  static const int kMaxUnresolvedBranches = 8;
-  int pos_;
-  int unresolved_branches_;
-  int unresolved_positions_[kMaxUnresolvedBranches];
-
-  friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
 // Relocation information
 
 
@@ -196,7 +171,7 @@
   // where we are not sure to have enough space for patching in during
   // lazy deoptimization. This is the case if we have indirect calls for which
   // we do not normally record relocation info.
-  static const char* kFillerCommentString;
+  static const char* const kFillerCommentString;
 
   // The minimum size of a comment is equal to three bytes for the extra tagged
   // pc + the tag for the data, and kPointerSize for the actual pointer to the
@@ -211,10 +186,11 @@
 
   enum Mode {
     // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
+    CODE_TARGET,  // Code target which is not any of the more specific kinds below.
+    CODE_TARGET_WITH_ID,
     CONSTRUCT_CALL,  // code target that is a call to a JavaScript constructor.
     CODE_TARGET_CONTEXT,  // Code target used for contextual loads and stores.
     DEBUG_BREAK,  // Code target for the debugger statement.
-    CODE_TARGET,  // Code target which is not any of the above.
     EMBEDDED_OBJECT,
     GLOBAL_PROPERTY_CELL,
 
@@ -230,10 +206,12 @@
 
     // add more as needed
     // Pseudo-types
-    NUMBER_OF_MODES,  // must be no greater than 14 - see RelocInfoWriter
+    NUMBER_OF_MODES,  // There are at most 14 modes with noncompact encoding.
     NONE,  // never recorded
-    LAST_CODE_ENUM = CODE_TARGET,
-    LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
+    LAST_CODE_ENUM = DEBUG_BREAK,
+    LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
+    // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
+    LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
   };
 
 
@@ -363,7 +341,8 @@
 
   static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
   static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
-  static const int kDebugMask = kPositionMask | 1 << COMMENT;
+  static const int kDataMask =
+      (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
   static const int kApplyMask;  // Modes affected by apply. Depends on arch.
 
  private:
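
With the reordering above, CODE_TARGET becomes mode 0 and DEBUG_BREAK becomes the new LAST_CODE_ENUM, so the unchanged definition of kCodeTargetMask, (1 << (LAST_CODE_ENUM + 1)) - 1, now covers all five code-target modes, including CODE_TARGET_WITH_ID. A small standalone check of that arithmetic (only the leading modes are reproduced; the rest of the enum is omitted):

    #include <cassert>

    // Leading modes in the order established by the hunk above.
    enum Mode {
      CODE_TARGET,          // 0
      CODE_TARGET_WITH_ID,  // 1
      CONSTRUCT_CALL,       // 2
      CODE_TARGET_CONTEXT,  // 3
      DEBUG_BREAK,          // 4
      LAST_CODE_ENUM = DEBUG_BREAK
    };

    int main() {
      const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
      assert(kCodeTargetMask == 0x1f);  // bits 0..4 set
      // Every mode up to LAST_CODE_ENUM matches the code-target mask.
      for (int mode = CODE_TARGET; mode <= LAST_CODE_ENUM; ++mode) {
        assert(((1 << mode) & kCodeTargetMask) != 0);
      }
      return 0;
    }
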
@@ -374,6 +353,19 @@
   byte* pc_;
   Mode rmode_;
   intptr_t data_;
+#ifdef V8_TARGET_ARCH_MIPS
+  // Code and Embedded Object pointers in mips are stored split
+  // across two consecutive 32-bit instructions. Heap management
+  // routines expect to access these pointers indirectly. The following
+  // location provides a place for these pointers to exist naturally
+  // when accessed via the Iterator.
+  Object *reconstructed_obj_ptr_;
+  // External-reference pointers are also split across instruction-pairs
+  // in mips, but are accessed via indirect pointers. This location
+  // provides a place for that pointer to exist naturally. Its address
+  // is returned by RelocInfo::target_reference_address().
+  Address reconstructed_adr_ptr_;
+#endif  // V8_TARGET_ARCH_MIPS
   friend class RelocIterator;
 };
 
@@ -382,9 +374,14 @@
 // lower addresses.
 class RelocInfoWriter BASE_EMBEDDED {
  public:
-  RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
-  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
-                                         last_data_(0) {}
+  RelocInfoWriter() : pos_(NULL),
+                      last_pc_(NULL),
+                      last_id_(0),
+                      last_position_(0) {}
+  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos),
+                                         last_pc_(pc),
+                                         last_id_(0),
+                                         last_position_(0) {}
 
   byte* pos() const { return pos_; }
   byte* last_pc() const { return last_pc_; }
@@ -409,13 +406,15 @@
   inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
   inline void WriteTaggedPC(uint32_t pc_delta, int tag);
   inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
+  inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
   inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
   inline void WriteTaggedData(intptr_t data_delta, int tag);
   inline void WriteExtraTag(int extra_tag, int top_tag);
 
   byte* pos_;
   byte* last_pc_;
-  intptr_t last_data_;
+  int last_id_;
+  int last_position_;
   DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
 };
 
@@ -457,12 +456,13 @@
   int GetTopTag();
   void ReadTaggedPC();
   void AdvanceReadPC();
+  void AdvanceReadId();
+  void AdvanceReadPosition();
   void AdvanceReadData();
   void AdvanceReadVariableLengthPCJump();
-  int GetPositionTypeTag();
-  void ReadTaggedData();
-
-  static RelocInfo::Mode DebugInfoModeFromTag(int tag);
+  int GetLocatableTypeTag();
+  void ReadTaggedId();
+  void ReadTaggedPosition();
 
   // If the given mode is wanted, set it in rinfo_ and return true.
   // Else return false. Used for efficiently skipping unwanted modes.
@@ -475,6 +475,8 @@
   RelocInfo rinfo_;
   bool done_;
   int mode_mask_;
+  int last_id_;
+  int last_position_;
   DISALLOW_COPY_AND_ASSIGN(RelocIterator);
 };
 
@@ -503,9 +505,21 @@
     // MaybeObject* f(v8::internal::Arguments).
     BUILTIN_CALL,  // default
 
+    // Builtin that takes floating-point arguments and returns an int.
+    // int f(double, double).
+    BUILTIN_COMPARE_CALL,
+
     // Builtin call that returns floating point.
     // double f(double, double).
-    FP_RETURN_CALL,
+    BUILTIN_FP_FP_CALL,
+
+    // Builtin call that returns floating point.
+    // double f(double).
+    BUILTIN_FP_CALL,
+
+    // Builtin call that returns floating point.
+    // double f(double, int).
+    BUILTIN_FP_INT_CALL,
 
     // Direct call to API function callback.
     // Handle<Value> f(v8::Arguments&)
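
The comments in this hunk, together with the redirected builtins in the assembler.cc hunks earlier in this patch, pin down the C-level shape of each new call type. A standalone sketch of those four shapes; the bodies below are placeholders, and the V8 builtin names are only examples taken from the patch:

    #include <cmath>
    #include <cstdio>

    // Shapes matched by the redirect types added above:
    //   BUILTIN_FP_FP_CALL   double f(double, double)  e.g. power_double_double
    //   BUILTIN_FP_CALL      double f(double)          e.g. math_log_double
    //   BUILTIN_FP_INT_CALL  double f(double, int)     e.g. power_double_int
    //   BUILTIN_COMPARE_CALL int    f(double, double)  e.g. native_compare_doubles
    static double fp_fp_call(double x, double y) { return std::pow(x, y); }
    static double fp_call(double x) { return std::log(x); }
    static double fp_int_call(double x, int y) { return std::pow(x, y); }
    static int compare_call(double x, double y) {
      return x < y ? -1 : (x > y ? 1 : 0);
    }

    int main() {
      std::printf("%g %g %g %d\n", fp_fp_call(2.0, 10.0), fp_call(1.0),
                  fp_int_call(2.0, 3), compare_call(1.0, 2.0));
      return 0;
    }
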
@@ -613,8 +627,11 @@
   static ExternalReference address_of_min_int();
   static ExternalReference address_of_one_half();
   static ExternalReference address_of_minus_zero();
+  static ExternalReference address_of_zero();
+  static ExternalReference address_of_uint8_max_value();
   static ExternalReference address_of_negative_infinity();
-  static ExternalReference address_of_nan();
+  static ExternalReference address_of_canonical_non_hole_nan();
+  static ExternalReference address_of_the_hole_nan();
 
   static ExternalReference math_sin_double_function(Isolate* isolate);
   static ExternalReference math_cos_double_function(Isolate* isolate);
@@ -649,10 +666,11 @@
 
   // This lets you register a function that rewrites all external references.
   // Used by the ARM simulator to catch calls to external references.
-  static void set_redirector(ExternalReferenceRedirector* redirector) {
+  static void set_redirector(Isolate* isolate,
+                             ExternalReferenceRedirector* redirector) {
     // We can't stack them.
-    ASSERT(Isolate::Current()->external_reference_redirector() == NULL);
-    Isolate::Current()->set_external_reference_redirector(
+    ASSERT(isolate->external_reference_redirector() == NULL);
+    isolate->set_external_reference_redirector(
         reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
   }
 
@@ -819,6 +837,28 @@
 double power_double_int(double x, int y);
 double power_double_double(double x, double y);
 
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example, this can be used to
+// generate safepoint data after calls for Crankshaft.
+class CallWrapper {
+ public:
+  CallWrapper() { }
+  virtual ~CallWrapper() { }
+  // Called just before emitting a call. Argument is the size of the generated
+  // call code.
+  virtual void BeforeCall(int call_size) const = 0;
+  // Called just after emitting a call, i.e., at the return site for the call.
+  virtual void AfterCall() const = 0;
+};
+
+class NullCallWrapper : public CallWrapper {
+ public:
+  NullCallWrapper() { }
+  virtual ~NullCallWrapper() { }
+  virtual void BeforeCall(int call_size) const { }
+  virtual void AfterCall() const { }
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_ASSEMBLER_H_
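
CallWrapper and NullCallWrapper above give macro assemblers a hook immediately before and after each emitted call (the comment cites Crankshaft safepoints as the intended client). A standalone sketch of the pattern; everything other than the two interface classes (SafepointRecordingWrapper, EmitCall, the 5-byte call size) is illustrative and not taken from this patch:

    #include <cstdio>

    // Stand-ins mirroring the interface added above (simplified, outside V8).
    class CallWrapper {
     public:
      virtual ~CallWrapper() {}
      virtual void BeforeCall(int call_size) const = 0;
      virtual void AfterCall() const = 0;
    };

    class NullCallWrapper : public CallWrapper {
     public:
      virtual void BeforeCall(int) const {}
      virtual void AfterCall() const {}
    };

    // Hypothetical client: records safepoint-like data around every call.
    class SafepointRecordingWrapper : public CallWrapper {
     public:
      virtual void BeforeCall(int call_size) const {
        std::printf("reserve %d bytes before the call site\n", call_size);
      }
      virtual void AfterCall() const {
        std::printf("record safepoint at the return address\n");
      }
    };

    void EmitCall(const CallWrapper& wrapper) {
      const int kCallInstructionSize = 5;   // illustrative size only
      wrapper.BeforeCall(kCallInstructionSize);
      std::printf("...call instruction emitted...\n");
      wrapper.AfterCall();                  // i.e. at the return site
    }

    int main() {
      EmitCall(NullCallWrapper());          // default: emit nothing extra
      EmitCall(SafepointRecordingWrapper());
      return 0;
    }
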
diff --git a/src/ast-inl.h b/src/ast-inl.h
index d80684a..731ad2f 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -31,73 +31,83 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
 
 
-SwitchStatement::SwitchStatement(ZoneStringList* labels)
-    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+SwitchStatement::SwitchStatement(Isolate* isolate,
+                                 ZoneStringList* labels)
+    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
       tag_(NULL), cases_(NULL) {
 }
 
 
-Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
-    : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+Block::Block(Isolate* isolate,
+             ZoneStringList* labels,
+             int capacity,
+             bool is_initializer_block)
+    : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
       statements_(capacity),
-      is_initializer_block_(is_initializer_block) {
+      is_initializer_block_(is_initializer_block),
+      block_scope_(NULL) {
 }
 
 
-BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
+BreakableStatement::BreakableStatement(Isolate* isolate,
+                                       ZoneStringList* labels,
+                                       Type type)
     : labels_(labels),
       type_(type),
-      entry_id_(GetNextId()),
-      exit_id_(GetNextId()) {
+      entry_id_(GetNextId(isolate)),
+      exit_id_(GetNextId(isolate)) {
   ASSERT(labels == NULL || labels->length() > 0);
 }
 
 
-IterationStatement::IterationStatement(ZoneStringList* labels)
-    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+IterationStatement::IterationStatement(Isolate* isolate, ZoneStringList* labels)
+    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
       body_(NULL),
       continue_target_(),
-      osr_entry_id_(GetNextId()) {
+      osr_entry_id_(GetNextId(isolate)) {
 }
 
 
-DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
-    : IterationStatement(labels),
+DoWhileStatement::DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
       cond_(NULL),
       condition_position_(-1),
-      continue_id_(GetNextId()),
-      back_edge_id_(GetNextId()) {
+      continue_id_(GetNextId(isolate)),
+      back_edge_id_(GetNextId(isolate)) {
 }
 
 
-WhileStatement::WhileStatement(ZoneStringList* labels)
-    : IterationStatement(labels),
+WhileStatement::WhileStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
       cond_(NULL),
       may_have_function_literal_(true),
-      body_id_(GetNextId()) {
+      body_id_(GetNextId(isolate)) {
 }
 
 
-ForStatement::ForStatement(ZoneStringList* labels)
-    : IterationStatement(labels),
+ForStatement::ForStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
       init_(NULL),
       cond_(NULL),
       next_(NULL),
       may_have_function_literal_(true),
       loop_variable_(NULL),
-      continue_id_(GetNextId()),
-      body_id_(GetNextId()) {
+      continue_id_(GetNextId(isolate)),
+      body_id_(GetNextId(isolate)) {
 }
 
 
-ForInStatement::ForInStatement(ZoneStringList* labels)
-    : IterationStatement(labels), each_(NULL), enumerable_(NULL),
-      assignment_id_(GetNextId()) {
+ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
+      each_(NULL),
+      enumerable_(NULL),
+      assignment_id_(GetNextId(isolate)) {
 }
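
Every constructor in this file now takes the Isolate* explicitly, so AST ids are drawn from that isolate instead of Isolate::Current(), and (together with the ast.h changes further below) allocation goes through the isolate's zone. A fragment of the resulting construction idiom; it assumes the surrounding V8 headers, and the capacity/flag arguments are arbitrary examples:

    // After this patch: zone-placement allocation plus an explicit isolate.
    Block* block = new(isolate->zone()) Block(isolate, NULL, 8, false);
    block->statements()->Add(body_statement);
    // Before this patch the same site read: new Block(NULL, 8, false);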
 
 
diff --git a/src/ast.cc b/src/ast.cc
index 8ab09b3..418cc43 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -36,20 +36,9 @@
 namespace v8 {
 namespace internal {
 
-AstSentinels::AstSentinels()
-    : this_proxy_(true),
-      identifier_proxy_(false),
-      valid_left_hand_side_sentinel_(),
-      this_property_(&this_proxy_, NULL, 0),
-      call_sentinel_(NULL, NULL, 0) {
-}
-
-
 // ----------------------------------------------------------------------------
 // All the Accept member functions for each syntax tree node type.
 
-void Slot::Accept(AstVisitor* v) { v->VisitSlot(this); }
-
 #define DECL_ACCEPT(type)                                       \
   void type::Accept(AstVisitor* v) { v->Visit##type(this); }
 AST_NODE_LIST(DECL_ACCEPT)
@@ -72,8 +61,9 @@
 }
 
 
-VariableProxy::VariableProxy(Variable* var)
-    : name_(var->name()),
+VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
+    : Expression(isolate),
+      name_(var->name()),
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
       inside_with_(false),
@@ -83,29 +73,23 @@
 }
 
 
-VariableProxy::VariableProxy(Handle<String> name,
+VariableProxy::VariableProxy(Isolate* isolate,
+                             Handle<String> name,
                              bool is_this,
                              bool inside_with,
                              int position)
-  : name_(name),
-    var_(NULL),
-    is_this_(is_this),
-    inside_with_(inside_with),
-    is_trivial_(false),
-    position_(position) {
+    : Expression(isolate),
+      name_(name),
+      var_(NULL),
+      is_this_(is_this),
+      inside_with_(inside_with),
+      is_trivial_(false),
+      position_(position) {
   // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
 }
 
 
-VariableProxy::VariableProxy(bool is_this)
-  : var_(NULL),
-    is_this_(is_this),
-    inside_with_(false),
-    is_trivial_(false) {
-}
-
-
 void VariableProxy::BindTo(Variable* var) {
   ASSERT(var_ == NULL);  // must be bound only once
   ASSERT(var != NULL);  // must bind
@@ -120,26 +104,31 @@
 }
 
 
-Assignment::Assignment(Token::Value op,
+Assignment::Assignment(Isolate* isolate,
+                       Token::Value op,
                        Expression* target,
                        Expression* value,
                        int pos)
-    : op_(op),
+    : Expression(isolate),
+      op_(op),
       target_(target),
       value_(value),
       pos_(pos),
       binary_operation_(NULL),
       compound_load_id_(kNoNumber),
-      assignment_id_(GetNextId()),
+      assignment_id_(GetNextId(isolate)),
       block_start_(false),
       block_end_(false),
-      is_monomorphic_(false),
-      receiver_types_(NULL) {
+      is_monomorphic_(false) {
   ASSERT(Token::IsAssignmentOp(op));
   if (is_compound()) {
     binary_operation_ =
-        new BinaryOperation(binary_op(), target, value, pos + 1);
-    compound_load_id_ = GetNextId();
+        new(isolate->zone()) BinaryOperation(isolate,
+                                             binary_op(),
+                                             target,
+                                             value,
+                                             pos + 1);
+    compound_load_id_ = GetNextId(isolate);
   }
 }
 
@@ -186,8 +175,9 @@
 
 
 ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+  Isolate* isolate = Isolate::Current();
   emit_store_ = true;
-  key_ = new Literal(value->name());
+  key_ = new(isolate->zone()) Literal(isolate, value->name());
   value_ = value;
   kind_ = is_getter ? GETTER : SETTER;
 }
@@ -293,11 +283,11 @@
 
 void TargetCollector::AddTarget(Label* target) {
   // Add the label to the collector, but discard duplicates.
-  int length = targets_->length();
+  int length = targets_.length();
   for (int i = 0; i < length; i++) {
-    if (targets_->at(i) == target) return;
+    if (targets_[i] == target) return;
   }
-  targets_->Add(target);
+  targets_.Add(target);
 }
 
 
@@ -337,12 +327,56 @@
 }
 
 
-BinaryOperation::BinaryOperation(Assignment* assignment) {
-  ASSERT(assignment->is_compound());
-  op_ = assignment->binary_op();
-  left_ = assignment->target();
-  right_ = assignment->value();
-  pos_ = assignment->position();
+bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
+                                              Handle<String>* check) {
+  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
+
+  UnaryOperation* left_unary = left_->AsUnaryOperation();
+  UnaryOperation* right_unary = right_->AsUnaryOperation();
+  Literal* left_literal = left_->AsLiteral();
+  Literal* right_literal = right_->AsLiteral();
+
+  // Check for the pattern: typeof <expression> == <string literal>.
+  if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
+      right_literal != NULL && right_literal->handle()->IsString()) {
+    *expr = left_unary->expression();
+    *check = Handle<String>::cast(right_literal->handle());
+    return true;
+  }
+
+  // Check for the pattern: <string literal> == typeof <expression>.
+  if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
+      left_literal != NULL && left_literal->handle()->IsString()) {
+    *expr = right_unary->expression();
+    *check = Handle<String>::cast(left_literal->handle());
+    return true;
+  }
+
+  return false;
+}
+
+
+bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
+  if (op_ != Token::EQ_STRICT) return false;
+
+  UnaryOperation* left_unary = left_->AsUnaryOperation();
+  UnaryOperation* right_unary = right_->AsUnaryOperation();
+
+  // Check for the pattern: <expression> === void <literal>.
+  if (right_unary != NULL && right_unary->op() == Token::VOID &&
+      right_unary->expression()->AsLiteral() != NULL) {
+    *expr = left_;
+    return true;
+  }
+
+  // Check for the pattern: void <literal> === <expression>.
+  if (left_unary != NULL && left_unary->op() == Token::VOID &&
+      left_unary->expression()->AsLiteral() != NULL) {
+    *expr = right_;
+    return true;
+  }
+
+  return false;
 }
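
The two matchers above let a back end recognize comparisons of the form typeof <expr> == <string literal> and <expr> === void <literal> and compile them specially instead of going through a generic compare. A fragment sketching a caller; the variable names and the comments on what would be emitted are illustrative, not from this patch:

    // Illustrative caller (e.g. inside a code generator):
    Expression* sub_expr = NULL;
    Handle<String> check;
    if (compare->IsLiteralCompareTypeof(&sub_expr, &check)) {
      // Source was of the form: typeof v == "number".  Emit a direct type
      // check of sub_expr against the string in 'check'.
    } else if (compare->IsLiteralCompareUndefined(&sub_expr)) {
      // Source was of the form: v === void 0.  Emit an undefined check.
    }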
 
 
@@ -350,8 +384,7 @@
 // Inlining support
 
 bool Declaration::IsInlineable() const {
-  UNREACHABLE();
-  return false;
+  return proxy()->var()->IsStackAllocated() && fun() == NULL;
 }
 
 
@@ -361,23 +394,12 @@
 }
 
 
-bool Slot::IsInlineable() const {
-  UNREACHABLE();
-  return false;
-}
-
-
 bool ForInStatement::IsInlineable() const {
   return false;
 }
 
 
-bool WithEnterStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool WithExitStatement::IsInlineable() const {
+bool WithStatement::IsInlineable() const {
   return false;
 }
 
@@ -402,19 +424,13 @@
 }
 
 
-bool CatchExtensionObject::IsInlineable() const {
-  return false;
-}
-
-
 bool DebuggerStatement::IsInlineable() const {
   return false;
 }
 
 
 bool Throw::IsInlineable() const {
-  // TODO(1143): Make functions containing throw inlineable.
-  return false;
+  return exception()->IsInlineable();
 }
 
 
@@ -440,12 +456,6 @@
 }
 
 
-bool ValidLeftHandSideSentinel::IsInlineable() const {
-  UNREACHABLE();
-  return false;
-}
-
-
 bool ForStatement::IsInlineable() const {
   return (init() == NULL || init()->IsInlineable())
       && (cond() == NULL || cond()->IsInlineable())
@@ -519,7 +529,7 @@
 
 
 bool VariableProxy::IsInlineable() const {
-  return var()->is_global() || var()->IsStackAllocated();
+  return var()->IsUnallocated() || var()->IsStackAllocated();
 }
 
 
@@ -603,7 +613,8 @@
 
 void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   // Record type feedback from the oracle in the AST.
-  is_monomorphic_ = oracle->LoadIsMonomorphic(this);
+  is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
+  receiver_types_.Clear();
   if (key()->IsPropertyName()) {
     if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
       is_array_length_ = true;
@@ -616,16 +627,15 @@
       Literal* lit_key = key()->AsLiteral();
       ASSERT(lit_key != NULL && lit_key->handle()->IsString());
       Handle<String> name = Handle<String>::cast(lit_key->handle());
-      ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
-      receiver_types_ = types;
+      oracle->LoadReceiverTypes(this, name, &receiver_types_);
     }
   } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
     is_string_access_ = true;
   } else if (is_monomorphic_) {
-    monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
-    if (monomorphic_receiver_type_->has_external_array_elements()) {
-      set_external_array_type(oracle->GetKeyedLoadExternalArrayType(this));
-    }
+    receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this));
+  } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
+    receiver_types_.Reserve(kMaxKeyedPolymorphism);
+    oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
   }
 }
 
@@ -633,31 +643,32 @@
 void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   Property* prop = target()->AsProperty();
   ASSERT(prop != NULL);
-  is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+  is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+  receiver_types_.Clear();
   if (prop->key()->IsPropertyName()) {
     Literal* lit_key = prop->key()->AsLiteral();
     ASSERT(lit_key != NULL && lit_key->handle()->IsString());
     Handle<String> name = Handle<String>::cast(lit_key->handle());
-    ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
-    receiver_types_ = types;
+    oracle->StoreReceiverTypes(this, name, &receiver_types_);
   } else if (is_monomorphic_) {
-    // Record receiver type for monomorphic keyed loads.
-    monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
-    if (monomorphic_receiver_type_->has_external_array_elements()) {
-      set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
-    }
+    // Record receiver type for monomorphic keyed stores.
+    receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
+  } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+    receiver_types_.Reserve(kMaxKeyedPolymorphism);
+    oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
   }
 }
 
 
 void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+  is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+  receiver_types_.Clear();
   if (is_monomorphic_) {
-    // Record receiver type for monomorphic keyed loads.
-    monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
-    if (monomorphic_receiver_type_->has_external_array_elements()) {
-      set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
-    }
+    // Record receiver type for monomorphic keyed stores.
+    receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
+  } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+    receiver_types_.Reserve(kMaxKeyedPolymorphism);
+    oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
   }
 }
 
@@ -733,22 +744,22 @@
 }
 
 
-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+                              CallKind call_kind) {
   Property* property = expression()->AsProperty();
   ASSERT(property != NULL);
   // Specialize for the receiver types seen at runtime.
   Literal* key = property->key()->AsLiteral();
   ASSERT(key != NULL && key->handle()->IsString());
   Handle<String> name = Handle<String>::cast(key->handle());
-  receiver_types_ = oracle->CallReceiverTypes(this, name);
+  receiver_types_.Clear();
+  oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) {
-    if (receiver_types_ != NULL) {
-      int length = receiver_types_->length();
-      for (int i = 0; i < length; i++) {
-        Handle<Map> map = receiver_types_->at(i);
-        ASSERT(!map.is_null() && *map != NULL);
-      }
+    int length = receiver_types_.length();
+    for (int i = 0; i < length; i++) {
+      Handle<Map> map = receiver_types_.at(i);
+      ASSERT(!map.is_null() && *map != NULL);
     }
   }
 #endif
@@ -756,9 +767,9 @@
   check_type_ = oracle->GetCallCheckType(this);
   if (is_monomorphic_) {
     Handle<Map> map;
-    if (receiver_types_ != NULL && receiver_types_->length() > 0) {
+    if (receiver_types_.length() > 0) {
       ASSERT(check_type_ == RECEIVER_MAP_CHECK);
-      map = receiver_types_->at(0);
+      map = receiver_types_.at(0);
     } else {
       ASSERT(check_type_ != RECEIVER_MAP_CHECK);
       holder_ = Handle<JSObject>(
@@ -958,7 +969,7 @@
  public:
   RegExpUnparser();
   void VisitCharacterRange(CharacterRange that);
-  SmartPointer<const char> ToString() { return stream_.ToCString(); }
+  SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
 #define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
   FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
 #undef MAKE_CASE
@@ -1113,7 +1124,7 @@
 }
 
 
-SmartPointer<const char> RegExpTree::ToString() {
+SmartArrayPointer<const char> RegExpTree::ToString() {
   RegExpUnparser unparser;
   Accept(&unparser, NULL);
   return unparser.ToString();
@@ -1152,14 +1163,16 @@
 }
 
 
-CaseClause::CaseClause(Expression* label,
+CaseClause::CaseClause(Isolate* isolate,
+                       Expression* label,
                        ZoneList<Statement*>* statements,
                        int pos)
     : label_(label),
       statements_(statements),
       position_(pos),
       compare_type_(NONE),
-      entry_id_(AstNode::GetNextId()) {
+      compare_id_(AstNode::GetNextId(isolate)),
+      entry_id_(AstNode::GetNextId(isolate)) {
 }
 
 } }  // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
index 65a25a9..b56205f 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -28,10 +28,12 @@
 #ifndef V8_AST_H_
 #define V8_AST_H_
 
+#include "allocation.h"
 #include "execution.h"
 #include "factory.h"
 #include "jsregexp.h"
 #include "runtime.h"
+#include "small-pointer-list.h"
 #include "token.h"
 #include "variables.h"
 
@@ -59,8 +61,7 @@
   V(ContinueStatement)                          \
   V(BreakStatement)                             \
   V(ReturnStatement)                            \
-  V(WithEnterStatement)                         \
-  V(WithExitStatement)                          \
+  V(WithStatement)                              \
   V(SwitchStatement)                            \
   V(DoWhileStatement)                           \
   V(WhileStatement)                             \
@@ -79,7 +80,6 @@
   V(RegExpLiteral)                              \
   V(ObjectLiteral)                              \
   V(ArrayLiteral)                               \
-  V(CatchExtensionObject)                       \
   V(Assignment)                                 \
   V(Throw)                                      \
   V(Property)                                   \
@@ -133,12 +133,20 @@
 
   static const int kNoNumber = -1;
   static const int kFunctionEntryId = 2;  // Using 0 could disguise errors.
+  // This AST id identifies the point after the declarations have been
+  // visited. We need it to capture the environment effects of declarations
+  // that emit code (function declarations).
+  static const int kDeclarationsId = 3;
 
-  AstNode() : id_(GetNextId()) {
-    Isolate* isolate = Isolate::Current();
+  // Override ZoneObject's new to count allocated AST nodes.
+  void* operator new(size_t size, Zone* zone) {
+    Isolate* isolate = zone->isolate();
     isolate->set_ast_node_count(isolate->ast_node_count() + 1);
+    return zone->New(static_cast<int>(size));
   }
 
+  AstNode() {}
+
   virtual ~AstNode() { }
 
   virtual void Accept(AstVisitor* v) = 0;
@@ -156,31 +164,28 @@
   virtual BreakableStatement* AsBreakableStatement() { return NULL; }
   virtual IterationStatement* AsIterationStatement() { return NULL; }
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
-  virtual Slot* AsSlot() { return NULL; }
 
   // True if the node is simple enough for us to inline calls containing it.
   virtual bool IsInlineable() const = 0;
 
   static int Count() { return Isolate::Current()->ast_node_count(); }
   static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
-  unsigned id() const { return id_; }
 
  protected:
-  static unsigned GetNextId() {
-    Isolate* isolate = Isolate::Current();
-    unsigned tmp = isolate->ast_node_id();
-    isolate->set_ast_node_id(tmp + 1);
-    return tmp;
+  static unsigned GetNextId(Isolate* isolate) {
+    return ReserveIdRange(isolate, 1);
   }
-  static unsigned ReserveIdRange(int n) {
-    Isolate* isolate = Isolate::Current();
+
+  static unsigned ReserveIdRange(Isolate* isolate, int n) {
     unsigned tmp = isolate->ast_node_id();
     isolate->set_ast_node_id(tmp + n);
     return tmp;
   }
 
  private:
-  unsigned id_;
+  // Hidden to prevent accidental usage. It would have to load the
+  // current zone from the TLS.
+  void* operator new(size_t size);
 
   friend class CaseClause;  // Generates AST IDs.
 };
@@ -205,6 +210,36 @@
 };
 
 
+class SmallMapList {
+ public:
+  SmallMapList() {}
+  explicit SmallMapList(int capacity) : list_(capacity) {}
+
+  void Reserve(int capacity) { list_.Reserve(capacity); }
+  void Clear() { list_.Clear(); }
+
+  bool is_empty() const { return list_.is_empty(); }
+  int length() const { return list_.length(); }
+
+  void Add(Handle<Map> handle) {
+    list_.Add(handle.location());
+  }
+
+  Handle<Map> at(int i) const {
+    return Handle<Map>(list_.at(i));
+  }
+
+  Handle<Map> first() const { return at(0); }
+  Handle<Map> last() const { return at(length() - 1); }
+
+ private:
+  // The list stores pointers to Map*, that is Map**, so it's GC safe.
+  SmallPointerList<Map*> list_;
+
+  DISALLOW_COPY_AND_ASSIGN(SmallMapList);
+};
+
+
 class Expression: public AstNode {
  public:
   enum Context {
@@ -219,7 +254,9 @@
     kTest
   };
 
-  Expression() {}
+  explicit Expression(Isolate* isolate)
+      : id_(GetNextId(isolate)),
+        test_id_(GetNextId(isolate)) {}
 
   virtual int position() const {
     UNREACHABLE();
@@ -261,37 +298,23 @@
     UNREACHABLE();
     return false;
   }
-  virtual ZoneMapList* GetReceiverTypes() {
+  virtual SmallMapList* GetReceiverTypes() {
     UNREACHABLE();
     return NULL;
   }
-  virtual Handle<Map> GetMonomorphicReceiverType() {
-    UNREACHABLE();
-    return Handle<Map>();
+  Handle<Map> GetMonomorphicReceiverType() {
+    ASSERT(IsMonomorphic());
+    SmallMapList* types = GetReceiverTypes();
+    ASSERT(types != NULL && types->length() == 1);
+    return types->at(0);
   }
 
-  ExternalArrayType external_array_type() const {
-    return external_array_type_;
-  }
-  void set_external_array_type(ExternalArrayType array_type) {
-    external_array_type_ = array_type;
-  }
+  unsigned id() const { return id_; }
+  unsigned test_id() const { return test_id_; }
 
  private:
-  ExternalArrayType external_array_type_;
-};
-
-
-/**
- * A sentinel used during pre parsing that represents some expression
- * that is a valid left hand side without having to actually build
- * the expression.
- */
-class ValidLeftHandSideSentinel: public Expression {
- public:
-  virtual bool IsValidLeftHandSide() { return true; }
-  virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
-  virtual bool IsInlineable() const;
+  unsigned id_;
+  unsigned test_id_;
 };
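
SmallMapList, introduced a few hunks above, replaces the heap-allocated ZoneMapList* fields: receiver maps are stored inline in the AST node (as Map** locations, which keeps them GC-safe per the class comment) and exposed through GetReceiverTypes(). A fragment of the recording pattern used by the RecordTypeFeedback changes in ast.cc earlier in this patch; receiver_map is an illustrative handle, and kMaxKeyedPolymorphism is the constant referenced there:

    SmallMapList types;
    types.Reserve(kMaxKeyedPolymorphism);  // pre-size for the polymorphic case
    types.Add(receiver_map);               // Handle<Map>, stored as Map**
    if (types.length() == 1) {
      Handle<Map> only_map = types.first();  // the monomorphic fast path
    }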
 
 
@@ -320,7 +343,7 @@
   int ExitId() const { return exit_id_; }
 
  protected:
-  inline BreakableStatement(ZoneStringList* labels, Type type);
+  BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type);
 
  private:
   ZoneStringList* labels_;
@@ -333,7 +356,10 @@
 
 class Block: public BreakableStatement {
  public:
-  inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
+  inline Block(Isolate* isolate,
+               ZoneStringList* labels,
+               int capacity,
+               bool is_initializer_block);
 
   DECLARE_NODE_TYPE(Block)
 
@@ -354,21 +380,31 @@
   ZoneList<Statement*>* statements() { return &statements_; }
   bool is_initializer_block() const { return is_initializer_block_; }
 
+  Scope* block_scope() const { return block_scope_; }
+  void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; }
+
  private:
   ZoneList<Statement*> statements_;
   bool is_initializer_block_;
+  Scope* block_scope_;
 };
 
 
 class Declaration: public AstNode {
  public:
-  Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun)
+  Declaration(VariableProxy* proxy,
+              Variable::Mode mode,
+              FunctionLiteral* fun,
+              Scope* scope)
       : proxy_(proxy),
         mode_(mode),
-        fun_(fun) {
-    ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+        fun_(fun),
+        scope_(scope) {
+    ASSERT(mode == Variable::VAR ||
+           mode == Variable::CONST ||
+           mode == Variable::LET);
     // At the moment there are no "const functions"'s in JavaScript...
-    ASSERT(fun == NULL || mode == Variable::VAR);
+    ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET);
   }
 
   DECLARE_NODE_TYPE(Declaration)
@@ -377,11 +413,15 @@
   Variable::Mode mode() const { return mode_; }
   FunctionLiteral* fun() const { return fun_; }  // may be NULL
   virtual bool IsInlineable() const;
+  Scope* scope() const { return scope_; }
 
  private:
   VariableProxy* proxy_;
   Variable::Mode mode_;
   FunctionLiteral* fun_;
+
+  // Nested scope from which the declaration originated.
+  Scope* scope_;
 };
 
 
@@ -395,12 +435,13 @@
   // Bailout support.
   int OsrEntryId() const { return osr_entry_id_; }
   virtual int ContinueId() const = 0;
+  virtual int StackCheckId() const = 0;
 
   // Code generation
   Label* continue_target()  { return &continue_target_; }
 
  protected:
-  explicit inline IterationStatement(ZoneStringList* labels);
+  inline IterationStatement(Isolate* isolate, ZoneStringList* labels);
 
   void Initialize(Statement* body) {
     body_ = body;
@@ -415,7 +456,7 @@
 
 class DoWhileStatement: public IterationStatement {
  public:
-  explicit inline DoWhileStatement(ZoneStringList* labels);
+  inline DoWhileStatement(Isolate* isolate, ZoneStringList* labels);
 
   DECLARE_NODE_TYPE(DoWhileStatement)
 
@@ -433,6 +474,7 @@
 
   // Bailout support.
   virtual int ContinueId() const { return continue_id_; }
+  virtual int StackCheckId() const { return back_edge_id_; }
   int BackEdgeId() const { return back_edge_id_; }
 
   virtual bool IsInlineable() const;
@@ -447,7 +489,7 @@
 
 class WhileStatement: public IterationStatement {
  public:
-  explicit inline WhileStatement(ZoneStringList* labels);
+  inline WhileStatement(Isolate* isolate, ZoneStringList* labels);
 
   DECLARE_NODE_TYPE(WhileStatement)
 
@@ -467,6 +509,7 @@
 
   // Bailout support.
   virtual int ContinueId() const { return EntryId(); }
+  virtual int StackCheckId() const { return body_id_; }
   int BodyId() const { return body_id_; }
 
  private:
@@ -479,7 +522,7 @@
 
 class ForStatement: public IterationStatement {
  public:
-  explicit inline ForStatement(ZoneStringList* labels);
+  inline ForStatement(Isolate* isolate, ZoneStringList* labels);
 
   DECLARE_NODE_TYPE(ForStatement)
 
@@ -506,6 +549,7 @@
 
   // Bailout support.
   virtual int ContinueId() const { return continue_id_; }
+  virtual int StackCheckId() const { return body_id_; }
   int BodyId() const { return body_id_; }
 
   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
@@ -527,7 +571,7 @@
 
 class ForInStatement: public IterationStatement {
  public:
-  explicit inline ForInStatement(ZoneStringList* labels);
+  inline ForInStatement(Isolate* isolate, ZoneStringList* labels);
 
   DECLARE_NODE_TYPE(ForInStatement)
 
@@ -544,6 +588,7 @@
   // Bailout support.
   int AssignmentId() const { return assignment_id_; }
   virtual int ContinueId() const { return EntryId(); }
+  virtual int StackCheckId() const { return EntryId(); }
 
  private:
   Expression* each_;
@@ -617,37 +662,30 @@
 };
 
 
-class WithEnterStatement: public Statement {
+class WithStatement: public Statement {
  public:
-  explicit WithEnterStatement(Expression* expression, bool is_catch_block)
-      : expression_(expression), is_catch_block_(is_catch_block) { }
+  WithStatement(Expression* expression, Statement* statement)
+      : expression_(expression), statement_(statement) { }
 
-  DECLARE_NODE_TYPE(WithEnterStatement)
+  DECLARE_NODE_TYPE(WithStatement)
 
   Expression* expression() const { return expression_; }
+  Statement* statement() const { return statement_; }
 
-  bool is_catch_block() const { return is_catch_block_; }
   virtual bool IsInlineable() const;
 
  private:
   Expression* expression_;
-  bool is_catch_block_;
-};
-
-
-class WithExitStatement: public Statement {
- public:
-  WithExitStatement() { }
-
-  virtual bool IsInlineable() const;
-
-  DECLARE_NODE_TYPE(WithExitStatement)
+  Statement* statement_;
 };
 
 
 class CaseClause: public ZoneObject {
  public:
-  CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
+  CaseClause(Isolate* isolate,
+             Expression* label,
+             ZoneList<Statement*>* statements,
+             int pos);
 
   bool is_default() const { return label_ == NULL; }
   Expression* label() const {
@@ -661,6 +699,7 @@
   void set_position(int pos) { position_ = pos; }
 
   int EntryId() { return entry_id_; }
+  int CompareId() { return compare_id_; }
 
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
@@ -674,13 +713,14 @@
   int position_;
   enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
   CompareTypeFeedback compare_type_;
+  int compare_id_;
   int entry_id_;
 };
 
 
 class SwitchStatement: public BreakableStatement {
  public:
-  explicit inline SwitchStatement(ZoneStringList* labels);
+  inline SwitchStatement(Isolate* isolate, ZoneStringList* labels);
 
   DECLARE_NODE_TYPE(SwitchStatement)
 
@@ -706,14 +746,16 @@
 // given if-statement has a then- or an else-part containing code.
 class IfStatement: public Statement {
  public:
-  IfStatement(Expression* condition,
+  IfStatement(Isolate* isolate,
+              Expression* condition,
               Statement* then_statement,
               Statement* else_statement)
       : condition_(condition),
         then_statement_(then_statement),
         else_statement_(else_statement),
-        then_id_(GetNextId()),
-        else_id_(GetNextId()) {
+        if_id_(GetNextId(isolate)),
+        then_id_(GetNextId(isolate)),
+        else_id_(GetNextId(isolate)) {
   }
 
   DECLARE_NODE_TYPE(IfStatement)
@@ -727,6 +769,7 @@
   Statement* then_statement() const { return then_statement_; }
   Statement* else_statement() const { return else_statement_; }
 
+  int IfId() const { return if_id_; }
   int ThenId() const { return then_id_; }
   int ElseId() const { return else_id_; }
 
@@ -734,6 +777,7 @@
   Expression* condition_;
   Statement* then_statement_;
   Statement* else_statement_;
+  int if_id_;
   int then_id_;
   int else_id_;
 };
@@ -743,9 +787,7 @@
 // stack in the compiler; this should probably be reworked.
 class TargetCollector: public AstNode {
  public:
-  explicit TargetCollector(ZoneList<Label*>* targets)
-      : targets_(targets) {
-  }
+  TargetCollector(): targets_(0) { }
 
   // Adds a jump target to the collector. The collector stores a pointer not
   // a copy of the target to make binding work, so make sure not to pass in
@@ -756,11 +798,11 @@
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
   virtual TargetCollector* AsTargetCollector() { return this; }
 
-  ZoneList<Label*>* targets() { return targets_; }
+  ZoneList<Label*>* targets() { return &targets_; }
   virtual bool IsInlineable() const;
 
  private:
-  ZoneList<Label*>* targets_;
+  ZoneList<Label*> targets_;
 };
 
 
@@ -786,21 +828,25 @@
 class TryCatchStatement: public TryStatement {
  public:
   TryCatchStatement(Block* try_block,
-                    VariableProxy* catch_var,
+                    Scope* scope,
+                    Variable* variable,
                     Block* catch_block)
       : TryStatement(try_block),
-        catch_var_(catch_var),
+        scope_(scope),
+        variable_(variable),
         catch_block_(catch_block) {
   }
 
   DECLARE_NODE_TYPE(TryCatchStatement)
 
-  VariableProxy* catch_var() const { return catch_var_; }
+  Scope* scope() { return scope_; }
+  Variable* variable() { return variable_; }
   Block* catch_block() const { return catch_block_; }
   virtual bool IsInlineable() const;
 
  private:
-  VariableProxy* catch_var_;
+  Scope* scope_;
+  Variable* variable_;
   Block* catch_block_;
 };
 
@@ -838,7 +884,8 @@
 
 class Literal: public Expression {
  public:
-  explicit Literal(Handle<Object> handle) : handle_(handle) { }
+  Literal(Isolate* isolate, Handle<Object> handle)
+      : Expression(isolate), handle_(handle) { }
 
   DECLARE_NODE_TYPE(Literal)
 
@@ -891,8 +938,14 @@
 // Base class for literals that needs space in the corresponding JSFunction.
 class MaterializedLiteral: public Expression {
  public:
-  explicit MaterializedLiteral(int literal_index, bool is_simple, int depth)
-      : literal_index_(literal_index), is_simple_(is_simple), depth_(depth) {}
+  MaterializedLiteral(Isolate* isolate,
+                      int literal_index,
+                      bool is_simple,
+                      int depth)
+      : Expression(isolate),
+        literal_index_(literal_index),
+        is_simple_(is_simple),
+        depth_(depth) {}
 
   virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
 
@@ -948,14 +1001,15 @@
     bool emit_store_;
   };
 
-  ObjectLiteral(Handle<FixedArray> constant_properties,
+  ObjectLiteral(Isolate* isolate,
+                Handle<FixedArray> constant_properties,
                 ZoneList<Property*>* properties,
                 int literal_index,
                 bool is_simple,
                 bool fast_elements,
                 int depth,
                 bool has_function)
-      : MaterializedLiteral(literal_index, is_simple, depth),
+      : MaterializedLiteral(isolate, literal_index, is_simple, depth),
         constant_properties_(constant_properties),
         properties_(properties),
         fast_elements_(fast_elements),
@@ -994,10 +1048,11 @@
 // Node for capturing a regexp literal.
 class RegExpLiteral: public MaterializedLiteral {
  public:
-  RegExpLiteral(Handle<String> pattern,
+  RegExpLiteral(Isolate* isolate,
+                Handle<String> pattern,
                 Handle<String> flags,
                 int literal_index)
-      : MaterializedLiteral(literal_index, false, 1),
+      : MaterializedLiteral(isolate, literal_index, false, 1),
         pattern_(pattern),
         flags_(flags) {}
 
@@ -1015,15 +1070,16 @@
 // for minimizing the work when constructing it at runtime.
 class ArrayLiteral: public MaterializedLiteral {
  public:
-  ArrayLiteral(Handle<FixedArray> constant_elements,
+  ArrayLiteral(Isolate* isolate,
+               Handle<FixedArray> constant_elements,
                ZoneList<Expression*>* values,
                int literal_index,
                bool is_simple,
                int depth)
-      : MaterializedLiteral(literal_index, is_simple, depth),
+      : MaterializedLiteral(isolate, literal_index, is_simple, depth),
         constant_elements_(constant_elements),
         values_(values),
-        first_element_id_(ReserveIdRange(values->length())) {}
+        first_element_id_(ReserveIdRange(isolate, values->length())) {}
 
   DECLARE_NODE_TYPE(ArrayLiteral)
 
@@ -1040,45 +1096,12 @@
 };
 
 
-// Node for constructing a context extension object for a catch block.
-// The catch context extension object has one property, the catch
-// variable, which should be DontDelete.
-class CatchExtensionObject: public Expression {
- public:
-  CatchExtensionObject(Literal* key, VariableProxy* value)
-      : key_(key), value_(value) {
-  }
-
-  DECLARE_NODE_TYPE(CatchExtensionObject)
-
-  Literal* key() const { return key_; }
-  VariableProxy* value() const { return value_; }
-  virtual bool IsInlineable() const;
-
- private:
-  Literal* key_;
-  VariableProxy* value_;
-};
-
-
 class VariableProxy: public Expression {
  public:
-  explicit VariableProxy(Variable* var);
+  VariableProxy(Isolate* isolate, Variable* var);
 
   DECLARE_NODE_TYPE(VariableProxy)
 
-  // Type testing & conversion
-  virtual Property* AsProperty() {
-    return var_ == NULL ? NULL : var_->AsProperty();
-  }
-
-  Variable* AsVariable() {
-    if (this == NULL || var_ == NULL) return NULL;
-    Expression* rewrite = var_->rewrite();
-    if (rewrite == NULL || rewrite->AsSlot() != NULL) return var_;
-    return NULL;
-  }
-
   virtual bool IsValidLeftHandSide() {
     return var_ == NULL ? true : var_->IsValidLeftHandSide();
   }
@@ -1095,10 +1118,7 @@
     return !is_this() && name().is_identical_to(n);
   }
 
-  bool IsArguments() {
-    Variable* variable = AsVariable();
-    return (variable == NULL) ? false : variable->is_arguments();
-  }
+  bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
 
   Handle<String> name() const { return name_; }
   Variable* var() const { return var_; }
@@ -1119,95 +1139,31 @@
   bool is_trivial_;
   int position_;
 
-  VariableProxy(Handle<String> name,
+  VariableProxy(Isolate* isolate,
+                Handle<String> name,
                 bool is_this,
                 bool inside_with,
                 int position = RelocInfo::kNoPosition);
-  explicit VariableProxy(bool is_this);
 
   friend class Scope;
 };
 
 
-class VariableProxySentinel: public VariableProxy {
- public:
-  virtual bool IsValidLeftHandSide() { return !is_this(); }
-
- private:
-  explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
-
-  friend class AstSentinels;
-};
-
-
-class Slot: public Expression {
- public:
-  enum Type {
-    // A slot in the parameter section on the stack. index() is
-    // the parameter index, counting left-to-right, starting at 0.
-    PARAMETER,
-
-    // A slot in the local section on the stack. index() is
-    // the variable index in the stack frame, starting at 0.
-    LOCAL,
-
-    // An indexed slot in a heap context. index() is the
-    // variable index in the context object on the heap,
-    // starting at 0. var()->scope() is the corresponding
-    // scope.
-    CONTEXT,
-
-    // A named slot in a heap context. var()->name() is the
-    // variable name in the context object on the heap,
-    // with lookup starting at the current context. index()
-    // is invalid.
-    LOOKUP
-  };
-
-  Slot(Variable* var, Type type, int index)
-      : var_(var), type_(type), index_(index) {
-    ASSERT(var != NULL);
-  }
-
-  virtual void Accept(AstVisitor* v);
-
-  virtual Slot* AsSlot() { return this; }
-
-  bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
-
-  // Accessors
-  Variable* var() const { return var_; }
-  Type type() const { return type_; }
-  int index() const { return index_; }
-  bool is_arguments() const { return var_->is_arguments(); }
-  virtual bool IsInlineable() const;
-
- private:
-  Variable* var_;
-  Type type_;
-  int index_;
-};
-
-
 class Property: public Expression {
  public:
-  // Synthetic properties are property lookups introduced by the system,
-  // to objects that aren't visible to the user. Function calls to synthetic
-  // properties should use the global object as receiver, not the base object
-  // of the resolved Reference.
-  enum Type { NORMAL, SYNTHETIC };
-  Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
-      : obj_(obj),
+  Property(Isolate* isolate,
+           Expression* obj,
+           Expression* key,
+           int pos)
+      : Expression(isolate),
+        obj_(obj),
         key_(key),
         pos_(pos),
-        type_(type),
-        receiver_types_(NULL),
         is_monomorphic_(false),
         is_array_length_(false),
         is_string_length_(false),
         is_string_access_(false),
-        is_function_prototype_(false),
-        is_arguments_access_(false) { }
+        is_function_prototype_(false) { }
 
   DECLARE_NODE_TYPE(Property)
 
@@ -1217,55 +1173,44 @@
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
   virtual int position() const { return pos_; }
-  bool is_synthetic() const { return type_ == SYNTHETIC; }
 
   bool IsStringLength() const { return is_string_length_; }
   bool IsStringAccess() const { return is_string_access_; }
   bool IsFunctionPrototype() const { return is_function_prototype_; }
 
-  // Marks that this is actually an argument rewritten to a keyed property
-  // accessing the argument through the arguments shadow object.
-  void set_is_arguments_access(bool is_arguments_access) {
-    is_arguments_access_ = is_arguments_access;
-  }
-  bool is_arguments_access() const { return is_arguments_access_; }
-
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
-  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
   virtual bool IsArrayLength() { return is_array_length_; }
-  virtual Handle<Map> GetMonomorphicReceiverType() {
-    return monomorphic_receiver_type_;
-  }
 
  private:
   Expression* obj_;
   Expression* key_;
   int pos_;
-  Type type_;
 
-  ZoneMapList* receiver_types_;
+  SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
   bool is_array_length_ : 1;
   bool is_string_length_ : 1;
   bool is_string_access_ : 1;
   bool is_function_prototype_ : 1;
-  bool is_arguments_access_ : 1;
-  Handle<Map> monomorphic_receiver_type_;
 };
 
 
 class Call: public Expression {
  public:
-  Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
-      : expression_(expression),
+  Call(Isolate* isolate,
+       Expression* expression,
+       ZoneList<Expression*>* arguments,
+       int pos)
+      : Expression(isolate),
+        expression_(expression),
         arguments_(arguments),
         pos_(pos),
         is_monomorphic_(false),
         check_type_(RECEIVER_MAP_CHECK),
-        receiver_types_(NULL),
-        return_id_(GetNextId()) {
+        return_id_(GetNextId(isolate)) {
   }
 
   DECLARE_NODE_TYPE(Call)
@@ -1276,8 +1221,9 @@
   ZoneList<Expression*>* arguments() const { return arguments_; }
   virtual int position() const { return pos_; }
 
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle,
+                          CallKind call_kind);
+  virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   CheckType check_type() const { return check_type_; }
   Handle<JSFunction> target() { return target_; }
@@ -1302,7 +1248,7 @@
 
   bool is_monomorphic_;
   CheckType check_type_;
-  ZoneMapList* receiver_types_;
+  SmallMapList receiver_types_;
   Handle<JSFunction> target_;
   Handle<JSObject> holder_;
   Handle<JSGlobalPropertyCell> cell_;
@@ -1311,40 +1257,16 @@
 };
 
 
-class AstSentinels {
- public:
-  ~AstSentinels() { }
-
-  // Returns a property singleton property access on 'this'.  Used
-  // during preparsing.
-  Property* this_property() { return &this_property_; }
-  VariableProxySentinel* this_proxy() { return &this_proxy_; }
-  VariableProxySentinel* identifier_proxy() { return &identifier_proxy_; }
-  ValidLeftHandSideSentinel* valid_left_hand_side_sentinel() {
-    return &valid_left_hand_side_sentinel_;
-  }
-  Call* call_sentinel() { return &call_sentinel_; }
-  EmptyStatement* empty_statement() { return &empty_statement_; }
-
- private:
-  AstSentinels();
-  VariableProxySentinel this_proxy_;
-  VariableProxySentinel identifier_proxy_;
-  ValidLeftHandSideSentinel valid_left_hand_side_sentinel_;
-  Property this_property_;
-  Call call_sentinel_;
-  EmptyStatement empty_statement_;
-
-  friend class Isolate;
-
-  DISALLOW_COPY_AND_ASSIGN(AstSentinels);
-};
-
-
 class CallNew: public Expression {
  public:
-  CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
-      : expression_(expression), arguments_(arguments), pos_(pos) { }
+  CallNew(Isolate* isolate,
+          Expression* expression,
+          ZoneList<Expression*>* arguments,
+          int pos)
+      : Expression(isolate),
+        expression_(expression),
+        arguments_(arguments),
+        pos_(pos) { }
 
   DECLARE_NODE_TYPE(CallNew)
 
@@ -1367,10 +1289,14 @@
 // implemented in JavaScript (see "v8natives.js").
 class CallRuntime: public Expression {
  public:
-  CallRuntime(Handle<String> name,
+  CallRuntime(Isolate* isolate,
+              Handle<String> name,
               const Runtime::Function* function,
               ZoneList<Expression*>* arguments)
-      : name_(name), function_(function), arguments_(arguments) { }
+      : Expression(isolate),
+        name_(name),
+        function_(function),
+        arguments_(arguments) { }
 
   DECLARE_NODE_TYPE(CallRuntime)
 
@@ -1390,8 +1316,11 @@
 
 class UnaryOperation: public Expression {
  public:
-  UnaryOperation(Token::Value op, Expression* expression)
-      : op_(op), expression_(expression) {
+  UnaryOperation(Isolate* isolate,
+                 Token::Value op,
+                 Expression* expression,
+                 int pos)
+      : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
     ASSERT(Token::IsUnaryOp(op));
   }
 
@@ -1403,29 +1332,29 @@
 
   Token::Value op() const { return op_; }
   Expression* expression() const { return expression_; }
+  virtual int position() const { return pos_; }
 
  private:
   Token::Value op_;
   Expression* expression_;
+  int pos_;
 };
 
 
 class BinaryOperation: public Expression {
  public:
-  BinaryOperation(Token::Value op,
+  BinaryOperation(Isolate* isolate,
+                  Token::Value op,
                   Expression* left,
                   Expression* right,
                   int pos)
-      : op_(op), left_(left), right_(right), pos_(pos) {
+      : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
     ASSERT(Token::IsBinaryOp(op));
     right_id_ = (op == Token::AND || op == Token::OR)
-        ? static_cast<int>(GetNextId())
+        ? static_cast<int>(GetNextId(isolate))
         : AstNode::kNoNumber;
   }
 
-  // Create the binary operation corresponding to a compound assignment.
-  explicit BinaryOperation(Assignment* assignment);
-
   DECLARE_NODE_TYPE(BinaryOperation)
 
   virtual bool IsInlineable() const;
@@ -1453,13 +1382,18 @@
 
 class CountOperation: public Expression {
  public:
-  CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
-      : op_(op),
+  CountOperation(Isolate* isolate,
+                 Token::Value op,
+                 bool is_prefix,
+                 Expression* expr,
+                 int pos)
+      : Expression(isolate),
+        op_(op),
         is_prefix_(is_prefix),
         expression_(expr),
         pos_(pos),
-        assignment_id_(GetNextId()),
-        count_id_(GetNextId()) { }
+        assignment_id_(GetNextId(isolate)),
+        count_id_(GetNextId(isolate)) {}
 
   DECLARE_NODE_TYPE(CountOperation)
 
@@ -1480,9 +1414,7 @@
 
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
-  virtual Handle<Map> GetMonomorphicReceiverType() {
-    return monomorphic_receiver_type_;
-  }
+  virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
 
   // Bailout support.
   int AssignmentId() const { return assignment_id_; }
@@ -1496,17 +1428,23 @@
   int pos_;
   int assignment_id_;
   int count_id_;
-  Handle<Map> monomorphic_receiver_type_;
+  SmallMapList receiver_types_;
 };
 
 
 class CompareOperation: public Expression {
  public:
-  CompareOperation(Token::Value op,
+  CompareOperation(Isolate* isolate,
+                   Token::Value op,
                    Expression* left,
                    Expression* right,
                    int pos)
-      : op_(op), left_(left), right_(right), pos_(pos), compare_type_(NONE) {
+      : Expression(isolate),
+        op_(op),
+        left_(left),
+        right_(right),
+        pos_(pos),
+        compare_type_(NONE) {
     ASSERT(Token::IsCompareOp(op));
   }
 
@@ -1524,6 +1462,10 @@
   bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
   bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
 
+  // Match special cases.
+  bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
+  bool IsLiteralCompareUndefined(Expression** expr);
+
  private:
   Token::Value op_;
   Expression* left_;
@@ -1537,8 +1479,8 @@
 
 class CompareToNull: public Expression {
  public:
-  CompareToNull(bool is_strict, Expression* expression)
-      : is_strict_(is_strict), expression_(expression) { }
+  CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
+      : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
 
   DECLARE_NODE_TYPE(CompareToNull)
 
@@ -1556,18 +1498,20 @@
 
 class Conditional: public Expression {
  public:
-  Conditional(Expression* condition,
+  Conditional(Isolate* isolate,
+              Expression* condition,
               Expression* then_expression,
               Expression* else_expression,
               int then_expression_position,
               int else_expression_position)
-      : condition_(condition),
+      : Expression(isolate),
+        condition_(condition),
         then_expression_(then_expression),
         else_expression_(else_expression),
         then_expression_position_(then_expression_position),
         else_expression_position_(else_expression_position),
-        then_id_(GetNextId()),
-        else_id_(GetNextId()) {
+        then_id_(GetNextId(isolate)),
+        else_id_(GetNextId(isolate)) {
   }
 
   DECLARE_NODE_TYPE(Conditional)
@@ -1597,7 +1541,11 @@
 
 class Assignment: public Expression {
  public:
-  Assignment(Token::Value op, Expression* target, Expression* value, int pos);
+  Assignment(Isolate* isolate,
+             Token::Value op,
+             Expression* target,
+             Expression* value,
+             int pos);
 
   DECLARE_NODE_TYPE(Assignment)
 
@@ -1628,10 +1576,7 @@
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
-  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
-  virtual Handle<Map> GetMonomorphicReceiverType() {
-    return monomorphic_receiver_type_;
-  }
+  virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
 
   // Bailout support.
   int CompoundLoadId() const { return compound_load_id_; }
@@ -1650,15 +1595,14 @@
   bool block_end_;
 
   bool is_monomorphic_;
-  ZoneMapList* receiver_types_;
-  Handle<Map> monomorphic_receiver_type_;
+  SmallMapList receiver_types_;
 };
 
 
 class Throw: public Expression {
  public:
-  Throw(Expression* exception, int pos)
-      : exception_(exception), pos_(pos) {}
+  Throw(Isolate* isolate, Expression* exception, int pos)
+      : Expression(isolate), exception_(exception), pos_(pos) {}
 
   DECLARE_NODE_TYPE(Throw)
 
@@ -1674,7 +1618,14 @@
 
 class FunctionLiteral: public Expression {
  public:
-  FunctionLiteral(Handle<String> name,
+  enum Type {
+    ANONYMOUS_EXPRESSION,
+    NAMED_EXPRESSION,
+    DECLARATION
+  };
+
+  FunctionLiteral(Isolate* isolate,
+                  Handle<String> name,
                   Scope* scope,
                   ZoneList<Statement*>* body,
                   int materialized_literal_count,
@@ -1684,8 +1635,10 @@
                   int num_parameters,
                   int start_position,
                   int end_position,
-                  bool is_expression)
-      : name_(name),
+                  Type type,
+                  bool has_duplicate_parameters)
+      : Expression(isolate),
+        name_(name),
         scope_(scope),
         body_(body),
         materialized_literal_count_(materialized_literal_count),
@@ -1696,10 +1649,13 @@
         num_parameters_(num_parameters),
         start_position_(start_position),
         end_position_(end_position),
-        is_expression_(is_expression),
         function_token_position_(RelocInfo::kNoPosition),
         inferred_name_(HEAP->empty_string()),
-        pretenure_(false) { }
+        is_expression_(type != DECLARATION),
+        is_anonymous_(type == ANONYMOUS_EXPRESSION),
+        pretenure_(false),
+        has_duplicate_parameters_(has_duplicate_parameters) {
+  }
 
   DECLARE_NODE_TYPE(FunctionLiteral)
 
@@ -1711,6 +1667,7 @@
   int start_position() const { return start_position_; }
   int end_position() const { return end_position_; }
   bool is_expression() const { return is_expression_; }
+  bool is_anonymous() const { return is_anonymous_; }
   bool strict_mode() const;
 
   int materialized_literal_count() { return materialized_literal_count_; }
@@ -1739,6 +1696,8 @@
   void set_pretenure(bool value) { pretenure_ = value; }
   virtual bool IsInlineable() const;
 
+  bool has_duplicate_parameters() { return has_duplicate_parameters_; }
+
  private:
   Handle<String> name_;
   Scope* scope_;
@@ -1750,18 +1709,21 @@
   int num_parameters_;
   int start_position_;
   int end_position_;
-  bool is_expression_;
   int function_token_position_;
   Handle<String> inferred_name_;
+  bool is_expression_;
+  bool is_anonymous_;
   bool pretenure_;
+  bool has_duplicate_parameters_;
 };
 
 
 class SharedFunctionInfoLiteral: public Expression {
  public:
-  explicit SharedFunctionInfoLiteral(
+  SharedFunctionInfoLiteral(
+      Isolate* isolate,
       Handle<SharedFunctionInfo> shared_function_info)
-      : shared_function_info_(shared_function_info) { }
+      : Expression(isolate), shared_function_info_(shared_function_info) { }
 
   DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
 
@@ -1777,6 +1739,7 @@
 
 class ThisFunction: public Expression {
  public:
+  explicit ThisFunction(Isolate* isolate) : Expression(isolate) {}
   DECLARE_NODE_TYPE(ThisFunction)
   virtual bool IsInlineable() const;
 };
@@ -1812,7 +1775,7 @@
   // expression.
   virtual Interval CaptureRegisters() { return Interval::Empty(); }
   virtual void AppendToText(RegExpText* text);
-  SmartPointer<const char> ToString();
+  SmartArrayPointer<const char> ToString();
 #define MAKE_ASTYPE(Name)                                                  \
   virtual RegExp##Name* As##Name();                                        \
   virtual bool Is##Name();
@@ -1947,6 +1910,7 @@
   uc16 standard_type() { return set_.standard_set_type(); }
   ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
   bool is_negated() { return is_negated_; }
+
  private:
   CharacterSet set_;
   bool is_negated_;
@@ -2031,6 +1995,7 @@
   bool is_non_greedy() { return type_ == NON_GREEDY; }
   bool is_greedy() { return type_ == GREEDY; }
   RegExpTree* body() { return body_; }
+
  private:
   RegExpTree* body_;
   int min_;
@@ -2063,6 +2028,7 @@
   int index() { return index_; }
   static int StartRegister(int index) { return index * 2; }
   static int EndRegister(int index) { return index * 2 + 1; }
+
  private:
   RegExpTree* body_;
   int index_;
@@ -2093,6 +2059,7 @@
   bool is_positive() { return is_positive_; }
   int capture_count() { return capture_count_; }
   int capture_from() { return capture_from_; }
+
  private:
   RegExpTree* body_;
   bool is_positive_;
@@ -2162,9 +2129,6 @@
   void SetStackOverflow() { stack_overflow_ = true; }
   void ClearStackOverflow() { stack_overflow_ = false; }
 
-  // Nodes not appearing in the AST, including slots.
-  virtual void VisitSlot(Slot* node) { UNREACHABLE(); }
-
   // Individual AST nodes.
 #define DEF_VISIT(type)                         \
   virtual void Visit##type(type* node) = 0;
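
The ast.h hunks above all follow one pattern: each Expression subclass gains an Isolate* parameter, forwards it to the Expression(isolate) base constructor, and passes it to GetNextId(isolate), so AST IDs come from the owning isolate rather than an implicit Isolate::Current() lookup. A minimal standalone sketch of that pattern (hypothetical classes, not V8's ast.h) looks like this:

// Minimal sketch, hypothetical classes: thread an explicit Isolate through
// node constructors instead of reading a process-wide/thread-local global.
class Isolate {
 public:
  int GetNextAstId() { return next_ast_id_++; }  // per-isolate ID counter
 private:
  int next_ast_id_ = 0;
};

class Expression {
 public:
  explicit Expression(Isolate* isolate) : id_(isolate->GetNextAstId()) {}
  virtual ~Expression() {}
  int id() const { return id_; }
 private:
  int id_;
};

class BinaryOperation : public Expression {
 public:
  BinaryOperation(Isolate* isolate, Expression* left, Expression* right)
      : Expression(isolate),                    // base draws its ID here
        left_(left),
        right_(right),
        right_id_(isolate->GetNextAstId()) {}   // extra ID, same isolate
 private:
  Expression* left_;
  Expression* right_;
  int right_id_;
};

The apparent payoff is that nodes built on different isolates draw from independent counters, with no global lookup per node construction.
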
diff --git a/src/atomicops_internals_x86_gcc.cc b/src/atomicops_internals_x86_gcc.cc
index a572564..181c202 100644
--- a/src/atomicops_internals_x86_gcc.cc
+++ b/src/atomicops_internals_x86_gcc.cc
@@ -57,6 +57,9 @@
 
 #if defined(cpuid)        // initialize the struct only on x86
 
+namespace v8 {
+namespace internal {
+
 // Set the flags so that code will run correctly and conservatively, so even
 // if we haven't been initialized yet, we're probably single threaded, and our
 // default values should hopefully be pretty safe.
@@ -65,8 +68,14 @@
   false,          // no SSE2
 };
 
+} }  // namespace v8::internal
+
+namespace {
+
 // Initialize the AtomicOps_Internalx86CPUFeatures struct.
-static void AtomicOps_Internalx86CPUFeaturesInit() {
+void AtomicOps_Internalx86CPUFeaturesInit() {
+  using v8::internal::AtomicOps_Internalx86CPUFeatures;
+
   uint32_t eax;
   uint32_t ebx;
   uint32_t ecx;
@@ -107,8 +116,6 @@
   AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
 }
 
-namespace {
-
 class AtomicOpsx86Initializer {
  public:
   AtomicOpsx86Initializer() {
diff --git a/src/atomicops_internals_x86_gcc.h b/src/atomicops_internals_x86_gcc.h
index 3f17fa0..6e55b50 100644
--- a/src/atomicops_internals_x86_gcc.h
+++ b/src/atomicops_internals_x86_gcc.h
@@ -30,6 +30,9 @@
 #ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
 
+namespace v8 {
+namespace internal {
+
 // This struct is not part of the public API of this module; clients may not
 // use it.
 // Features of this x86.  Values may not be correct before main() is run,
@@ -43,9 +46,6 @@
 
 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
-namespace v8 {
-namespace internal {
-
 // 32-bit low-level operations on any platform.
 
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
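
The two atomicops hunks above make a matching pair of moves: the header now declares AtomicOps_Internalx86CPUFeatures inside namespace v8::internal, and the .cc keeps the one-shot CPU-feature detection in an anonymous namespace so only the feature struct itself is visible outside that translation unit. A hedged sketch of that shape, with made-up names rather than the real atomicops code:

// Sketch only; names are hypothetical. A feature struct published from a
// named namespace, filled in once by a static initializer that stays local
// to this .cc via an anonymous namespace.
namespace myproject {
namespace internal {

struct CpuFeatures {
  bool has_cmpxchg16b;
  bool has_sse2;
};

// Conservative defaults until the detector below has run.
CpuFeatures g_cpu_features = { false, false };

}  // namespace internal
}  // namespace myproject

namespace {

void DetectCpuFeatures() {
  using myproject::internal::g_cpu_features;
  // The real code would issue CPUID here; constants keep the sketch portable.
  g_cpu_features.has_cmpxchg16b = false;
  g_cpu_features.has_sse2 = true;
}

// The constructor runs during static initialization of this translation
// unit, mirroring the role of AtomicOpsx86Initializer in the hunk above.
class CpuFeaturesInitializer {
 public:
  CpuFeaturesInitializer() { DetectCpuFeatures(); }
};

CpuFeaturesInitializer g_initializer;

}  // namespace

Keeping the initializer object in the anonymous namespace avoids exporting a symbol nothing else should name, while the struct itself stays addressable from the inline operations declared in the header.
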
diff --git a/src/bignum-dtoa.cc b/src/bignum-dtoa.cc
index 088dd79..a961690 100644
--- a/src/bignum-dtoa.cc
+++ b/src/bignum-dtoa.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,10 @@
 
 #include <math.h>
 
-#include "v8.h"
+#include "../include/v8stdint.h"
+#include "checks.h"
+#include "utils.h"
+
 #include "bignum-dtoa.h"
 
 #include "bignum.h"
diff --git a/src/bignum.cc b/src/bignum.cc
index a973974..9436322 100644
--- a/src/bignum.cc
+++ b/src/bignum.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
-
-#include "bignum.h"
+#include "../include/v8stdint.h"
 #include "utils.h"
+#include "bignum.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/bignum.h b/src/bignum.h
index 1d2bff6..dcc4fa7 100644
--- a/src/bignum.h
+++ b/src/bignum.h
@@ -92,6 +92,7 @@
   static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
     return PlusCompare(a, b, c) < 0;
   }
+
  private:
   typedef uint32_t Chunk;
   typedef uint64_t DoubleChunk;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 5b87640..f07e625 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,8 +47,9 @@
 
 NativesExternalStringResource::NativesExternalStringResource(
     Bootstrapper* bootstrapper,
-    const char* source)
-    : data_(source), length_(StrLength(source)) {
+    const char* source,
+    size_t length)
+    : data_(source), length_(length) {
   if (bootstrapper->delete_these_non_arrays_on_tear_down_ == NULL) {
     bootstrapper->delete_these_non_arrays_on_tear_down_ = new List<char*>(2);
   }
@@ -75,16 +76,18 @@
   if (heap->natives_source_cache()->get(index)->IsUndefined()) {
     if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
       // We can use external strings for the natives.
+      Vector<const char> source = Natives::GetRawScriptSource(index);
       NativesExternalStringResource* resource =
           new NativesExternalStringResource(this,
-              Natives::GetScriptSource(index).start());
+                                            source.start(),
+                                            source.length());
       Handle<String> source_code =
           factory->NewExternalStringFromAscii(resource);
       heap->natives_source_cache()->set(index, *source_code);
     } else {
       // Old snapshot code can't cope with external strings at all.
       Handle<String> source_code =
-        factory->NewStringFromAscii(Natives::GetScriptSource(index));
+        factory->NewStringFromAscii(Natives::GetRawScriptSource(index));
       heap->natives_source_cache()->set(index, *source_code);
     }
   }
@@ -141,7 +144,8 @@
 
 class Genesis BASE_EMBEDDED {
  public:
-  Genesis(Handle<Object> global_object,
+  Genesis(Isolate* isolate,
+          Handle<Object> global_object,
           v8::Handle<v8::ObjectTemplate> global_template,
           v8::ExtensionConfiguration* extensions);
   ~Genesis() { }
@@ -150,8 +154,13 @@
 
   Genesis* previous() { return previous_; }
 
+  Isolate* isolate() const { return isolate_; }
+  Factory* factory() const { return isolate_->factory(); }
+  Heap* heap() const { return isolate_->heap(); }
+
  private:
   Handle<Context> global_context_;
+  Isolate* isolate_;
 
   // There may be more than one active genesis object: When GC is
   // triggered during environment creation there may be weak handle
@@ -163,9 +172,9 @@
   // Creates some basic objects. Used for creating a context from scratch.
   void CreateRoots();
   // Creates the empty function.  Used for creating a context from scratch.
-  Handle<JSFunction> CreateEmptyFunction();
+  Handle<JSFunction> CreateEmptyFunction(Isolate* isolate);
   // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
-  Handle<JSFunction> CreateThrowTypeErrorFunction(Builtins::Name builtin);
+  Handle<JSFunction> GetThrowTypeErrorFunction();
 
   void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
   // Creates the global objects using the global and the template passed in
@@ -190,10 +199,13 @@
   // New context initialization.  Used for creating a context from scratch.
   void InitializeGlobal(Handle<GlobalObject> inner_global,
                         Handle<JSFunction> empty_function);
+  void InitializeExperimentalGlobal();
   // Installs the contents of the native .js files on the global objects.
   // Used for creating a context from scratch.
   void InstallNativeFunctions();
+  void InstallExperimentalNativeFunctions();
   bool InstallNatives();
+  bool InstallExperimentalNatives();
   void InstallBuiltinFunctionIds();
   void InstallJSFunctionResultCaches();
   void InitializeNormalizedMapCaches();
@@ -239,7 +251,8 @@
       Handle<FixedArray> arguments,
       Handle<FixedArray> caller);
 
-  static bool CompileBuiltin(int index);
+  static bool CompileBuiltin(Isolate* isolate, int index);
+  static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
   static bool CompileNative(Vector<const char> name, Handle<String> source);
   static bool CompileScriptCached(Vector<const char> name,
                                   Handle<String> source,
@@ -256,6 +269,7 @@
   // These are the final, writable prototype, maps.
   Handle<Map> function_instance_map_writable_prototype_;
   Handle<Map> strict_mode_function_instance_map_writable_prototype_;
+  Handle<JSFunction> throw_type_error_function;
 
   BootstrapperActive active_;
   friend class Bootstrapper;
@@ -269,12 +283,13 @@
 
 
 Handle<Context> Bootstrapper::CreateEnvironment(
+    Isolate* isolate,
     Handle<Object> global_object,
     v8::Handle<v8::ObjectTemplate> global_template,
     v8::ExtensionConfiguration* extensions) {
   HandleScope scope;
   Handle<Context> env;
-  Genesis genesis(global_object, global_template, extensions);
+  Genesis genesis(isolate, global_object, global_template, extensions);
   env = genesis.result();
   if (!env.is_null()) {
     if (InstallExtensions(env, extensions)) {
@@ -287,15 +302,16 @@
 
 static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
   // object.__proto__ = proto;
+  Factory* factory = object->GetIsolate()->factory();
   Handle<Map> old_to_map = Handle<Map>(object->map());
-  Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
+  Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
   new_to_map->set_prototype(*proto);
   object->set_map(*new_to_map);
 }
 
 
 void Bootstrapper::DetachGlobal(Handle<Context> env) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = env->GetIsolate()->factory();
   JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
   SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
                      factory->null_value());
@@ -322,7 +338,7 @@
                                           Handle<JSObject> prototype,
                                           Builtins::Name call,
                                           bool is_ecma_native) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = target->GetIsolate();
   Factory* factory = isolate->factory();
   Handle<String> symbol = factory->LookupAsciiSymbol(name);
   Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
@@ -334,7 +350,14 @@
                                       prototype,
                                       call_code,
                                       is_ecma_native);
-  SetLocalPropertyNoThrow(target, symbol, function, DONT_ENUM);
+  PropertyAttributes attributes;
+  if (target->IsJSBuiltinsObject()) {
+    attributes =
+        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+  } else {
+    attributes = DONT_ENUM;
+  }
+  SetLocalPropertyNoThrow(target, symbol, function, attributes);
   if (is_ecma_native) {
     function->shared()->set_instance_class_name(*symbol);
   }
@@ -344,30 +367,32 @@
 
 Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
     PrototypePropertyMode prototypeMode) {
-  Factory* factory = Isolate::Current()->factory();
   Handle<DescriptorArray> descriptors =
-      factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
   PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   {  // Add length.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
     descriptors->Set(0, &d);
   }
   {  // Add name.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
     descriptors->Set(1, &d);
   }
   {  // Add arguments.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionArguments);
-    CallbacksDescriptor d(*factory->arguments_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionArguments);
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
     descriptors->Set(2, &d);
   }
   {  // Add caller.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionCaller);
-    CallbacksDescriptor d(*factory->caller_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
+    CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
     descriptors->Set(3, &d);
   }
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
@@ -375,8 +400,9 @@
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
       attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
     descriptors->Set(4, &d);
   }
   descriptors->Sort();
@@ -385,7 +411,7 @@
 
 
 Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
-  Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
       ComputeFunctionInstanceDescriptor(prototype_mode);
   map->set_instance_descriptors(*descriptors);
@@ -394,7 +420,7 @@
 }
 
 
-Handle<JSFunction> Genesis::CreateEmptyFunction() {
+Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   // Allocate the map for function instances. Maps are allocated first and their
   // prototypes patched later, once empty function is created.
 
@@ -422,7 +448,6 @@
   function_instance_map_writable_prototype_ =
       CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
 
-  Isolate* isolate = Isolate::Current();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
@@ -491,28 +516,31 @@
     PrototypePropertyMode prototypeMode,
     Handle<FixedArray> arguments,
     Handle<FixedArray> caller) {
-  Factory* factory = Isolate::Current()->factory();
   Handle<DescriptorArray> descriptors =
-      factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
   PropertyAttributes attributes = static_cast<PropertyAttributes>(
       DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   {  // length
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
     descriptors->Set(0, &d);
   }
   {  // name
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
     descriptors->Set(1, &d);
   }
   {  // arguments
-    CallbacksDescriptor d(*factory->arguments_symbol(), *arguments, attributes);
+    CallbacksDescriptor d(*factory()->arguments_symbol(),
+                          *arguments,
+                          attributes);
     descriptors->Set(2, &d);
   }
   {  // caller
-    CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
+    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
     descriptors->Set(3, &d);
   }
 
@@ -521,8 +549,9 @@
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
       attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
     descriptors->Set(4, &d);
   }
 
@@ -532,25 +561,22 @@
 
 
 // ECMAScript 5th Edition, 13.2.3
-Handle<JSFunction> Genesis::CreateThrowTypeErrorFunction(
-    Builtins::Name builtin) {
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
+Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
+  if (throw_type_error_function.is_null()) {
+    Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
+    throw_type_error_function =
+      factory()->NewFunctionWithoutPrototype(name, kNonStrictMode);
+    Handle<Code> code(isolate()->builtins()->builtin(
+        Builtins::kStrictModePoisonPill));
+    throw_type_error_function->set_map(
+        global_context()->function_map());
+    throw_type_error_function->set_code(*code);
+    throw_type_error_function->shared()->set_code(*code);
+    throw_type_error_function->shared()->DontAdaptArguments();
 
-  Handle<String> name = factory->LookupAsciiSymbol("ThrowTypeError");
-  Handle<JSFunction> throw_type_error =
-      factory->NewFunctionWithoutPrototype(name, kStrictMode);
-  Handle<Code> code = Handle<Code>(
-      isolate->builtins()->builtin(builtin));
-
-  throw_type_error->set_map(global_context()->strict_mode_function_map());
-  throw_type_error->set_code(*code);
-  throw_type_error->shared()->set_code(*code);
-  throw_type_error->shared()->DontAdaptArguments();
-
-  PreventExtensions(throw_type_error);
-
-  return throw_type_error;
+    PreventExtensions(throw_type_error_function);
+  }
+  return throw_type_error_function;
 }
 
 
@@ -559,7 +585,7 @@
     Handle<JSFunction> empty_function,
     Handle<FixedArray> arguments_callbacks,
     Handle<FixedArray> caller_callbacks) {
-  Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
       ComputeStrictFunctionInstanceDescriptor(prototype_mode,
                                               arguments_callbacks,
@@ -574,7 +600,7 @@
 void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
   // Create the callbacks arrays for ThrowTypeError functions.
   // The get/set callbacks are filled in after the maps are created below.
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = empty->GetIsolate()->factory();
   Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
   Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
 
@@ -607,23 +633,21 @@
       CreateStrictModeFunctionMap(
           ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
 
-  // Create the ThrowTypeError function instances.
-  Handle<JSFunction> arguments_throw =
-      CreateThrowTypeErrorFunction(Builtins::kStrictFunctionArguments);
-  Handle<JSFunction> caller_throw =
-      CreateThrowTypeErrorFunction(Builtins::kStrictFunctionCaller);
+  // Create the ThrowTypeError function instance.
+  Handle<JSFunction> throw_function =
+      GetThrowTypeErrorFunction();
 
   // Complete the callback fixed arrays.
-  arguments->set(0, *arguments_throw);
-  arguments->set(1, *arguments_throw);
-  caller->set(0, *caller_throw);
-  caller->set(1, *caller_throw);
+  arguments->set(0, *throw_function);
+  arguments->set(1, *throw_function);
+  caller->set(0, *throw_function);
+  caller->set(1, *throw_function);
 }
 
 
 static void AddToWeakGlobalContextList(Context* context) {
   ASSERT(context->IsGlobalContext());
-  Heap* heap = Isolate::Current()->heap();
+  Heap* heap = context->GetIsolate()->heap();
 #ifdef DEBUG
   { // NOLINT
     ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
@@ -641,15 +665,14 @@
 
 
 void Genesis::CreateRoots() {
-  Isolate* isolate = Isolate::Current();
   // Allocate the global context FixedArray first and then patch the
   // closure and extension object later (we need the empty function
   // and the global object, but in order to create those, we need the
   // global context).
-  global_context_ = Handle<Context>::cast(isolate->global_handles()->Create(
-              *isolate->factory()->NewGlobalContext()));
+  global_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
+              *factory()->NewGlobalContext()));
   AddToWeakGlobalContextList(*global_context_);
-  isolate->set_context(*global_context());
+  isolate()->set_context(*global_context());
 
   // Allocate the message listeners object.
   {
@@ -692,17 +715,13 @@
     }
   }
 
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
-
   if (js_global_template.is_null()) {
-    Handle<String> name = Handle<String>(heap->empty_symbol());
-    Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
+    Handle<String> name = Handle<String>(heap()->empty_symbol());
+    Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
     js_global_function =
-        factory->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
-                             JSGlobalObject::kSize, code, true);
+        factory()->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                               JSGlobalObject::kSize, code, true);
     // Change the constructor property of the prototype of the
     // hidden global function to refer to the Object function.
     Handle<JSObject> prototype =
@@ -710,20 +729,20 @@
             JSObject::cast(js_global_function->instance_prototype()));
     SetLocalPropertyNoThrow(
         prototype,
-        factory->constructor_symbol(),
-        isolate->object_function(),
+        factory()->constructor_symbol(),
+        isolate()->object_function(),
         NONE);
   } else {
     Handle<FunctionTemplateInfo> js_global_constructor(
         FunctionTemplateInfo::cast(js_global_template->constructor()));
     js_global_function =
-        factory->CreateApiFunction(js_global_constructor,
-                                   factory->InnerGlobalObject);
+        factory()->CreateApiFunction(js_global_constructor,
+                                     factory()->InnerGlobalObject);
   }
 
   js_global_function->initial_map()->set_is_hidden_prototype();
   Handle<GlobalObject> inner_global =
-      factory->NewGlobalObject(js_global_function);
+      factory()->NewGlobalObject(js_global_function);
   if (inner_global_out != NULL) {
     *inner_global_out = inner_global;
   }
@@ -731,23 +750,23 @@
   // Step 2: create or re-initialize the global proxy object.
   Handle<JSFunction> global_proxy_function;
   if (global_template.IsEmpty()) {
-    Handle<String> name = Handle<String>(heap->empty_symbol());
-    Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
+    Handle<String> name = Handle<String>(heap()->empty_symbol());
+    Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
     global_proxy_function =
-        factory->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
-                             JSGlobalProxy::kSize, code, true);
+        factory()->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+                               JSGlobalProxy::kSize, code, true);
   } else {
     Handle<ObjectTemplateInfo> data =
         v8::Utils::OpenHandle(*global_template);
     Handle<FunctionTemplateInfo> global_constructor(
             FunctionTemplateInfo::cast(data->constructor()));
     global_proxy_function =
-        factory->CreateApiFunction(global_constructor,
-                                   factory->OuterGlobalObject);
+        factory()->CreateApiFunction(global_constructor,
+                                     factory()->OuterGlobalObject);
   }
 
-  Handle<String> global_name = factory->LookupAsciiSymbol("global");
+  Handle<String> global_name = factory()->LookupAsciiSymbol("global");
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   global_proxy_function->initial_map()->set_is_access_check_needed(true);
 
@@ -761,7 +780,7 @@
         Handle<JSGlobalProxy>::cast(global_object));
   } else {
     return Handle<JSGlobalProxy>::cast(
-        factory->NewJSObject(global_proxy_function, TENURED));
+        factory()->NewJSObject(global_proxy_function, TENURED));
   }
 }
 
@@ -786,7 +805,7 @@
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
   ForceSetProperty(builtins_global,
-                   FACTORY->LookupAsciiSymbol("global"),
+                   factory()->LookupAsciiSymbol("global"),
                    inner_global,
                    attributes);
   // Setup the reference from the global object to the builtins object.
@@ -803,7 +822,6 @@
   // --- G l o b a l   C o n t e x t ---
   // Use the empty function as closure (no scope info).
   global_context()->set_closure(*empty_function);
-  global_context()->set_fcontext(*global_context());
   global_context()->set_previous(NULL);
   // Set extension and global object.
   global_context()->set_extension(*inner_global);
@@ -814,7 +832,7 @@
   // object reinitialization.
   global_context()->set_security_token(*inner_global);
 
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = inner_global->GetIsolate();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
@@ -841,10 +859,10 @@
     // is 1.
     array_function->shared()->set_length(1);
     Handle<DescriptorArray> array_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory->CopyAppendForeignDescriptor(
             factory->empty_descriptor_array(),
             factory->length_symbol(),
-            factory->NewProxy(&Accessors::ArrayLength),
+            factory->NewForeign(&Accessors::ArrayLength),
             static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
 
     // Cache the fast JavaScript array map
@@ -884,10 +902,10 @@
     global_context()->set_string_function(*string_fun);
     // Add 'length' property to strings.
     Handle<DescriptorArray> string_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory->CopyAppendForeignDescriptor(
             factory->empty_descriptor_array(),
             factory->length_symbol(),
-            factory->NewProxy(&Accessors::StringLength),
+            factory->NewForeign(&Accessors::StringLength),
             static_cast<PropertyAttributes>(DONT_ENUM |
                                             DONT_DELETE |
                                             READ_ONLY));
@@ -1044,6 +1062,24 @@
 #endif
   }
 
+  {  // --- aliased_arguments_boilerplate_
+    Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
+    Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+    new_map->set_pre_allocated_property_fields(2);
+    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
+    new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+    // Set up a well-formed parameter map to make assertions happy.
+    Handle<FixedArray> elements = factory->NewFixedArray(2);
+    elements->set_map(heap->non_strict_arguments_elements_map());
+    Handle<FixedArray> array;
+    array = factory->NewFixedArray(0);
+    elements->set(0, *array);
+    array = factory->NewFixedArray(0);
+    elements->set(1, *array);
+    result->set_elements(*elements);
+    global_context()->set_aliased_arguments_boilerplate(*result);
+  }
+
   {  // --- strict mode arguments boilerplate
     const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1052,16 +1088,14 @@
     Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
     Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
 
-    Handle<JSFunction> callee_throw =
-        CreateThrowTypeErrorFunction(Builtins::kStrictArgumentsCallee);
-    Handle<JSFunction> caller_throw =
-        CreateThrowTypeErrorFunction(Builtins::kStrictArgumentsCaller);
+    Handle<JSFunction> throw_function =
+        GetThrowTypeErrorFunction();
 
     // Install the ThrowTypeError functions.
-    callee->set(0, *callee_throw);
-    callee->set(1, *callee_throw);
-    caller->set(0, *caller_throw);
-    caller->set(1, *caller_throw);
+    callee->set(0, *throw_function);
+    callee->set(1, *throw_function);
+    caller->set(0, *throw_function);
+    caller->set(1, *throw_function);
 
     // Create the descriptor array for the arguments object.
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
@@ -1133,7 +1167,7 @@
 
 
   {
-    // Setup the call-as-function delegate.
+    // Set up the call-as-function delegate.
     Handle<Code> code =
         Handle<Code>(isolate->builtins()->builtin(
             Builtins::kHandleApiCallAsFunction));
@@ -1145,7 +1179,7 @@
   }
 
   {
-    // Setup the call-as-constructor delegate.
+    // Set up the call-as-constructor delegate.
     Handle<Code> code =
         Handle<Code>(isolate->builtins()->builtin(
             Builtins::kHandleApiCallAsConstructor));
@@ -1164,17 +1198,41 @@
 }
 
 
-bool Genesis::CompileBuiltin(int index) {
+void Genesis::InitializeExperimentalGlobal() {
+  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+
+  // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
+  // longer need to live behind a flag, so WeakMap gets added to the snapshot.
+  if (FLAG_harmony_weakmaps) {  // -- W e a k M a p
+    Handle<JSObject> prototype =
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
+    InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+                    prototype, Builtins::kIllegal, true);
+  }
+}
+
+
+bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
   Vector<const char> name = Natives::GetScriptName(index);
   Handle<String> source_code =
-      Isolate::Current()->bootstrapper()->NativesSourceLookup(index);
+      isolate->bootstrapper()->NativesSourceLookup(index);
+  return CompileNative(name, source_code);
+}
+
+
+bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
+  Vector<const char> name = ExperimentalNatives::GetScriptName(index);
+  Factory* factory = isolate->factory();
+  Handle<String> source_code =
+      factory->NewStringFromAscii(
+          ExperimentalNatives::GetRawScriptSource(index));
   return CompileNative(name, source_code);
 }
 
 
 bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
   HandleScope scope;
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = source->GetIsolate();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate->debugger()->set_compiling_natives(true);
 #endif
@@ -1199,7 +1257,7 @@
                                   v8::Extension* extension,
                                   Handle<Context> top_context,
                                   bool use_runtime_context) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = source->GetIsolate()->factory();
   HandleScope scope;
   Handle<SharedFunctionInfo> function_info;
 
@@ -1239,22 +1297,21 @@
                      ? top_context->builtins()
                      : top_context->global());
   bool has_pending_exception;
-  Handle<Object> result =
-      Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+  Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
   if (has_pending_exception) return false;
   return true;
 }
 
 
-#define INSTALL_NATIVE(Type, name, var)                                        \
-  Handle<String> var##_name = factory->LookupAsciiSymbol(name);                \
-  Object* var##_native =                                                       \
-      global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name); \
+#define INSTALL_NATIVE(Type, name, var)                                       \
+  Handle<String> var##_name = factory()->LookupAsciiSymbol(name);             \
+  Object* var##_native =                                                      \
+      global_context()->builtins()->GetPropertyNoExceptionThrown(             \
+           *var##_name);                                                      \
   global_context()->set_##var(Type::cast(var##_native));
 
 
 void Genesis::InstallNativeFunctions() {
-  Factory* factory = Isolate::Current()->factory();
   HandleScope scope;
   INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
   INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
@@ -1272,30 +1329,36 @@
   INSTALL_NATIVE(JSObject, "functionCache", function_cache);
 }
 
+void Genesis::InstallExperimentalNativeFunctions() {
+  if (FLAG_harmony_proxies) {
+    INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
+    INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
+    INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
+  }
+}
+
 #undef INSTALL_NATIVE
 
 
 bool Genesis::InstallNatives() {
   HandleScope scope;
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
 
   // Create a function for the builtins object. Allocate space for the
   // JavaScript builtins, a reference to the builtins object
   // (itself) and a reference to the global_context directly in the object.
   Handle<Code> code = Handle<Code>(
-      isolate->builtins()->builtin(Builtins::kIllegal));
+      isolate()->builtins()->builtin(Builtins::kIllegal));
   Handle<JSFunction> builtins_fun =
-      factory->NewFunction(factory->empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
-                           JSBuiltinsObject::kSize, code, true);
+      factory()->NewFunction(factory()->empty_symbol(),
+                             JS_BUILTINS_OBJECT_TYPE,
+                             JSBuiltinsObject::kSize, code, true);
 
-  Handle<String> name = factory->LookupAsciiSymbol("builtins");
+  Handle<String> name = factory()->LookupAsciiSymbol("builtins");
   builtins_fun->shared()->set_instance_class_name(*name);
 
   // Allocate the builtins object.
   Handle<JSBuiltinsObject> builtins =
-      Handle<JSBuiltinsObject>::cast(factory->NewGlobalObject(builtins_fun));
+      Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
   builtins->set_builtins(*builtins);
   builtins->set_global_context(*global_context());
   builtins->set_global_receiver(*builtins);
@@ -1306,7 +1369,7 @@
   // global object.
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-  Handle<String> global_symbol = factory->LookupAsciiSymbol("global");
+  Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
   Handle<Object> global_obj(global_context()->global());
   SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
 
@@ -1315,12 +1378,13 @@
 
   // Create a bridge function that has context in the global context.
   Handle<JSFunction> bridge =
-      factory->NewFunction(factory->empty_symbol(), factory->undefined_value());
-  ASSERT(bridge->context() == *isolate->global_context());
+      factory()->NewFunction(factory()->empty_symbol(),
+                             factory()->undefined_value());
+  ASSERT(bridge->context() == *isolate()->global_context());
 
   // Allocate the builtins context.
   Handle<Context> context =
-    factory->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+    factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
   context->set_global(*builtins);  // override builtins global object
 
   global_context()->set_runtime_context(*context);
@@ -1329,123 +1393,127 @@
     // Builtin functions for Script.
     Handle<JSFunction> script_fun =
         InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kIllegal, false);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(script_fun, prototype);
     global_context()->set_script_function(*script_fun);
 
     // Add 'source' and 'data' property to scripts.
     PropertyAttributes common_attributes =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-    Handle<Proxy> proxy_source = factory->NewProxy(&Accessors::ScriptSource);
+    Handle<Foreign> foreign_source =
+        factory()->NewForeign(&Accessors::ScriptSource);
     Handle<DescriptorArray> script_descriptors =
-        factory->CopyAppendProxyDescriptor(
-            factory->empty_descriptor_array(),
-            factory->LookupAsciiSymbol("source"),
-            proxy_source,
+        factory()->CopyAppendForeignDescriptor(
+            factory()->empty_descriptor_array(),
+            factory()->LookupAsciiSymbol("source"),
+            foreign_source,
             common_attributes);
-    Handle<Proxy> proxy_name = factory->NewProxy(&Accessors::ScriptName);
+    Handle<Foreign> foreign_name =
+        factory()->NewForeign(&Accessors::ScriptName);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("name"),
-            proxy_name,
+            factory()->LookupAsciiSymbol("name"),
+            foreign_name,
             common_attributes);
-    Handle<Proxy> proxy_id = factory->NewProxy(&Accessors::ScriptId);
+    Handle<Foreign> foreign_id = factory()->NewForeign(&Accessors::ScriptId);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("id"),
-            proxy_id,
+            factory()->LookupAsciiSymbol("id"),
+            foreign_id,
             common_attributes);
-    Handle<Proxy> proxy_line_offset =
-        factory->NewProxy(&Accessors::ScriptLineOffset);
+    Handle<Foreign> foreign_line_offset =
+        factory()->NewForeign(&Accessors::ScriptLineOffset);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("line_offset"),
-            proxy_line_offset,
+            factory()->LookupAsciiSymbol("line_offset"),
+            foreign_line_offset,
             common_attributes);
-    Handle<Proxy> proxy_column_offset =
-        factory->NewProxy(&Accessors::ScriptColumnOffset);
+    Handle<Foreign> foreign_column_offset =
+        factory()->NewForeign(&Accessors::ScriptColumnOffset);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("column_offset"),
-            proxy_column_offset,
+            factory()->LookupAsciiSymbol("column_offset"),
+            foreign_column_offset,
             common_attributes);
-    Handle<Proxy> proxy_data = factory->NewProxy(&Accessors::ScriptData);
+    Handle<Foreign> foreign_data =
+        factory()->NewForeign(&Accessors::ScriptData);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("data"),
-            proxy_data,
+            factory()->LookupAsciiSymbol("data"),
+            foreign_data,
             common_attributes);
-    Handle<Proxy> proxy_type = factory->NewProxy(&Accessors::ScriptType);
+    Handle<Foreign> foreign_type =
+        factory()->NewForeign(&Accessors::ScriptType);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("type"),
-            proxy_type,
+            factory()->LookupAsciiSymbol("type"),
+            foreign_type,
             common_attributes);
-    Handle<Proxy> proxy_compilation_type =
-        factory->NewProxy(&Accessors::ScriptCompilationType);
+    Handle<Foreign> foreign_compilation_type =
+        factory()->NewForeign(&Accessors::ScriptCompilationType);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("compilation_type"),
-            proxy_compilation_type,
+            factory()->LookupAsciiSymbol("compilation_type"),
+            foreign_compilation_type,
             common_attributes);
-    Handle<Proxy> proxy_line_ends =
-        factory->NewProxy(&Accessors::ScriptLineEnds);
+    Handle<Foreign> foreign_line_ends =
+        factory()->NewForeign(&Accessors::ScriptLineEnds);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("line_ends"),
-            proxy_line_ends,
+            factory()->LookupAsciiSymbol("line_ends"),
+            foreign_line_ends,
             common_attributes);
-    Handle<Proxy> proxy_context_data =
-        factory->NewProxy(&Accessors::ScriptContextData);
+    Handle<Foreign> foreign_context_data =
+        factory()->NewForeign(&Accessors::ScriptContextData);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("context_data"),
-            proxy_context_data,
+            factory()->LookupAsciiSymbol("context_data"),
+            foreign_context_data,
             common_attributes);
-    Handle<Proxy> proxy_eval_from_script =
-        factory->NewProxy(&Accessors::ScriptEvalFromScript);
+    Handle<Foreign> foreign_eval_from_script =
+        factory()->NewForeign(&Accessors::ScriptEvalFromScript);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_script"),
-            proxy_eval_from_script,
+            factory()->LookupAsciiSymbol("eval_from_script"),
+            foreign_eval_from_script,
             common_attributes);
-    Handle<Proxy> proxy_eval_from_script_position =
-        factory->NewProxy(&Accessors::ScriptEvalFromScriptPosition);
+    Handle<Foreign> foreign_eval_from_script_position =
+        factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_script_position"),
-            proxy_eval_from_script_position,
+            factory()->LookupAsciiSymbol("eval_from_script_position"),
+            foreign_eval_from_script_position,
             common_attributes);
-    Handle<Proxy> proxy_eval_from_function_name =
-        factory->NewProxy(&Accessors::ScriptEvalFromFunctionName);
+    Handle<Foreign> foreign_eval_from_function_name =
+        factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_function_name"),
-            proxy_eval_from_function_name,
+            factory()->LookupAsciiSymbol("eval_from_function_name"),
+            foreign_eval_from_function_name,
             common_attributes);
 
     Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
     script_map->set_instance_descriptors(*script_descriptors);
 
     // Allocate the empty script.
-    Handle<Script> script = factory->NewScript(factory->empty_string());
+    Handle<Script> script = factory()->NewScript(factory()->empty_string());
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    heap->public_set_empty_script(*script);
+    heap()->public_set_empty_script(*script);
   }
   {
     // Builtin function for OpaqueReference -- a JSValue-based object,
@@ -1454,10 +1522,10 @@
     Handle<JSFunction> opaque_reference_fun =
         InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
                         JSValue::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kIllegal, false);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(opaque_reference_fun, prototype);
     global_context()->set_opaque_reference_function(*opaque_reference_fun);
   }
@@ -1476,23 +1544,23 @@
                         "InternalArray",
                         JS_ARRAY_TYPE,
                         JSArray::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kArrayCode,
                         true);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(array_function, prototype);
 
     array_function->shared()->set_construct_stub(
-        isolate->builtins()->builtin(Builtins::kArrayConstructCode));
+        isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();
 
     // Make "length" magic on instances.
     Handle<DescriptorArray> array_descriptors =
-        factory->CopyAppendProxyDescriptor(
-            factory->empty_descriptor_array(),
-            factory->length_symbol(),
-            factory->NewProxy(&Accessors::ArrayLength),
+        factory()->CopyAppendForeignDescriptor(
+            factory()->empty_descriptor_array(),
+            factory()->length_symbol(),
+            factory()->NewForeign(&Accessors::ArrayLength),
             static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
 
     array_function->initial_map()->set_instance_descriptors(
@@ -1508,8 +1576,7 @@
   for (int i = Natives::GetDebuggerCount();
        i < Natives::GetBuiltinsCount();
        i++) {
-    Vector<const char> name = Natives::GetScriptName(i);
-    if (!CompileBuiltin(i)) return false;
+    if (!CompileBuiltin(isolate(), i)) return false;
     // TODO(ager): We really only need to install the JS builtin
     // functions on the builtins object after compiling and running
     // runtime.js.
@@ -1527,9 +1594,9 @@
       HeapObject::cast(string_function->initial_map()->prototype())->map());
 
   // Install Function.prototype.call and apply.
-  { Handle<String> key = factory->function_class_symbol();
+  { Handle<String> key = factory()->function_class_symbol();
     Handle<JSFunction> function =
-        Handle<JSFunction>::cast(GetProperty(isolate->global(), key));
+        Handle<JSFunction>::cast(GetProperty(isolate()->global(), key));
     Handle<JSObject> proto =
         Handle<JSObject>(JSObject::cast(function->instance_prototype()));
 
@@ -1573,7 +1640,7 @@
 
     // Add initial map.
     Handle<Map> initial_map =
-        factory->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
+        factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
     initial_map->set_constructor(*array_constructor);
 
     // Set prototype on map.
@@ -1587,13 +1654,13 @@
     ASSERT_EQ(1, array_descriptors->number_of_descriptors());
 
     Handle<DescriptorArray> reresult_descriptors =
-        factory->NewDescriptorArray(3);
+        factory()->NewDescriptorArray(3);
 
     reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
 
     int enum_index = 0;
     {
-      FieldDescriptor index_field(heap->index_symbol(),
+      FieldDescriptor index_field(heap()->index_symbol(),
                                   JSRegExpResult::kIndexIndex,
                                   NONE,
                                   enum_index++);
@@ -1601,7 +1668,7 @@
     }
 
     {
-      FieldDescriptor input_field(heap->input_symbol(),
+      FieldDescriptor input_field(heap()->input_symbol(),
                                   JSRegExpResult::kInputIndex,
                                   NONE,
                                   enum_index++);
@@ -1617,7 +1684,6 @@
     global_context()->set_regexp_result_map(*initial_map);
   }
 
-
 #ifdef DEBUG
   builtins->Verify();
 #endif
@@ -1626,10 +1692,32 @@
 }
 
 
+bool Genesis::InstallExperimentalNatives() {
+  for (int i = ExperimentalNatives::GetDebuggerCount();
+       i < ExperimentalNatives::GetBuiltinsCount();
+       i++) {
+    if (FLAG_harmony_proxies &&
+        strcmp(ExperimentalNatives::GetScriptName(i).start(),
+               "native proxy.js") == 0) {
+      if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+    }
+    if (FLAG_harmony_weakmaps &&
+        strcmp(ExperimentalNatives::GetScriptName(i).start(),
+               "native weakmap.js") == 0) {
+      if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+    }
+  }
+
+  InstallExperimentalNativeFunctions();
+
+  return true;
+}
+
+
 static Handle<JSObject> ResolveBuiltinIdHolder(
     Handle<Context> global_context,
     const char* holder_expr) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = global_context->GetIsolate()->factory();
   Handle<GlobalObject> global(global_context->global());
   const char* period_pos = strchr(holder_expr, '.');
   if (period_pos == NULL) {
@@ -1648,7 +1736,8 @@
 static void InstallBuiltinFunctionId(Handle<JSObject> holder,
                                      const char* function_name,
                                      BuiltinFunctionId id) {
-  Handle<String> name = FACTORY->LookupAsciiSymbol(function_name);
+  Factory* factory = holder->GetIsolate()->factory();
+  Handle<String> name = factory->LookupAsciiSymbol(function_name);
   Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
   Handle<JSFunction> function(JSFunction::cast(function_object));
   function->shared()->set_function_data(Smi::FromInt(id));
@@ -1675,13 +1764,14 @@
   F(16, global_context()->regexp_function())
 
 
-static FixedArray* CreateCache(int size, JSFunction* factory) {
+static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
+  Factory* factory = factory_function->GetIsolate()->factory();
   // Caches are supposed to live for a long time, allocate in old space.
   int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
   // Cannot use cast as object is not fully initialized yet.
   JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
-      *FACTORY->NewFixedArrayWithHoles(array_size, TENURED));
-  cache->set(JSFunctionResultCache::kFactoryIndex, factory);
+      *factory->NewFixedArrayWithHoles(array_size, TENURED));
+  cache->set(JSFunctionResultCache::kFactoryIndex, *factory_function);
   cache->MakeZeroSize();
   return cache;
 }
@@ -1698,9 +1788,9 @@
 
   int index = 0;
 
-#define F(size, func) do {                           \
-    FixedArray* cache = CreateCache((size), (func)); \
-    caches->set(index++, cache);                     \
+#define F(size, func) do {                                              \
+    FixedArray* cache = CreateCache((size), Handle<JSFunction>(func));  \
+    caches->set(index++, cache);                                        \
   } while (false)
 
   JSFUNCTION_RESULT_CACHE_LIST(F);
@@ -1720,7 +1810,7 @@
 
 bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
                                      v8::ExtensionConfiguration* extensions) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = global_context->GetIsolate();
   BootstrapperActive active;
   SaveContext saved_context(isolate);
   isolate->set_context(*global_context);
@@ -1731,7 +1821,7 @@
 
 
 void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = global_context->GetIsolate()->factory();
   HandleScope scope;
   Handle<JSGlobalObject> js_global(
       JSGlobalObject::cast(global_context->global()));
@@ -1867,9 +1957,10 @@
 
 bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
   HandleScope scope;
+  Factory* factory = builtins->GetIsolate()->factory();
   for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
     Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
-    Handle<String> name = FACTORY->LookupAsciiSymbol(Builtins::GetName(id));
+    Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id));
     Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
     Handle<JSFunction> function
         = Handle<JSFunction>(JSFunction::cast(function_object));
@@ -1918,13 +2009,12 @@
   ASSERT(object->IsInstanceOf(
       FunctionTemplateInfo::cast(object_template->constructor())));
 
-  Isolate* isolate = Isolate::Current();
   bool pending_exception = false;
   Handle<JSObject> obj =
       Execution::InstantiateObject(object_template, &pending_exception);
   if (pending_exception) {
-    ASSERT(isolate->has_pending_exception());
-    isolate->clear_pending_exception();
+    ASSERT(isolate()->has_pending_exception());
+    isolate()->clear_pending_exception();
     return false;
   }
   TransferObject(obj, object);
@@ -1972,15 +2062,16 @@
           break;
         }
         case MAP_TRANSITION:
-        case EXTERNAL_ARRAY_TRANSITION:
+        case ELEMENTS_TRANSITION:
         case CONSTANT_TRANSITION:
         case NULL_DESCRIPTOR:
           // Ignore non-properties.
           break;
         case NORMAL:
           // Do not occur since the from object has fast properties.
+        case HANDLER:
         case INTERCEPTOR:
-          // No element in instance descriptors have interceptor type.
+          // No element in instance descriptors has proxy or interceptor type.
           UNREACHABLE();
           break;
       }
@@ -2023,6 +2114,7 @@
 
 void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
   HandleScope outer;
+  Factory* factory = from->GetIsolate()->factory();
 
   ASSERT(!from->IsJSArray());
   ASSERT(!to->IsJSArray());
@@ -2032,7 +2124,7 @@
 
   // Transfer the prototype (new map is needed).
   Handle<Map> old_to_map = Handle<Map>(to->map());
-  Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
+  Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
   new_to_map->set_prototype(from->map()->prototype());
   to->set_map(*new_to_map);
 }
@@ -2053,10 +2145,10 @@
 }
 
 
-Genesis::Genesis(Handle<Object> global_object,
+Genesis::Genesis(Isolate* isolate,
+                 Handle<Object> global_object,
                  v8::Handle<v8::ObjectTemplate> global_template,
-                 v8::ExtensionConfiguration* extensions) {
-  Isolate* isolate = Isolate::Current();
+                 v8::ExtensionConfiguration* extensions) : isolate_(isolate) {
   result_ = Handle<Context>::null();
   // If V8 isn't running and cannot be initialized, just return.
   if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
@@ -2086,7 +2178,7 @@
   } else {
     // We get here if there was no context snapshot.
     CreateRoots();
-    Handle<JSFunction> empty_function = CreateEmptyFunction();
+    Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
     CreateStrictModeFunctionMaps(empty_function);
     Handle<GlobalObject> inner_global;
     Handle<JSGlobalProxy> global_proxy =
@@ -2103,6 +2195,10 @@
     isolate->counters()->contexts_created_from_scratch()->Increment();
   }
 
+  // Initialize experimental globals and install experimental natives.
+  InitializeExperimentalGlobal();
+  if (!InstallExperimentalNatives()) return;
+
   result_ = global_context_;
 }
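
The hunks above consistently replace implicit state lookups (Isolate::Current(), the FACTORY macro) with an isolate that is either derived from a handle (holder->GetIsolate()->factory()) or passed in explicitly (Genesis(Isolate*, ...)). A minimal standalone analogue of why explicit threading matters once more than one isolate exists; the Isolate and Factory classes below are illustrative stand-ins, not V8's.

#include <cstdio>
#include <string>
#include <utility>

// Illustrative stand-ins only -- not V8's classes.
class Factory {
 public:
  explicit Factory(std::string tag) : tag_(std::move(tag)) {}
  std::string LookupSymbol(const char* name) const { return tag_ + ":" + name; }
 private:
  std::string tag_;
};

class Isolate {
 public:
  explicit Isolate(const char* tag) : factory_(tag) {}
  Factory* factory() { return &factory_; }
 private:
  Factory factory_;
};

// Old style: depends on a process-wide "current" isolate, which goes wrong as
// soon as two isolates are being bootstrapped.
Isolate* g_current_isolate = nullptr;
std::string InstallViaGlobal(const char* name) {
  return g_current_isolate->factory()->LookupSymbol(name);
}

// New style: the caller states which isolate owns the objects being created.
std::string InstallExplicit(Isolate* isolate, const char* name) {
  return isolate->factory()->LookupSymbol(name);
}

int main() {
  Isolate a("isolate-A"), b("isolate-B");
  g_current_isolate = &a;
  std::printf("%s\n", InstallViaGlobal("eval_from_function_name").c_str());
  std::printf("%s\n", InstallExplicit(&b, "eval_from_function_name").c_str());
}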
 
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 3e158d6..abf61b9 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -29,6 +29,8 @@
 #ifndef V8_BOOTSTRAPPER_H_
 #define V8_BOOTSTRAPPER_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -93,6 +95,7 @@
   // Creates a JavaScript Global Context with initial object graph.
   // The returned value is a global handle casted to V8Environment*.
   Handle<Context> CreateEnvironment(
+      Isolate* isolate,
       Handle<Object> global_object,
       v8::Handle<v8::ObjectTemplate> global_template,
       v8::ExtensionConfiguration* extensions);
@@ -113,7 +116,7 @@
   bool IsActive() const { return nesting_ != 0; }
 
   // Support for thread preemption.
-  RLYSTC int ArchiveSpacePerThread();
+  static int ArchiveSpacePerThread();
   char* ArchiveState(char* to);
   char* RestoreState(char* from);
   void FreeThreadResources();
@@ -165,8 +168,9 @@
 class NativesExternalStringResource
     : public v8::String::ExternalAsciiStringResource {
  public:
-  explicit NativesExternalStringResource(Bootstrapper* bootstrapper,
-                                         const char* source);
+  NativesExternalStringResource(Bootstrapper* bootstrapper,
+                                const char* source,
+                                size_t length);
 
   const char* data() const {
     return data_;
diff --git a/src/builtins.cc b/src/builtins.cc
index 1846590..e6a0699 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -982,34 +982,12 @@
 // Strict mode poison pills
 
 
-BUILTIN(StrictArgumentsCallee) {
+BUILTIN(StrictModePoisonPill) {
   HandleScope scope;
   return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
+      "strict_poison_pill", HandleVector<Object>(NULL, 0)));
 }
 
-
-BUILTIN(StrictArgumentsCaller) {
-  HandleScope scope;
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_arguments_caller", HandleVector<Object>(NULL, 0)));
-}
-
-
-BUILTIN(StrictFunctionCaller) {
-  HandleScope scope;
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_function_caller", HandleVector<Object>(NULL, 0)));
-}
-
-
-BUILTIN(StrictFunctionArguments) {
-  HandleScope scope;
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_function_arguments", HandleVector<Object>(NULL, 0)));
-}
-
-
 // -----------------------------------------------------------------------------
 //
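
The four strict-mode throwers above collapse into a single StrictModePoisonPill builtin, so every poisoned property now raises the same TypeError. A standalone analogue of wiring one thrower behind several property names; the registry below is hypothetical and is not V8's accessor machinery.

#include <cstdio>
#include <functional>
#include <map>
#include <stdexcept>
#include <string>

// One shared thrower replaces four near-identical ones; the message no longer
// encodes which poisoned property was touched.
void StrictModePoisonPill() {
  throw std::runtime_error("strict_poison_pill");
}

int main() {
  std::map<std::string, std::function<void()>> accessors;
  for (const char* name : {"arguments.callee", "arguments.caller",
                           "Function.prototype.caller",
                           "Function.prototype.arguments"}) {
    accessors[name] = StrictModePoisonPill;  // same callback for all four
  }
  try {
    accessors["arguments.callee"]();
  } catch (const std::exception& e) {
    std::printf("TypeError: %s\n", e.what());
  }
}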
 
@@ -1025,6 +1003,8 @@
                                 Object** argv,
                                 FunctionTemplateInfo* info) {
   Object* recv = argv[0];
+  // API calls are only supported with JSObject receivers.
+  if (!recv->IsJSObject()) return heap->null_value();
   Object* sig_obj = info->signature();
   if (sig_obj->IsUndefined()) return recv;
   SignatureInfo* sig = SignatureInfo::cast(sig_obj);
@@ -1222,10 +1202,10 @@
   ASSERT(!CalledAsConstructor(isolate));
   Heap* heap = isolate->heap();
 
-  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> receiver = args.receiver();
 
   // Get the object called.
-  JSObject* obj = JSObject::cast(*args.receiver());
+  JSObject* obj = JSObject::cast(*receiver);
 
   // Get the invocation callback from the function descriptor that was
   // used to create the called object.
@@ -1338,8 +1318,18 @@
 }
 
 
+static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
+}
+
+
 static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMiss(masm);
+  KeyedLoadIC::GenerateMiss(masm, false);
+}
+
+
+static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateMiss(masm, true);
 }
 
 
@@ -1361,6 +1351,9 @@
   KeyedLoadIC::GenerateIndexedInterceptor(masm);
 }
 
+static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateNonStrictArguments(masm);
+}
 
 static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
   StoreIC::GenerateInitialize(masm);
@@ -1428,7 +1421,17 @@
 
 
 static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMiss(masm);
+  KeyedStoreIC::GenerateMiss(masm, false);
+}
+
+
+static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateMiss(masm, true);
+}
+
+
+static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateSlow(masm);
 }
 
 
@@ -1441,6 +1444,9 @@
   KeyedStoreIC::GenerateInitialize(masm);
 }
 
+static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateNonStrictArguments(masm);
+}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
@@ -1577,7 +1583,6 @@
     functions->s_name = #aname;                                             \
     functions->name = k##aname;                                             \
     functions->flags = Code::ComputeFlags(Code::kind,                       \
-                                          NOT_IN_LOOP,                      \
                                           state,                            \
                                           extra);                           \
     functions->extra_args = NO_EXTRA_ARGUMENTS;                             \
diff --git a/src/builtins.h b/src/builtins.h
index bc0facb..31090d3 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,122 +60,129 @@
   V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS)                    \
   V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)                 \
                                                                     \
-  V(StrictArgumentsCallee, NO_EXTRA_ARGUMENTS)                      \
-  V(StrictArgumentsCaller, NO_EXTRA_ARGUMENTS)                      \
-  V(StrictFunctionCaller, NO_EXTRA_ARGUMENTS)                       \
-  V(StrictFunctionArguments, NO_EXTRA_ARGUMENTS)
-
+  V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS)
 
 // Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V)                                           \
-  V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructCall,                BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructStubCountdown,       BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructStubGeneric,         BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructStubApi,             BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSEntryTrampoline,              BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructEntryTrampoline,     BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(LazyCompile,                    BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(LazyRecompile,                  BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(NotifyDeoptimized,              BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(NotifyOSR,                      BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(LoadIC_Miss,                    BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_Miss,               BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Miss,                   BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(KeyedStoreIC_Miss,              BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(LoadIC_Initialize,              LOAD_IC, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_PreMonomorphic,          LOAD_IC, PREMONOMORPHIC,        \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_Normal,                  LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_ArrayLength,             LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_StringLength,            LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_StringWrapperLength,     LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_FunctionPrototype,       LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_Megamorphic,             LOAD_IC, MEGAMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(KeyedLoadIC_Initialize,         KEYED_LOAD_IC, UNINITIALIZED,   \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_PreMonomorphic,     KEYED_LOAD_IC, PREMONOMORPHIC,  \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_Generic,            KEYED_LOAD_IC, MEGAMORPHIC,     \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_String,             KEYED_LOAD_IC, MEGAMORPHIC,     \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC,     \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(StoreIC_Initialize,             STORE_IC, UNINITIALIZED,        \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_ArrayLength,            STORE_IC, MONOMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Normal,                 STORE_IC, MONOMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Megamorphic,            STORE_IC, MEGAMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_GlobalProxy,            STORE_IC, MEGAMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Initialize_Strict,      STORE_IC, UNINITIALIZED,        \
-                                    kStrictMode)                    \
-  V(StoreIC_ArrayLength_Strict,     STORE_IC, MONOMORPHIC,          \
-                                    kStrictMode)                    \
-  V(StoreIC_Normal_Strict,          STORE_IC, MONOMORPHIC,          \
-                                    kStrictMode)                    \
-  V(StoreIC_Megamorphic_Strict,     STORE_IC, MEGAMORPHIC,          \
-                                    kStrictMode)                    \
-  V(StoreIC_GlobalProxy_Strict,     STORE_IC, MEGAMORPHIC,          \
-                                    kStrictMode)                    \
-                                                                    \
-  V(KeyedStoreIC_Initialize,        KEYED_STORE_IC, UNINITIALIZED,  \
-                                    Code::kNoExtraICState)          \
-  V(KeyedStoreIC_Generic,           KEYED_STORE_IC, MEGAMORPHIC,    \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,  \
-                                    kStrictMode)                    \
-  V(KeyedStoreIC_Generic_Strict,    KEYED_STORE_IC, MEGAMORPHIC,    \
-                                    kStrictMode)                    \
-                                                                    \
-  /* Uses KeyedLoadIC_Initialize; must be after in list. */         \
-  V(FunctionCall,                   BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(FunctionApply,                  BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(ArrayCode,                      BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(ArrayConstructCode,             BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(StringConstructCode,            BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(OnStackReplacement,             BUILTIN, UNINITIALIZED,         \
+#define BUILTIN_LIST_A(V)                                               \
+  V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructCall,                BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructStubCountdown,       BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructStubGeneric,         BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructStubApi,             BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSEntryTrampoline,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructEntryTrampoline,     BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LazyCompile,                    BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LazyRecompile,                  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(NotifyDeoptimized,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(NotifyOSR,                      BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(LoadIC_Miss,                    BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_Miss,               BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_MissForceGeneric,   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_Slow,               BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Miss,                   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_Miss,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_MissForceGeneric,  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_Slow,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_Initialize,              LOAD_IC, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_PreMonomorphic,          LOAD_IC, PREMONOMORPHIC,            \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_Normal,                  LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_ArrayLength,             LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_StringLength,            LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_StringWrapperLength,     LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_FunctionPrototype,       LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_Megamorphic,             LOAD_IC, MEGAMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(KeyedLoadIC_Initialize,         KEYED_LOAD_IC, UNINITIALIZED,       \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_PreMonomorphic,     KEYED_LOAD_IC, PREMONOMORPHIC,      \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_Generic,            KEYED_LOAD_IC, MEGAMORPHIC,         \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_String,             KEYED_LOAD_IC, MEGAMORPHIC,         \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC,         \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC,         \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(StoreIC_Initialize,             STORE_IC, UNINITIALIZED,            \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_ArrayLength,            STORE_IC, MONOMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Normal,                 STORE_IC, MONOMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Megamorphic,            STORE_IC, MEGAMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_GlobalProxy,            STORE_IC, MEGAMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Initialize_Strict,      STORE_IC, UNINITIALIZED,            \
+                                    kStrictMode)                        \
+  V(StoreIC_ArrayLength_Strict,     STORE_IC, MONOMORPHIC,              \
+                                    kStrictMode)                        \
+  V(StoreIC_Normal_Strict,          STORE_IC, MONOMORPHIC,              \
+                                    kStrictMode)                        \
+  V(StoreIC_Megamorphic_Strict,     STORE_IC, MEGAMORPHIC,              \
+                                    kStrictMode)                        \
+  V(StoreIC_GlobalProxy_Strict,     STORE_IC, MEGAMORPHIC,              \
+                                    kStrictMode)                        \
+                                                                        \
+  V(KeyedStoreIC_Initialize,        KEYED_STORE_IC, UNINITIALIZED,      \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_Generic,           KEYED_STORE_IC, MEGAMORPHIC,        \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,      \
+                                    kStrictMode)                        \
+  V(KeyedStoreIC_Generic_Strict,    KEYED_STORE_IC, MEGAMORPHIC,        \
+                                    kStrictMode)                        \
+  V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC,       \
+                                     Code::kNoExtraICState)             \
+                                                                        \
+  /* Uses KeyedLoadIC_Initialize; must be after in list. */             \
+  V(FunctionCall,                   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(FunctionApply,                  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(ArrayCode,                      BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(ArrayConstructCode,             BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(StringConstructCode,            BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(OnStackReplacement,             BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)
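
BUILTIN_LIST_A is an X-macro: each client defines V to pull out the columns it needs and expands the list once per use (enum ids, code generators, name tables). A minimal standalone sketch of the idiom with a made-up two-entry list:

#include <cstdio>

// A tiny stand-in list; the real BUILTIN_LIST_A carries kind/state/extra-state
// columns in exactly the same way.
#define DEMO_BUILTIN_LIST(V)        \
  V(LoadIC_Miss,      "builtin")    \
  V(KeyedLoadIC_Slow, "builtin")

// Expansion 1: an enum of ids.
enum DemoBuiltinId {
#define DEF_ENUM(name, kind) k##name,
  DEMO_BUILTIN_LIST(DEF_ENUM)
#undef DEF_ENUM
  kDemoBuiltinCount
};

// Expansion 2: a parallel table of printable names.
static const char* kDemoBuiltinNames[] = {
#define DEF_NAME(name, kind) #name,
  DEMO_BUILTIN_LIST(DEF_NAME)
#undef DEF_NAME
};

int main() {
  for (int i = 0; i < kDemoBuiltinCount; ++i)
    std::printf("%d: %s\n", i, kDemoBuiltinNames[i]);
}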
 
 
@@ -231,6 +238,8 @@
   V(FILTER_KEY, 1)                       \
   V(CALL_NON_FUNCTION, 0)                \
   V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
+  V(CALL_FUNCTION_PROXY, 1)                \
+  V(CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, 1) \
   V(TO_OBJECT, 0)                        \
   V(TO_NUMBER, 0)                        \
   V(TO_STRING, 0)                        \
diff --git a/src/cached-powers.cc b/src/cached-powers.cc
index 43dbc78..30a67a6 100644
--- a/src/cached-powers.cc
+++ b/src/cached-powers.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,10 +26,12 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdarg.h>
+#include <math.h>
 #include <limits.h>
 
-#include "v8.h"
-
+#include "../include/v8stdint.h"
+#include "globals.h"
+#include "checks.h"
 #include "cached-powers.h"
 
 namespace v8 {
@@ -147,7 +149,9 @@
     DiyFp* power,
     int* decimal_exponent) {
   int kQ = DiyFp::kSignificandSize;
-  double k = ceiling((min_exponent + kQ - 1) * kD_1_LOG2_10);
+  // Some platforms return incorrect sign on 0 result. We can ignore that here,
+  // which means we can avoid depending on platform.h.
+  double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
   int foo = kCachedPowersOffset;
   int index =
       (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
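
The replaced call computes which cached power of ten covers a binary exponent: k = ceil((min_exponent + q - 1) / log2(10)), which is then mapped onto the cache index. A standalone check of that arithmetic; the constants below (q = 64 significand bits, exponent distance 8, offset 348, 1/log2(10)) are what I recall for this cache and should be treated as assumptions.

#include <cmath>
#include <cstdio>

int main() {
  // Assumed constants (DiyFp significand size and cache layout).
  const int kQ = 64;
  const int kDecimalExponentDistance = 8;
  const int kCachedPowersOffset = 348;
  const double kD_1_LOG2_10 = 0.30102999566398114;  // 1 / log2(10)

  const int min_exponent = -60;  // example binary exponent
  double k = std::ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
  int index =
      (kCachedPowersOffset + static_cast<int>(k) - 1) / kDecimalExponentDistance
      + 1;
  std::printf("k = %.0f, cache index = %d\n", k, index);  // k = 1, index = 44
}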
diff --git a/src/cached-powers.h b/src/cached-powers.h
index 2ae5619..88df222 100644
--- a/src/cached-powers.h
+++ b/src/cached-powers.h
@@ -35,7 +35,6 @@
 
 class PowersOfTenCache {
  public:
-
   // Not all powers of ten are cached. The decimal exponent of two neighboring
   // cached numbers will differ by kDecimalExponentDistance.
   static const int kDecimalExponentDistance;
diff --git a/src/char-predicates.h b/src/char-predicates.h
index dac1eb8..5a901a2 100644
--- a/src/char-predicates.h
+++ b/src/char-predicates.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_CHAR_PREDICATES_H_
 #define V8_CHAR_PREDICATES_H_
 
+#include "unicode.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/checks.h b/src/checks.h
index a560b2f..2f359f6 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -251,9 +251,9 @@
 // actually causes each use to introduce a new defined type with a
 // name depending on the source line.
 template <int> class StaticAssertionHelper { };
-#define STATIC_CHECK(test)                                                  \
-  typedef                                                                   \
-    StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>(test)>)> \
+#define STATIC_CHECK(test)                                                    \
+  typedef                                                                     \
+    StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
     SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
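
STATIC_CHECK works because StaticAssertion<bool> is defined only for true: sizeof over the false specialization fails to compile, while the typedef gives each check a unique, zero-cost name. A standalone reconstruction of the mechanism; the StaticAssertion template and the SEMI_STATIC_JOIN helper are rebuilt here from memory of checks.h and are a sketch, not the exact source.

// Only the true case is complete; StaticAssertion<false> stays incomplete,
// so sizeof on it is a compile-time error.
template <bool> class StaticAssertion;
template <> class StaticAssertion<true> {};

// Each use becomes a typedef with a line-number-unique name: no code, no data.
template <int> class StaticAssertionHelper {};

#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
#define STATIC_CHECK(test)                                                    \
  typedef                                                                     \
    StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
    SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)

STATIC_CHECK(sizeof(long) >= sizeof(int));   // compiles quietly
// STATIC_CHECK(sizeof(int) == 1);           // would fail: incomplete type in sizeof

int main() { return 0; }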
 
 
diff --git a/src/circular-queue-inl.h b/src/circular-queue-inl.h
index 349f222..373bf60 100644
--- a/src/circular-queue-inl.h
+++ b/src/circular-queue-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,8 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_CIRCULAR_BUFFER_INL_H_
-#define V8_CIRCULAR_BUFFER_INL_H_
+#ifndef V8_CIRCULAR_QUEUE_INL_H_
+#define V8_CIRCULAR_QUEUE_INL_H_
 
 #include "circular-queue.h"
 
@@ -50,4 +50,4 @@
 
 } }  // namespace v8::internal
 
-#endif  // V8_CIRCULAR_BUFFER_INL_H_
+#endif  // V8_CIRCULAR_QUEUE_INL_H_
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index f680c60..724445e 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 
 #include "bootstrapper.h"
 #include "code-stubs.h"
+#include "stub-cache.h"
 #include "factory.h"
 #include "gdb-jit.h"
 #include "macro-assembler.h"
@@ -39,7 +40,7 @@
 bool CodeStub::FindCodeInCache(Code** code_out) {
   Heap* heap = Isolate::Current()->heap();
   int index = heap->code_stubs()->FindEntry(GetKey());
-  if (index != NumberDictionary::kNotFound) {
+  if (index != UnseededNumberDictionary::kNotFound) {
     *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
     return true;
   }
@@ -60,21 +61,29 @@
 }
 
 
+SmartArrayPointer<const char> CodeStub::GetName() {
+  char buffer[100];
+  NoAllocationStringAllocator allocator(buffer,
+                                        static_cast<unsigned>(sizeof(buffer)));
+  StringStream stream(&allocator);
+  PrintName(&stream);
+  return stream.ToCString();
+}
+
+
 void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
   code->set_major_key(MajorKey());
 
   Isolate* isolate = masm->isolate();
-  PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
-  GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
+  SmartArrayPointer<const char> name = GetName();
+  PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
   Counters* counters = isolate->counters();
   counters->total_stubs_code_size()->Increment(code->instruction_size());
 
 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_print_code_stubs) {
-#ifdef DEBUG
-    Print();
-#endif
-    code->Disassemble(GetName());
+    code->Disassemble(*name);
     PrintF("\n");
   }
 #endif
@@ -105,7 +114,6 @@
     // Copy the generated code into a heap object.
     Code::Flags flags = Code::ComputeFlags(
         static_cast<Code::Kind>(GetCodeKind()),
-        InLoop(),
         GetICState());
     Handle<Code> new_object = factory->NewCode(
         desc, flags, masm.CodeObject(), NeedsImmovableCode());
@@ -113,9 +121,9 @@
     FinishCode(*new_object);
 
     // Update the dictionary and the root in Heap.
-    Handle<NumberDictionary> dict =
+    Handle<UnseededNumberDictionary> dict =
         factory->DictionaryAtNumberPut(
-            Handle<NumberDictionary>(heap->code_stubs()),
+            Handle<UnseededNumberDictionary>(heap->code_stubs()),
             GetKey(),
             new_object);
     heap->public_set_code_stubs(*dict);
@@ -143,7 +151,6 @@
     // Try to copy the generated code into a heap object.
     Code::Flags flags = Code::ComputeFlags(
         static_cast<Code::Kind>(GetCodeKind()),
-        InLoop(),
         GetICState());
     Object* new_object;
     { MaybeObject* maybe_new_object =
@@ -158,7 +165,7 @@
     MaybeObject* maybe_new_object =
         heap->code_stubs()->AtNumberPut(GetKey(), code);
     if (maybe_new_object->ToObject(&new_object)) {
-      heap->public_set_code_stubs(NumberDictionary::cast(new_object));
+      heap->public_set_code_stubs(UnseededNumberDictionary::cast(new_object));
     }
   }
 
@@ -169,7 +176,7 @@
 const char* CodeStub::MajorName(CodeStub::Major major_key,
                                 bool allow_unknown_keys) {
   switch (major_key) {
-#define DEF_CASE(name) case name: return #name;
+#define DEF_CASE(name) case name: return #name "Stub";
     CODE_STUB_LIST(DEF_CASE)
 #undef DEF_CASE
     default:
@@ -197,6 +204,12 @@
     case CompareIC::HEAP_NUMBERS:
       GenerateHeapNumbers(masm);
       break;
+    case CompareIC::STRINGS:
+      GenerateStrings(masm);
+      break;
+    case CompareIC::SYMBOLS:
+      GenerateSymbols(masm);
+      break;
     case CompareIC::OBJECTS:
       GenerateObjects(masm);
       break;
@@ -206,13 +219,7 @@
 }
 
 
-const char* InstanceofStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-
+void InstanceofStub::PrintName(StringStream* stream) {
   const char* args = "";
   if (HasArgsInRegisters()) {
     args = "_REGS";
@@ -228,12 +235,170 @@
     return_true_false_object = "_TRUEFALSE";
   }
 
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "InstanceofStub%s%s%s",
-               args,
-               inline_check,
-               return_true_false_object);
-  return name_;
+  stream->Add("InstanceofStub%s%s%s",
+              args,
+              inline_check,
+              return_true_false_object);
+}
+
+
+void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
+  switch (elements_kind_) {
+    case FAST_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
+      break;
+    case FAST_DOUBLE_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
+      break;
+    case DICTIONARY_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
+      break;
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
+  switch (elements_kind_) {
+    case FAST_ELEMENTS:
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+      break;
+    case FAST_DOUBLE_ELEMENTS:
+      KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
+                                                             is_js_array_);
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+      break;
+    case DICTIONARY_ELEMENTS:
+      KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
+      break;
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void ArgumentsAccessStub::PrintName(StringStream* stream) {
+  const char* type_name = NULL;  // Make g++ happy.
+  switch (type_) {
+    case READ_ELEMENT: type_name = "ReadElement"; break;
+    case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
+    case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
+    case NEW_STRICT: type_name = "NewStrict"; break;
+  }
+  stream->Add("ArgumentsAccessStub_%s", type_name);
+}
+
+
+void CallFunctionStub::PrintName(StringStream* stream) {
+  const char* flags_name = NULL;  // Make g++ happy.
+  switch (flags_) {
+    case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
+    case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
+  }
+  stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
+}
+
+
+void ToBooleanStub::PrintName(StringStream* stream) {
+  stream->Add("ToBooleanStub_");
+  types_.Print(stream);
+}
+
+
+void ToBooleanStub::Types::Print(StringStream* stream) const {
+  if (IsEmpty()) stream->Add("None");
+  if (Contains(UNDEFINED)) stream->Add("Undefined");
+  if (Contains(BOOLEAN)) stream->Add("Bool");
+  if (Contains(NULL_TYPE)) stream->Add("Null");
+  if (Contains(SMI)) stream->Add("Smi");
+  if (Contains(SPEC_OBJECT)) stream->Add("SpecObject");
+  if (Contains(STRING)) stream->Add("String");
+  if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber");
+}
+
+
+void ToBooleanStub::Types::TraceTransition(Types to) const {
+  if (!FLAG_trace_ic) return;
+  char buffer[100];
+  NoAllocationStringAllocator allocator(buffer,
+                                        static_cast<unsigned>(sizeof(buffer)));
+  StringStream stream(&allocator);
+  stream.Add("[ToBooleanIC (");
+  Print(&stream);
+  stream.Add("->");
+  to.Print(&stream);
+  stream.Add(")]\n");
+  stream.OutputToStdOut();
+}
+
+
+bool ToBooleanStub::Types::Record(Handle<Object> object) {
+  if (object->IsUndefined()) {
+    Add(UNDEFINED);
+    return false;
+  } else if (object->IsBoolean()) {
+    Add(BOOLEAN);
+    return object->IsTrue();
+  } else if (object->IsNull()) {
+    Add(NULL_TYPE);
+    return false;
+  } else if (object->IsSmi()) {
+    Add(SMI);
+    return Smi::cast(*object)->value() != 0;
+  } else if (object->IsSpecObject()) {
+    Add(SPEC_OBJECT);
+    return !object->IsUndetectableObject();
+  } else if (object->IsString()) {
+    Add(STRING);
+    return !object->IsUndetectableObject() &&
+        String::cast(*object)->length() != 0;
+  } else if (object->IsHeapNumber()) {
+    ASSERT(!object->IsUndetectableObject());
+    Add(HEAP_NUMBER);
+    double value = HeapNumber::cast(*object)->value();
+    return value != 0 && !isnan(value);
+  } else {
+    // We should never see an internal object at runtime here!
+    UNREACHABLE();
+    return true;
+  }
+}
+
+
+bool ToBooleanStub::Types::NeedsMap() const {
+  return Contains(ToBooleanStub::SPEC_OBJECT)
+      || Contains(ToBooleanStub::STRING)
+      || Contains(ToBooleanStub::HEAP_NUMBER);
+}
+
+
+bool ToBooleanStub::Types::CanBeUndetectable() const {
+  return Contains(ToBooleanStub::SPEC_OBJECT)
+      || Contains(ToBooleanStub::STRING);
 }
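
ToBooleanStub::Types is a small bitset recording which input types a ToBoolean site has observed, and Record() doubles as the truth-value oracle. A standalone analogue over a toy tagged value; the bit values and the Value struct are mine, and the spec-object/undetectable cases from the original are omitted for brevity.

#include <cmath>
#include <cstdio>
#include <string>

enum TypeBit { UNDEFINED = 1, BOOLEAN = 2, NULL_TYPE = 4,
               SMI = 8, STRING = 16, HEAP_NUMBER = 32 };

// A toy tagged value standing in for a JS value.
struct Value {
  enum Kind { kUndefined, kNull, kBoolean, kSmi, kString, kNumber } kind;
  bool boolean;
  int smi;
  std::string str;
  double num;
};

// Mirrors the Record() decision table: note the type, return JS truthiness.
bool Record(const Value& v, unsigned* seen) {
  switch (v.kind) {
    case Value::kUndefined: *seen |= UNDEFINED;   return false;
    case Value::kNull:      *seen |= NULL_TYPE;   return false;
    case Value::kBoolean:   *seen |= BOOLEAN;     return v.boolean;
    case Value::kSmi:       *seen |= SMI;         return v.smi != 0;
    case Value::kString:    *seen |= STRING;      return !v.str.empty();
    case Value::kNumber:    *seen |= HEAP_NUMBER;
                            return v.num != 0 && !std::isnan(v.num);
  }
  return false;
}

int main() {
  unsigned seen = 0;
  Value values[] = {
      {Value::kSmi, false, 0, "", 0},                // falsy: 0
      {Value::kString, false, 0, "x", 0},            // truthy: non-empty
      {Value::kNumber, false, 0, "", std::nan("")},  // falsy: NaN
  };
  for (const Value& v : values)
    std::printf("truthy=%d\n", Record(v, &seen));
  std::printf("types seen: 0x%x\n", seen);           // SMI | STRING | HEAP_NUMBER
}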
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 56ef072..64c89b9 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -28,32 +28,38 @@
 #ifndef V8_CODE_STUBS_H_
 #define V8_CODE_STUBS_H_
 
+#include "allocation.h"
 #include "globals.h"
 
 namespace v8 {
 namespace internal {
 
-// List of code stubs used on all platforms. The order in this list is important
-// as only the stubs up to and including Instanceof allows nested stub calls.
+// List of code stubs used on all platforms.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
-  V(TypeRecordingBinaryOp)               \
+  V(UnaryOp)                             \
+  V(BinaryOp)                            \
   V(StringAdd)                           \
   V(SubString)                           \
   V(StringCompare)                       \
-  V(SmiOp)                               \
   V(Compare)                             \
   V(CompareIC)                           \
   V(MathPow)                             \
   V(TranscendentalCache)                 \
   V(Instanceof)                          \
+  /* All stubs above this line only exist in a few versions, which are  */  \
+  /* generated ahead of time.  Therefore compiling a call to one of     */  \
+  /* them can't cause a new stub to be compiled, so compiling a call to */  \
+  /* them is GC safe.  The ones below this line exist in many variants  */  \
+  /* so code compiling a call to one can cause a GC.  This means they   */  \
+  /* can't be called from other stubs, since stub generation code is    */  \
+  /* not GC safe.                                                       */  \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
   V(StackCheck)                          \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
   V(FastCloneShallowArray)               \
-  V(GenericUnaryOp)                      \
   V(RevertToNumber)                      \
   V(ToBoolean)                           \
   V(ToNumber)                            \
@@ -64,7 +70,10 @@
   V(NumberToString)                      \
   V(CEntry)                              \
   V(JSEntry)                             \
-  V(DebuggerStatement)
+  V(KeyedLoadElement)                    \
+  V(KeyedStoreElement)                   \
+  V(DebuggerStatement)                   \
+  V(StringDictionaryNegativeLookup)
 
 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
@@ -81,7 +90,8 @@
 // List of code stubs only used on MIPS platforms.
 #ifdef V8_TARGET_ARCH_MIPS
 #define CODE_STUB_LIST_MIPS(V)  \
-  V(RegExpCEntry)
+  V(RegExpCEntry)               \
+  V(DirectCEntry)
 #else
 #define CODE_STUB_LIST_MIPS(V)
 #endif
@@ -158,29 +168,24 @@
   virtual Major MajorKey() = 0;
   virtual int MinorKey() = 0;
 
-  // The CallFunctionStub needs to override this so it can encode whether a
-  // lazily generated function should be fully optimized or not.
-  virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
-
-  // TypeRecordingBinaryOpStub needs to override this.
+  // BinaryOpStub needs to override this.
   virtual int GetCodeKind();
 
-  // TypeRecordingBinaryOpStub needs to override this.
+  // BinaryOpStub needs to override this.
   virtual InlineCacheState GetICState() {
     return UNINITIALIZED;
   }
 
   // Returns a name for logging/debugging purposes.
-  virtual const char* GetName() { return MajorName(MajorKey(), false); }
+  SmartArrayPointer<const char> GetName();
+  virtual void PrintName(StringStream* stream) {
+    stream->Add("%s", MajorName(MajorKey(), false));
+  }
 
   // Returns whether the code generated for this stub needs to be allocated as
   // a fixed (non-moveable) code object.
   virtual bool NeedsImmovableCode() { return false; }
 
-  #ifdef DEBUG
-  virtual void Print() { PrintF("%s\n", GetName()); }
-#endif
-
   // Computes the key based on major and minor.
   uint32_t GetKey() {
     ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -188,6 +193,7 @@
            MajorKeyBits::encode(MajorKey());
   }
 
+  // See comment above, where Instanceof is defined.
   bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
 
   class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
@@ -261,9 +267,6 @@
   void Generate(MacroAssembler* masm);
 
  private:
-
-  const char* GetName() { return "StackCheckStub"; }
-
   Major MajorKey() { return StackCheck; }
   int MinorKey() { return 0; }
 };
@@ -278,7 +281,6 @@
  private:
   Major MajorKey() { return ToNumber; }
   int MinorKey() { return 0; }
-  const char* GetName() { return "ToNumberStub"; }
 };
 
 
@@ -290,7 +292,6 @@
   void Generate(MacroAssembler* masm);
 
  private:
-  const char* GetName() { return "FastNewClosureStub"; }
   Major MajorKey() { return FastNewClosure; }
   int MinorKey() { return strict_mode_; }
 
@@ -311,7 +312,6 @@
  private:
   int slots_;
 
-  const char* GetName() { return "FastNewContextStub"; }
   Major MajorKey() { return FastNewContext; }
   int MinorKey() { return slots_; }
 };
@@ -340,7 +340,6 @@
   Mode mode_;
   int length_;
 
-  const char* GetName() { return "FastCloneShallowArrayStub"; }
   Major MajorKey() { return FastCloneShallowArray; }
   int MinorKey() {
     ASSERT(mode_ == 0 || mode_ == 1);
@@ -358,7 +357,7 @@
     kReturnTrueFalseObject = 1 << 2
   };
 
-  explicit InstanceofStub(Flags flags) : flags_(flags), name_(NULL) { }
+  explicit InstanceofStub(Flags flags) : flags_(flags) { }
 
   static Register left();
   static Register right();
@@ -381,58 +380,9 @@
     return (flags_ & kReturnTrueFalseObject) != 0;
   }
 
-  const char* GetName();
+  virtual void PrintName(StringStream* stream);
 
   Flags flags_;
-  char* name_;
-};
-
-
-enum NegativeZeroHandling {
-  kStrictNegativeZero,
-  kIgnoreNegativeZero
-};
-
-
-enum UnaryOpFlags {
-  NO_UNARY_FLAGS = 0,
-  NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
-};
-
-
-class GenericUnaryOpStub : public CodeStub {
- public:
-  GenericUnaryOpStub(Token::Value op,
-                     UnaryOverwriteMode overwrite,
-                     UnaryOpFlags flags,
-                     NegativeZeroHandling negative_zero = kStrictNegativeZero)
-      : op_(op),
-        overwrite_(overwrite),
-        include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
-        negative_zero_(negative_zero) { }
-
- private:
-  Token::Value op_;
-  UnaryOverwriteMode overwrite_;
-  bool include_smi_code_;
-  NegativeZeroHandling negative_zero_;
-
-  class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
-  class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
-  class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
-  class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
-
-  Major MajorKey() { return GenericUnaryOp; }
-  int MinorKey() {
-    return OpField::encode(op_) |
-        OverwriteField::encode(overwrite_) |
-        IncludeSmiCodeField::encode(include_smi_code_) |
-        NegativeZeroField::encode(negative_zero_);
-  }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName();
 };
 
 
@@ -444,8 +394,6 @@
  private:
   virtual CodeStub::Major MajorKey() { return MathPow; }
   virtual int MinorKey() { return 0; }
-
-  const char* GetName() { return "MathPowStub"; }
 };
 
 
@@ -471,6 +419,8 @@
 
   void GenerateSmis(MacroAssembler* masm);
   void GenerateHeapNumbers(MacroAssembler* masm);
+  void GenerateSymbols(MacroAssembler* masm);
+  void GenerateStrings(MacroAssembler* masm);
   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
 
@@ -510,8 +460,7 @@
       include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
       include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
       lhs_(lhs),
-      rhs_(rhs),
-      name_(NULL) { }
+      rhs_(rhs) { }
 
   CompareStub(Condition cc,
               bool strict,
@@ -522,8 +471,7 @@
       include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
       include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
       lhs_(no_reg),
-      rhs_(no_reg),
-      name_(NULL) { }
+      rhs_(no_reg) { }
 
   void Generate(MacroAssembler* masm);
 
@@ -577,26 +525,7 @@
 
   // Unfortunately you have to run without snapshots to see most of these
   // names in the profile since most compare stubs end up in the snapshot.
-  char* name_;
-  const char* GetName();
-#ifdef DEBUG
-  void Print() {
-    PrintF("CompareStub (minor %d) (cc %d), (strict %s), "
-           "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ",
-           MinorKey(),
-           static_cast<int>(cc_),
-           strict_ ? "true" : "false",
-           never_nan_nan_ ? "true" : "false",
-           include_smi_compare_ ? "inluded" : "not included",
-           include_number_compare_ ? "included" : "not included");
-
-    if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
-      PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
-    } else {
-      PrintF("\n");
-    }
-  }
-#endif
+  virtual void PrintName(StringStream* stream);
 };
 
 
@@ -627,8 +556,6 @@
   int MinorKey();
 
   bool NeedsImmovableCode();
-
-  const char* GetName() { return "CEntryStub"; }
 };
 
 
@@ -644,8 +571,6 @@
  private:
   Major MajorKey() { return JSEntry; }
   int MinorKey() { return 0; }
-
-  const char* GetName() { return "JSEntryStub"; }
 };
 
 
@@ -658,7 +583,9 @@
  private:
   int MinorKey() { return 1; }
 
-  const char* GetName() { return "JSConstructEntryStub"; }
+  virtual void PrintName(StringStream* stream) {
+    stream->Add("JSConstructEntryStub");
+  }
 };
 
 
@@ -666,7 +593,8 @@
  public:
   enum Type {
     READ_ELEMENT,
-    NEW_NON_STRICT,
+    NEW_NON_STRICT_FAST,
+    NEW_NON_STRICT_SLOW,
     NEW_STRICT
   };
 
@@ -680,28 +608,11 @@
 
   void Generate(MacroAssembler* masm);
   void GenerateReadElement(MacroAssembler* masm);
-  void GenerateNewObject(MacroAssembler* masm);
+  void GenerateNewStrict(MacroAssembler* masm);
+  void GenerateNewNonStrictFast(MacroAssembler* masm);
+  void GenerateNewNonStrictSlow(MacroAssembler* masm);
 
-  int GetArgumentsBoilerplateIndex() const {
-  return (type_ == NEW_STRICT)
-      ? Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX
-      : Context::ARGUMENTS_BOILERPLATE_INDEX;
-  }
-
-  int GetArgumentsObjectSize() const {
-    if (type_ == NEW_STRICT)
-      return Heap::kArgumentsObjectSizeStrict;
-    else
-      return Heap::kArgumentsObjectSize;
-  }
-
-  const char* GetName() { return "ArgumentsAccessStub"; }
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("ArgumentsAccessStub (type %d)\n", type_);
-  }
-#endif
+  virtual void PrintName(StringStream* stream);
 };
 
 
@@ -714,14 +625,6 @@
   int MinorKey() { return 0; }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "RegExpExecStub"; }
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("RegExpExecStub\n");
-  }
-#endif
 };
 
 
@@ -734,21 +637,13 @@
   int MinorKey() { return 0; }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "RegExpConstructResultStub"; }
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("RegExpConstructResultStub\n");
-  }
-#endif
 };
 
 
 class CallFunctionStub: public CodeStub {
  public:
-  CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
-      : argc_(argc), in_loop_(in_loop), flags_(flags) { }
+  CallFunctionStub(int argc, CallFunctionFlags flags)
+      : argc_(argc), flags_(flags) { }
 
   void Generate(MacroAssembler* masm);
 
@@ -758,34 +653,22 @@
 
  private:
   int argc_;
-  InLoopFlag in_loop_;
   CallFunctionFlags flags_;
 
-#ifdef DEBUG
-  void Print() {
-    PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
-           argc_,
-           static_cast<int>(in_loop_),
-           static_cast<int>(flags_));
-  }
-#endif
+  virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
-  class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
-  class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
-  class ArgcBits: public BitField<int, 2, 32 - 2> {};
+  class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
+  class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
 
   Major MajorKey() { return CallFunction; }
   int MinorKey() {
     // Encode the parameters in a unique 32 bit value.
-    return InLoopBits::encode(in_loop_)
-           | FlagBits::encode(flags_)
-           | ArgcBits::encode(argc_);
+    return FlagBits::encode(flags_) | ArgcBits::encode(argc_);
   }
 
-  InLoopFlag InLoop() { return in_loop_; }
-  bool ReceiverMightBeValue() {
-    return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
+  bool ReceiverMightBeImplicit() {
+    return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
   }
 };
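
The CallFunctionStub minor key above packs the call flags into bit 0 and the argument count into the remaining 31 bits via BitField templates. The following is a minimal, self-contained sketch of that encode/decode pattern; BitFieldSketch and the *Sketch enum are illustrative stand-ins, not V8's actual BitField or CallFunctionFlags types.

#include <cassert>
#include <cstdint>

// Illustrative stand-in for v8::internal::BitField<T, shift, size> (assumed
// shape, not the real template).
template <class T, int shift, int size>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t bits) { return static_cast<T>((bits & kMask) >> shift); }
};

enum CallFunctionFlagsSketch { NO_CALL_FUNCTION_FLAGS = 0, RECEIVER_MIGHT_BE_IMPLICIT = 1 };
typedef BitFieldSketch<CallFunctionFlagsSketch, 0, 1> FlagBits;  // bit 0
typedef BitFieldSketch<unsigned, 1, 31> ArgcBits;                // bits 1..31

int main() {
  // Mirrors MinorKey(): FlagBits::encode(flags_) | ArgcBits::encode(argc_).
  uint32_t minor_key =
      FlagBits::encode(RECEIVER_MIGHT_BE_IMPLICIT) | ArgcBits::encode(3);
  assert(FlagBits::decode(minor_key) == RECEIVER_MIGHT_BE_IMPLICIT);
  assert(ArgcBits::decode(minor_key) == 3u);
  return 0;
}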
 
@@ -964,6 +847,111 @@
   DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
 };
 
+
+class KeyedLoadElementStub : public CodeStub {
+ public:
+  explicit KeyedLoadElementStub(ElementsKind elements_kind)
+      : elements_kind_(elements_kind)
+  { }
+
+  Major MajorKey() { return KeyedLoadElement; }
+  int MinorKey() { return elements_kind_; }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  ElementsKind elements_kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
+};
+
+
+class KeyedStoreElementStub : public CodeStub {
+ public:
+  KeyedStoreElementStub(bool is_js_array,
+                        ElementsKind elements_kind)
+    : is_js_array_(is_js_array),
+    elements_kind_(elements_kind) { }
+
+  Major MajorKey() { return KeyedStoreElement; }
+  int MinorKey() {
+    return (is_js_array_ ? 0 : kElementsKindCount) + elements_kind_;
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  bool is_js_array_;
+  ElementsKind elements_kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+  enum Type {
+    UNDEFINED,
+    BOOLEAN,
+    NULL_TYPE,
+    SMI,
+    SPEC_OBJECT,
+    STRING,
+    HEAP_NUMBER,
+    NUMBER_OF_TYPES
+  };
+
+  // At most 8 different types can be distinguished, because the Code object
+  // only has room for a single byte to hold a set of these types. :-P
+  STATIC_ASSERT(NUMBER_OF_TYPES <= 8);
+
+  class Types {
+   public:
+    Types() {}
+    explicit Types(byte bits) : set_(bits) {}
+
+    bool IsEmpty() const { return set_.IsEmpty(); }
+    bool Contains(Type type) const { return set_.Contains(type); }
+    void Add(Type type) { set_.Add(type); }
+    byte ToByte() const { return set_.ToIntegral(); }
+    void Print(StringStream* stream) const;
+    void TraceTransition(Types to) const;
+    bool Record(Handle<Object> object);
+    bool NeedsMap() const;
+    bool CanBeUndetectable() const;
+
+   private:
+    EnumSet<Type, byte> set_;
+  };
+
+  static Types no_types() { return Types(); }
+  static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
+
+  explicit ToBooleanStub(Register tos, Types types = Types())
+      : tos_(tos), types_(types) { }
+
+  void Generate(MacroAssembler* masm);
+  virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
+  virtual void PrintName(StringStream* stream);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
+
+  virtual void FinishCode(Code* code) {
+    code->set_to_boolean_state(types_.ToByte());
+  }
+
+  void CheckOddball(MacroAssembler* masm,
+                    Type type,
+                    Heap::RootListIndex value,
+                    bool result);
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  Register tos_;
+  Types types_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODE_STUBS_H_
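
ToBooleanStub::Types above records every type observed at a to-boolean site as one bit in a byte-sized EnumSet, and MinorKey() concatenates the register code with that byte. Below is a rough, self-contained sketch of that bookkeeping; TypesSketch is a hypothetical stand-in for the real EnumSet-backed Types class.

#include <cassert>
#include <cstdint>

typedef uint8_t byte;

enum TypeSketch {  // mirrors ToBooleanStub::Type
  UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING, HEAP_NUMBER,
  NUMBER_OF_TYPES
};

// Hypothetical stand-in for EnumSet<Type, byte>: one bit per observed type.
class TypesSketch {
 public:
  TypesSketch() : bits_(0) {}
  explicit TypesSketch(byte bits) : bits_(bits) {}
  bool Contains(TypeSketch t) const { return (bits_ & (1 << t)) != 0; }
  void Add(TypeSketch t) { bits_ = static_cast<byte>(bits_ | (1 << t)); }
  byte ToByte() const { return bits_; }
 private:
  byte bits_;  // NUMBER_OF_TYPES <= 8, so one byte suffices.
};

int main() {
  TypesSketch types;
  types.Add(SMI);
  types.Add(HEAP_NUMBER);
  int tos_code = 2;  // hypothetical register code for 'tos'
  // Mirrors MinorKey(): register code in the high bits, type set in the low byte.
  int minor_key = (tos_code << NUMBER_OF_TYPES) | types.ToByte();
  TypesSketch decoded(static_cast<byte>(minor_key & 0xff));
  assert(decoded.Contains(SMI) && decoded.Contains(HEAP_NUMBER));
  assert(!decoded.Contains(STRING));
  return 0;
}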
diff --git a/src/code.h b/src/code.h
index 072344b..766c932 100644
--- a/src/code.h
+++ b/src/code.h
@@ -28,6 +28,8 @@
 #ifndef V8_CODE_H_
 #define V8_CODE_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/codegen.cc b/src/codegen.cc
index 4bbe6ae..cdc9ba1 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -34,7 +34,6 @@
 #include "prettyprinter.h"
 #include "rewriter.h"
 #include "runtime.h"
-#include "scopeinfo.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -170,23 +169,21 @@
 #endif  // ENABLE_DISASSEMBLER
 }
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Vector<const char> kRegexp = CStrVector("regexp");
 
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
   ASSERT(type != NULL);
-  if (!LOGGER->is_logging() && !CpuProfiler::is_profiling()) return false;
+  Isolate* isolate = Isolate::Current();
+  if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
+    return false;
+  }
   Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
   if (FLAG_log_regexp) {
-    if (name->IsEqualTo(kRegexp))
+    if (name->IsEqualTo(CStrVector("regexp")))
       return true;
   }
   return false;
 }
 
-#endif
-
 
 bool CodeGenerator::RecordPositions(MacroAssembler* masm,
                                     int pos,
@@ -202,37 +199,19 @@
 }
 
 
-const char* GenericUnaryOpStub::GetName() {
-  switch (op_) {
-    case Token::SUB:
-      if (negative_zero_ == kStrictNegativeZero) {
-        return overwrite_ == UNARY_OVERWRITE
-            ? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
-            : "GenericUnaryOpStub_SUB_Alloc_Strict0";
-      } else {
-        return overwrite_ == UNARY_OVERWRITE
-            ? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
-            : "GenericUnaryOpStub_SUB_Alloc_Ignore0";
-      }
-    case Token::BIT_NOT:
-      return overwrite_ == UNARY_OVERWRITE
-          ? "GenericUnaryOpStub_BIT_NOT_Overwrite"
-          : "GenericUnaryOpStub_BIT_NOT_Alloc";
-    default:
-      UNREACHABLE();
-      return "<unknown>";
-  }
-}
-
-
 void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
   switch (type_) {
     case READ_ELEMENT:
       GenerateReadElement(masm);
       break;
-    case NEW_NON_STRICT:
+    case NEW_NON_STRICT_FAST:
+      GenerateNewNonStrictFast(masm);
+      break;
+    case NEW_NON_STRICT_SLOW:
+      GenerateNewNonStrictSlow(masm);
+      break;
     case NEW_STRICT:
-      GenerateNewObject(masm);
+      GenerateNewStrict(masm);
       break;
   }
 }
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 5bd8bf3..28e833a 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -52,8 +52,7 @@
       eval_global_(isolate, kEvalGlobalGenerations),
       eval_contextual_(isolate, kEvalContextualGenerations),
       reg_exp_(isolate, kRegExpGenerations),
-      enabled_(true),
-      eager_optimizing_set_(NULL) {
+      enabled_(true) {
   CompilationSubCache* subcaches[kSubCacheCount] =
     {&script_, &eval_global_, &eval_contextual_, &reg_exp_};
   for (int i = 0; i < kSubCacheCount; ++i) {
@@ -62,10 +61,7 @@
 }
 
 
-CompilationCache::~CompilationCache() {
-  delete eager_optimizing_set_;
-  eager_optimizing_set_ = NULL;
-}
+CompilationCache::~CompilationCache() {}
 
 
 static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
@@ -457,47 +453,6 @@
 }
 
 
-static bool SourceHashCompare(void* key1, void* key2) {
-  return key1 == key2;
-}
-
-
-HashMap* CompilationCache::EagerOptimizingSet() {
-  if (eager_optimizing_set_ == NULL) {
-    eager_optimizing_set_ = new HashMap(&SourceHashCompare);
-  }
-  return eager_optimizing_set_;
-}
-
-
-bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
-  if (FLAG_opt_eagerly) return true;
-  uint32_t hash = function->SourceHash();
-  void* key = reinterpret_cast<void*>(hash);
-  return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
-}
-
-
-void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
-  uint32_t hash = function->SourceHash();
-  void* key = reinterpret_cast<void*>(hash);
-  EagerOptimizingSet()->Lookup(key, hash, true);
-}
-
-
-void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
-  uint32_t hash = function->SourceHash();
-  void* key = reinterpret_cast<void*>(hash);
-  EagerOptimizingSet()->Remove(key, hash);
-}
-
-
-void CompilationCache::ResetEagerOptimizingData() {
-  HashMap* set = EagerOptimizingSet();
-  if (set->occupancy() > 0) set->Clear();
-}
-
-
 void CompilationCache::Clear() {
   for (int i = 0; i < kSubCacheCount; i++) {
     subcaches_[i]->Clear();
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 887d4e8..4339d22 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -223,14 +223,6 @@
                  JSRegExp::Flags flags,
                  Handle<FixedArray> data);
 
-  // Support for eager optimization tracking.
-  bool ShouldOptimizeEagerly(Handle<JSFunction> function);
-  void MarkForEagerOptimizing(Handle<JSFunction> function);
-  void MarkForLazyOptimizing(Handle<JSFunction> function);
-
-  // Reset the eager optimization tracking data.
-  void ResetEagerOptimizingData();
-
   // Clear the cache - also used to initialize the cache at startup.
   void Clear();
 
@@ -250,6 +242,7 @@
   // cache during debugging to make sure new scripts are always compiled.
   void Enable();
   void Disable();
+
  private:
   explicit CompilationCache(Isolate* isolate);
   ~CompilationCache();
@@ -274,8 +267,6 @@
   // Current enable state of the compilation cache.
   bool enabled_;
 
-  HashMap* eager_optimizing_set_;
-
   friend class Isolate;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationCache);
diff --git a/src/compiler.cc b/src/compiler.cc
old mode 100755
new mode 100644
index 6a9bc27..5e1c4a9
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -32,7 +32,6 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "compilation-cache.h"
-#include "data-flow.h"
 #include "debug.h"
 #include "full-codegen.h"
 #include "gdb-jit.h"
@@ -42,6 +41,7 @@
 #include "parser.h"
 #include "rewriter.h"
 #include "runtime-profiler.h"
+#include "scanner-character-streams.h"
 #include "scopeinfo.h"
 #include "scopes.h"
 #include "vm-state-inl.h"
@@ -95,22 +95,21 @@
 }
 
 
+// Disable optimization for the rest of the compilation pipeline.
 void CompilationInfo::DisableOptimization() {
-  if (FLAG_optimize_closures) {
-    // If we allow closures optimizations and it's an optimizable closure
-    // mark it correspondingly.
-    bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
-    if (is_closure) {
-      bool is_optimizable_closure =
-          !scope_->outer_scope_calls_eval() && !scope_->inside_with();
-      if (is_optimizable_closure) {
-        SetMode(BASE);
-        return;
-      }
-    }
-  }
+  bool is_optimizable_closure =
+    FLAG_optimize_closures &&
+    closure_.is_null() &&
+    !scope_->HasTrivialOuterContext() &&
+    !scope_->outer_scope_calls_non_strict_eval() &&
+    !scope_->inside_with();
+  SetMode(is_optimizable_closure ? BASE : NONOPT);
+}
 
-  SetMode(NONOPT);
+
+void CompilationInfo::AbortOptimization() {
+  Handle<Code> code(shared_info()->code());
+  SetCode(code);
 }
 
 
@@ -122,20 +121,23 @@
 // all. However crankshaft support recompilation of functions, so in this case
 // the full compiler need not be be used if a debugger is attached, but only if
 // break points has actually been set.
-static bool AlwaysFullCompiler() {
+static bool is_debugging_active() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Isolate* isolate = Isolate::Current();
-  if (V8::UseCrankshaft()) {
-    return FLAG_always_full_compiler || isolate->debug()->has_break_points();
-  } else {
-    return FLAG_always_full_compiler || isolate->debugger()->IsDebuggerActive();
-  }
+  return V8::UseCrankshaft() ?
+    isolate->debug()->has_break_points() :
+    isolate->debugger()->IsDebuggerActive();
 #else
-  return FLAG_always_full_compiler;
+  return false;
 #endif
 }
 
 
+static bool AlwaysFullCompiler() {
+  return FLAG_always_full_compiler || is_debugging_active();
+}
+
+
 static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
   int opt_count = function->shared()->opt_count();
   function->shared()->set_opt_count(opt_count + 1);
@@ -162,31 +164,6 @@
 }
 
 
-static void AbortAndDisable(CompilationInfo* info) {
-  // Disable optimization for the shared function info and mark the
-  // code as non-optimizable. The marker on the shared function info
-  // is there because we flush non-optimized code thereby loosing the
-  // non-optimizable information for the code. When the code is
-  // regenerated and set on the shared function info it is marked as
-  // non-optimizable if optimization is disabled for the shared
-  // function info.
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  shared->set_optimization_disabled(true);
-  Handle<Code> code = Handle<Code>(shared->code());
-  ASSERT(code->kind() == Code::FUNCTION);
-  code->set_optimizable(false);
-  info->SetCode(code);
-  Isolate* isolate = code->GetIsolate();
-  isolate->compilation_cache()->MarkForLazyOptimizing(info->closure());
-  if (FLAG_trace_opt) {
-    PrintF("[disabled optimization for: ");
-    info->closure()->PrintName();
-    PrintF(" / %" V8PRIxPTR "]\n",
-           reinterpret_cast<intptr_t>(*info->closure()));
-  }
-}
-
-
 static bool MakeCrankshaftCode(CompilationInfo* info) {
   // Test if we can optimize this function when asked to. We can only
   // do this after the scopes are computed.
@@ -220,7 +197,9 @@
   const int kMaxOptCount =
       FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
   if (info->shared_info()->opt_count() > kMaxOptCount) {
-    AbortAndDisable(info);
+    info->AbortOptimization();
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -239,7 +218,9 @@
   if ((scope->num_parameters() + 1) > parameter_limit ||
       (info->osr_ast_id() != AstNode::kNoNumber &&
        scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
-    AbortAndDisable(info);
+    info->AbortOptimization();
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -312,28 +293,32 @@
     }
   }
 
-  // Compilation with the Hydrogen compiler failed. Keep using the
-  // shared code but mark it as unoptimizable.
-  AbortAndDisable(info);
+  // Keep using the shared code.
+  info->AbortOptimization();
+  if (!builder.inline_bailout()) {
+    // Mark the shared code as unoptimizable unless it was an inlined
+    // function that bailed out.
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
+  }
   // True indicates the compilation pipeline is still going, not necessarily
   // that we optimized the code.
   return true;
 }
 
 
+static bool GenerateCode(CompilationInfo* info) {
+  return V8::UseCrankshaft() ?
+    MakeCrankshaftCode(info) :
+    FullCodeGenerator::MakeCode(info);
+}
+
+
 static bool MakeCode(CompilationInfo* info) {
   // Precondition: code has been parsed.  Postcondition: the code field in
   // the compilation info is set if compilation succeeded.
   ASSERT(info->function() != NULL);
-
-  if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
-    if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
-    // If crankshaft is not supported fall back to full code generator
-    // for all compilation.
-    return FullCodeGenerator::MakeCode(info);
-  }
-
-  return false;
+  return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
 }
 
 
@@ -353,9 +338,8 @@
 
 
 static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
-
   Isolate* isolate = info->isolate();
+  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
 
   ASSERT(!isolate->global_context().is_null());
@@ -428,7 +412,8 @@
         String::cast(script->name())));
     GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
                    script,
-                   info->code()));
+                   info->code(),
+                   info));
   } else {
     PROFILE(isolate, CodeCreateEvent(
         info->is_eval()
@@ -437,7 +422,7 @@
         *info->code(),
         *result,
         isolate->heap()->empty_string()));
-    GDBJIT(AddCode(Handle<String>(), script, info->code()));
+    GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
   }
 
   // Hint to the runtime system used when allocating space for initial
@@ -494,15 +479,21 @@
     // that would be compiled lazily anyway, so we skip the preparse step
     // in that case too.
     ScriptDataImpl* pre_data = input_pre_data;
+    bool harmony_block_scoping = natives != NATIVES_CODE &&
+                                 FLAG_harmony_block_scoping;
     if (pre_data == NULL
         && source_length >= FLAG_min_preparse_length) {
       if (source->IsExternalTwoByteString()) {
         ExternalTwoByteStringUC16CharacterStream stream(
             Handle<ExternalTwoByteString>::cast(source), 0, source->length());
-        pre_data = ParserApi::PartialPreParse(&stream, extension);
+        pre_data = ParserApi::PartialPreParse(&stream,
+                                              extension,
+                                              harmony_block_scoping);
       } else {
         GenericStringUC16CharacterStream stream(source, 0, source->length());
-        pre_data = ParserApi::PartialPreParse(&stream, extension);
+        pre_data = ParserApi::PartialPreParse(&stream,
+                                              extension,
+                                              harmony_block_scoping);
       }
     }
 
@@ -586,12 +577,13 @@
 
 
 bool Compiler::CompileLazy(CompilationInfo* info) {
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  Isolate* isolate = info->isolate();
+
+  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
 
   // The VM is in the COMPILER state until exiting this function.
-  VMState state(info->isolate(), COMPILER);
+  VMState state(isolate, COMPILER);
 
-  Isolate* isolate = info->isolate();
   PostponeInterruptsScope postpone(isolate);
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
@@ -628,6 +620,7 @@
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
 
       if (info->IsOptimizing()) {
+        ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
         function->ReplaceCode(*code);
       } else {
         // Update the shared function info with the compiled code and the
@@ -665,13 +658,10 @@
           // version of the function right away - unless the debugger is
           // active as it makes no sense to compile optimized code then.
           if (FLAG_always_opt &&
-              !Isolate::Current()->debug()->has_break_points()) {
+              !Isolate::Current()->DebuggerHasBreakPoints()) {
             CompilationInfo optimized(function);
             optimized.SetOptimizing(AstNode::kNoNumber);
             return CompileLazy(&optimized);
-          } else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
-              function)) {
-            isolate->runtime_profiler()->OptimizeSoon(*function);
           }
         }
       }
@@ -691,6 +681,7 @@
   CompilationInfo info(script);
   info.SetFunction(literal);
   info.SetScope(literal->scope());
+  if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
 
   LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
   // Determine if the function can be lazily compiled. This is necessary to
@@ -749,6 +740,7 @@
   function_info->set_start_position(lit->start_position());
   function_info->set_end_position(lit->end_position());
   function_info->set_is_expression(lit->is_expression());
+  function_info->set_is_anonymous(lit->is_anonymous());
   function_info->set_is_toplevel(is_toplevel);
   function_info->set_inferred_name(*lit->inferred_name());
   function_info->SetThisPropertyAssignmentsInfo(
@@ -756,6 +748,8 @@
       *lit->this_property_assignments());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
   function_info->set_strict_mode(lit->strict_mode());
+  function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
+  function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
 }
 
 
@@ -768,7 +762,8 @@
   // Log the code generation. If source information is available include
   // script name and line number. Check explicitly whether logging is
   // enabled as finding the line number is not free.
-  if (info->isolate()->logger()->is_logging() || CpuProfiler::is_profiling()) {
+  if (info->isolate()->logger()->is_logging() ||
+      CpuProfiler::is_profiling(info->isolate())) {
     Handle<Script> script = info->script();
     Handle<Code> code = info->code();
     if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
@@ -793,7 +788,8 @@
 
   GDBJIT(AddCode(Handle<String>(shared->DebugName()),
                  Handle<Script>(info->script()),
-                 Handle<Code>(info->code())));
+                 Handle<Code>(info->code()),
+                 info));
 }
 
 } }  // namespace v8::internal
diff --git a/src/compiler.h b/src/compiler.h
index a6a0d90..69ab27d 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -28,8 +28,8 @@
 #ifndef V8_COMPILER_H_
 #define V8_COMPILER_H_
 
+#include "allocation.h"
 #include "ast.h"
-#include "frame-element.h"
 #include "zone.h"
 
 namespace v8 {
@@ -49,11 +49,11 @@
     ASSERT(Isolate::Current() == isolate_);
     return isolate_;
   }
-  bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
-  bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
-  bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
-  bool is_strict_mode() const { return (flags_ & IsStrictMode::mask()) != 0; }
-  bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
+  bool is_lazy() const { return IsLazy::decode(flags_); }
+  bool is_eval() const { return IsEval::decode(flags_); }
+  bool is_global() const { return IsGlobal::decode(flags_); }
+  bool is_strict_mode() const { return IsStrictMode::decode(flags_); }
+  bool is_in_loop() const { return IsInLoop::decode(flags_); }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
   Handle<Code> code() const { return code_; }
@@ -144,6 +144,10 @@
     return V8::UseCrankshaft() && !closure_.is_null();
   }
 
+  // Disable all optimization attempts of this info for the rest of the
+  // current compilation pipeline.
+  void AbortOptimization();
+
  private:
   Isolate* isolate_;
 
@@ -163,6 +167,7 @@
 
   void Initialize(Mode mode) {
     mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+    ASSERT(!script_.is_null());
     if (script_->type()->value() == Script::TYPE_NATIVE) {
       MarkAsNative();
     }
@@ -191,6 +196,7 @@
   // Is this a function from our natives.
   class IsNative: public BitField<bool, 6, 1> {};
 
+
   unsigned flags_;
 
   // Fields filled in by the compilation pipeline.
@@ -288,22 +294,6 @@
 };
 
 
-// During compilation we need a global list of handles to constants
-// for frame elements.  When the zone gets deleted, we make sure to
-// clear this list of handles as well.
-class CompilationZoneScope : public ZoneScope {
- public:
-  explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
-  virtual ~CompilationZoneScope() {
-    if (ShouldDeleteOnExit()) {
-      Isolate* isolate = Isolate::Current();
-      isolate->frame_element_constant_list()->Clear();
-      isolate->result_constant_list()->Clear();
-    }
-  }
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_COMPILER_H_
diff --git a/src/contexts.cc b/src/contexts.cc
index 520f3dd..4f93abd 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,6 +34,16 @@
 namespace v8 {
 namespace internal {
 
+Context* Context::declaration_context() {
+  Context* current = this;
+  while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
+    current = current->previous();
+    ASSERT(current->closure() == closure());
+  }
+  return current;
+}
+
+
 JSBuiltinsObject* Context::builtins() {
   GlobalObject* object = global();
   if (object->IsJSGlobalObject()) {
@@ -74,14 +84,18 @@
 }
 
 
-Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
-                               int* index_, PropertyAttributes* attributes) {
+Handle<Object> Context::Lookup(Handle<String> name,
+                               ContextLookupFlags flags,
+                               int* index_,
+                               PropertyAttributes* attributes,
+                               BindingFlags* binding_flags) {
   Isolate* isolate = GetIsolate();
   Handle<Context> context(this, isolate);
 
   bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
   *index_ = -1;
   *attributes = ABSENT;
+  *binding_flags = MISSING_BINDING;
 
   if (FLAG_trace_contexts) {
     PrintF("Context::Lookup(");
@@ -96,40 +110,65 @@
       PrintF("\n");
     }
 
-    // check extension/with object
-    if (context->has_extension()) {
-      Handle<JSObject> extension = Handle<JSObject>(context->extension(),
-                                                    isolate);
-      // Context extension objects needs to behave as if they have no
-      // prototype.  So even if we want to follow prototype chains, we
-      // need to only do a local lookup for context extension objects.
-      if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
-          extension->IsJSContextExtensionObject()) {
-        *attributes = extension->GetLocalPropertyAttribute(*name);
-      } else {
-        *attributes = extension->GetPropertyAttribute(*name);
-      }
-      if (*attributes != ABSENT) {
-        // property found
-        if (FLAG_trace_contexts) {
-          PrintF("=> found property in context object %p\n",
-                 reinterpret_cast<void*>(*extension));
+    // Check extension/with/global object.
+    if (!context->IsBlockContext() && context->has_extension()) {
+      if (context->IsCatchContext()) {
+        // Catch contexts have the variable name in the extension slot.
+        if (name->Equals(String::cast(context->extension()))) {
+          if (FLAG_trace_contexts) {
+            PrintF("=> found in catch context\n");
+          }
+          *index_ = Context::THROWN_OBJECT_INDEX;
+          *attributes = NONE;
+          *binding_flags = MUTABLE_IS_INITIALIZED;
+          return context;
         }
-        return extension;
+      } else {
+        ASSERT(context->IsGlobalContext() ||
+               context->IsFunctionContext() ||
+               context->IsWithContext());
+        // Global, function, and with contexts may have an object in the
+        // extension slot.
+        Handle<JSObject> extension(JSObject::cast(context->extension()),
+                                   isolate);
+        // Context extension objects need to behave as if they have no
+        // prototype.  So even if we want to follow prototype chains, we
+        // need to only do a local lookup for context extension objects.
+        if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+            extension->IsJSContextExtensionObject()) {
+          *attributes = extension->GetLocalPropertyAttribute(*name);
+        } else {
+          *attributes = extension->GetPropertyAttribute(*name);
+        }
+        if (*attributes != ABSENT) {
+          // property found
+          if (FLAG_trace_contexts) {
+            PrintF("=> found property in context object %p\n",
+                   reinterpret_cast<void*>(*extension));
+          }
+          return extension;
+        }
       }
     }
 
-    if (context->is_function_context()) {
-      // we have context-local slots
+    // Check serialized scope information of functions and blocks. Only
+    // functions can have parameters, and a function name.
+    if (context->IsFunctionContext() || context->IsBlockContext()) {
+      // We may have context-local slots.  Check locals in the context.
+      Handle<SerializedScopeInfo> scope_info;
+      if (context->IsFunctionContext()) {
+        scope_info = Handle<SerializedScopeInfo>(
+            context->closure()->shared()->scope_info(), isolate);
+      } else {
+        ASSERT(context->IsBlockContext());
+        scope_info = Handle<SerializedScopeInfo>(
+            SerializedScopeInfo::cast(context->extension()), isolate);
+      }
 
-      // check non-parameter locals in context
-      Handle<SerializedScopeInfo> scope_info(
-          context->closure()->shared()->scope_info(), isolate);
       Variable::Mode mode;
       int index = scope_info->ContextSlotIndex(*name, &mode);
       ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
       if (index >= 0) {
-        // slot found
         if (FLAG_trace_contexts) {
           PrintF("=> found local in context slot %d (mode = %d)\n",
                  index, mode);
@@ -142,63 +181,54 @@
         // declared variables that were introduced through declaration nodes)
         // must not appear here.
         switch (mode) {
-          case Variable::INTERNAL:  // fall through
-          case Variable::VAR: *attributes = NONE; break;
-          case Variable::CONST: *attributes = READ_ONLY; break;
-          case Variable::DYNAMIC: UNREACHABLE(); break;
-          case Variable::DYNAMIC_GLOBAL: UNREACHABLE(); break;
-          case Variable::DYNAMIC_LOCAL: UNREACHABLE(); break;
-          case Variable::TEMPORARY: UNREACHABLE(); break;
+          case Variable::INTERNAL:  // Fall through.
+          case Variable::VAR:
+            *attributes = NONE;
+            *binding_flags = MUTABLE_IS_INITIALIZED;
+            break;
+          case Variable::LET:
+            *attributes = NONE;
+            *binding_flags = MUTABLE_CHECK_INITIALIZED;
+            break;
+          case Variable::CONST:
+            *attributes = READ_ONLY;
+            *binding_flags = IMMUTABLE_CHECK_INITIALIZED;
+            break;
+          case Variable::DYNAMIC:
+          case Variable::DYNAMIC_GLOBAL:
+          case Variable::DYNAMIC_LOCAL:
+          case Variable::TEMPORARY:
+            UNREACHABLE();
+            break;
         }
         return context;
       }
 
-      // check parameter locals in context
-      int param_index = scope_info->ParameterIndex(*name);
-      if (param_index >= 0) {
-        // slot found.
-        int index = scope_info->ContextSlotIndex(
-            isolate->heap()->arguments_shadow_symbol(), NULL);
-        ASSERT(index >= 0);  // arguments must exist and be in the heap context
-        Handle<JSObject> arguments(JSObject::cast(context->get(index)),
-                                   isolate);
-        ASSERT(arguments->HasLocalProperty(isolate->heap()->length_symbol()));
-        if (FLAG_trace_contexts) {
-          PrintF("=> found parameter %d in arguments object\n", param_index);
-        }
-        *index_ = param_index;
-        *attributes = NONE;
-        return arguments;
-      }
-
-      // check intermediate context (holding only the function name variable)
+      // Check the slot corresponding to the intermediate context holding
+      // only the function name variable.
       if (follow_context_chain) {
         int index = scope_info->FunctionContextSlotIndex(*name);
         if (index >= 0) {
-          // slot found
           if (FLAG_trace_contexts) {
             PrintF("=> found intermediate function in context slot %d\n",
                    index);
           }
           *index_ = index;
           *attributes = READ_ONLY;
+          *binding_flags = IMMUTABLE_IS_INITIALIZED;
           return context;
         }
       }
     }
 
-    // proceed with enclosing context
+    // Proceed with the previous context.
     if (context->IsGlobalContext()) {
       follow_context_chain = false;
-    } else if (context->is_function_context()) {
-      context = Handle<Context>(Context::cast(context->closure()->context()),
-                                isolate);
     } else {
       context = Handle<Context>(context->previous(), isolate);
     }
   } while (follow_context_chain);
 
-  // slot not found
   if (FLAG_trace_contexts) {
     PrintF("=> no property/slot found\n");
   }
@@ -213,11 +243,12 @@
   // before the global context and check that there are no context
   // extension objects (conservative check for with statements).
   while (!context->IsGlobalContext()) {
-    // Check if the context is a potentially a with context.
+    // Check if the context is a catch or with context, or has introduced
+    // bindings by calling non-strict eval.
     if (context->has_extension()) return false;
 
     // Not a with context so it must be a function context.
-    ASSERT(context->is_function_context());
+    ASSERT(context->IsFunctionContext());
 
     // Check non-parameter locals.
     Handle<SerializedScopeInfo> scope_info(
@@ -234,7 +265,7 @@
     // Check context only holding the function name variable.
     index = scope_info->FunctionContextSlotIndex(*name);
     if (index >= 0) return false;
-    context = Context::cast(context->closure()->context());
+    context = context->previous();
   }
 
   // No local or potential with statement found so the variable is
@@ -243,6 +274,30 @@
 }
 
 
+void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+                                   bool* outer_scope_calls_non_strict_eval) {
+  // Walk up the context chain, checking all the function contexts to see
+  // whether they call eval.
+  Context* context = this;
+  while (!context->IsGlobalContext()) {
+    if (context->IsFunctionContext()) {
+      Handle<SerializedScopeInfo> scope_info(
+          context->closure()->shared()->scope_info());
+      if (scope_info->CallsEval()) {
+        *outer_scope_calls_eval = true;
+        if (!scope_info->IsStrictMode()) {
+          // No need to go further since the answers will not change from
+          // here.
+          *outer_scope_calls_non_strict_eval = true;
+          return;
+        }
+      }
+    }
+    context = context->previous();
+  }
+}
+
+
 void Context::AddOptimizedFunction(JSFunction* function) {
   ASSERT(IsGlobalContext());
 #ifdef DEBUG
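
The Context::Lookup change in this file threads a new BindingFlags out-parameter through the slot lookup, alongside the slot index and property attributes: VAR slots report MUTABLE_IS_INITIALIZED, LET slots MUTABLE_CHECK_INITIALIZED, and CONST slots READ_ONLY plus IMMUTABLE_CHECK_INITIALIZED. Below is a toy, self-contained model of that result contract only; the names ending in Sketch are hypothetical and none of this is V8 code.

#include <cassert>
#include <string>
#include <vector>

// Toy model of the (index, attributes, binding_flags) triple that
// Context::Lookup fills in.
enum PropertyAttributesSketch { NONE_ATTR, READ_ONLY_ATTR, ABSENT_ATTR };
enum BindingFlagsSketch {
  MUTABLE_IS_INITIALIZED_F, MUTABLE_CHECK_INITIALIZED_F,
  IMMUTABLE_CHECK_INITIALIZED_F, MISSING_BINDING_F
};

struct SlotSketch { std::string name; bool is_const; bool is_let; };

static bool LookupSketch(const std::vector<SlotSketch>& slots,
                         const std::string& name,
                         int* index,
                         PropertyAttributesSketch* attributes,
                         BindingFlagsSketch* binding_flags) {
  *index = -1;
  *attributes = ABSENT_ATTR;
  *binding_flags = MISSING_BINDING_F;
  for (size_t i = 0; i < slots.size(); ++i) {
    if (slots[i].name != name) continue;
    *index = static_cast<int>(i);
    if (slots[i].is_const) {            // 'const': read-only, may be uninitialized
      *attributes = READ_ONLY_ATTR;
      *binding_flags = IMMUTABLE_CHECK_INITIALIZED_F;
    } else if (slots[i].is_let) {       // 'let': mutable, may be uninitialized
      *attributes = NONE_ATTR;
      *binding_flags = MUTABLE_CHECK_INITIALIZED_F;
    } else {                            // 'var': mutable, always initialized
      *attributes = NONE_ATTR;
      *binding_flags = MUTABLE_IS_INITIALIZED_F;
    }
    return true;
  }
  return false;
}

int main() {
  std::vector<SlotSketch> slots;
  SlotSketch let_x = { "x", false, true };
  slots.push_back(let_x);
  int index;
  PropertyAttributesSketch attrs;
  BindingFlagsSketch flags;
  assert(LookupSketch(slots, "x", &index, &attrs, &flags));
  assert(index == 0 && attrs == NONE_ATTR && flags == MUTABLE_CHECK_INITIALIZED_F);
  return 0;
}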
diff --git a/src/contexts.h b/src/contexts.h
index e46619e..505f86c 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,6 +44,30 @@
 };
 
 
+// ES5 10.2 defines lexical environments with mutable and immutable bindings.
+// Immutable bindings have two states, initialized and uninitialized, and
+// their state is changed by the InitializeImmutableBinding method.
+//
+// The harmony proposal for block scoped bindings also introduces the
+// uninitialized state for mutable bindings. A 'let' declared variable
+// is a mutable binding that is created uninitialized upon activation of its
+// lexical environment and is initialized when evaluating its declaration
+// statement. Var declared variables are mutable bindings that are
+// immediately initialized upon creation. The BindingFlags enum represents
+// whether a binding has definitely been initialized. 'const' declared
+// variables are created as uninitialized immutable bindings.
+
+// In harmony mode accessing an uninitialized binding produces a reference
+// error.
+enum BindingFlags {
+  MUTABLE_IS_INITIALIZED,
+  MUTABLE_CHECK_INITIALIZED,
+  IMMUTABLE_IS_INITIALIZED,
+  IMMUTABLE_CHECK_INITIALIZED,
+  MISSING_BINDING
+};
+
+
 // Heap-allocated activation contexts.
 //
 // Contexts are implemented as FixedArray objects; the Context
@@ -88,6 +112,8 @@
   V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
   V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
   V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+  V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+    aliased_arguments_boilerplate) \
   V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
     strict_mode_arguments_boilerplate) \
   V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
@@ -106,7 +132,11 @@
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
   V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
   V(MAP_CACHE_INDEX, Object, map_cache) \
-  V(CONTEXT_DATA_INDEX, Object, data)
+  V(CONTEXT_DATA_INDEX, Object, data) \
+  V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+  V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
+  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -125,13 +155,6 @@
 //                statically allocated context slots. The names are needed
 //                for dynamic lookups in the presence of 'with' or 'eval'.
 //
-// [ fcontext  ]  A pointer to the innermost enclosing function context.
-//                It is the same for all contexts *allocated* inside a
-//                function, and the function context's fcontext points
-//                to itself. It is only needed for fast access of the
-//                function context (used for declarations, and static
-//                context slot access).
-//
 // [ previous  ]  A pointer to the previous context. It is NULL for
 //                function contexts, and non-NULL for 'with' contexts.
 //                Used to implement the 'with' statement.
@@ -153,19 +176,6 @@
 // (via static context addresses) or through 'eval' (dynamic context lookups).
 // Finally, the global context contains additional slots for fast access to
 // global properties.
-//
-// We may be able to simplify the implementation:
-//
-// - We may be able to get rid of 'fcontext': We can always use the fact that
-//   previous == NULL for function contexts and so we can search for them. They
-//   are only needed when doing dynamic declarations, and the context chains
-//   tend to be very very short (depth of nesting of 'with' statements). At
-//   the moment we also use it in generated code for context slot accesses -
-//   and there we don't want a loop because of code bloat - but we may not
-//   need it there after all (see comment in codegen_*.cc).
-//
-// - If we cannot get rid of fcontext, consider making 'previous' never NULL
-//   except for the global context. This could simplify Context::Lookup.
 
 class Context: public FixedArray {
  public:
@@ -179,16 +189,22 @@
   enum {
     // These slots are in all contexts.
     CLOSURE_INDEX,
-    FCONTEXT_INDEX,
     PREVIOUS_INDEX,
+    // The extension slot is used for either the global object (in global
+    // contexts), eval extension object (function contexts), subject of with
+    // (with contexts), or the variable name (catch contexts).
     EXTENSION_INDEX,
     GLOBAL_INDEX,
     MIN_CONTEXT_SLOTS,
 
+    // This slot holds the thrown value in catch contexts.
+    THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
+
     // These slots are only in global contexts.
     GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
     SECURITY_TOKEN_INDEX,
     ARGUMENTS_BOILERPLATE_INDEX,
+    ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
     STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
     JS_ARRAY_MAP_INDEX,
     REGEXP_RESULT_MAP_INDEX,
@@ -234,12 +250,16 @@
     OPAQUE_REFERENCE_FUNCTION_INDEX,
     CONTEXT_EXTENSION_FUNCTION_INDEX,
     OUT_OF_MEMORY_INDEX,
-    MAP_CACHE_INDEX,
     CONTEXT_DATA_INDEX,
+    ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+    DERIVED_HAS_TRAP_INDEX,
+    DERIVED_GET_TRAP_INDEX,
+    DERIVED_SET_TRAP_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
     OPTIMIZED_FUNCTIONS_LIST,  // Weak.
+    MAP_CACHE_INDEX,  // Weak.
     NEXT_CONTEXT_LINK,  // Weak.
 
     // Total number of slots.
@@ -252,9 +272,6 @@
   JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
   void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
 
-  Context* fcontext() { return Context::cast(get(FCONTEXT_INDEX)); }
-  void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }
-
   Context* previous() {
     Object* result = unchecked_previous();
     ASSERT(IsBootstrappingOrContext(result));
@@ -262,9 +279,13 @@
   }
   void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
 
-  bool has_extension() { return unchecked_extension() != NULL; }
-  JSObject* extension() { return JSObject::cast(unchecked_extension()); }
-  void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }
+  bool has_extension() { return extension() != NULL; }
+  Object* extension() { return get(EXTENSION_INDEX); }
+  void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+
+  // Get the context where var declarations will be hoisted to, which
+  // may be the context itself.
+  Context* declaration_context();
 
   GlobalObject* global() {
     Object* result = get(GLOBAL_INDEX);
@@ -283,8 +304,25 @@
   // Compute the global context by traversing the context chain.
   Context* global_context();
 
-  // Tells if this is a function context (as opposed to a 'with' context).
-  bool is_function_context() { return unchecked_previous() == NULL; }
+  // Predicates for context types.  IsGlobalContext is defined on Object
+  // because we frequently have to know if arbitrary objects are global
+  // contexts.
+  bool IsFunctionContext() {
+    Map* map = this->map();
+    return map == map->GetHeap()->function_context_map();
+  }
+  bool IsCatchContext() {
+    Map* map = this->map();
+    return map == map->GetHeap()->catch_context_map();
+  }
+  bool IsWithContext() {
+    Map* map = this->map();
+    return map == map->GetHeap()->with_context_map();
+  }
+  bool IsBlockContext() {
+    Map* map = this->map();
+    return map == map->GetHeap()->block_context_map();
+  }
 
   // Tells whether the global context is marked with out of memory.
   inline bool has_out_of_memory();
@@ -337,8 +375,11 @@
   // 4) index_ < 0 && result.is_null():
   //    there was no context found with the corresponding property.
   //    attributes == ABSENT.
-  Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags,
-                        int* index_, PropertyAttributes* attributes);
+  Handle<Object> Lookup(Handle<String> name,
+                        ContextLookupFlags flags,
+                        int* index_,
+                        PropertyAttributes* attributes,
+                        BindingFlags* binding_flags);
 
   // Determine if a local variable with the given name exists in a
   // context.  Do not consider context extension objects.  This is
@@ -349,6 +390,11 @@
   // eval.
   bool GlobalIfNotShadowedByEval(Handle<String> name);
 
+  // Determine if any function scope in the context call eval and if
+  // any of those calls are in non-strict mode.
+  void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+                            bool* outer_scope_calls_non_strict_eval);
+
   // Code generation support.
   static int SlotOffset(int index) {
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
@@ -368,7 +414,6 @@
  private:
   // Unchecked access to the slots.
   Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
-  Object* unchecked_extension() { return get(EXTENSION_INDEX); }
 
 #ifdef DEBUG
   // Bootstrapping-aware type checks.
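
To make the retained SlotOffset helper concrete: the offset is kHeaderSize + index * kPointerSize - kHeapObjectTag, i.e. the raw byte position of the slot inside the FixedArray minus the heap-object tag, so it can be added directly to a tagged context pointer. A tiny check using typical 32-bit values; the three constants below are assumed for illustration, not taken from this patch.

#include <cassert>

// Assumed, illustrative 32-bit values: 8-byte FixedArray header, 4-byte
// pointers, heap-object tag of 1.
static const int kHeaderSizeSketch = 8;
static const int kPointerSizeSketch = 4;
static const int kHeapObjectTagSketch = 1;

static int SlotOffsetSketch(int index) {
  return kHeaderSizeSketch + index * kPointerSizeSketch - kHeapObjectTagSketch;
}

int main() {
  // With FCONTEXT_INDEX gone, EXTENSION_INDEX is slot 2
  // (CLOSURE_INDEX = 0, PREVIOUS_INDEX = 1, EXTENSION_INDEX = 2).
  assert(SlotOffsetSketch(2) == 8 + 2 * 4 - 1);  // 15 bytes from the tagged pointer
  return 0;
}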
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index cb7dbf8..41cf0d5 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,19 +28,29 @@
 #ifndef V8_CONVERSIONS_INL_H_
 #define V8_CONVERSIONS_INL_H_
 
+#include <limits.h>        // Required for INT_MAX etc.
 #include <math.h>
-#include <float.h>         // required for DBL_MAX and on Win32 for finite()
+#include <float.h>         // Required for DBL_MAX and on Win32 for finite()
 #include <stdarg.h>
+#include "globals.h"       // Required for V8_INFINITY
 
 // ----------------------------------------------------------------------------
 // Extra POSIX/ANSI functions for Win32/MSVC.
 
 #include "conversions.h"
+#include "double.h"
 #include "platform.h"
+#include "scanner.h"
+#include "strtod.h"
 
 namespace v8 {
 namespace internal {
 
+static inline double JunkStringValue() {
+  return std::numeric_limits<double>::quiet_NaN();
+}
+
+
 // The fast double-to-unsigned-int conversion routine does not guarantee
 // rounding towards zero, or any reasonable value if the argument is larger
 // than what fits in an unsigned 32-bit integer.
@@ -77,30 +87,584 @@
 }
 
 
-int32_t NumberToInt32(Object* number) {
-  if (number->IsSmi()) return Smi::cast(number)->value();
-  return DoubleToInt32(number->Number());
-}
-
-
-uint32_t NumberToUint32(Object* number) {
-  if (number->IsSmi()) return Smi::cast(number)->value();
-  return DoubleToUint32(number->Number());
-}
-
-
 int32_t DoubleToInt32(double x) {
   int32_t i = FastD2I(x);
   if (FastI2D(i) == x) return i;
-  static const double two32 = 4294967296.0;
-  static const double two31 = 2147483648.0;
-  if (!isfinite(x) || x == 0) return 0;
-  if (x < 0 || x >= two32) x = modulo(x, two32);
-  x = (x >= 0) ? floor(x) : ceil(x) + two32;
-  return (int32_t) ((x >= two31) ? x - two32 : x);
+  Double d(x);
+  int exponent = d.Exponent();
+  if (exponent < 0) {
+    if (exponent <= -Double::kSignificandSize) return 0;
+    return d.Sign() * static_cast<int32_t>(d.Significand() >> -exponent);
+  } else {
+    if (exponent > 31) return 0;
+    return d.Sign() * static_cast<int32_t>(d.Significand() << exponent);
+  }
 }
 
 
+template <class Iterator, class EndMark>
+static bool SubStringEquals(Iterator* current,
+                            EndMark end,
+                            const char* substring) {
+  ASSERT(**current == *substring);
+  for (substring++; *substring != '\0'; substring++) {
+    ++*current;
+    if (*current == end || **current != *substring) return false;
+  }
+  ++*current;
+  return true;
+}
+
+
+// Returns true if a nonspace character has been found and false if the
+// end was reached before finding a nonspace character.
+template <class Iterator, class EndMark>
+static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
+                                     Iterator* current,
+                                     EndMark end) {
+  while (*current != end) {
+    if (!unicode_cache->IsWhiteSpace(**current)) return true;
+    ++*current;
+  }
+  return false;
+}
+
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+template <int radix_log_2, class Iterator, class EndMark>
+static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
+                                        Iterator current,
+                                        EndMark end,
+                                        bool negative,
+                                        bool allow_trailing_junk) {
+  ASSERT(current != end);
+
+  // Skip leading 0s.
+  while (*current == '0') {
+    ++current;
+    if (current == end) return SignedZero(negative);
+  }
+
+  int64_t number = 0;
+  int exponent = 0;
+  const int radix = (1 << radix_log_2);
+
+  do {
+    int digit;
+    if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
+      digit = static_cast<char>(*current) - '0';
+    } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
+      digit = static_cast<char>(*current) - 'a' + 10;
+    } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
+      digit = static_cast<char>(*current) - 'A' + 10;
+    } else {
+      if (allow_trailing_junk ||
+          !AdvanceToNonspace(unicode_cache, &current, end)) {
+        break;
+      } else {
+        return JunkStringValue();
+      }
+    }
+
+    number = number * radix + digit;
+    int overflow = static_cast<int>(number >> 53);
+    if (overflow != 0) {
+      // Overflow occurred. Need to determine which direction to round the
+      // result.
+      int overflow_bits_count = 1;
+      while (overflow > 1) {
+        overflow_bits_count++;
+        overflow >>= 1;
+      }
+
+      int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+      int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+      number >>= overflow_bits_count;
+      exponent = overflow_bits_count;
+
+      bool zero_tail = true;
+      while (true) {
+        ++current;
+        if (current == end || !isDigit(*current, radix)) break;
+        zero_tail = zero_tail && *current == '0';
+        exponent += radix_log_2;
+      }
+
+      if (!allow_trailing_junk &&
+          AdvanceToNonspace(unicode_cache, &current, end)) {
+        return JunkStringValue();
+      }
+
+      int middle_value = (1 << (overflow_bits_count - 1));
+      if (dropped_bits > middle_value) {
+        number++;  // Rounding up.
+      } else if (dropped_bits == middle_value) {
+        // Round to even for consistency with decimals: the half-way case
+        // rounds up if the significant part is odd and down otherwise.
+        if ((number & 1) != 0 || !zero_tail) {
+          number++;  // Rounding up.
+        }
+      }
+
+      // Rounding up may cause overflow.
+      if ((number & ((int64_t)1 << 53)) != 0) {
+        exponent++;
+        number >>= 1;
+      }
+      break;
+    }
+    ++current;
+  } while (current != end);
+
+  ASSERT(number < ((int64_t)1 << 53));
+  ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+  if (exponent == 0) {
+    if (negative) {
+      if (number == 0) return -0.0;
+      number = -number;
+    }
+    return static_cast<double>(number);
+  }
+
+  ASSERT(number != 0);
+  // The double could be constructed faster from number (mantissa), exponent
+  // and sign. Assuming this is a rare case, simpler code is used.
+  return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
+}
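
The overflow branch above keeps 53 significant bits and decides the fate of the dropped bits by round-half-to-even. Here is the same decision logic pulled out into a standalone helper so it can be exercised with small numbers; it is a sketch of the comparison only, not the function above.

#include <cassert>
#include <cstdint>

// Isolated illustration of the rounding decision: 'dropped_bits' are the bits
// shifted out of the significand, 'middle_value' is the half-way point
// (1 << (overflow_bits_count - 1)), and 'zero_tail' says whether every later
// digit was zero.
static uint64_t RoundSignificand(uint64_t number, int dropped_bits,
                                 int middle_value, bool zero_tail) {
  if (dropped_bits > middle_value) {
    number++;                                   // round up
  } else if (dropped_bits == middle_value) {
    // Half-way case: round to even unless nonzero digits follow.
    if ((number & 1) != 0 || !zero_tail) number++;
  }
  return number;
}

int main() {
  // With 3 dropped bits, middle_value is 0b100. An even significand with a zero
  // tail stays put; an odd one rounds up; a nonzero tail always rounds up.
  assert(RoundSignificand(0xA, 0x4, 0x4, true) == 0xA);
  assert(RoundSignificand(0xB, 0x4, 0x4, true) == 0xC);
  assert(RoundSignificand(0xA, 0x4, 0x4, false) == 0xB);
  return 0;
}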
+
+
+template <class Iterator, class EndMark>
+static double InternalStringToInt(UnicodeCache* unicode_cache,
+                                  Iterator current,
+                                  EndMark end,
+                                  int radix) {
+  const bool allow_trailing_junk = true;
+  const double empty_string_val = JunkStringValue();
+
+  if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+    return empty_string_val;
+  }
+
+  bool negative = false;
+  bool leading_zero = false;
+
+  if (*current == '+') {
+    // Ignore leading sign; skip following spaces.
+    ++current;
+    if (current == end) {
+      return JunkStringValue();
+    }
+  } else if (*current == '-') {
+    ++current;
+    if (current == end) {
+      return JunkStringValue();
+    }
+    negative = true;
+  }
+
+  if (radix == 0) {
+    // Radix detection.
+    if (*current == '0') {
+      ++current;
+      if (current == end) return SignedZero(negative);
+      if (*current == 'x' || *current == 'X') {
+        radix = 16;
+        ++current;
+        if (current == end) return JunkStringValue();
+      } else {
+        radix = 8;
+        leading_zero = true;
+      }
+    } else {
+      radix = 10;
+    }
+  } else if (radix == 16) {
+    if (*current == '0') {
+      // Allow "0x" prefix.
+      ++current;
+      if (current == end) return SignedZero(negative);
+      if (*current == 'x' || *current == 'X') {
+        ++current;
+        if (current == end) return JunkStringValue();
+      } else {
+        leading_zero = true;
+      }
+    }
+  }
+
+  if (radix < 2 || radix > 36) return JunkStringValue();
+
+  // Skip leading zeros.
+  while (*current == '0') {
+    leading_zero = true;
+    ++current;
+    if (current == end) return SignedZero(negative);
+  }
+
+  if (!leading_zero && !isDigit(*current, radix)) {
+    return JunkStringValue();
+  }
+
+  if (IsPowerOf2(radix)) {
+    switch (radix) {
+      case 2:
+        return InternalStringToIntDouble<1>(
+            unicode_cache, current, end, negative, allow_trailing_junk);
+      case 4:
+        return InternalStringToIntDouble<2>(
+            unicode_cache, current, end, negative, allow_trailing_junk);
+      case 8:
+        return InternalStringToIntDouble<3>(
+            unicode_cache, current, end, negative, allow_trailing_junk);
+
+      case 16:
+        return InternalStringToIntDouble<4>(
+            unicode_cache, current, end, negative, allow_trailing_junk);
+
+      case 32:
+        return InternalStringToIntDouble<5>(
+            unicode_cache, current, end, negative, allow_trailing_junk);
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  if (radix == 10) {
+    // Parsing with strtod.
+    const int kMaxSignificantDigits = 309;  // Doubles are less than 1.8e308.
+    // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
+    // end.
+    const int kBufferSize = kMaxSignificantDigits + 2;
+    char buffer[kBufferSize];
+    int buffer_pos = 0;
+    while (*current >= '0' && *current <= '9') {
+      if (buffer_pos <= kMaxSignificantDigits) {
+        // If the number has more than kMaxSignificantDigits it will be parsed
+        // as infinity.
+        ASSERT(buffer_pos < kBufferSize);
+        buffer[buffer_pos++] = static_cast<char>(*current);
+      }
+      ++current;
+      if (current == end) break;
+    }
+
+    if (!allow_trailing_junk &&
+        AdvanceToNonspace(unicode_cache, &current, end)) {
+      return JunkStringValue();
+    }
+
+    ASSERT(buffer_pos < kBufferSize);
+    buffer[buffer_pos] = '\0';
+    Vector<const char> buffer_vector(buffer, buffer_pos);
+    return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
+  }
+
+  // The following code causes accumulating rounding error for numbers greater
+  // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
+  // 16, or 32, then mathInt may be an implementation-dependent approximation to
+  // the mathematical integer value" (15.1.2.2).
+
+  int lim_0 = '0' + (radix < 10 ? radix : 10);
+  int lim_a = 'a' + (radix - 10);
+  int lim_A = 'A' + (radix - 10);
+
+  // NOTE: The code for computing the value may seem a bit complex at
+  // first glance. It is structured to use 32-bit multiply-and-add
+// loops as long as possible to avoid losing precision.
+
+  double v = 0.0;
+  bool done = false;
+  do {
+    // Parse the longest part of the string starting at index j
+    // possible while keeping the multiplier, and thus the part
+    // itself, within 32 bits.
+    unsigned int part = 0, multiplier = 1;
+    while (true) {
+      int d;
+      if (*current >= '0' && *current < lim_0) {
+        d = *current - '0';
+      } else if (*current >= 'a' && *current < lim_a) {
+        d = *current - 'a' + 10;
+      } else if (*current >= 'A' && *current < lim_A) {
+        d = *current - 'A' + 10;
+      } else {
+        done = true;
+        break;
+      }
+
+      // Update the value of the part as long as the multiplier fits
+      // in 32 bits. When we can't guarantee that the next iteration
+      // will not overflow the multiplier, we stop parsing the part
+      // by leaving the loop.
+      const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
+      uint32_t m = multiplier * radix;
+      if (m > kMaximumMultiplier) break;
+      part = part * radix + d;
+      multiplier = m;
+      ASSERT(multiplier > part);
+
+      ++current;
+      if (current == end) {
+        done = true;
+        break;
+      }
+    }
+
+    // Update the value and skip the part in the string.
+    v = v * multiplier + part;
+  } while (!done);
+
+  if (!allow_trailing_junk &&
+      AdvanceToNonspace(unicode_cache, &current, end)) {
+    return JunkStringValue();
+  }
+
+  return negative ? -v : v;
+}
+
+
+// Converts a string to a double value. Assumes the Iterator supports
+// the following operations:
+// 1. current == end (other ops are not allowed), current != end.
+// 2. *current - gets the current character in the sequence.
+// 3. ++current (advances the position).
+template <class Iterator, class EndMark>
+static double InternalStringToDouble(UnicodeCache* unicode_cache,
+                                     Iterator current,
+                                     EndMark end,
+                                     int flags,
+                                     double empty_string_val) {
+  // To make sure that iterator dereferencing is valid the following
+  // convention is used:
+  // 1. Each '++current' statement is followed by check for equality to 'end'.
+  // 2. If AdvanceToNonspace returned false then current == end.
+  // 3. If 'current' becomes equal to 'end' the function returns or goes to
+  // 'parsing_done'.
+  // 4. 'current' is not dereferenced after the 'parsing_done' label.
+  // 5. Code before 'parsing_done' may rely on 'current != end'.
+  if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+    return empty_string_val;
+  }
+
+  const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
+
+  // The longest form of simplified number is: "-<significant digits>'.1eXXX\0".
+  const int kBufferSize = kMaxSignificantDigits + 10;
+  char buffer[kBufferSize];  // NOLINT: size is known at compile time.
+  int buffer_pos = 0;
+
+  // Exponent will be adjusted if insignificant digits of the integer part
+  // or insignificant leading zeros of the fractional part are dropped.
+  int exponent = 0;
+  int significant_digits = 0;
+  int insignificant_digits = 0;
+  bool nonzero_digit_dropped = false;
+
+  bool negative = false;
+
+  if (*current == '+') {
+    // Ignore leading sign.
+    ++current;
+    if (current == end) return JunkStringValue();
+  } else if (*current == '-') {
+    ++current;
+    if (current == end) return JunkStringValue();
+    negative = true;
+  }
+
+  static const char kInfinitySymbol[] = "Infinity";
+  if (*current == kInfinitySymbol[0]) {
+    if (!SubStringEquals(&current, end, kInfinitySymbol)) {
+      return JunkStringValue();
+    }
+
+    if (!allow_trailing_junk &&
+        AdvanceToNonspace(unicode_cache, &current, end)) {
+      return JunkStringValue();
+    }
+
+    ASSERT(buffer_pos == 0);
+    return negative ? -V8_INFINITY : V8_INFINITY;
+  }
+
+  bool leading_zero = false;
+  if (*current == '0') {
+    ++current;
+    if (current == end) return SignedZero(negative);
+
+    leading_zero = true;
+
+    // It could be a hexadecimal value.
+    if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
+      ++current;
+      if (current == end || !isDigit(*current, 16)) {
+        return JunkStringValue();  // "0x".
+      }
+
+      return InternalStringToIntDouble<4>(unicode_cache,
+                                          current,
+                                          end,
+                                          negative,
+                                          allow_trailing_junk);
+    }
+
+    // Ignore leading zeros in the integer part.
+    while (*current == '0') {
+      ++current;
+      if (current == end) return SignedZero(negative);
+    }
+  }
+
+  bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
+
+  // Copy significant digits of the integer part (if any) to the buffer.
+  while (*current >= '0' && *current <= '9') {
+    if (significant_digits < kMaxSignificantDigits) {
+      ASSERT(buffer_pos < kBufferSize);
+      buffer[buffer_pos++] = static_cast<char>(*current);
+      significant_digits++;
+      // Will later check if it's an octal in the buffer.
+    } else {
+      insignificant_digits++;  // Move the digit into the exponential part.
+      nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+    }
+    octal = octal && *current < '8';
+    ++current;
+    if (current == end) goto parsing_done;
+  }
+
+  if (significant_digits == 0) {
+    octal = false;
+  }
+
+  if (*current == '.') {
+    if (octal && !allow_trailing_junk) return JunkStringValue();
+    if (octal) goto parsing_done;
+
+    ++current;
+    if (current == end) {
+      if (significant_digits == 0 && !leading_zero) {
+        return JunkStringValue();
+      } else {
+        goto parsing_done;
+      }
+    }
+
+    if (significant_digits == 0) {
+      // octal = false;
+      // Integer part consists of 0 or is absent. Significant digits start after
+      // leading zeros (if any).
+      while (*current == '0') {
+        ++current;
+        if (current == end) return SignedZero(negative);
+        exponent--;  // Move this 0 into the exponent.
+      }
+    }
+
+    // There is a fractional part.  We don't emit a '.', but adjust the exponent
+    // instead.
+    while (*current >= '0' && *current <= '9') {
+      if (significant_digits < kMaxSignificantDigits) {
+        ASSERT(buffer_pos < kBufferSize);
+        buffer[buffer_pos++] = static_cast<char>(*current);
+        significant_digits++;
+        exponent--;
+      } else {
+        // Ignore insignificant digits in the fractional part.
+        nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+      }
+      ++current;
+      if (current == end) goto parsing_done;
+    }
+  }
+
+  if (!leading_zero && exponent == 0 && significant_digits == 0) {
+    // If leading_zero is true then the string contains zeros.
+    // If exponent < 0 then string was [+-]\.0*...
+    // If significant_digits != 0 the string is not equal to 0.
+    // Otherwise there are no digits in the string.
+    return JunkStringValue();
+  }
+
+  // Parse exponential part.
+  if (*current == 'e' || *current == 'E') {
+    if (octal) return JunkStringValue();
+    ++current;
+    if (current == end) {
+      if (allow_trailing_junk) {
+        goto parsing_done;
+      } else {
+        return JunkStringValue();
+      }
+    }
+    char sign = '+';
+    if (*current == '+' || *current == '-') {
+      sign = static_cast<char>(*current);
+      ++current;
+      if (current == end) {
+        if (allow_trailing_junk) {
+          goto parsing_done;
+        } else {
+          return JunkStringValue();
+        }
+      }
+    }
+
+    if (current == end || *current < '0' || *current > '9') {
+      if (allow_trailing_junk) {
+        goto parsing_done;
+      } else {
+        return JunkStringValue();
+      }
+    }
+
+    const int max_exponent = INT_MAX / 2;
+    ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+    int num = 0;
+    do {
+      // Check overflow.
+      int digit = *current - '0';
+      if (num >= max_exponent / 10
+          && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+        num = max_exponent;
+      } else {
+        num = num * 10 + digit;
+      }
+      ++current;
+    } while (current != end && *current >= '0' && *current <= '9');
+
+    exponent += (sign == '-' ? -num : num);
+  }
+
+  if (!allow_trailing_junk &&
+      AdvanceToNonspace(unicode_cache, &current, end)) {
+    return JunkStringValue();
+  }
+
+  parsing_done:
+  exponent += insignificant_digits;
+
+  if (octal) {
+    return InternalStringToIntDouble<3>(unicode_cache,
+                                        buffer,
+                                        buffer + buffer_pos,
+                                        negative,
+                                        allow_trailing_junk);
+  }
+
+  if (nonzero_digit_dropped) {
+    buffer[buffer_pos++] = '1';
+    exponent--;
+  }
+
+  ASSERT(buffer_pos < kBufferSize);
+  buffer[buffer_pos] = '\0';
+
+  double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
+  return negative ? -converted : converted;
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_CONVERSIONS_INL_H_
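
The InternalStringToInt added above handles radices that are not powers of two by accumulating digits into a 32-bit part/multiplier pair and only then folding each chunk into the running double, which keeps most of the arithmetic exact and limits the rounding error the comment mentions. Below is a minimal standalone sketch of just that accumulation loop, under the assumption of a plain ASCII digit string with no sign, whitespace or trailing junk; it is an illustration, not the V8 code itself.

  #include <cstdint>
  #include <cstdio>

  static double ParseRadixApprox(const char* p, const char* end, int radix) {
    double value = 0.0;
    bool done = false;
    while (!done) {
      // Consume digits while the chunk multiplier still fits in 32 bits.
      uint32_t part = 0, multiplier = 1;
      const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
      while (true) {
        if (p == end) { done = true; break; }
        int c = *p;
        int d;
        if (c >= '0' && c <= '9') d = c - '0';
        else if (c >= 'a' && c <= 'z') d = c - 'a' + 10;
        else if (c >= 'A' && c <= 'Z') d = c - 'A' + 10;
        else { done = true; break; }
        if (d >= radix) { done = true; break; }
        uint32_t m = multiplier * radix;
        if (m > kMaximumMultiplier) break;  // chunk is full; fold it below
        part = part * radix + d;
        multiplier = m;
        ++p;
      }
      // Fold the finished 32-bit chunk into the running double.
      value = value * multiplier + part;
    }
    return value;
  }

  int main() {
    const char digits[] = "zz";  // 35 * 36 + 35 == 1295 in base 36
    std::printf("%.0f\n", ParseRadixApprox(digits, digits + 2, 36));
    return 0;
  }

For radices 2, 4, 8, 16 and 32 the real code dispatches to InternalStringToIntDouble instead, which works on the bit pattern directly and rounds half-way cases to even.
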
diff --git a/src/conversions.cc b/src/conversions.cc
index 1458584..5bfddd0 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -26,696 +26,17 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdarg.h>
+#include <math.h>
 #include <limits.h>
 
-#include "v8.h"
-
 #include "conversions-inl.h"
 #include "dtoa.h"
-#include "factory.h"
-#include "scanner-base.h"
 #include "strtod.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
-namespace {
-
-// C++-style iterator adaptor for StringInputBuffer
-// (unlike C++ iterators the end-marker has different type).
-class StringInputBufferIterator {
- public:
-  class EndMarker {};
-
-  explicit StringInputBufferIterator(StringInputBuffer* buffer);
-
-  int operator*() const;
-  void operator++();
-  bool operator==(EndMarker const&) const { return end_; }
-  bool operator!=(EndMarker const& m) const { return !end_; }
-
- private:
-  StringInputBuffer* const buffer_;
-  int current_;
-  bool end_;
-};
-
-
-StringInputBufferIterator::StringInputBufferIterator(
-    StringInputBuffer* buffer) : buffer_(buffer) {
-  ++(*this);
-}
-
-int StringInputBufferIterator::operator*() const {
-  return current_;
-}
-
-
-void StringInputBufferIterator::operator++() {
-  end_ = !buffer_->has_more();
-  if (!end_) {
-    current_ = buffer_->GetNext();
-  }
-}
-}
-
-
-template <class Iterator, class EndMark>
-static bool SubStringEquals(Iterator* current,
-                            EndMark end,
-                            const char* substring) {
-  ASSERT(**current == *substring);
-  for (substring++; *substring != '\0'; substring++) {
-    ++*current;
-    if (*current == end || **current != *substring) return false;
-  }
-  ++*current;
-  return true;
-}
-
-
-// Maximum number of significant digits in decimal representation.
-// The longest possible double in decimal representation is
-// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
-// (768 digits). If we parse a number whose first digits are equal to a
-// mean of 2 adjacent doubles (that could have up to 769 digits) the result
-// must be rounded to the bigger one unless the tail consists of zeros, so
-// we don't need to preserve all the digits.
-const int kMaxSignificantDigits = 772;
-
-
-static const double JUNK_STRING_VALUE = OS::nan_value();
-
-
-// Returns true if a nonspace found and false if the end has reached.
-template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
-                                     Iterator* current,
-                                     EndMark end) {
-  while (*current != end) {
-    if (!unicode_cache->IsWhiteSpace(**current)) return true;
-    ++*current;
-  }
-  return false;
-}
-
-
-static bool isDigit(int x, int radix) {
-  return (x >= '0' && x <= '9' && x < '0' + radix)
-      || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
-      || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
-}
-
-
-static double SignedZero(bool negative) {
-  return negative ? -0.0 : 0.0;
-}
-
-
-// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
-template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
-                                        Iterator current,
-                                        EndMark end,
-                                        bool negative,
-                                        bool allow_trailing_junk) {
-  ASSERT(current != end);
-
-  // Skip leading 0s.
-  while (*current == '0') {
-    ++current;
-    if (current == end) return SignedZero(negative);
-  }
-
-  int64_t number = 0;
-  int exponent = 0;
-  const int radix = (1 << radix_log_2);
-
-  do {
-    int digit;
-    if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
-      digit = static_cast<char>(*current) - '0';
-    } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
-      digit = static_cast<char>(*current) - 'a' + 10;
-    } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
-      digit = static_cast<char>(*current) - 'A' + 10;
-    } else {
-      if (allow_trailing_junk ||
-          !AdvanceToNonspace(unicode_cache, &current, end)) {
-        break;
-      } else {
-        return JUNK_STRING_VALUE;
-      }
-    }
-
-    number = number * radix + digit;
-    int overflow = static_cast<int>(number >> 53);
-    if (overflow != 0) {
-      // Overflow occurred. Need to determine which direction to round the
-      // result.
-      int overflow_bits_count = 1;
-      while (overflow > 1) {
-        overflow_bits_count++;
-        overflow >>= 1;
-      }
-
-      int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
-      int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
-      number >>= overflow_bits_count;
-      exponent = overflow_bits_count;
-
-      bool zero_tail = true;
-      while (true) {
-        ++current;
-        if (current == end || !isDigit(*current, radix)) break;
-        zero_tail = zero_tail && *current == '0';
-        exponent += radix_log_2;
-      }
-
-      if (!allow_trailing_junk &&
-          AdvanceToNonspace(unicode_cache, &current, end)) {
-        return JUNK_STRING_VALUE;
-      }
-
-      int middle_value = (1 << (overflow_bits_count - 1));
-      if (dropped_bits > middle_value) {
-        number++;  // Rounding up.
-      } else if (dropped_bits == middle_value) {
-        // Rounding to even to consistency with decimals: half-way case rounds
-        // up if significant part is odd and down otherwise.
-        if ((number & 1) != 0 || !zero_tail) {
-          number++;  // Rounding up.
-        }
-      }
-
-      // Rounding up may cause overflow.
-      if ((number & ((int64_t)1 << 53)) != 0) {
-        exponent++;
-        number >>= 1;
-      }
-      break;
-    }
-    ++current;
-  } while (current != end);
-
-  ASSERT(number < ((int64_t)1 << 53));
-  ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
-
-  if (exponent == 0) {
-    if (negative) {
-      if (number == 0) return -0.0;
-      number = -number;
-    }
-    return static_cast<double>(number);
-  }
-
-  ASSERT(number != 0);
-  // The double could be constructed faster from number (mantissa), exponent
-  // and sign. Assuming it's a rare case more simple code is used.
-  return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
-}
-
-
-template <class Iterator, class EndMark>
-static double InternalStringToInt(UnicodeCache* unicode_cache,
-                                  Iterator current,
-                                  EndMark end,
-                                  int radix) {
-  const bool allow_trailing_junk = true;
-  const double empty_string_val = JUNK_STRING_VALUE;
-
-  if (!AdvanceToNonspace(unicode_cache, &current, end)) {
-    return empty_string_val;
-  }
-
-  bool negative = false;
-  bool leading_zero = false;
-
-  if (*current == '+') {
-    // Ignore leading sign; skip following spaces.
-    ++current;
-    if (!AdvanceToNonspace(unicode_cache, &current, end)) {
-      return JUNK_STRING_VALUE;
-    }
-  } else if (*current == '-') {
-    ++current;
-    if (!AdvanceToNonspace(unicode_cache, &current, end)) {
-      return JUNK_STRING_VALUE;
-    }
-    negative = true;
-  }
-
-  if (radix == 0) {
-    // Radix detection.
-    if (*current == '0') {
-      ++current;
-      if (current == end) return SignedZero(negative);
-      if (*current == 'x' || *current == 'X') {
-        radix = 16;
-        ++current;
-        if (current == end) return JUNK_STRING_VALUE;
-      } else {
-        radix = 8;
-        leading_zero = true;
-      }
-    } else {
-      radix = 10;
-    }
-  } else if (radix == 16) {
-    if (*current == '0') {
-      // Allow "0x" prefix.
-      ++current;
-      if (current == end) return SignedZero(negative);
-      if (*current == 'x' || *current == 'X') {
-        ++current;
-        if (current == end) return JUNK_STRING_VALUE;
-      } else {
-        leading_zero = true;
-      }
-    }
-  }
-
-  if (radix < 2 || radix > 36) return JUNK_STRING_VALUE;
-
-  // Skip leading zeros.
-  while (*current == '0') {
-    leading_zero = true;
-    ++current;
-    if (current == end) return SignedZero(negative);
-  }
-
-  if (!leading_zero && !isDigit(*current, radix)) {
-    return JUNK_STRING_VALUE;
-  }
-
-  if (IsPowerOf2(radix)) {
-    switch (radix) {
-      case 2:
-        return InternalStringToIntDouble<1>(
-            unicode_cache, current, end, negative, allow_trailing_junk);
-      case 4:
-        return InternalStringToIntDouble<2>(
-            unicode_cache, current, end, negative, allow_trailing_junk);
-      case 8:
-        return InternalStringToIntDouble<3>(
-            unicode_cache, current, end, negative, allow_trailing_junk);
-
-      case 16:
-        return InternalStringToIntDouble<4>(
-            unicode_cache, current, end, negative, allow_trailing_junk);
-
-      case 32:
-        return InternalStringToIntDouble<5>(
-            unicode_cache, current, end, negative, allow_trailing_junk);
-      default:
-        UNREACHABLE();
-    }
-  }
-
-  if (radix == 10) {
-    // Parsing with strtod.
-    const int kMaxSignificantDigits = 309;  // Doubles are less than 1.8e308.
-    // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
-    // end.
-    const int kBufferSize = kMaxSignificantDigits + 2;
-    char buffer[kBufferSize];
-    int buffer_pos = 0;
-    while (*current >= '0' && *current <= '9') {
-      if (buffer_pos <= kMaxSignificantDigits) {
-        // If the number has more than kMaxSignificantDigits it will be parsed
-        // as infinity.
-        ASSERT(buffer_pos < kBufferSize);
-        buffer[buffer_pos++] = static_cast<char>(*current);
-      }
-      ++current;
-      if (current == end) break;
-    }
-
-    if (!allow_trailing_junk &&
-        AdvanceToNonspace(unicode_cache, &current, end)) {
-      return JUNK_STRING_VALUE;
-    }
-
-    ASSERT(buffer_pos < kBufferSize);
-    buffer[buffer_pos] = '\0';
-    Vector<const char> buffer_vector(buffer, buffer_pos);
-    return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
-  }
-
-  // The following code causes accumulating rounding error for numbers greater
-  // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
-  // 16, or 32, then mathInt may be an implementation-dependent approximation to
-  // the mathematical integer value" (15.1.2.2).
-
-  int lim_0 = '0' + (radix < 10 ? radix : 10);
-  int lim_a = 'a' + (radix - 10);
-  int lim_A = 'A' + (radix - 10);
-
-  // NOTE: The code for computing the value may seem a bit complex at
-  // first glance. It is structured to use 32-bit multiply-and-add
-  // loops as long as possible to avoid loosing precision.
-
-  double v = 0.0;
-  bool done = false;
-  do {
-    // Parse the longest part of the string starting at index j
-    // possible while keeping the multiplier, and thus the part
-    // itself, within 32 bits.
-    unsigned int part = 0, multiplier = 1;
-    while (true) {
-      int d;
-      if (*current >= '0' && *current < lim_0) {
-        d = *current - '0';
-      } else if (*current >= 'a' && *current < lim_a) {
-        d = *current - 'a' + 10;
-      } else if (*current >= 'A' && *current < lim_A) {
-        d = *current - 'A' + 10;
-      } else {
-        done = true;
-        break;
-      }
-
-      // Update the value of the part as long as the multiplier fits
-      // in 32 bits. When we can't guarantee that the next iteration
-      // will not overflow the multiplier, we stop parsing the part
-      // by leaving the loop.
-      const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
-      uint32_t m = multiplier * radix;
-      if (m > kMaximumMultiplier) break;
-      part = part * radix + d;
-      multiplier = m;
-      ASSERT(multiplier > part);
-
-      ++current;
-      if (current == end) {
-        done = true;
-        break;
-      }
-    }
-
-    // Update the value and skip the part in the string.
-    v = v * multiplier + part;
-  } while (!done);
-
-  if (!allow_trailing_junk &&
-      AdvanceToNonspace(unicode_cache, &current, end)) {
-    return JUNK_STRING_VALUE;
-  }
-
-  return negative ? -v : v;
-}
-
-
-// Converts a string to a double value. Assumes the Iterator supports
-// the following operations:
-// 1. current == end (other ops are not allowed), current != end.
-// 2. *current - gets the current character in the sequence.
-// 3. ++current (advances the position).
-template <class Iterator, class EndMark>
-static double InternalStringToDouble(UnicodeCache* unicode_cache,
-                                     Iterator current,
-                                     EndMark end,
-                                     int flags,
-                                     double empty_string_val) {
-  // To make sure that iterator dereferencing is valid the following
-  // convention is used:
-  // 1. Each '++current' statement is followed by check for equality to 'end'.
-  // 2. If AdvanceToNonspace returned false then current == end.
-  // 3. If 'current' becomes be equal to 'end' the function returns or goes to
-  // 'parsing_done'.
-  // 4. 'current' is not dereferenced after the 'parsing_done' label.
-  // 5. Code before 'parsing_done' may rely on 'current != end'.
-  if (!AdvanceToNonspace(unicode_cache, &current, end)) {
-    return empty_string_val;
-  }
-
-  const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
-
-  // The longest form of simplified number is: "-<significant digits>'.1eXXX\0".
-  const int kBufferSize = kMaxSignificantDigits + 10;
-  char buffer[kBufferSize];  // NOLINT: size is known at compile time.
-  int buffer_pos = 0;
-
-  // Exponent will be adjusted if insignificant digits of the integer part
-  // or insignificant leading zeros of the fractional part are dropped.
-  int exponent = 0;
-  int significant_digits = 0;
-  int insignificant_digits = 0;
-  bool nonzero_digit_dropped = false;
-  bool fractional_part = false;
-
-  bool negative = false;
-
-  if (*current == '+') {
-    // Ignore leading sign.
-    ++current;
-    if (current == end) return JUNK_STRING_VALUE;
-  } else if (*current == '-') {
-    ++current;
-    if (current == end) return JUNK_STRING_VALUE;
-    negative = true;
-  }
-
-  static const char kInfinitySymbol[] = "Infinity";
-  if (*current == kInfinitySymbol[0]) {
-    if (!SubStringEquals(&current, end, kInfinitySymbol)) {
-      return JUNK_STRING_VALUE;
-    }
-
-    if (!allow_trailing_junk &&
-        AdvanceToNonspace(unicode_cache, &current, end)) {
-      return JUNK_STRING_VALUE;
-    }
-
-    ASSERT(buffer_pos == 0);
-    return negative ? -V8_INFINITY : V8_INFINITY;
-  }
-
-  bool leading_zero = false;
-  if (*current == '0') {
-    ++current;
-    if (current == end) return SignedZero(negative);
-
-    leading_zero = true;
-
-    // It could be hexadecimal value.
-    if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
-      ++current;
-      if (current == end || !isDigit(*current, 16)) {
-        return JUNK_STRING_VALUE;  // "0x".
-      }
-
-      return InternalStringToIntDouble<4>(unicode_cache,
-                                          current,
-                                          end,
-                                          negative,
-                                          allow_trailing_junk);
-    }
-
-    // Ignore leading zeros in the integer part.
-    while (*current == '0') {
-      ++current;
-      if (current == end) return SignedZero(negative);
-    }
-  }
-
-  bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
-
-  // Copy significant digits of the integer part (if any) to the buffer.
-  while (*current >= '0' && *current <= '9') {
-    if (significant_digits < kMaxSignificantDigits) {
-      ASSERT(buffer_pos < kBufferSize);
-      buffer[buffer_pos++] = static_cast<char>(*current);
-      significant_digits++;
-      // Will later check if it's an octal in the buffer.
-    } else {
-      insignificant_digits++;  // Move the digit into the exponential part.
-      nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
-    }
-    octal = octal && *current < '8';
-    ++current;
-    if (current == end) goto parsing_done;
-  }
-
-  if (significant_digits == 0) {
-    octal = false;
-  }
-
-  if (*current == '.') {
-    if (octal && !allow_trailing_junk) return JUNK_STRING_VALUE;
-    if (octal) goto parsing_done;
-
-    ++current;
-    if (current == end) {
-      if (significant_digits == 0 && !leading_zero) {
-        return JUNK_STRING_VALUE;
-      } else {
-        goto parsing_done;
-      }
-    }
-
-    if (significant_digits == 0) {
-      // octal = false;
-      // Integer part consists of 0 or is absent. Significant digits start after
-      // leading zeros (if any).
-      while (*current == '0') {
-        ++current;
-        if (current == end) return SignedZero(negative);
-        exponent--;  // Move this 0 into the exponent.
-      }
-    }
-
-    // We don't emit a '.', but adjust the exponent instead.
-    fractional_part = true;
-
-    // There is a fractional part.
-    while (*current >= '0' && *current <= '9') {
-      if (significant_digits < kMaxSignificantDigits) {
-        ASSERT(buffer_pos < kBufferSize);
-        buffer[buffer_pos++] = static_cast<char>(*current);
-        significant_digits++;
-        exponent--;
-      } else {
-        // Ignore insignificant digits in the fractional part.
-        nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
-      }
-      ++current;
-      if (current == end) goto parsing_done;
-    }
-  }
-
-  if (!leading_zero && exponent == 0 && significant_digits == 0) {
-    // If leading_zeros is true then the string contains zeros.
-    // If exponent < 0 then string was [+-]\.0*...
-    // If significant_digits != 0 the string is not equal to 0.
-    // Otherwise there are no digits in the string.
-    return JUNK_STRING_VALUE;
-  }
-
-  // Parse exponential part.
-  if (*current == 'e' || *current == 'E') {
-    if (octal) return JUNK_STRING_VALUE;
-    ++current;
-    if (current == end) {
-      if (allow_trailing_junk) {
-        goto parsing_done;
-      } else {
-        return JUNK_STRING_VALUE;
-      }
-    }
-    char sign = '+';
-    if (*current == '+' || *current == '-') {
-      sign = static_cast<char>(*current);
-      ++current;
-      if (current == end) {
-        if (allow_trailing_junk) {
-          goto parsing_done;
-        } else {
-          return JUNK_STRING_VALUE;
-        }
-      }
-    }
-
-    if (current == end || *current < '0' || *current > '9') {
-      if (allow_trailing_junk) {
-        goto parsing_done;
-      } else {
-        return JUNK_STRING_VALUE;
-      }
-    }
-
-    const int max_exponent = INT_MAX / 2;
-    ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
-    int num = 0;
-    do {
-      // Check overflow.
-      int digit = *current - '0';
-      if (num >= max_exponent / 10
-          && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
-        num = max_exponent;
-      } else {
-        num = num * 10 + digit;
-      }
-      ++current;
-    } while (current != end && *current >= '0' && *current <= '9');
-
-    exponent += (sign == '-' ? -num : num);
-  }
-
-  if (!allow_trailing_junk &&
-      AdvanceToNonspace(unicode_cache, &current, end)) {
-    return JUNK_STRING_VALUE;
-  }
-
-  parsing_done:
-  exponent += insignificant_digits;
-
-  if (octal) {
-    return InternalStringToIntDouble<3>(unicode_cache,
-                                        buffer,
-                                        buffer + buffer_pos,
-                                        negative,
-                                        allow_trailing_junk);
-  }
-
-  if (nonzero_digit_dropped) {
-    buffer[buffer_pos++] = '1';
-    exponent--;
-  }
-
-  ASSERT(buffer_pos < kBufferSize);
-  buffer[buffer_pos] = '\0';
-
-  double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
-  return negative ? -converted : converted;
-}
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
-                      String* str, int flags, double empty_string_val) {
-  StringShape shape(str);
-  if (shape.IsSequentialAscii()) {
-    const char* begin = SeqAsciiString::cast(str)->GetChars();
-    const char* end = begin + str->length();
-    return InternalStringToDouble(unicode_cache, begin, end, flags,
-                                  empty_string_val);
-  } else if (shape.IsSequentialTwoByte()) {
-    const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
-    const uc16* end = begin + str->length();
-    return InternalStringToDouble(unicode_cache, begin, end, flags,
-                                  empty_string_val);
-  } else {
-    StringInputBuffer buffer(str);
-    return InternalStringToDouble(unicode_cache,
-                                  StringInputBufferIterator(&buffer),
-                                  StringInputBufferIterator::EndMarker(),
-                                  flags,
-                                  empty_string_val);
-  }
-}
-
-
-double StringToInt(UnicodeCache* unicode_cache,
-                   String* str,
-                   int radix) {
-  StringShape shape(str);
-  if (shape.IsSequentialAscii()) {
-    const char* begin = SeqAsciiString::cast(str)->GetChars();
-    const char* end = begin + str->length();
-    return InternalStringToInt(unicode_cache, begin, end, radix);
-  } else if (shape.IsSequentialTwoByte()) {
-    const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
-    const uc16* end = begin + str->length();
-    return InternalStringToInt(unicode_cache, begin, end, radix);
-  } else {
-    StringInputBuffer buffer(str);
-    return InternalStringToInt(unicode_cache,
-                               StringInputBufferIterator(&buffer),
-                               StringInputBufferIterator::EndMarker(),
-                               radix);
-  }
-}
-
 
 double StringToDouble(UnicodeCache* unicode_cache,
                       const char* str, int flags, double empty_string_val) {
@@ -734,6 +55,15 @@
                                 empty_string_val);
 }
 
+double StringToDouble(UnicodeCache* unicode_cache,
+                      Vector<const uc16> str,
+                      int flags,
+                      double empty_string_val) {
+  const uc16* end = str.start() + str.length();
+  return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+                                empty_string_val);
+}
+
 
 const char* DoubleToCString(double v, Vector<char> buffer) {
   switch (fpclassify(v)) {
@@ -741,7 +71,7 @@
     case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
     case FP_ZERO: return "0";
     default: {
-      StringBuilder builder(buffer.start(), buffer.length());
+      SimpleStringBuilder builder(buffer.start(), buffer.length());
       int decimal_point;
       int sign;
       const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
@@ -782,7 +112,7 @@
         builder.AddCharacter((decimal_point >= 0) ? '+' : '-');
         int exponent = decimal_point - 1;
         if (exponent < 0) exponent = -exponent;
-        builder.AddFormatted("%d", exponent);
+        builder.AddDecimalInteger(exponent);
       }
     return builder.Finalize();
     }
@@ -860,7 +190,7 @@
 
   unsigned rep_length =
       zero_prefix_length + decimal_rep_length + zero_postfix_length;
-  StringBuilder rep_builder(rep_length + 1);
+  SimpleStringBuilder rep_builder(rep_length + 1);
   rep_builder.AddPadding('0', zero_prefix_length);
   rep_builder.AddString(decimal_rep);
   rep_builder.AddPadding('0', zero_postfix_length);
@@ -869,7 +199,7 @@
   // Create the result string by appending a minus and putting in a
   // decimal point if needed.
   unsigned result_size = decimal_point + f + 2;
-  StringBuilder builder(result_size + 1);
+  SimpleStringBuilder builder(result_size + 1);
   if (negative) builder.AddCharacter('-');
   builder.AddSubstring(rep, decimal_point);
   if (f > 0) {
@@ -895,7 +225,7 @@
   // letter 'e', a minus or a plus depending on the exponent, and a
   // three digit exponent.
   unsigned result_size = significant_digits + 7;
-  StringBuilder builder(result_size + 1);
+  SimpleStringBuilder builder(result_size + 1);
 
   if (negative) builder.AddCharacter('-');
   builder.AddCharacter(decimal_rep[0]);
@@ -908,7 +238,7 @@
 
   builder.AddCharacter('e');
   builder.AddCharacter(negative_exponent ? '-' : '+');
-  builder.AddFormatted("%d", exponent);
+  builder.AddDecimalInteger(exponent);
   return builder.Finalize();
 }
 
@@ -1000,7 +330,7 @@
     unsigned result_size = (decimal_point <= 0)
         ? -decimal_point + p + 3
         : p + 2;
-    StringBuilder builder(result_size + 1);
+    SimpleStringBuilder builder(result_size + 1);
     if (negative) builder.AddCharacter('-');
     if (decimal_point <= 0) {
       builder.AddString("0.");
@@ -1059,7 +389,7 @@
   int integer_pos = kBufferSize - 2;
   do {
     integer_buffer[integer_pos--] =
-        chars[static_cast<int>(modulo(integer_part, radix))];
+        chars[static_cast<int>(fmod(integer_part, radix))];
     integer_part /= radix;
   } while (integer_part >= 1.0);
   // Sanity check.
@@ -1092,31 +422,11 @@
   // If the number has a decimal part, leave room for the period.
   if (decimal_pos > 0) result_size++;
   // Allocate result and fill in the parts.
-  StringBuilder builder(result_size + 1);
+  SimpleStringBuilder builder(result_size + 1);
   builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
   if (decimal_pos > 0) builder.AddCharacter('.');
   builder.AddSubstring(decimal_buffer, decimal_pos);
   return builder.Finalize();
 }
 
-
-static Mutex* dtoa_lock_one = OS::CreateMutex();
-static Mutex* dtoa_lock_zero = OS::CreateMutex();
-
-
 } }  // namespace v8::internal
-
-
-extern "C" {
-void ACQUIRE_DTOA_LOCK(int n) {
-  ASSERT(n == 0 || n == 1);
-  (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
-}
-
-
-void FREE_DTOA_LOCK(int n) {
-  ASSERT(n == 0 || n == 1);
-  (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
-      Unlock();
-}
-}
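
One behavioural detail in the hunks above: DoubleToRadixCString now extracts integer-part digits with the standard fmod() rather than the removed modulo() helper. The following standalone sketch shows that digit-extraction pattern in isolation; it is a simplified illustration (positive value, integer part only), not the V8 routine.

  #include <cmath>
  #include <cstdio>

  int main() {
    static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
    double integer_part = 255.0;  // value whose digits we want
    const int radix = 16;
    char buffer[64];
    int pos = static_cast<int>(sizeof(buffer)) - 1;
    buffer[pos--] = '\0';
    // Peel off the least significant digit with fmod(), then divide it away.
    do {
      buffer[pos--] = chars[static_cast<int>(std::fmod(integer_part, radix))];
      integer_part = std::floor(integer_part / radix);
    } while (integer_part >= 1.0);
    std::printf("%s\n", buffer + pos + 1);  // prints "ff"
    return 0;
  }
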
diff --git a/src/conversions.h b/src/conversions.h
index a14dc9a..e51ad65 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -28,11 +28,36 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
-#include "scanner-base.h"
+#include <limits>
+
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
+class UnicodeCache;
+
+// Maximum number of significant digits in decimal representation.
+// The longest possible double in decimal representation is
+// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
+// (768 digits). If we parse a number whose first digits are equal to a
+// mean of 2 adjacent doubles (that could have up to 769 digits) the result
+// must be rounded to the bigger one unless the tail consists of zeros, so
+// we don't need to preserve all the digits.
+const int kMaxSignificantDigits = 772;
+
+
+static inline bool isDigit(int x, int radix) {
+  return (x >= '0' && x <= '9' && x < '0' + radix)
+      || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
+      || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+
+static inline double SignedZero(bool negative) {
+  return negative ? -0.0 : 0.0;
+}
+
 
 // The fast double-to-(unsigned-)int conversion routine does not guarantee
 // rounding towards zero.
@@ -87,18 +112,13 @@
 };
 
 
-// Convert from Number object to C integer.
-static inline int32_t NumberToInt32(Object* number);
-static inline uint32_t NumberToUint32(Object* number);
-
-
 // Converts a string into a double value according to ECMA-262 9.3.1
 double StringToDouble(UnicodeCache* unicode_cache,
-                      String* str,
+                      Vector<const char> str,
                       int flags,
                       double empty_string_val = 0);
 double StringToDouble(UnicodeCache* unicode_cache,
-                      Vector<const char> str,
+                      Vector<const uc16> str,
                       int flags,
                       double empty_string_val = 0);
 // This version expects a zero-terminated character array.
@@ -107,8 +127,7 @@
                       int flags,
                       double empty_string_val = 0);
 
-// Converts a string into an integer.
-double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
+const int kDoubleToCStringMinBufferSize = 100;
 
 // Converts a double to a string value according to ECMA-262 9.8.1.
 // The buffer should be large enough for any floating point number.
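
The helpers this header now exposes are small enough to sanity-check on their own. The sketch below copies the two inline functions from the hunk above verbatim and exercises them in a throwaway main(); the printf calls are illustrative and not part of V8.

  #include <cstdio>

  static inline bool isDigit(int x, int radix) {
    return (x >= '0' && x <= '9' && x < '0' + radix)
        || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
        || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
  }

  static inline double SignedZero(bool negative) {
    return negative ? -0.0 : 0.0;
  }

  int main() {
    // '7' is a valid octal digit, '8' is not; 'F' is valid in hexadecimal.
    std::printf("%d %d %d\n", isDigit('7', 8), isDigit('8', 8), isDigit('F', 16));
    // Negative zero compares equal to 0.0 but keeps its sign bit.
    std::printf("%g %d\n", SignedZero(true), SignedZero(true) == 0.0);
    return 0;
  }
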
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index b704417..4982197 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -30,8 +30,7 @@
 
 #include "cpu-profiler.h"
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
+#include <new>
 #include "circular-queue-inl.h"
 #include "profile-generator-inl.h"
 #include "unbound-queue-inl.h"
@@ -52,34 +51,15 @@
 }
 
 
-void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
-  code_map->DeleteCode(start);
-}
-
-
 void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
   code_map->MoveCode(from, to);
 }
 
 
-TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
-  TickSampleEventRecord* result =
-      reinterpret_cast<TickSampleEventRecord*>(value);
-  result->filler = 1;
-  ASSERT(result->filler != SamplingCircularQueue::kClear);
-  // Init the required fields only.
-  result->sample.pc = NULL;
-  result->sample.frames_count = 0;
-  result->sample.has_external_callback = false;
-  return result;
-}
-
-
 TickSample* ProfilerEventsProcessor::TickSampleEvent() {
   generator_->Tick();
   TickSampleEventRecord* evt =
-      TickSampleEventRecord::init(ticks_buffer_.Enqueue());
-  evt->order = enqueue_order_;  // No increment!
+      new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
   return &evt->sample;
 }
 
@@ -96,6 +76,4 @@
 
 } }  // namespace v8::internal
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 #endif  // V8_CPU_PROFILER_INL_H_
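
The cpu-profiler-inl.h change replaces the static TickSampleEventRecord::init() helper with a placement-new into the raw slot handed out by the sampling ring buffer, so the constructor does the initialisation. A minimal sketch of that pattern with stand-in types follows; the names are illustrative, not the V8 classes.

  #include <new>
  #include <cstdio>

  struct Record {
    explicit Record(unsigned order) : filler(1), order(order) {}
    unsigned filler;  // marks the slot as written rather than cleared
    unsigned order;
  };

  int main() {
    // Stands in for the raw memory returned by ticks_buffer_.Enqueue().
    alignas(Record) unsigned char slot[sizeof(Record)];
    Record* rec = new (slot) Record(42);              // construct in place
    std::printf("%u %u\n", rec->filler, rec->order);  // prints "1 42"
    rec->~Record();  // explicit destroy; the storage is not heap-owned here
    return 0;
  }
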
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 10a3360..d74c034 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -29,8 +29,6 @@
 
 #include "cpu-profiler-inl.h"
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #include "frames-inl.h"
 #include "hashmap.h"
 #include "log-inl.h"
@@ -46,9 +44,8 @@
 static const int kTickSamplesBufferChunksCount = 16;
 
 
-ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
-                                                 ProfileGenerator* generator)
-    : Thread(isolate, "v8:ProfEvntProc"),
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+    : Thread("v8:ProfEvntProc"),
       generator_(generator),
       running_(true),
       ticks_buffer_(sizeof(TickSampleEventRecord),
@@ -140,16 +137,6 @@
 }
 
 
-void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
-  CodeEventsContainer evt_rec;
-  CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
-  rec->type = CodeEventRecord::CODE_DELETE;
-  rec->order = ++enqueue_order_;
-  rec->start = from;
-  events_buffer_.Enqueue(evt_rec);
-}
-
-
 void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
                                                           Address to) {
   CodeEventsContainer evt_rec;
@@ -182,20 +169,16 @@
 
 
 void ProfilerEventsProcessor::AddCurrentStack() {
-  TickSampleEventRecord record;
+  TickSampleEventRecord record(enqueue_order_);
   TickSample* sample = &record.sample;
   Isolate* isolate = Isolate::Current();
   sample->state = isolate->current_vm_state();
   sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
-  sample->tos = NULL;
-  sample->has_external_callback = false;
-  sample->frames_count = 0;
   for (StackTraceFrameIterator it(isolate);
        !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
        it.Advance()) {
     sample->stack[sample->frames_count++] = it.frame()->pc();
   }
-  record.order = enqueue_order_;
   ticks_from_vm_buffer_.Enqueue(record);
 }
 
@@ -288,14 +271,16 @@
 
 
 CpuProfile* CpuProfiler::StopProfiling(const char* title) {
-  return is_profiling() ?
-      Isolate::Current()->cpu_profiler()->StopCollectingProfile(title) : NULL;
+  Isolate* isolate = Isolate::Current();
+  return is_profiling(isolate) ?
+      isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
 }
 
 
 CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
-  return is_profiling() ?
-      Isolate::Current()->cpu_profiler()->StopCollectingProfile(
+  Isolate* isolate = Isolate::Current();
+  return is_profiling(isolate) ?
+      isolate->cpu_profiler()->StopCollectingProfile(
           security_token, title) : NULL;
 }
 
@@ -336,8 +321,9 @@
 void CpuProfiler::DeleteAllProfiles() {
   Isolate* isolate = Isolate::Current();
   ASSERT(isolate->cpu_profiler() != NULL);
-  if (is_profiling())
+  if (is_profiling(isolate)) {
     isolate->cpu_profiler()->StopProcessor();
+  }
   isolate->cpu_profiler()->ResetProfiles();
 }
 
@@ -429,7 +415,6 @@
 
 
 void CpuProfiler::CodeDeleteEvent(Address from) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeDeleteEvent(from);
 }
 
 
@@ -504,7 +489,7 @@
     saved_logging_nesting_ = isolate->logger()->logging_nesting_;
     isolate->logger()->logging_nesting_ = 0;
     generator_ = new ProfileGenerator(profiles_);
-    processor_ = new ProfilerEventsProcessor(isolate, generator_);
+    processor_ = new ProfilerEventsProcessor(generator_);
     NoBarrier_Store(&is_profiling_, true);
     processor_->Start();
     // Enumerate stuff we already have in the heap.
@@ -566,41 +551,31 @@
     sampler->Stop();
     need_to_stop_sampler_ = false;
   }
+  NoBarrier_Store(&is_profiling_, false);
   processor_->Stop();
   processor_->Join();
   delete processor_;
   delete generator_;
   processor_ = NULL;
-  NoBarrier_Store(&is_profiling_, false);
   generator_ = NULL;
   logger->logging_nesting_ = saved_logging_nesting_;
 }
 
-} }  // namespace v8::internal
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
-namespace v8 {
-namespace internal {
 
 void CpuProfiler::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Isolate* isolate = Isolate::Current();
   if (isolate->cpu_profiler() == NULL) {
     isolate->set_cpu_profiler(new CpuProfiler());
   }
-#endif
 }
 
 
 void CpuProfiler::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Isolate* isolate = Isolate::Current();
   if (isolate->cpu_profiler() != NULL) {
     delete isolate->cpu_profiler();
   }
   isolate->set_cpu_profiler(NULL);
-#endif
 }
 
 } }  // namespace v8::internal
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index e04cf85..a71c0e0 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -28,8 +28,7 @@
 #ifndef V8_CPU_PROFILER_H_
 #define V8_CPU_PROFILER_H_
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
+#include "allocation.h"
 #include "atomicops.h"
 #include "circular-queue.h"
 #include "unbound-queue.h"
@@ -49,7 +48,6 @@
 #define CODE_EVENTS_TYPE_LIST(V)                                   \
   V(CODE_CREATION,    CodeCreateEventRecord)                       \
   V(CODE_MOVE,        CodeMoveEventRecord)                         \
-  V(CODE_DELETE,      CodeDeleteEventRecord)                       \
   V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
 
 
@@ -88,14 +86,6 @@
 };
 
 
-class CodeDeleteEventRecord : public CodeEventRecord {
- public:
-  Address start;
-
-  INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
 class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
  public:
   Address from;
@@ -105,10 +95,14 @@
 };
 
 
-class TickSampleEventRecord BASE_EMBEDDED {
+class TickSampleEventRecord {
  public:
-  TickSampleEventRecord()
-      : filler(1) {
+  // The parameterless constructor is used when we dequeue data from
+  // the ticks buffer.
+  TickSampleEventRecord() { }
+  explicit TickSampleEventRecord(unsigned order)
+      : filler(1),
+        order(order) {
     ASSERT(filler != SamplingCircularQueue::kClear);
   }
 
@@ -124,8 +118,6 @@
   static TickSampleEventRecord* cast(void* value) {
     return reinterpret_cast<TickSampleEventRecord*>(value);
   }
-
-  INLINE(static TickSampleEventRecord* init(void* value));
 };
 
 
@@ -133,8 +125,7 @@
 // methods called by event producers: VM and stack sampler threads.
 class ProfilerEventsProcessor : public Thread {
  public:
-  explicit ProfilerEventsProcessor(Isolate* isolate,
-                                   ProfileGenerator* generator);
+  explicit ProfilerEventsProcessor(ProfileGenerator* generator);
   virtual ~ProfilerEventsProcessor() {}
 
   // Thread control.
@@ -197,16 +188,13 @@
 } }  // namespace v8::internal
 
 
-#define PROFILE(isolate, Call)                         \
-  LOG(isolate, Call);                                  \
-  do {                                                 \
-    if (v8::internal::CpuProfiler::is_profiling()) {   \
-      v8::internal::CpuProfiler::Call;                 \
-    }                                                  \
+#define PROFILE(isolate, Call)                                \
+  LOG(isolate, Call);                                         \
+  do {                                                        \
+    if (v8::internal::CpuProfiler::is_profiling(isolate)) {   \
+      v8::internal::CpuProfiler::Call;                        \
+    }                                                         \
   } while (false)
-#else
-#define PROFILE(isolate, Call) LOG(isolate, Call)
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 
 namespace v8 {
@@ -219,7 +207,6 @@
   static void Setup();
   static void TearDown();
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static void StartProfiling(const char* title);
   static void StartProfiling(String* title);
   static CpuProfile* StopProfiling(const char* title);
@@ -261,10 +248,6 @@
 
   // TODO(isolates): this doesn't have to use atomics anymore.
 
-  static INLINE(bool is_profiling()) {
-    return is_profiling(Isolate::Current());
-  }
-
   static INLINE(bool is_profiling(Isolate* isolate)) {
     CpuProfiler* profiler = isolate->cpu_profiler();
     return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
@@ -291,10 +274,6 @@
   bool need_to_stop_sampler_;
   Atomic32 is_profiling_;
 
-#else
-  static INLINE(bool is_profiling()) { return false; }
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
 };
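
The reworked PROFILE macro now takes the isolate explicitly instead of fetching the current one, and it keeps the usual do { ... } while (false) wrapper so the multi-statement body still behaves as a single statement after an unbraced if/else. The illustration below uses a hypothetical TRACE_IF macro and context struct, not the V8 definitions, to show both points.

  #include <cstdio>

  struct Context { const char* name; bool profiling; };

  #define TRACE_IF(context, msg)                        \
    do {                                                \
      if ((context).profiling) {                        \
        std::printf("[%s] %s\n", (context).name, msg);  \
      }                                                 \
    } while (false)

  int main() {
    Context ctx = { "isolate-0", true };
    if (ctx.profiling)
      TRACE_IF(ctx, "code event");  // expands safely without extra braces
    else
      std::printf("profiling off\n");
    return 0;
  }
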
diff --git a/src/cpu.h b/src/cpu.h
index e307302..2525484 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -36,6 +36,8 @@
 #ifndef V8_CPU_H_
 #define V8_CPU_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index 3df8693..adefba7 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -159,7 +159,7 @@
 
 
 void RunRemoteDebugger(int port) {
-  RemoteDebugger debugger(i::Isolate::Current(), port);
+  RemoteDebugger debugger(port);
   debugger.Run();
 }
 
@@ -186,11 +186,11 @@
   }
 
   // Start the receiver thread.
-  ReceiverThread receiver(isolate_, this);
+  ReceiverThread receiver(this);
   receiver.Start();
 
   // Start the keyboard thread.
-  KeyboardThread keyboard(isolate_, this);
+  KeyboardThread keyboard(this);
   keyboard.Start();
   PrintPrompt();
 
@@ -221,14 +221,14 @@
 }
 
 
-void RemoteDebugger::MessageReceived(i::SmartPointer<char> message) {
+void RemoteDebugger::MessageReceived(i::SmartArrayPointer<char> message) {
   RemoteDebuggerEvent* event =
       new RemoteDebuggerEvent(RemoteDebuggerEvent::kMessage, message);
   AddEvent(event);
 }
 
 
-void RemoteDebugger::KeyboardCommand(i::SmartPointer<char> command) {
+void RemoteDebugger::KeyboardCommand(i::SmartArrayPointer<char> command) {
   RemoteDebuggerEvent* event =
       new RemoteDebuggerEvent(RemoteDebuggerEvent::kKeyboard, command);
   AddEvent(event);
@@ -238,7 +238,7 @@
 void RemoteDebugger::ConnectionClosed() {
   RemoteDebuggerEvent* event =
       new RemoteDebuggerEvent(RemoteDebuggerEvent::kDisconnect,
-                              i::SmartPointer<char>());
+                              i::SmartArrayPointer<char>());
   AddEvent(event);
 }
 
@@ -272,6 +272,7 @@
 
 
 void RemoteDebugger::HandleMessageReceived(char* message) {
+  Locker lock;
   HandleScope scope;
 
   // Print the event details.
@@ -300,6 +301,7 @@
 
 
 void RemoteDebugger::HandleKeyboardCommand(char* command) {
+  Locker lock;
   HandleScope scope;
 
   // Convert the debugger command to a JSON debugger request.
@@ -328,14 +330,14 @@
 
 void ReceiverThread::Run() {
   // Receive the connect message (with empty body).
-  i::SmartPointer<char> message =
-    i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
+  i::SmartArrayPointer<char> message =
+      i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
   ASSERT(*message == NULL);
 
   while (true) {
     // Receive a message.
-    i::SmartPointer<char> message =
-      i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
+    i::SmartArrayPointer<char> message =
+        i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
     if (*message == NULL) {
       remote_debugger_->ConnectionClosed();
       return;
@@ -359,7 +361,7 @@
 
     // Pass the keyboard command to the main thread.
     remote_debugger_->KeyboardCommand(
-        i::SmartPointer<char>(i::StrDup(command)));
+        i::SmartArrayPointer<char>(i::StrDup(command)));
   }
 }
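
The SmartPointer to SmartArrayPointer renames throughout this file reflect that the strings produced by i::StrDup()/i::StrNDup() are heap-allocated character arrays, so the owning wrapper must release them with delete[] rather than delete. A tiny standalone illustration of that distinction, using a hypothetical minimal class rather than the V8 utility, is shown below.

  #include <cstring>
  #include <cstdio>

  // Minimal stand-in for an array-owning smart pointer: releases with delete[].
  class ArrayOwner {
   public:
    explicit ArrayOwner(char* data) : data_(data) {}
    ~ArrayOwner() { delete[] data_; }
    char* get() const { return data_; }
   private:
    char* data_;
    ArrayOwner(const ArrayOwner&);             // non-copyable in this sketch
    ArrayOwner& operator=(const ArrayOwner&);
  };

  static char* DupString(const char* s) {      // stands in for i::StrDup()
    char* copy = new char[std::strlen(s) + 1];
    std::strcpy(copy, s);
    return copy;
  }

  int main() {
    ArrayOwner command(DupString("step over"));
    std::printf("%s\n", command.get());
    return 0;  // delete[] runs here; plain delete would be undefined behaviour
  }
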
 
diff --git a/src/d8-debug.h b/src/d8-debug.h
index ceb9e36..aeff3c1 100644
--- a/src/d8-debug.h
+++ b/src/d8-debug.h
@@ -53,16 +53,16 @@
 // Remote debugging class.
 class RemoteDebugger {
  public:
-  RemoteDebugger(i::Isolate* isolate, int port)
+  explicit RemoteDebugger(int port)
       : port_(port),
         event_access_(i::OS::CreateMutex()),
         event_available_(i::OS::CreateSemaphore(0)),
-        head_(NULL), tail_(NULL), isolate_(isolate) {}
+        head_(NULL), tail_(NULL) {}
   void Run();
 
   // Handle events from the subordinate threads.
-  void MessageReceived(i::SmartPointer<char> message);
-  void KeyboardCommand(i::SmartPointer<char> command);
+  void MessageReceived(i::SmartArrayPointer<char> message);
+  void KeyboardCommand(i::SmartArrayPointer<char> command);
   void ConnectionClosed();
 
  private:
@@ -89,7 +89,6 @@
   i::Semaphore* event_available_;
   RemoteDebuggerEvent* head_;
   RemoteDebuggerEvent* tail_;
-  i::Isolate* isolate_;
 
   friend class ReceiverThread;
 };
@@ -98,8 +97,8 @@
 // Thread reading from debugged V8 instance.
 class ReceiverThread: public i::Thread {
  public:
-  ReceiverThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
-      : Thread(isolate, "d8:ReceiverThrd"),
+  explicit ReceiverThread(RemoteDebugger* remote_debugger)
+      : Thread("d8:ReceiverThrd"),
         remote_debugger_(remote_debugger) {}
   ~ReceiverThread() {}
 
@@ -113,8 +112,8 @@
 // Thread reading keyboard input.
 class KeyboardThread: public i::Thread {
  public:
-  explicit KeyboardThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
-      : Thread(isolate, "d8:KeyboardThrd"),
+  explicit KeyboardThread(RemoteDebugger* remote_debugger)
+      : Thread("d8:KeyboardThrd"),
         remote_debugger_(remote_debugger) {}
   ~KeyboardThread() {}
 
@@ -128,7 +127,7 @@
 // Events processed by the main debugger thread.
 class RemoteDebuggerEvent {
  public:
-  RemoteDebuggerEvent(int type, i::SmartPointer<char> data)
+  RemoteDebuggerEvent(int type, i::SmartArrayPointer<char> data)
       : type_(type), data_(data), next_(NULL) {
     ASSERT(type == kMessage || type == kKeyboard || type == kDisconnect);
   }
@@ -145,7 +144,7 @@
   RemoteDebuggerEvent* next() { return next_; }
 
   int type_;
-  i::SmartPointer<char> data_;
+  i::SmartArrayPointer<char> data_;
   RemoteDebuggerEvent* next_;
 
   friend class RemoteDebugger;
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index a7a4049..289c3b0 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -231,6 +231,7 @@
   static const unsigned kMaxArgs = 1000;
   char** arg_array() { return exec_args_; }
   char* arg0() { return exec_args_[0]; }
+
  private:
   char* exec_args_[kMaxArgs + 1];
 };
@@ -311,10 +312,6 @@
                                int read_timeout,
                                int total_timeout) {
   Handle<String> accumulator = String::Empty();
-  const char* source = "(function(a, b) { return a + b; })";
-  Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
-  Handle<Function> cons_function(Function::Cast(*cons_as_obj));
-  Handle<Value> cons_args[2];
 
   int fullness = 0;
   static const int kStdoutReadBufferSize = 4096;
@@ -350,12 +347,7 @@
                    bytes_read + fullness :
                    LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
       Handle<String> addition = String::New(buffer, length);
-      cons_args[0] = accumulator;
-      cons_args[1] = addition;
-      accumulator = Handle<String>::Cast(cons_function->Call(
-          Shell::utility_context()->Global(),
-          2,
-          cons_args));
+      accumulator = String::Concat(accumulator, addition);
       fullness = bytes_read + fullness - length;
       memcpy(buffer, buffer + length, fullness);
     }
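
The rewritten read loop above drops the trick of compiling "(function(a, b) { return a + b; })" in the utility context and simply joins the handles with String::Concat. The fragment below is a hedged sketch of that call against the same era's public API; it only compiles when built against this tree's headers, and the function name is hypothetical.

  #include <v8.h>

  // Join an accumulator and a freshly read chunk, as the patched loop does.
  v8::Handle<v8::String> Append(v8::Handle<v8::String> accumulator,
                                const char* buffer, int length) {
    v8::HandleScope scope;
    v8::Handle<v8::String> addition = v8::String::New(buffer, length);
    return scope.Close(v8::String::Concat(accumulator, addition));
  }
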
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index 67fc9ef..71be933 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,8 @@
 #include <readline/readline.h> // NOLINT
 #include <readline/history.h> // NOLINT
 
+// The readline headers leave RETURN defined, which breaks V8 compilation.
+#undef RETURN
 
 #include "d8.h"
 
@@ -47,7 +49,7 @@
 class ReadLineEditor: public LineEditor {
  public:
   ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
-  virtual i::SmartPointer<char> Prompt(const char* prompt);
+  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
   virtual bool Open();
   virtual bool Close();
   virtual void AddHistory(const char* str);
@@ -70,6 +72,7 @@
   rl_completer_word_break_characters = kWordBreakCharacters;
   rl_bind_key('\t', rl_complete);
   using_history();
+  stifle_history(Shell::kMaxHistoryEntries);
   return read_history(Shell::kHistoryFileName) == 0;
 }
 
@@ -79,13 +82,25 @@
 }
 
 
-i::SmartPointer<char> ReadLineEditor::Prompt(const char* prompt) {
+i::SmartArrayPointer<char> ReadLineEditor::Prompt(const char* prompt) {
   char* result = readline(prompt);
-  return i::SmartPointer<char>(result);
+  return i::SmartArrayPointer<char>(result);
 }
 
 
 void ReadLineEditor::AddHistory(const char* str) {
+  // Do not record empty input.
+  if (strlen(str) == 0) return;
+  // Remove duplicate history entry.
+  history_set_pos(history_length-1);
+  if (current_history()) {
+    do {
+      if (strcmp(current_history()->line, str) == 0) {
+        remove_history(where_history());
+        break;
+      }
+    } while (previous_history());
+  }
   add_history(str);
 }
 
@@ -103,7 +118,7 @@
   static unsigned current_index;
   static Persistent<Array> current_completions;
   if (state == 0) {
-    i::SmartPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
+    i::SmartArrayPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
     HandleScope scope;
     Handle<Array> completions =
       Shell::GetCompletions(String::New(text), String::New(*full_text));
diff --git a/src/d8.cc b/src/d8.cc
index 7de82b7..63a7d15 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,27 +26,51 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
-#include <stdlib.h>
-#include <errno.h>
+#ifdef USING_V8_SHARED  // Defined when linking against shared lib on Windows.
+#define V8_SHARED
+#endif
 
-#include "v8.h"
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#include <bzlib.h>
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#ifdef V8_SHARED
+#include <assert.h>
+#include "../include/v8-testing.h"
+#endif  // V8_SHARED
 
 #include "d8.h"
+
+#ifndef V8_SHARED
+#include "api.h"
+#include "checks.h"
 #include "d8-debug.h"
 #include "debug.h"
-#include "api.h"
 #include "natives.h"
 #include "platform.h"
+#include "v8.h"
+#endif  // V8_SHARED
 
+#if !defined(_WIN32) && !defined(_WIN64)
+#include <unistd.h>  // NOLINT
+#endif
+
+#ifndef ASSERT
+#define ASSERT(condition) assert(condition)
+#endif
 
 namespace v8 {
 
 
-const char* Shell::kHistoryFileName = ".d8_history";
-const char* Shell::kPrompt = "d8> ";
-
-
+#ifndef V8_SHARED
 LineEditor *LineEditor::first_ = NULL;
+const char* Shell::kHistoryFileName = ".d8_history";
+const int Shell::kMaxHistoryEntries = 1000;
 
 
 LineEditor::LineEditor(Type type, const char* name)
@@ -72,19 +96,19 @@
 class DumbLineEditor: public LineEditor {
  public:
   DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
-  virtual i::SmartPointer<char> Prompt(const char* prompt);
+  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
 };
 
 
 static DumbLineEditor dumb_line_editor;
 
 
-i::SmartPointer<char> DumbLineEditor::Prompt(const char* prompt) {
+i::SmartArrayPointer<char> DumbLineEditor::Prompt(const char* prompt) {
   static const int kBufferSize = 256;
   char buffer[kBufferSize];
   printf("%s", prompt);
   char* str = fgets(buffer, kBufferSize, stdin);
-  return i::SmartPointer<char>(str ? i::StrDup(str) : str);
+  return i::SmartArrayPointer<char>(str ? i::StrDup(str) : str);
 }
 
 
@@ -92,15 +116,23 @@
 i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
 CounterCollection Shell::local_counters_;
 CounterCollection* Shell::counters_ = &local_counters_;
+i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
 Persistent<Context> Shell::utility_context_;
+LineEditor* Shell::console = NULL;
+#endif  // V8_SHARED
+
 Persistent<Context> Shell::evaluation_context_;
+ShellOptions Shell::options;
+const char* Shell::kPrompt = "d8> ";
 
 
+#ifndef V8_SHARED
 bool CounterMap::Match(void* key1, void* key2) {
   const char* name1 = reinterpret_cast<const char*>(key1);
   const char* name2 = reinterpret_cast<const char*>(key2);
   return strcmp(name1, name2) == 0;
 }
+#endif  // V8_SHARED
 
 
 // Converts a V8 value to a C string.
@@ -114,16 +146,22 @@
                           Handle<Value> name,
                           bool print_result,
                           bool report_exceptions) {
+#ifndef V8_SHARED
+  bool FLAG_debugger = i::FLAG_debugger;
+#else
+  bool FLAG_debugger = false;
+#endif  // V8_SHARED
   HandleScope handle_scope;
   TryCatch try_catch;
-  if (i::FLAG_debugger) {
+  options.script_executed = true;
+  if (FLAG_debugger) {
     // When debugging make exceptions appear to be uncaught.
     try_catch.SetVerbose(true);
   }
   Handle<Script> script = Script::Compile(source, name);
   if (script.IsEmpty()) {
     // Print errors that happened during compilation.
-    if (report_exceptions && !i::FLAG_debugger)
+    if (report_exceptions && !FLAG_debugger)
       ReportException(&try_catch);
     return false;
   } else {
@@ -131,7 +169,7 @@
     if (result.IsEmpty()) {
       ASSERT(try_catch.HasCaught());
       // Print errors that happened during execution.
-      if (report_exceptions && !i::FLAG_debugger)
+      if (report_exceptions && !FLAG_debugger)
         ReportException(&try_catch);
       return false;
     } else {
@@ -140,8 +178,8 @@
         // If all went well and the result wasn't undefined then print
         // the returned value.
         v8::String::Utf8Value str(result);
-        const char* cstr = ToCString(str);
-        printf("%s\n", cstr);
+        fwrite(*str, sizeof(**str), str.length(), stdout);
+        printf("\n");
       }
       return true;
     }
@@ -152,6 +190,7 @@
 Handle<Value> Shell::Print(const Arguments& args) {
   Handle<Value> val = Write(args);
   printf("\n");
+  fflush(stdout);
   return val;
 }
 
@@ -163,16 +202,28 @@
       printf(" ");
     }
     v8::String::Utf8Value str(args[i]);
-    int n = fwrite(*str, sizeof(**str), str.length(), stdout);
+    int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
     if (n != str.length()) {
       printf("Error in fwrite\n");
-      exit(1);
+      Exit(1);
     }
   }
   return Undefined();
 }
 
 
+Handle<Value> Shell::EnableProfiler(const Arguments& args) {
+  V8::ResumeProfiler();
+  return Undefined();
+}
+
+
+Handle<Value> Shell::DisableProfiler(const Arguments& args) {
+  V8::PauseProfiler();
+  return Undefined();
+}
+
+
 Handle<Value> Shell::Read(const Arguments& args) {
   String::Utf8Value file(args[0]);
   if (*file == NULL) {
@@ -187,15 +238,27 @@
 
 
 Handle<Value> Shell::ReadLine(const Arguments& args) {
-  i::SmartPointer<char> line(i::ReadLine(""));
-  if (*line == NULL) {
-    return Null();
+  static const int kBufferSize = 256;
+  char buffer[kBufferSize];
+  Handle<String> accumulator = String::New("");
+  int length;
+  while (true) {
+    // Continue reading if the line ends with an escape '\\' or the line has
+    // not been fully read into the buffer yet (does not end with '\n').
+    // If fgets gets an error, just give up.
+    if (fgets(buffer, kBufferSize, stdin) == NULL) return Null();
+    length = static_cast<int>(strlen(buffer));
+    if (length == 0) {
+      return accumulator;
+    } else if (buffer[length-1] != '\n') {
+      accumulator = String::Concat(accumulator, String::New(buffer, length));
+    } else if (length > 1 && buffer[length-2] == '\\') {
+      buffer[length-2] = '\n';
+      accumulator = String::Concat(accumulator, String::New(buffer, length-1));
+    } else {
+      return String::Concat(accumulator, String::New(buffer, length-1));
+    }
   }
-  size_t len = strlen(*line);
-  if (len > 0 && line[len - 1] == '\n') {
-    --len;
-  }
-  return String::New(*line, len);
 }
 
 
@@ -210,7 +273,7 @@
     if (source.IsEmpty()) {
       return ThrowException(String::New("Error loading file"));
     }
-    if (!ExecuteString(source, String::New(*file), false, false)) {
+    if (!ExecuteString(source, String::New(*file), false, true)) {
       return ThrowException(String::New("Error executing file"));
     }
   }
@@ -218,6 +281,112 @@
 }
 
 
+Handle<Value> Shell::CreateExternalArray(const Arguments& args,
+                                         ExternalArrayType type,
+                                         size_t element_size) {
+  ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
+         element_size == 8);
+  if (args.Length() != 1) {
+    return ThrowException(
+        String::New("Array constructor needs one parameter."));
+  }
+  static const int kMaxLength = 0x3fffffff;
+#ifndef V8_SHARED
+  ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
+#endif  // V8_SHARED
+  size_t length = 0;
+  if (args[0]->IsUint32()) {
+    length = args[0]->Uint32Value();
+  } else {
+    Local<Number> number = args[0]->ToNumber();
+    if (number.IsEmpty() || !number->IsNumber()) {
+      return ThrowException(String::New("Array length must be a number."));
+    }
+    int32_t raw_length = number->ToInt32()->Int32Value();
+    if (raw_length < 0) {
+      return ThrowException(String::New("Array length must not be negative."));
+    }
+    if (raw_length > static_cast<int32_t>(kMaxLength)) {
+      return ThrowException(
+          String::New("Array length exceeds maximum length."));
+    }
+    length = static_cast<size_t>(raw_length);
+  }
+  if (length > static_cast<size_t>(kMaxLength)) {
+    return ThrowException(String::New("Array length exceeds maximum length."));
+  }
+  void* data = calloc(length, element_size);
+  if (data == NULL) {
+    return ThrowException(String::New("Memory allocation failed."));
+  }
+  Handle<Object> array = Object::New();
+  Persistent<Object> persistent_array = Persistent<Object>::New(array);
+  persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
+  persistent_array.MarkIndependent();
+  array->SetIndexedPropertiesToExternalArrayData(data, type,
+                                                 static_cast<int>(length));
+  array->Set(String::New("length"),
+             Int32::New(static_cast<int32_t>(length)), ReadOnly);
+  array->Set(String::New("BYTES_PER_ELEMENT"),
+             Int32::New(static_cast<int32_t>(element_size)));
+  return array;
+}
+
+
+void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
+  free(data);
+  object.Dispose();
+}
+
+
+Handle<Value> Shell::Int8Array(const Arguments& args) {
+  return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
+}
+
+
+Handle<Value> Shell::Uint8Array(const Arguments& args) {
+  return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
+}
+
+
+Handle<Value> Shell::Int16Array(const Arguments& args) {
+  return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
+}
+
+
+Handle<Value> Shell::Uint16Array(const Arguments& args) {
+  return CreateExternalArray(args, kExternalUnsignedShortArray,
+                             sizeof(uint16_t));
+}
+
+
+Handle<Value> Shell::Int32Array(const Arguments& args) {
+  return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
+}
+
+
+Handle<Value> Shell::Uint32Array(const Arguments& args) {
+  return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
+}
+
+
+Handle<Value> Shell::Float32Array(const Arguments& args) {
+  return CreateExternalArray(args, kExternalFloatArray,
+                             sizeof(float));  // NOLINT
+}
+
+
+Handle<Value> Shell::Float64Array(const Arguments& args) {
+  return CreateExternalArray(args, kExternalDoubleArray,
+                             sizeof(double));  // NOLINT
+}
+
+
+Handle<Value> Shell::PixelArray(const Arguments& args) {
+  return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
+}
+
+
 Handle<Value> Shell::Yield(const Arguments& args) {
   v8::Unlocker unlocker;
   return Undefined();
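// The CreateExternalArray helpers above back a plain JS object with a
// calloc'ed buffer via SetIndexedPropertiesToExternalArrayData and free it
// from the weak callback. A hedged usage sketch, assuming a d8 evaluation
// context is entered (the script text is an assumption for illustration):
//
//   v8::HandleScope scope;
//   v8::Handle<v8::String> src = v8::String::New(
//       "var a = Int8Array(16);"   // shell constructor; returns the array object
//       "a[0] = 42;"
//       "a.length + ':' + a.BYTES_PER_ELEMENT + ':' + a[0];");
//   v8::Handle<v8::Value> result = v8::Script::Compile(src)->Run();
//   v8::String::Utf8Value str(result);   // "16:1:42"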
@@ -226,7 +395,9 @@
 
 Handle<Value> Shell::Quit(const Arguments& args) {
   int exit_code = args[0]->Int32Value();
+#ifndef V8_SHARED
   OnExit();
+#endif  // V8_SHARED
   exit(exit_code);
   return Undefined();
 }
@@ -266,10 +437,17 @@
       printf("^");
     }
     printf("\n");
+    v8::String::Utf8Value stack_trace(try_catch->StackTrace());
+    if (stack_trace.length() > 0) {
+      const char* stack_trace_string = ToCString(stack_trace);
+      printf("%s\n", stack_trace_string);
+    }
   }
+  printf("\n");
 }
 
 
+#ifndef V8_SHARED
 Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
   HandleScope handle_scope;
   Context::Scope context_scope(utility_context_);
@@ -303,9 +481,11 @@
   Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
   return val;
 }
-#endif
+#endif  // ENABLE_DEBUGGER_SUPPORT
+#endif  // V8_SHARED
 
 
+#ifndef V8_SHARED
 int32_t* Counter::Bind(const char* name, bool is_histogram) {
   int i;
   for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -337,13 +517,13 @@
 
 
 void Shell::MapCounters(const char* name) {
-  counters_file_ = i::OS::MemoryMappedFile::create(name,
-    sizeof(CounterCollection), &local_counters_);
+  counters_file_ = i::OS::MemoryMappedFile::create(
+      name, sizeof(CounterCollection), &local_counters_);
   void* memory = (counters_file_ == NULL) ?
       NULL : counters_file_->memory();
   if (memory == NULL) {
     printf("Could not map counters file %s\n", name);
-    exit(1);
+    Exit(1);
   }
   counters_ = static_cast<CounterCollection*>(memory);
   V8::SetCounterFunction(LookupCounter);
@@ -404,56 +584,15 @@
 }
 
 
-void Shell::Initialize() {
-  Shell::counter_map_ = new CounterMap();
-  // Set up counters
-  if (i::StrLength(i::FLAG_map_counters) != 0)
-    MapCounters(i::FLAG_map_counters);
-  if (i::FLAG_dump_counters) {
-    V8::SetCounterFunction(LookupCounter);
-    V8::SetCreateHistogramFunction(CreateHistogram);
-    V8::SetAddHistogramSampleFunction(AddHistogramSample);
-  }
-
-  // Initialize the global objects
+void Shell::InstallUtilityScript() {
+  Locker lock;
   HandleScope scope;
-  Handle<ObjectTemplate> global_template = ObjectTemplate::New();
-  global_template->Set(String::New("print"), FunctionTemplate::New(Print));
-  global_template->Set(String::New("write"), FunctionTemplate::New(Write));
-  global_template->Set(String::New("read"), FunctionTemplate::New(Read));
-  global_template->Set(String::New("readline"),
-                       FunctionTemplate::New(ReadLine));
-  global_template->Set(String::New("load"), FunctionTemplate::New(Load));
-  global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
-  global_template->Set(String::New("version"), FunctionTemplate::New(Version));
-
-#ifdef LIVE_OBJECT_LIST
-  global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
-#else
-  global_template->Set(String::New("lol_is_enabled"), Boolean::New(false));
-#endif
-
-  Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
-  AddOSMethods(os_templ);
-  global_template->Set(String::New("os"), os_templ);
-
-  utility_context_ = Context::New(NULL, global_template);
+  // If we use the utility context, we have to set the security tokens so that
+  // the utility, evaluation and debug contexts can all access each other.
   utility_context_->SetSecurityToken(Undefined());
+  evaluation_context_->SetSecurityToken(Undefined());
   Context::Scope utility_scope(utility_context_);
 
-  i::JSArguments js_args = i::FLAG_js_arguments;
-  i::Handle<i::FixedArray> arguments_array =
-      FACTORY->NewFixedArray(js_args.argc());
-  for (int j = 0; j < js_args.argc(); j++) {
-    i::Handle<i::String> arg =
-        FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
-    arguments_array->set(j, *arg);
-  }
-  i::Handle<i::JSArray> arguments_jsarray =
-      FACTORY->NewJSArrayWithElements(arguments_array);
-  global_template->Set(String::New("arguments"),
-                       Utils::ToLocal(arguments_jsarray));
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Install the debugger object in the utility scope
   i::Debug* debug = i::Isolate::Current()->debug();
@@ -462,21 +601,21 @@
       = i::Handle<i::JSObject>(debug->debug_context()->global());
   utility_context_->Global()->Set(String::New("$debug"),
                                   Utils::ToLocal(js_debug));
-#endif
+  debug->debug_context()->set_security_token(HEAP->undefined_value());
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
   // Run the d8 shell utility script in the utility context
   int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
-  i::Vector<const char> shell_source
-      = i::NativesCollection<i::D8>::GetScriptSource(source_index);
-  i::Vector<const char> shell_source_name
-      = i::NativesCollection<i::D8>::GetScriptName(source_index);
+  i::Vector<const char> shell_source =
+      i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
+  i::Vector<const char> shell_source_name =
+      i::NativesCollection<i::D8>::GetScriptName(source_index);
   Handle<String> source = String::New(shell_source.start(),
-                                      shell_source.length());
+      shell_source.length());
   Handle<String> name = String::New(shell_source_name.start(),
-                                    shell_source_name.length());
+      shell_source_name.length());
   Handle<Script> script = Script::Compile(source, name);
   script->Run();
-
   // Mark the d8 shell script as native to avoid it showing up as normal source
   // in the debugger.
   i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
@@ -487,53 +626,225 @@
           i::SharedFunctionInfo::cast(*compiled_script)->script()));
   script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
 
-  // Create the evaluation context
-  evaluation_context_ = Context::New(NULL, global_template);
-  evaluation_context_->SetSecurityToken(Undefined());
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  // Set the security token of the debug context to allow access.
-  debug->debug_context()->set_security_token(HEAP->undefined_value());
-
-  // Start the debugger agent if requested.
-  if (i::FLAG_debugger_agent) {
-    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
-  }
-
   // Start the in-process debugger if requested.
   if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
     v8::Debug::SetDebugEventListener(HandleDebugEvent);
   }
+#endif  // ENABLE_DEBUGGER_SUPPORT
+}
+#endif  // V8_SHARED
+
+
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+class BZip2Decompressor : public v8::StartupDataDecompressor {
+ public:
+  virtual ~BZip2Decompressor() { }
+
+ protected:
+  virtual int DecompressData(char* raw_data,
+                             int* raw_data_size,
+                             const char* compressed_data,
+                             int compressed_data_size) {
+    ASSERT_EQ(v8::StartupData::kBZip2,
+              v8::V8::GetCompressedStartupDataAlgorithm());
+    unsigned int decompressed_size = *raw_data_size;
+    int result =
+        BZ2_bzBuffToBuffDecompress(raw_data,
+                                   &decompressed_size,
+                                   const_cast<char*>(compressed_data),
+                                   compressed_data_size,
+                                   0, 1);
+    if (result == BZ_OK) {
+      *raw_data_size = decompressed_size;
+    }
+    return result;
+  }
+};
 #endif
+
+Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
+  Handle<ObjectTemplate> global_template = ObjectTemplate::New();
+  global_template->Set(String::New("print"), FunctionTemplate::New(Print));
+  global_template->Set(String::New("write"), FunctionTemplate::New(Write));
+  global_template->Set(String::New("read"), FunctionTemplate::New(Read));
+  global_template->Set(String::New("readline"),
+                       FunctionTemplate::New(ReadLine));
+  global_template->Set(String::New("load"), FunctionTemplate::New(Load));
+  global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
+  global_template->Set(String::New("version"), FunctionTemplate::New(Version));
+  global_template->Set(String::New("enableProfiler"),
+                       FunctionTemplate::New(EnableProfiler));
+  global_template->Set(String::New("disableProfiler"),
+                       FunctionTemplate::New(DisableProfiler));
+
+  // Bind the handlers for external arrays.
+  global_template->Set(String::New("Int8Array"),
+                       FunctionTemplate::New(Int8Array));
+  global_template->Set(String::New("Uint8Array"),
+                       FunctionTemplate::New(Uint8Array));
+  global_template->Set(String::New("Int16Array"),
+                       FunctionTemplate::New(Int16Array));
+  global_template->Set(String::New("Uint16Array"),
+                       FunctionTemplate::New(Uint16Array));
+  global_template->Set(String::New("Int32Array"),
+                       FunctionTemplate::New(Int32Array));
+  global_template->Set(String::New("Uint32Array"),
+                       FunctionTemplate::New(Uint32Array));
+  global_template->Set(String::New("Float32Array"),
+                       FunctionTemplate::New(Float32Array));
+  global_template->Set(String::New("Float64Array"),
+                       FunctionTemplate::New(Float64Array));
+  global_template->Set(String::New("PixelArray"),
+                       FunctionTemplate::New(PixelArray));
+
+#ifdef LIVE_OBJECT_LIST
+  global_template->Set(String::New("lol_is_enabled"), True());
+#else
+  global_template->Set(String::New("lol_is_enabled"), False());
+#endif
+
+#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
+  Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
+  AddOSMethods(os_templ);
+  global_template->Set(String::New("os"), os_templ);
+#endif  // V8_SHARED
+
+  return global_template;
 }
 
 
-void Shell::OnExit() {
+void Shell::Initialize() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  BZip2Decompressor startup_data_decompressor;
+  int bz2_result = startup_data_decompressor.Decompress();
+  if (bz2_result != BZ_OK) {
+    fprintf(stderr, "bzip error code: %d\n", bz2_result);
+    Exit(1);
+  }
+#endif
+
+#ifndef V8_SHARED
+  Shell::counter_map_ = new CounterMap();
+  // Set up counters
+  if (i::StrLength(i::FLAG_map_counters) != 0)
+    MapCounters(i::FLAG_map_counters);
   if (i::FLAG_dump_counters) {
-    ::printf("+----------------------------------------+-------------+\n");
-    ::printf("| Name                                   | Value       |\n");
-    ::printf("+----------------------------------------+-------------+\n");
+    V8::SetCounterFunction(LookupCounter);
+    V8::SetCreateHistogramFunction(CreateHistogram);
+    V8::SetAddHistogramSampleFunction(AddHistogramSample);
+  }
+#endif  // V8_SHARED
+  if (options.test_shell) return;
+
+#ifndef V8_SHARED
+  Locker lock;
+  HandleScope scope;
+  Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+  utility_context_ = Context::New(NULL, global_template);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Start the debugger agent if requested.
+  if (i::FLAG_debugger_agent) {
+    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
+  }
+#endif  // ENABLE_DEBUGGER_SUPPORT
+#endif  // V8_SHARED
+}
+
+
+Persistent<Context> Shell::CreateEvaluationContext() {
+#ifndef V8_SHARED
+  // This needs to be a critical section since creating a context is not
+  // thread-safe.
+  i::ScopedLock lock(context_mutex_);
+#endif  // V8_SHARED
+  // Initialize the global objects
+  Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+
+  v8::TryCatch try_catch;
+  Persistent<Context> context = Context::New(NULL, global_template);
+  if (context.IsEmpty()) {
+    v8::Local<v8::Value> st = try_catch.StackTrace();
+    ASSERT(!context.IsEmpty());
+  }
+  Context::Scope scope(context);
+
+#ifndef V8_SHARED
+  i::JSArguments js_args = i::FLAG_js_arguments;
+  i::Handle<i::FixedArray> arguments_array =
+      FACTORY->NewFixedArray(js_args.argc());
+  for (int j = 0; j < js_args.argc(); j++) {
+    i::Handle<i::String> arg =
+        FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
+    arguments_array->set(j, *arg);
+  }
+  i::Handle<i::JSArray> arguments_jsarray =
+      FACTORY->NewJSArrayWithElements(arguments_array);
+  context->Global()->Set(String::New("arguments"),
+                         Utils::ToLocal(arguments_jsarray));
+#endif  // V8_SHARED
+  return context;
+}
+
+
+void Shell::Exit(int exit_code) {
+  // Use _exit instead of exit to avoid races between isolate
+  // threads and static destructors.
+  fflush(stdout);
+  fflush(stderr);
+  _exit(exit_code);
+}
+
+
+#ifndef V8_SHARED
+void Shell::OnExit() {
+  if (console != NULL) console->Close();
+  if (i::FLAG_dump_counters) {
+    printf("+----------------------------------------+-------------+\n");
+    printf("| Name                                   | Value       |\n");
+    printf("+----------------------------------------+-------------+\n");
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
       Counter* counter = i.CurrentValue();
       if (counter->is_histogram()) {
-        ::printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
-        ::printf("| t:%-36s | %11i |\n",
-                 i.CurrentKey(),
-                 counter->sample_total());
+        printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
+        printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total());
       } else {
-        ::printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
+        printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
       }
     }
-    ::printf("+----------------------------------------+-------------+\n");
+    printf("+----------------------------------------+-------------+\n");
   }
   if (counters_file_ != NULL)
     delete counters_file_;
 }
+#endif  // V8_SHARED
+
+
+static FILE* FOpen(const char* path, const char* mode) {
+#if (defined(_WIN32) || defined(_WIN64))
+  FILE* result;
+  if (fopen_s(&result, path, mode) == 0) {
+    return result;
+  } else {
+    return NULL;
+  }
+#else
+  FILE* file = fopen(path, mode);
+  if (file == NULL) return NULL;
+  struct stat file_stat;
+  if (fstat(fileno(file), &file_stat) != 0) return NULL;
+  bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
+  if (is_regular_file) return file;
+  fclose(file);
+  return NULL;
+#endif
+}
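// A minimal sketch (not part of the patch) of why the POSIX branch above
// checks S_IFREG: fopen() may succeed on a directory, and the failure only
// surfaces later when reading; the path below is an assumption for
// illustration.
//
//   FILE* f = fopen("/tmp", "rb");          // may succeed even for a directory
//   struct stat st;
//   if (f != NULL &&
//       (fstat(fileno(f), &st) != 0 || (st.st_mode & S_IFREG) == 0)) {
//     fclose(f);                            // reject non-regular files up front
//     f = NULL;
//   }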
 
 
 static char* ReadChars(const char* name, int* size_out) {
-  v8::Unlocker unlocker;  // Release the V8 lock while reading files.
-  FILE* file = i::OS::FOpen(name, "rb");
+  // Release the V8 lock while reading files.
+  v8::Unlocker unlocker(Isolate::GetCurrent());
+  FILE* file = FOpen(name, "rb");
   if (file == NULL) return NULL;
 
   fseek(file, 0, SEEK_END);
@@ -543,7 +854,7 @@
   char* chars = new char[size + 1];
   chars[size] = '\0';
   for (int i = 0; i < size;) {
-    int read = fread(&chars[i], 1, size - i, file);
+    int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
     i += read;
   }
   fclose(file);
@@ -552,6 +863,7 @@
 }
 
 
+#ifndef V8_SHARED
 static char* ReadToken(char* data, char token) {
   char* next = i::OS::StrChr(data, token);
   if (next != NULL) {
@@ -571,6 +883,7 @@
 static char* ReadWord(char* data) {
   return ReadToken(data, ' ');
 }
+#endif  // V8_SHARED
 
 
 // Reads a file into a v8 string.
@@ -585,61 +898,60 @@
 
 
 void Shell::RunShell() {
-  LineEditor* editor = LineEditor::Get();
-  printf("V8 version %s [console: %s]\n", V8::GetVersion(), editor->name());
+  Locker locker;
+  Context::Scope context_scope(evaluation_context_);
+  HandleScope outer_scope;
+  Handle<String> name = String::New("(d8)");
+#ifndef V8_SHARED
+  console = LineEditor::Get();
+  printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
   if (i::FLAG_debugger) {
     printf("JavaScript debugger enabled\n");
   }
-  editor->Open();
+  console->Open();
   while (true) {
-    Locker locker;
-    HandleScope handle_scope;
-    Context::Scope context_scope(evaluation_context_);
-    i::SmartPointer<char> input = editor->Prompt(Shell::kPrompt);
-    if (input.is_empty())
-      break;
-    editor->AddHistory(*input);
-    Handle<String> name = String::New("(d8)");
+    i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
+    if (input.is_empty()) break;
+    console->AddHistory(*input);
+    HandleScope inner_scope;
     ExecuteString(String::New(*input), name, true, true);
   }
-  editor->Close();
+#else
+  printf("V8 version %s [D8 light using shared library]\n", V8::GetVersion());
+  static const int kBufferSize = 256;
+  while (true) {
+    char buffer[kBufferSize];
+    printf("%s", Shell::kPrompt);
+    if (fgets(buffer, kBufferSize, stdin) == NULL) break;
+    HandleScope inner_scope;
+    ExecuteString(String::New(buffer), name, true, true);
+  }
+#endif  // V8_SHARED
   printf("\n");
 }
 
 
+#ifndef V8_SHARED
 class ShellThread : public i::Thread {
  public:
-  ShellThread(i::Isolate* isolate, int no, i::Vector<const char> files)
-    : Thread(isolate, "d8:ShellThread"),
-      no_(no), files_(files) { }
+  // Takes ownership of the underlying char array of |files|.
+  ShellThread(int no, char* files)
+      : Thread("d8:ShellThread"),
+        no_(no), files_(files) { }
+
+  ~ShellThread() {
+    delete[] files_;
+  }
+
   virtual void Run();
  private:
   int no_;
-  i::Vector<const char> files_;
+  char* files_;
 };
 
 
 void ShellThread::Run() {
-  // Prepare the context for this thread.
-  Locker locker;
-  HandleScope scope;
-  Handle<ObjectTemplate> global_template = ObjectTemplate::New();
-  global_template->Set(String::New("print"),
-                       FunctionTemplate::New(Shell::Print));
-  global_template->Set(String::New("write"),
-                       FunctionTemplate::New(Shell::Write));
-  global_template->Set(String::New("read"),
-                       FunctionTemplate::New(Shell::Read));
-  global_template->Set(String::New("readline"),
-                       FunctionTemplate::New(Shell::ReadLine));
-  global_template->Set(String::New("load"),
-                       FunctionTemplate::New(Shell::Load));
-  global_template->Set(String::New("yield"),
-                       FunctionTemplate::New(Shell::Yield));
-  global_template->Set(String::New("version"),
-                       FunctionTemplate::New(Shell::Version));
-
-  char* ptr = const_cast<char*>(files_.start());
+  char* ptr = files_;
   while ((ptr != NULL) && (*ptr != '\0')) {
     // For each newline-separated line.
     char* next_line = ReadLine(ptr);
@@ -650,23 +962,26 @@
       continue;
     }
 
-    Persistent<Context> thread_context = Context::New(NULL, global_template);
-    thread_context->SetSecurityToken(Undefined());
+    // Prepare the context for this thread.
+    Locker locker;
+    HandleScope outer_scope;
+    Persistent<Context> thread_context = Shell::CreateEvaluationContext();
     Context::Scope context_scope(thread_context);
 
     while ((ptr != NULL) && (*ptr != '\0')) {
+      HandleScope inner_scope;
       char* filename = ptr;
       ptr = ReadWord(ptr);
 
       // Skip empty strings.
       if (strlen(filename) == 0) {
-        break;
+        continue;
       }
 
       Handle<String> str = Shell::ReadFile(filename);
       if (str.IsEmpty()) {
-        printf("WARNING: %s not found\n", filename);
-        break;
+        printf("File '%s' not found\n", filename);
+        Shell::Exit(1);
       }
 
       Shell::ExecuteString(str, String::New(filename), false, false);
@@ -676,121 +991,371 @@
     ptr = next_line;
   }
 }
+#endif  // V8_SHARED
 
 
-int Shell::Main(int argc, char* argv[]) {
-  i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
-  if (i::FLAG_help) {
-    return 1;
-  }
-  Initialize();
-  bool run_shell = (argc == 1);
+SourceGroup::~SourceGroup() {
+#ifndef V8_SHARED
+  delete next_semaphore_;
+  next_semaphore_ = NULL;
+  delete done_semaphore_;
+  done_semaphore_ = NULL;
+  delete thread_;
+  thread_ = NULL;
+#endif  // V8_SHARED
+}
 
-  // Default use preemption if threads are created.
-  bool use_preemption = true;
 
-  // Default to use lowest possible thread preemption interval to test as many
-  // edgecases as possible.
-  int preemption_interval = 1;
-
-  i::List<i::Thread*> threads(1);
-
-  {
-    // Acquire the V8 lock once initialization has finished. Since the thread
-    // below may spawn new threads accessing V8 holding the V8 lock here is
-    // mandatory.
-    Locker locker;
-    Context::Scope context_scope(evaluation_context_);
-    for (int i = 1; i < argc; i++) {
-      char* str = argv[i];
-      if (strcmp(str, "--shell") == 0) {
-        run_shell = true;
-      } else if (strcmp(str, "--preemption") == 0) {
-        use_preemption = true;
-      } else if (strcmp(str, "--no-preemption") == 0) {
-        use_preemption = false;
-      } else if (strcmp(str, "--preemption-interval") == 0) {
-        if (i + 1 < argc) {
-          char* end = NULL;
-          preemption_interval = strtol(argv[++i], &end, 10);  // NOLINT
-          if (preemption_interval <= 0 || *end != '\0' || errno == ERANGE) {
-            printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
-            return 1;
-          }
-        } else {
-          printf("Missing value for --preemption-interval\n");
-          return 1;
-       }
-      } else if (strcmp(str, "-f") == 0) {
-        // Ignore any -f flags for compatibility with other stand-alone
-        // JavaScript engines.
-        continue;
-      } else if (strncmp(str, "--", 2) == 0) {
-        printf("Warning: unknown flag %s.\nTry --help for options\n", str);
-      } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
-        // Execute argument given to -e option directly.
-        v8::HandleScope handle_scope;
-        v8::Handle<v8::String> file_name = v8::String::New("unnamed");
-        v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
-        if (!ExecuteString(source, file_name, false, true)) {
-          OnExit();
-          return 1;
-        }
-        i++;
-      } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
-        int size = 0;
-        const char* files = ReadChars(argv[++i], &size);
-        if (files == NULL) return 1;
-        ShellThread* thread =
-            new ShellThread(i::Isolate::Current(),
-                            threads.length(),
-                            i::Vector<const char>(files, size));
-        thread->Start();
-        threads.Add(thread);
-      } else {
-        // Use all other arguments as names of files to load and run.
-        HandleScope handle_scope;
-        Handle<String> file_name = v8::String::New(str);
-        Handle<String> source = ReadFile(str);
-        if (source.IsEmpty()) {
-          printf("Error reading '%s'\n", str);
-          return 1;
-        }
-        if (!ExecuteString(source, file_name, false, true)) {
-          OnExit();
-          return 1;
-        }
+void SourceGroup::Execute() {
+  for (int i = begin_offset_; i < end_offset_; ++i) {
+    const char* arg = argv_[i];
+    if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
+      // Execute argument given to -e option directly.
+      HandleScope handle_scope;
+      Handle<String> file_name = String::New("unnamed");
+      Handle<String> source = String::New(argv_[i + 1]);
+      if (!Shell::ExecuteString(source, file_name, false, true)) {
+        Shell::Exit(1);
+      }
+      ++i;
+    } else if (arg[0] == '-') {
+      // Ignore other options. They have been parsed already.
+    } else {
+      // Use all other arguments as names of files to load and run.
+      HandleScope handle_scope;
+      Handle<String> file_name = String::New(arg);
+      Handle<String> source = ReadFile(arg);
+      if (source.IsEmpty()) {
+        printf("Error reading '%s'\n", arg);
+        Shell::Exit(1);
+      }
+      if (!Shell::ExecuteString(source, file_name, false, true)) {
+        Shell::Exit(1);
       }
     }
-
-    // Start preemption if threads have been created and preemption is enabled.
-    if (threads.length() > 0 && use_preemption) {
-      Locker::StartPreemption(preemption_interval);
-    }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-    // Run the remote debugger if requested.
-    if (i::FLAG_remote_debugger) {
-      RunRemoteDebugger(i::FLAG_debugger_port);
-      return 0;
-    }
-#endif
   }
-  if (run_shell)
-    RunShell();
+}
+
+
+Handle<String> SourceGroup::ReadFile(const char* name) {
+  int size;
+  const char* chars = ReadChars(name, &size);
+  if (chars == NULL) return Handle<String>();
+  Handle<String> result = String::New(chars, size);
+  delete[] chars;
+  return result;
+}
+
+
+#ifndef V8_SHARED
+i::Thread::Options SourceGroup::GetThreadOptions() {
+  i::Thread::Options options;
+  options.name = "IsolateThread";
+  // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
+  // which is not enough to parse the big literal expressions used in tests.
+  // The stack size should be at least StackGuard::kLimitSize + some
+  // OS-specific padding for thread startup code.
+  options.stack_size = 2 << 20;  // 2 Mb seems to be enough
+  return options;
+}
+
+
+void SourceGroup::ExecuteInThread() {
+  Isolate* isolate = Isolate::New();
+  do {
+    if (next_semaphore_ != NULL) next_semaphore_->Wait();
+    {
+      Isolate::Scope iscope(isolate);
+      Locker lock(isolate);
+      HandleScope scope;
+      Persistent<Context> context = Shell::CreateEvaluationContext();
+      {
+        Context::Scope cscope(context);
+        Execute();
+      }
+      context.Dispose();
+    }
+    if (done_semaphore_ != NULL) done_semaphore_->Signal();
+  } while (!Shell::options.last_run);
+  isolate->Dispose();
+}
+
+
+void SourceGroup::StartExecuteInThread() {
+  if (thread_ == NULL) {
+    thread_ = new IsolateThread(this);
+    thread_->Start();
+  }
+  next_semaphore_->Signal();
+}
+
+
+void SourceGroup::WaitForThread() {
+  if (thread_ == NULL) return;
+  if (Shell::options.last_run) {
+    thread_->Join();
+  } else {
+    done_semaphore_->Wait();
+  }
+}
+#endif  // V8_SHARED
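// SourceGroup::ExecuteInThread above runs each --isolate group in its own
// v8::Isolate, entered and locked per thread. A minimal standalone sketch of
// that pattern (the script source is an assumption for illustration):
//
//   v8::Isolate* isolate = v8::Isolate::New();
//   {
//     v8::Isolate::Scope isolate_scope(isolate);  // enter the isolate on this thread
//     v8::Locker locker(isolate);                 // one thread in the isolate at a time
//     v8::HandleScope handle_scope;
//     v8::Persistent<v8::Context> context = v8::Context::New();
//     {
//       v8::Context::Scope context_scope(context);
//       v8::Script::Compile(v8::String::New("6 * 7"))->Run();
//     }
//     context.Dispose();
//   }
//   isolate->Dispose();                           // only after leaving the isolate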
+
+
+bool Shell::SetOptions(int argc, char* argv[]) {
+  for (int i = 0; i < argc; i++) {
+    if (strcmp(argv[i], "--stress-opt") == 0) {
+      options.stress_opt = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--stress-deopt") == 0) {
+      options.stress_deopt = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--noalways-opt") == 0) {
+      // No support for stressing if we can't use --always-opt.
+      options.stress_opt = false;
+      options.stress_deopt = false;
+    } else if (strcmp(argv[i], "--shell") == 0) {
+      options.interactive_shell = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--test") == 0) {
+      options.test_shell = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--preemption") == 0) {
+#ifdef V8_SHARED
+      printf("D8 with shared library does not support multi-threading\n");
+      return false;
+#else
+      options.use_preemption = true;
+      argv[i] = NULL;
+#endif  // V8_SHARED
+    } else if (strcmp(argv[i], "--no-preemption") == 0) {
+#ifdef V8_SHARED
+      printf("D8 with shared library does not support multi-threading\n");
+      return false;
+#else
+      options.use_preemption = false;
+      argv[i] = NULL;
+#endif  // V8_SHARED
+    } else if (strcmp(argv[i], "--preemption-interval") == 0) {
+#ifdef V8_SHARED
+      printf("D8 with shared library does not support multi-threading\n");
+      return false;
+#else
+      if (++i < argc) {
+        argv[i-1] = NULL;
+        char* end = NULL;
+        options.preemption_interval = strtol(argv[i], &end, 10);  // NOLINT
+        if (options.preemption_interval <= 0
+            || *end != '\0'
+            || errno == ERANGE) {
+          printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
+          return false;
+        }
+        argv[i] = NULL;
+      } else {
+        printf("Missing value for --preemption-interval\n");
+        return false;
+      }
+#endif  // V8_SHARED
+    } else if (strcmp(argv[i], "-f") == 0) {
+      // Ignore any -f flags for compatibility with other stand-alone
+      // JavaScript engines.
+      continue;
+    } else if (strcmp(argv[i], "--isolate") == 0) {
+#ifdef V8_SHARED
+      printf("D8 with shared library does not support multi-threading\n");
+      return false;
+#endif  // V8_SHARED
+      options.num_isolates++;
+    } else if (strcmp(argv[i], "-p") == 0) {
+#ifdef V8_SHARED
+      printf("D8 with shared library does not support multi-threading\n");
+      return false;
+#else
+      options.num_parallel_files++;
+#endif  // V8_SHARED
+    }
+#ifdef V8_SHARED
+    else if (strcmp(argv[i], "--dump-counters") == 0) {
+      printf("D8 with shared library does not include counters\n");
+      return false;
+    } else if (strcmp(argv[i], "--debugger") == 0) {
+      printf("Javascript debugger not included\n");
+      return false;
+    }
+#endif  // V8_SHARED
+  }
+
+#ifndef V8_SHARED
+  // Run parallel threads if we are not using --isolate
+  options.parallel_files = new char*[options.num_parallel_files];
+  int parallel_files_set = 0;
+  for (int i = 1; i < argc; i++) {
+    if (argv[i] == NULL) continue;
+    if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) {
+      if (options.num_isolates > 1) {
+        printf("-p is not compatible with --isolate\n");
+        return false;
+      }
+      argv[i] = NULL;
+      i++;
+      options.parallel_files[parallel_files_set] = argv[i];
+      parallel_files_set++;
+      argv[i] = NULL;
+    }
+  }
+  if (parallel_files_set != options.num_parallel_files) {
+    printf("-p requires a file containing a list of files as parameter\n");
+    return false;
+  }
+#endif  // V8_SHARED
+
+  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+
+  // Set up isolated source groups.
+  options.isolate_sources = new SourceGroup[options.num_isolates];
+  SourceGroup* current = options.isolate_sources;
+  current->Begin(argv, 1);
+  for (int i = 1; i < argc; i++) {
+    const char* str = argv[i];
+    if (strcmp(str, "--isolate") == 0) {
+      current->End(i);
+      current++;
+      current->Begin(argv, i + 1);
+    } else if (strncmp(argv[i], "--", 2) == 0) {
+      printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
+    }
+  }
+  current->End(argc);
+
+  return true;
+}
+
+
+int Shell::RunMain(int argc, char* argv[]) {
+#ifndef V8_SHARED
+  i::List<i::Thread*> threads(1);
+  if (options.parallel_files != NULL) {
+    for (int i = 0; i < options.num_parallel_files; i++) {
+      char* files = NULL;
+      { Locker lock(Isolate::GetCurrent());
+        int size = 0;
+        files = ReadChars(options.parallel_files[i], &size);
+      }
+      if (files == NULL) {
+        printf("File list '%s' not found\n", options.parallel_files[i]);
+        Exit(1);
+      }
+      ShellThread* thread = new ShellThread(threads.length(), files);
+      thread->Start();
+      threads.Add(thread);
+    }
+  }
+  for (int i = 1; i < options.num_isolates; ++i) {
+    options.isolate_sources[i].StartExecuteInThread();
+  }
+#endif  // V8_SHARED
+  {  // NOLINT
+    Locker lock;
+    HandleScope scope;
+    Persistent<Context> context = CreateEvaluationContext();
+    {
+      Context::Scope cscope(context);
+      options.isolate_sources[0].Execute();
+    }
+    if (options.last_run) {
+      // Keep using the same context in the interactive shell
+      evaluation_context_ = context;
+    } else {
+      context.Dispose();
+    }
+
+#ifndef V8_SHARED
+    // Start preemption if threads have been created and preemption is enabled.
+    if (threads.length() > 0
+        && options.use_preemption) {
+      Locker::StartPreemption(options.preemption_interval);
+    }
+#endif  // V8_SHARED
+  }
+
+#ifndef V8_SHARED
+  for (int i = 1; i < options.num_isolates; ++i) {
+    options.isolate_sources[i].WaitForThread();
+  }
+
   for (int i = 0; i < threads.length(); i++) {
     i::Thread* thread = threads[i];
     thread->Join();
     delete thread;
   }
-  OnExit();
+
+  if (threads.length() > 0 && options.use_preemption) {
+    Locker lock;
+    Locker::StopPreemption();
+  }
+#endif  // V8_SHARED
   return 0;
 }
 
 
+int Shell::Main(int argc, char* argv[]) {
+  if (!SetOptions(argc, argv)) return 1;
+  Initialize();
+
+  int result = 0;
+  if (options.stress_opt || options.stress_deopt) {
+    Testing::SetStressRunType(
+        options.stress_opt ? Testing::kStressTypeOpt
+                           : Testing::kStressTypeDeopt);
+    int stress_runs = Testing::GetStressRuns();
+    for (int i = 0; i < stress_runs && result == 0; i++) {
+      printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+      Testing::PrepareStressRun(i);
+      options.last_run = (i == stress_runs - 1);
+      result = RunMain(argc, argv);
+    }
+    printf("======== Full Deoptimization =======\n");
+    Testing::DeoptimizeAll();
+  } else {
+    result = RunMain(argc, argv);
+  }
+
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+  // Run remote debugger if requested, but never on --test
+  if (i::FLAG_remote_debugger && !options.test_shell) {
+    InstallUtilityScript();
+    RunRemoteDebugger(i::FLAG_debugger_port);
+    return 0;
+  }
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+
+  // Run the interactive shell if explicitly requested or if no script has
+  // been executed, but never on --test.
+  if ((options.interactive_shell || !options.script_executed)
+      && !options.test_shell) {
+#ifndef V8_SHARED
+    InstallUtilityScript();
+#endif  // V8_SHARED
+    RunShell();
+  }
+
+  V8::Dispose();
+
+#ifndef V8_SHARED
+  OnExit();
+#endif  // V8_SHARED
+
+  return result;
+}
+
 }  // namespace v8
 
 
+#ifndef GOOGLE3
 int main(int argc, char* argv[]) {
   return v8::Shell::Main(argc, argv);
 }
+#endif
diff --git a/src/d8.gyp b/src/d8.gyp
index 901fd65..70186cf 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -26,43 +26,71 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
+  'includes': ['../build/common.gypi'],
+  'variables': {
+    'console%': '',
+  },
   'targets': [
     {
       'target_name': 'd8',
       'type': 'executable',
       'dependencies': [
-        'd8_js2c#host',
         '../tools/gyp/v8.gyp:v8',
       ],
+      # Generated source files need this explicitly:
       'include_dirs+': [
         '../src',
       ],
       'defines': [
-        'ENABLE_LOGGING_AND_PROFILING',
         'ENABLE_DEBUGGER_SUPPORT',
-        'ENABLE_VMSTATE_TRACKING',
-        'V8_FAST_TLS',
       ],
       'sources': [
         'd8.cc',
-        'd8-debug.cc',
-        '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
       ],
       'conditions': [
-        [ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
-          'sources': [ 'd8-posix.cc', ]
+        [ 'component!="shared_library"', {
+          'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
+          'conditions': [
+            [ 'want_separate_host_toolset==1', {
+              'dependencies': [
+                'd8_js2c#host',
+              ],
+            }, {
+              'dependencies': [
+                'd8_js2c',
+              ],
+            }],
+            [ 'console=="readline"', {
+              'libraries': [ '-lreadline', ],
+              'sources': [ 'd8-readline.cc' ],
+            }],
+            [ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
+              or OS=="openbsd" or OS=="solaris")', {
+              'sources': [ 'd8-posix.cc', ]
+            }],
+            [ 'OS=="win"', {
+              'sources': [ 'd8-windows.cc', ]
+            }],
+          ],
         }],
       ],
     },
     {
       'target_name': 'd8_js2c',
       'type': 'none',
-      'toolsets': ['host'],
       'variables': {
         'js_files': [
           'd8.js',
+          'macros.py',
         ],
       },
+      'conditions': [
+        [ 'want_separate_host_toolset==1', {
+          'toolsets': ['host'],
+        }, {
+          'toolsets': ['target'],
+        }]
+      ],
       'actions': [
         {
           'action_name': 'd8_js2c',
@@ -72,13 +100,13 @@
           ],
           'outputs': [
             '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
-            '<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
           ],
           'action': [
             'python',
             '../tools/js2c.py',
             '<@(_outputs)',
             'D8',
+            'off',  # compress startup data
             '<@(js_files)'
           ],
         },
diff --git a/src/d8.h b/src/d8.h
index de1fe0d..15d8d5d 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,16 +28,19 @@
 #ifndef V8_D8_H_
 #define V8_D8_H_
 
-#include "v8.h"
+#ifndef V8_SHARED
+#include "allocation.h"
 #include "hashmap.h"
-
+#include "smart-array-pointer.h"
+#include "v8.h"
+#else
+#include "../include/v8.h"
+#endif  // V8_SHARED
 
 namespace v8 {
 
 
-namespace i = v8::internal;
-
-
+#ifndef V8_SHARED
 // A single counter in a counter collection.
 class Counter {
  public:
@@ -104,14 +107,143 @@
     i::HashMap* map_;
     i::HashMap::Entry* entry_;
   };
+
  private:
   static int Hash(const char* name);
   static bool Match(void* key1, void* key2);
   i::HashMap hash_map_;
 };
+#endif  // V8_SHARED
 
 
-class Shell: public i::AllStatic {
+#ifndef V8_SHARED
+class LineEditor {
+ public:
+  enum Type { DUMB = 0, READLINE = 1 };
+  LineEditor(Type type, const char* name);
+  virtual ~LineEditor() { }
+
+  virtual i::SmartArrayPointer<char> Prompt(const char* prompt) = 0;
+  virtual bool Open() { return true; }
+  virtual bool Close() { return true; }
+  virtual void AddHistory(const char* str) { }
+
+  const char* name() { return name_; }
+  static LineEditor* Get();
+ private:
+  Type type_;
+  const char* name_;
+  LineEditor* next_;
+  static LineEditor* first_;
+};
+#endif  // V8_SHARED
+
+
+class SourceGroup {
+ public:
+  SourceGroup() :
+#ifndef V8_SHARED
+      next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
+      done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
+      thread_(NULL),
+#endif  // V8_SHARED
+      argv_(NULL),
+      begin_offset_(0),
+      end_offset_(0) {}
+
+  ~SourceGroup();
+
+  void Begin(char** argv, int offset) {
+    argv_ = const_cast<const char**>(argv);
+    begin_offset_ = offset;
+  }
+
+  void End(int offset) { end_offset_ = offset; }
+
+  void Execute();
+
+#ifndef V8_SHARED
+  void StartExecuteInThread();
+  void WaitForThread();
+
+ private:
+  class IsolateThread : public i::Thread {
+   public:
+    explicit IsolateThread(SourceGroup* group)
+        : i::Thread(GetThreadOptions()), group_(group) {}
+
+    virtual void Run() {
+      group_->ExecuteInThread();
+    }
+
+   private:
+    SourceGroup* group_;
+  };
+
+  static i::Thread::Options GetThreadOptions();
+  void ExecuteInThread();
+
+  i::Semaphore* next_semaphore_;
+  i::Semaphore* done_semaphore_;
+  i::Thread* thread_;
+#endif  // V8_SHARED
+
+  void ExitShell(int exit_code);
+  Handle<String> ReadFile(const char* name);
+
+  const char** argv_;
+  int begin_offset_;
+  int end_offset_;
+};
+
+
+class ShellOptions {
+ public:
+  ShellOptions() :
+#ifndef V8_SHARED
+     use_preemption(true),
+     preemption_interval(10),
+     num_parallel_files(0),
+     parallel_files(NULL),
+#endif  // V8_SHARED
+     script_executed(false),
+     last_run(true),
+     stress_opt(false),
+     stress_deopt(false),
+     interactive_shell(false),
+     test_shell(false),
+     num_isolates(1),
+     isolate_sources(NULL) { }
+
+  ~ShellOptions() {
+#ifndef V8_SHARED
+    delete[] parallel_files;
+#endif  // V8_SHARED
+    delete[] isolate_sources;
+  }
+
+#ifndef V8_SHARED
+  bool use_preemption;
+  int preemption_interval;
+  int num_parallel_files;
+  char** parallel_files;
+#endif  // V8_SHARED
+  bool script_executed;
+  bool last_run;
+  bool stress_opt;
+  bool stress_deopt;
+  bool interactive_shell;
+  bool test_shell;
+  int num_isolates;
+  SourceGroup* isolate_sources;
+};
+
+#ifdef V8_SHARED
+class Shell {
+#else
+class Shell : public i::AllStatic {
+#endif  // V8_SHARED
+
  public:
   static bool ExecuteString(Handle<String> source,
                             Handle<Value> name,
@@ -119,7 +251,15 @@
                             bool report_exceptions);
   static const char* ToCString(const v8::String::Utf8Value& value);
   static void ReportException(TryCatch* try_catch);
-  static void Initialize();
+  static Handle<String> ReadFile(const char* name);
+  static Persistent<Context> CreateEvaluationContext();
+  static int RunMain(int argc, char* argv[]);
+  static int Main(int argc, char* argv[]);
+  static void Exit(int exit_code);
+
+#ifndef V8_SHARED
+  static Handle<Array> GetCompletions(Handle<String> text,
+                                      Handle<String> full);
   static void OnExit();
   static int* LookupCounter(const char* name);
   static void* CreateHistogram(const char* name,
@@ -128,11 +268,8 @@
                                size_t buckets);
   static void AddHistogramSample(void* histogram, int sample);
   static void MapCounters(const char* name);
-  static Handle<String> ReadFile(const char* name);
-  static void RunShell();
-  static int Main(int argc, char* argv[]);
-  static Handle<Array> GetCompletions(Handle<String> text,
-                                      Handle<String> full);
+#endif  // V8_SHARED
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static Handle<Object> DebugMessageDetails(Handle<String> message);
   static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
@@ -147,9 +284,20 @@
   static Handle<Value> Yield(const Arguments& args);
   static Handle<Value> Quit(const Arguments& args);
   static Handle<Value> Version(const Arguments& args);
+  static Handle<Value> EnableProfiler(const Arguments& args);
+  static Handle<Value> DisableProfiler(const Arguments& args);
   static Handle<Value> Read(const Arguments& args);
   static Handle<Value> ReadLine(const Arguments& args);
   static Handle<Value> Load(const Arguments& args);
+  static Handle<Value> Int8Array(const Arguments& args);
+  static Handle<Value> Uint8Array(const Arguments& args);
+  static Handle<Value> Int16Array(const Arguments& args);
+  static Handle<Value> Uint16Array(const Arguments& args);
+  static Handle<Value> Int32Array(const Arguments& args);
+  static Handle<Value> Uint32Array(const Arguments& args);
+  static Handle<Value> Float32Array(const Arguments& args);
+  static Handle<Value> Float64Array(const Arguments& args);
+  static Handle<Value> PixelArray(const Arguments& args);
   // The OS object on the global object contains methods for performing
   // operating system calls:
   //
@@ -186,42 +334,37 @@
   static Handle<Value> RemoveDirectory(const Arguments& args);
 
   static void AddOSMethods(Handle<ObjectTemplate> os_template);
-
-  static Handle<Context> utility_context() { return utility_context_; }
-
+#ifndef V8_SHARED
   static const char* kHistoryFileName;
+  static const int kMaxHistoryEntries;
+  static LineEditor* console;
+#endif  // V8_SHARED
   static const char* kPrompt;
+  static ShellOptions options;
+
  private:
-  static Persistent<Context> utility_context_;
   static Persistent<Context> evaluation_context_;
+#ifndef V8_SHARED
+  static Persistent<Context> utility_context_;
   static CounterMap* counter_map_;
   // We statically allocate a set of local counters to be used if we
   // don't want to store the stats in a memory-mapped file
   static CounterCollection local_counters_;
   static CounterCollection* counters_;
   static i::OS::MemoryMappedFile* counters_file_;
+  static i::Mutex* context_mutex_;
+
   static Counter* GetCounter(const char* name, bool is_histogram);
-};
-
-
-class LineEditor {
- public:
-  enum Type { DUMB = 0, READLINE = 1 };
-  LineEditor(Type type, const char* name);
-  virtual ~LineEditor() { }
-
-  virtual i::SmartPointer<char> Prompt(const char* prompt) = 0;
-  virtual bool Open() { return true; }
-  virtual bool Close() { return true; }
-  virtual void AddHistory(const char* str) { }
-
-  const char* name() { return name_; }
-  static LineEditor* Get();
- private:
-  Type type_;
-  const char* name_;
-  LineEditor* next_;
-  static LineEditor* first_;
+  static void InstallUtilityScript();
+#endif  // V8_SHARED
+  static void Initialize();
+  static void RunShell();
+  static bool SetOptions(int argc, char* argv[]);
+  static Handle<ObjectTemplate> CreateGlobalTemplate();
+  static Handle<Value> CreateExternalArray(const Arguments& args,
+                                           ExternalArrayType type,
+                                           size_t element_size);
+  static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
 };
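A brief usage sketch for the shell builtins declared above: the external-array constructors (Int8Array through PixelArray) and the profiler toggles. The JavaScript-visible names are assumptions about how CreateGlobalTemplate() exposes the Shell:: methods; treat this as illustrative d8 input, not a verified transcript.

    // Hypothetical d8 session exercising the new shell helpers.
    var ints = Int32Array(8);           // externally backed, 8 elements
    for (var i = 0; i < 8; i++) ints[i] = i * i;
    print(ints[3]);                     // expected: 9
    enableProfiler();                   // assumed JS name for Shell::EnableProfiler
    // ... run the code being profiled ...
    disableProfiler();                  // assumed JS name for Shell::DisableProfiler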
 
 
diff --git a/src/d8.js b/src/d8.js
index 9798078..3009037 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -103,7 +103,8 @@
                     Local: 1,
                     With: 2,
                     Closure: 3,
-                    Catch: 4 };
+                    Catch: 4,
+                    Block: 5 };
 
 
 // Current debug state.
@@ -391,14 +392,14 @@
           this.frameCommandToJSONRequest_('' +
                                           (Debug.State.currentFrame + 1));
       break;
-      
+
     case 'down':
     case 'do':
       this.request_ =
           this.frameCommandToJSONRequest_('' +
                                           (Debug.State.currentFrame - 1));
       break;
-      
+
     case 'set':
     case 'print':
     case 'p':
@@ -977,9 +978,14 @@
     // specification it is considered a function break point.
     pos = target.indexOf(':');
     if (pos > 0) {
-      type = 'script';
       var tmp = target.substring(pos + 1, target.length);
       target = target.substring(0, pos);
+      if (target[0] == '/' && target[target.length - 1] == '/') {
+        type = 'scriptRegExp';
+        target = target.substring(1, target.length - 1);
+      } else {
+        type = 'script';
+      }
 
       // Check for both line and column.
       pos = tmp.indexOf(':');
@@ -1066,7 +1072,7 @@
         arg2 = 'uncaught';
       }
       excType = arg2;
-      
+
     // Check for:
     //   en[able] [all|unc[aught]] exc[eptions]
     //   dis[able] [all|unc[aught]] exc[eptions]
@@ -1125,7 +1131,7 @@
         request.arguments.ignoreCount = parseInt(otherArgs);
         break;
       default:
-        throw new Error('Invalid arguments.');  
+        throw new Error('Invalid arguments.');
     }
   } else {
     throw new Error('Invalid arguments.');
@@ -1246,7 +1252,7 @@
       start_index = parseInt(args[i]);
       // The user input start index starts at 1:
       if (start_index <= 0) {
-        throw new Error('Invalid index ' + args[i] + '.');                
+        throw new Error('Invalid index ' + args[i] + '.');
       }
       start_index -= 1;
       is_verbose = true;
@@ -1780,7 +1786,7 @@
 
 
 function decodeLolListResponse(body, title) {
-  
+
   var result;
   var total_count = body.count;
   var total_size = body.size;
@@ -1984,6 +1990,9 @@
           if (breakpoint.script_name) {
               result += ' script_name=' + breakpoint.script_name;
           }
+          if (breakpoint.script_regexp) {
+              result += ' script_regexp=' + breakpoint.script_regexp;
+          }
           result += ' line=' + (breakpoint.line + 1);
           if (breakpoint.column != null) {
             result += ' column=' + (breakpoint.column + 1);
@@ -2012,7 +2021,7 @@
         } else if (body.breakOnUncaughtExceptions) {
           result += '* breaking on UNCAUGHT exceptions is enabled\n';
         } else {
-          result += '* all exception breakpoints are disabled\n';            
+          result += '* all exception breakpoints are disabled\n';
         }
         details.text = result;
         break;
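The d8.js hunk above lets a 'break' target of the form /pattern/:line select scripts by regular expression rather than by exact name. A minimal JavaScript restatement of the new classification (the helper name is invented for illustration):

    // classifyBreakTarget('/test-.*\.js/') -> { type: 'scriptRegExp', target: 'test-.*\.js' }
    // classifyBreakTarget('mjsunit.js')    -> { type: 'script',       target: 'mjsunit.js' }
    function classifyBreakTarget(target) {
      if (target[0] == '/' && target[target.length - 1] == '/') {
        return { type: 'scriptRegExp',
                 target: target.substring(1, target.length - 1) };
      }
      return { type: 'script', target: target };
    }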
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 79339ed..6a3b05c 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -63,477 +63,4 @@
   current_value_ = val >> 1;
 }
 
-
-bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
-  Scope* scope = info->scope();
-  int size = scope->num_parameters() + scope->num_stack_slots();
-  if (size == 0) return true;
-  AssignedVariablesAnalyzer analyzer(info, size);
-  return analyzer.Analyze();
-}
-
-
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
-                                                     int size)
-    : info_(info), av_(size) {
-}
-
-
-bool AssignedVariablesAnalyzer::Analyze() {
-  ASSERT(av_.length() > 0);
-  VisitStatements(info_->function()->body());
-  return !HasStackOverflow();
-}
-
-
-Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
-  // The loop must have all necessary parts.
-  if (stmt->init() == NULL || stmt->cond() == NULL || stmt->next() == NULL) {
-    return NULL;
-  }
-  // The initialization statement has to be a simple assignment.
-  Assignment* init = stmt->init()->StatementAsSimpleAssignment();
-  if (init == NULL) return NULL;
-
-  // We only deal with local variables.
-  Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
-  if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
-
-  // Don't try to get clever with const or dynamic variables.
-  if (loop_var->mode() != Variable::VAR) return NULL;
-
-  // The initial value has to be a smi.
-  Literal* init_lit = init->value()->AsLiteral();
-  if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
-  int init_value = Smi::cast(*init_lit->handle())->value();
-
-  // The condition must be a compare of variable with <, <=, >, or >=.
-  CompareOperation* cond = stmt->cond()->AsCompareOperation();
-  if (cond == NULL) return NULL;
-  if (cond->op() != Token::LT
-      && cond->op() != Token::LTE
-      && cond->op() != Token::GT
-      && cond->op() != Token::GTE) return NULL;
-
-  // The lhs must be the same variable as in the init expression.
-  if (cond->left()->AsVariableProxy()->AsVariable() != loop_var) return NULL;
-
-  // The rhs must be a smi.
-  Literal* term_lit = cond->right()->AsLiteral();
-  if (term_lit == NULL || !term_lit->handle()->IsSmi()) return NULL;
-  int term_value = Smi::cast(*term_lit->handle())->value();
-
-  // The count operation updates the same variable as in the init expression.
-  CountOperation* update = stmt->next()->StatementAsCountOperation();
-  if (update == NULL) return NULL;
-  if (update->expression()->AsVariableProxy()->AsVariable() != loop_var) {
-    return NULL;
-  }
-
-  // The direction of the count operation must agree with the start and the end
-  // value. We currently do not allow the initial value to be the same as the
-  // terminal value. This _would_ be ok as long as the loop body never executes
-  // or executes exactly one time.
-  if (init_value == term_value) return NULL;
-  if (init_value < term_value && update->op() != Token::INC) return NULL;
-  if (init_value > term_value && update->op() != Token::DEC) return NULL;
-
-  // Check that the update operation cannot overflow the smi range. This can
-  // occur in the two cases where the loop bound is equal to the largest or
-  // smallest smi.
-  if (update->op() == Token::INC && term_value == Smi::kMaxValue) return NULL;
-  if (update->op() == Token::DEC && term_value == Smi::kMinValue) return NULL;
-
-  // Found a smi loop variable.
-  return loop_var;
-}
-
-int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
-  ASSERT(var != NULL);
-  ASSERT(var->IsStackAllocated());
-  Slot* slot = var->AsSlot();
-  if (slot->type() == Slot::PARAMETER) {
-    return slot->index();
-  } else {
-    return info_->scope()->num_parameters() + slot->index();
-  }
-}
-
-
-void AssignedVariablesAnalyzer::RecordAssignedVar(Variable* var) {
-  ASSERT(var != NULL);
-  if (var->IsStackAllocated()) {
-    av_.Add(BitIndex(var));
-  }
-}
-
-
-void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
-  Variable* var = expr->AsVariableProxy()->AsVariable();
-  if (var != NULL &&
-      var->IsStackAllocated() &&
-      !var->is_arguments() &&
-      var->mode() != Variable::CONST &&
-      (var->is_this() || !av_.Contains(BitIndex(var)))) {
-    expr->AsVariableProxy()->MarkAsTrivial();
-  }
-}
-
-
-void AssignedVariablesAnalyzer::ProcessExpression(Expression* expr) {
-  BitVector saved_av(av_);
-  av_.Clear();
-  Visit(expr);
-  av_.Union(saved_av);
-}
-
-void AssignedVariablesAnalyzer::VisitBlock(Block* stmt) {
-  VisitStatements(stmt->statements());
-}
-
-
-void AssignedVariablesAnalyzer::VisitExpressionStatement(
-    ExpressionStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
-  // Do nothing.
-}
-
-
-void AssignedVariablesAnalyzer::VisitIfStatement(IfStatement* stmt) {
-  ProcessExpression(stmt->condition());
-  Visit(stmt->then_statement());
-  Visit(stmt->else_statement());
-}
-
-
-void AssignedVariablesAnalyzer::VisitContinueStatement(
-    ContinueStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithEnterStatement(
-    WithEnterStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithExitStatement(
-    WithExitStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
-  BitVector result(av_);
-  av_.Clear();
-  Visit(stmt->tag());
-  result.Union(av_);
-  for (int i = 0; i < stmt->cases()->length(); i++) {
-    CaseClause* clause = stmt->cases()->at(i);
-    if (!clause->is_default()) {
-      av_.Clear();
-      Visit(clause->label());
-      result.Union(av_);
-    }
-    VisitStatements(clause->statements());
-  }
-  av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  ProcessExpression(stmt->cond());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
-  ProcessExpression(stmt->cond());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
-  if (stmt->init() != NULL) Visit(stmt->init());
-  if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
-  if (stmt->next() != NULL) Visit(stmt->next());
-
-  // Process loop body. After visiting the loop body av_ contains
-  // the assigned variables of the loop body.
-  BitVector saved_av(av_);
-  av_.Clear();
-  Visit(stmt->body());
-
-  Variable* var = FindSmiLoopVariable(stmt);
-  if (var != NULL && !av_.Contains(BitIndex(var))) {
-    stmt->set_loop_variable(var);
-  }
-  av_.Union(saved_av);
-}
-
-
-void AssignedVariablesAnalyzer::VisitForInStatement(ForInStatement* stmt) {
-  ProcessExpression(stmt->each());
-  ProcessExpression(stmt->enumerable());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryCatchStatement(
-    TryCatchStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->catch_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryFinallyStatement(
-    TryFinallyStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->finally_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDebuggerStatement(
-    DebuggerStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
-  ASSERT(av_.IsEmpty());
-
-  Visit(expr->condition());
-
-  BitVector result(av_);
-  av_.Clear();
-  Visit(expr->then_expression());
-  result.Union(av_);
-
-  av_.Clear();
-  Visit(expr->else_expression());
-  av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitLiteral(Literal* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_.length());
-  for (int i = 0; i < expr->properties()->length(); i++) {
-    Visit(expr->properties()->at(i)->value());
-    result.Union(av_);
-    av_.Clear();
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_.length());
-  for (int i = 0; i < expr->values()->length(); i++) {
-    Visit(expr->values()->at(i));
-    result.Union(av_);
-    av_.Clear();
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
-    CatchExtensionObject* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->key());
-  ProcessExpression(expr->value());
-}
-
-
-void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
-  ASSERT(av_.IsEmpty());
-
-  // There are three kinds of assignments: variable assignments, property
-  // assignments, and reference errors (invalid left-hand sides).
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  Property* prop = expr->target()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-
-  if (var != NULL) {
-    MarkIfTrivial(expr->value());
-    Visit(expr->value());
-    if (expr->is_compound()) {
-      // Left-hand side occurs also as an rvalue.
-      MarkIfTrivial(expr->target());
-      ProcessExpression(expr->target());
-    }
-    RecordAssignedVar(var);
-
-  } else if (prop != NULL) {
-    MarkIfTrivial(expr->value());
-    Visit(expr->value());
-    if (!prop->key()->IsPropertyName()) {
-      MarkIfTrivial(prop->key());
-      ProcessExpression(prop->key());
-    }
-    MarkIfTrivial(prop->obj());
-    ProcessExpression(prop->obj());
-
-  } else {
-    Visit(expr->target());
-  }
-}
-
-
-void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->exception());
-}
-
-
-void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
-  ASSERT(av_.IsEmpty());
-  if (!expr->key()->IsPropertyName()) {
-    MarkIfTrivial(expr->key());
-    Visit(expr->key());
-  }
-  MarkIfTrivial(expr->obj());
-  ProcessExpression(expr->obj());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->expression());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->expression());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  if (expr->is_prefix()) MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-
-  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-  if (var != NULL) RecordAssignedVar(var);
-}
-
-
-void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->right());
-  Visit(expr->right());
-  MarkIfTrivial(expr->left());
-  ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->right());
-  Visit(expr->right());
-  MarkIfTrivial(expr->left());
-  ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
-  UNREACHABLE();
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/data-flow.h b/src/data-flow.h
index 573d7d8..d69d6c7 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "ast.h"
 #include "compiler.h"
 #include "zone-inl.h"
@@ -37,9 +38,6 @@
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
-class Node;
-
 class BitVector: public ZoneObject {
  public:
   // Iterator for the elements of this BitVector.
@@ -201,178 +199,6 @@
   uint32_t* data_;
 };
 
-
-// An implementation of a sparse set whose elements are drawn from integers
-// in the range [0..universe_size[.  It supports constant-time Contains,
-// destructive Add, and destructuve Remove operations and linear-time (in
-// the number of elements) destructive Union.
-class SparseSet: public ZoneObject {
- public:
-  // Iterator for sparse set elements.  Elements should not be added or
-  // removed during iteration.
-  class Iterator BASE_EMBEDDED {
-   public:
-    explicit Iterator(SparseSet* target) : target_(target), current_(0) {
-      ASSERT(++target->iterator_count_ > 0);
-    }
-    ~Iterator() {
-      ASSERT(target_->iterator_count_-- > 0);
-    }
-    bool Done() const { return current_ >= target_->dense_.length(); }
-    void Advance() {
-      ASSERT(!Done());
-      ++current_;
-    }
-    int Current() {
-      ASSERT(!Done());
-      return target_->dense_[current_];
-    }
-
-   private:
-    SparseSet* target_;
-    int current_;
-
-    friend class SparseSet;
-  };
-
-  explicit SparseSet(int universe_size)
-      : dense_(4),
-        sparse_(ZONE->NewArray<int>(universe_size)) {
-#ifdef DEBUG
-    size_ = universe_size;
-    iterator_count_ = 0;
-#endif
-  }
-
-  bool Contains(int n) const {
-    ASSERT(0 <= n && n < size_);
-    int dense_index = sparse_[n];
-    return (0 <= dense_index) &&
-        (dense_index < dense_.length()) &&
-        (dense_[dense_index] == n);
-  }
-
-  void Add(int n) {
-    ASSERT(0 <= n && n < size_);
-    ASSERT(iterator_count_ == 0);
-    if (!Contains(n)) {
-      sparse_[n] = dense_.length();
-      dense_.Add(n);
-    }
-  }
-
-  void Remove(int n) {
-    ASSERT(0 <= n && n < size_);
-    ASSERT(iterator_count_ == 0);
-    if (Contains(n)) {
-      int dense_index = sparse_[n];
-      int last = dense_.RemoveLast();
-      if (dense_index < dense_.length()) {
-        dense_[dense_index] = last;
-        sparse_[last] = dense_index;
-      }
-    }
-  }
-
-  void Union(const SparseSet& other) {
-    for (int i = 0; i < other.dense_.length(); ++i) {
-      Add(other.dense_[i]);
-    }
-  }
-
- private:
-  // The set is implemented as a pair of a growable dense list and an
-  // uninitialized sparse array.
-  ZoneList<int> dense_;
-  int* sparse_;
-#ifdef DEBUG
-  int size_;
-  int iterator_count_;
-#endif
-};
-
-
-// Simple fixed-capacity list-based worklist (managed as a queue) of
-// pointers to T.
-template<typename T>
-class WorkList BASE_EMBEDDED {
- public:
-  // The worklist cannot grow bigger than size.  We keep one item empty to
-  // distinguish between empty and full.
-  explicit WorkList(int size)
-      : capacity_(size + 1), head_(0), tail_(0), queue_(capacity_) {
-    for (int i = 0; i < capacity_; i++) queue_.Add(NULL);
-  }
-
-  bool is_empty() { return head_ == tail_; }
-
-  bool is_full() {
-    // The worklist is full if head is at 0 and tail is at capacity - 1:
-    //   head == 0 && tail == capacity-1 ==> tail - head == capacity - 1
-    // or if tail is immediately to the left of head:
-    //   tail+1 == head  ==> tail - head == -1
-    int diff = tail_ - head_;
-    return (diff == -1 || diff == capacity_ - 1);
-  }
-
-  void Insert(T* item) {
-    ASSERT(!is_full());
-    queue_[tail_++] = item;
-    if (tail_ == capacity_) tail_ = 0;
-  }
-
-  T* Remove() {
-    ASSERT(!is_empty());
-    T* item = queue_[head_++];
-    if (head_ == capacity_) head_ = 0;
-    return item;
-  }
-
- private:
-  int capacity_;  // Including one empty slot.
-  int head_;      // Where the first item is.
-  int tail_;      // Where the next inserted item will go.
-  List<T*> queue_;
-};
-
-
-// Computes the set of assigned variables and annotates variables proxies
-// that are trivial sub-expressions and for-loops where the loop variable
-// is guaranteed to be a smi.
-class AssignedVariablesAnalyzer : public AstVisitor {
- public:
-  static bool Analyze(CompilationInfo* info);
-
- private:
-  AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
-  bool Analyze();
-
-  Variable* FindSmiLoopVariable(ForStatement* stmt);
-
-  int BitIndex(Variable* var);
-
-  void RecordAssignedVar(Variable* var);
-
-  void MarkIfTrivial(Expression* expr);
-
-  // Visits an expression saving the accumulator before, clearing
-  // it before visting and restoring it after visiting.
-  void ProcessExpression(Expression* expr);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
-  CompilationInfo* info_;
-
-  // Accumulator for assigned variables set.
-  BitVector av_;
-
-  DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
-};
-
-
 } }  // namespace v8::internal
 
 
diff --git a/src/date.js b/src/date.js
index 242ab7b..ccefce5 100644
--- a/src/date.js
+++ b/src/date.js
@@ -684,7 +684,7 @@
 
 // ECMA 262 - 15.9.5.16
 function DateGetDay() {
-  var t = %_ValueOf(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
   return WeekDay(LocalTimeNoCheck(t));
 }
@@ -692,7 +692,7 @@
 
 // ECMA 262 - 15.9.5.17
 function DateGetUTCDay() {
-  var t = %_ValueOf(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
   return WeekDay(t);
 }
@@ -981,11 +981,22 @@
 function DateToISOString() {
   var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  return this.getUTCFullYear() + 
+  var year = this.getUTCFullYear();
+  var year_string;
+  if (year >= 0 && year <= 9999) {
+    year_string = PadInt(year, 4);
+  } else {
+    if (year < 0) {
+      year_string = "-" + PadInt(-year, 6);
+    } else {
+      year_string = "+" + PadInt(year, 6);
+    }
+  }
+  return year_string +
       '-' + PadInt(this.getUTCMonth() + 1, 2) +
-      '-' + PadInt(this.getUTCDate(), 2) + 
+      '-' + PadInt(this.getUTCDate(), 2) +
       'T' + PadInt(this.getUTCHours(), 2) +
-      ':' + PadInt(this.getUTCMinutes(), 2) + 
+      ':' + PadInt(this.getUTCMinutes(), 2) +
       ':' + PadInt(this.getUTCSeconds(), 2) +
       '.' + PadInt(this.getUTCMilliseconds(), 3) +
       'Z';
@@ -995,8 +1006,8 @@
 function DateToJSON(key) {
   var o = ToObject(this);
   var tv = DefaultNumber(o);
-  if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) { 
-    return null; 
+  if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
+    return null;
   }
   return o.toISOString();
 }
@@ -1037,18 +1048,19 @@
 
 // -------------------------------------------------------------------
 
-function SetupDate() {
-  // Setup non-enumerable properties of the Date object itself.
+function SetUpDate() {
+  %CheckIsBootstrapping();
+  // Set up non-enumerable properties of the Date object itself.
   InstallFunctions($Date, DONT_ENUM, $Array(
     "UTC", DateUTC,
     "parse", DateParse,
     "now", DateNow
   ));
 
-  // Setup non-enumerable constructor property of the Date prototype object.
+  // Set up non-enumerable constructor property of the Date prototype object.
   %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
 
-  // Setup non-enumerable functions of the Date prototype object and
+  // Set up non-enumerable functions of the Date prototype object and
   // set their names.
   InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
     "toString", DateToString,
@@ -1100,4 +1112,4 @@
   ));
 }
 
-SetupDate();
+SetUpDate();
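A sketch of the extended-year formatting introduced in DateToISOString above: years in 0000..9999 keep the four-digit form, while years outside that range get a signed six-digit form. The printed values are expectations derived from the padding logic, not captured output.

    print(new Date(Date.UTC(2011, 0, 1)).toISOString());
    // expected: 2011-01-01T00:00:00.000Z
    print(new Date(Date.UTC(-1000, 0, 1)).toISOString());
    // expected: -001000-01-01T00:00:00.000Z
    print(new Date(Date.UTC(275760, 8, 13)).toISOString());
    // expected: +275760-09-13T00:00:00.000Z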
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 7f8fac8..32f0f9e 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -39,16 +39,71 @@
                        UnicodeCache* unicode_cache) {
   ASSERT(out->length() >= OUTPUT_SIZE);
   InputReader<Char> in(unicode_cache, str);
+  DateStringTokenizer<Char> scanner(&in);
   TimeZoneComposer tz;
   TimeComposer time;
   DayComposer day;
 
-  while (!in.IsEnd()) {
-    if (in.IsAsciiDigit()) {
-      // Parse a number (possibly with 1 or 2 trailing colons).
-      int n = in.ReadUnsignedNumber();
-      if (in.Skip(':')) {
-        if (in.Skip(':')) {
+  // Specification:
+  // Accept ES5 ISO 8601 date-time-strings or legacy dates compatible
+  // with Safari.
+  // ES5 ISO 8601 dates:
+  //   [('-'|'+')yy]yyyy[-MM[-DD]][THH:mm[:ss[.sss]][Z|(+|-)hh:mm]]
+  //   where yyyy is in the range 0000..9999 and
+  //         +/-yyyyyy is in the range -999999..+999999 -
+  //           but -000000 is invalid (year zero must be positive),
+  //         MM is in the range 01..12,
+  //         DD is in the range 01..31,
+  //         MM and DD default to 01 if missing,
+  //         HH is generally in the range 00..23, but can be 24 if mm, ss
+  //           and sss are zero (or missing), representing midnight at the
+  //           end of a day,
+  //         mm and ss are in the range 00..59,
+  //         sss is in the range 000..999,
+  //         hh is in the range 00..23,
+  //         mm, ss, and sss default to 00 if missing, and
+  //         timezone defaults to Z if missing.
+  //  Extensions:
+  //   We also allow sss to have more or less than three digits (but at
+  //   least one).
+  //   We allow hh:mm to be specified as hhmm.
+  // Legacy dates:
+  //  Any unrecognized word before the first number is ignored.
+  //  Parenthesized text is ignored.
+  //  An unsigned number followed by ':' is a time value, and is
+  //  added to the TimeComposer. A number followed by '::' adds a second
+  //  zero as well. A number followed by '.' is also a time and must be
+  //  followed by milliseconds.
+  //  Any other number is a date component and is added to DayComposer.
+  //  A month name (or really: any word having the same first three letters
+  //  as a month name) is recorded as a named month in the Day composer.
+  //  A word recognizable as a time-zone is recorded as such, as is
+  //  '(+|-)(hhmm|hh:)'.
+  //  Legacy dates don't allow extra signs ('+' or '-') or unmatched ')'
+  //  after a number has been read (before the first number, any garbage
+  //  is allowed).
+  // Intersection of the two:
+  //  A string that matches both formats (e.g. 1970-01-01) will be
+  //  parsed as an ES5 date-time string - which means it will default
+  //  to UTC time-zone. That's unavoidable if following the ES5
+  //  specification.
+  //  After a valid "T" has been read while scanning an ES5 datetime string,
+  //  the input can no longer be a valid legacy date, since the "T" is a
+  //  garbage string after a number has been read.
+
+  // First try getting as far as possible with an ES5 Date Time String.
+  DateToken next_unhandled_token = ParseES5DateTime(&scanner, &day, &time, &tz);
+  if (next_unhandled_token.IsInvalid()) return false;
+  bool has_read_number = !day.IsEmpty();
+  // If there's anything left, continue with the legacy parser.
+  for (DateToken token = next_unhandled_token;
+       !token.IsEndOfInput();
+       token = scanner.Next()) {
+    if (token.IsNumber()) {
+      has_read_number = true;
+      int n = token.number();
+      if (scanner.SkipSymbol(':')) {
+        if (scanner.SkipSymbol(':')) {
           // n + "::"
           if (!time.IsEmpty()) return false;
           time.Add(n);
@@ -56,12 +111,13 @@
         } else {
           // n + ":"
           if (!time.Add(n)) return false;
-          in.Skip('.');
+          if (scanner.Peek().IsSymbol('.')) scanner.Next();
         }
-      } else if (in.Skip('.') && time.IsExpecting(n)) {
+      } else if (scanner.SkipSymbol('.') && time.IsExpecting(n)) {
         time.Add(n);
-        if (!in.IsAsciiDigit()) return false;
-        int n = in.ReadMilliseconds();
+        if (!scanner.Peek().IsNumber()) return false;
+        int n = ReadMilliseconds(scanner.Next());
+        if (n < 0) return false;
         time.AddFinal(n);
       } else if (tz.IsExpecting(n)) {
         tz.SetAbsoluteMinute(n);
@@ -69,59 +125,206 @@
         time.AddFinal(n);
         // Require end, white space, "Z", "+" or "-" immediately after
         // finalizing time.
-        if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z') &&
-            !in.IsAsciiSign()) return false;
+        DateToken peek = scanner.Peek();
+        if (!peek.IsEndOfInput() &&
+            !peek.IsWhiteSpace() &&
+            !peek.IsKeywordZ() &&
+            !peek.IsAsciiSign()) return false;
       } else {
         if (!day.Add(n)) return false;
-        in.Skip('-');  // Ignore suffix '-' for year, month, or day.
-        // Skip trailing 'T' for ECMAScript 5 date string format but make
-        // sure that it is followed by a digit (for the time).
-        if (in.Skip('T') && !in.IsAsciiDigit()) return false;
+        scanner.SkipSymbol('-');
       }
-    } else if (in.IsAsciiAlphaOrAbove()) {
+    } else if (token.IsKeyword()) {
       // Parse a "word" (sequence of chars. >= 'A').
-      uint32_t pre[KeywordTable::kPrefixLength];
-      int len = in.ReadWord(pre, KeywordTable::kPrefixLength);
-      int index = KeywordTable::Lookup(pre, len);
-      KeywordType type = KeywordTable::GetType(index);
-
+      KeywordType type = token.keyword_type();
+      int value = token.keyword_value();
       if (type == AM_PM && !time.IsEmpty()) {
-        time.SetHourOffset(KeywordTable::GetValue(index));
+        time.SetHourOffset(value);
       } else if (type == MONTH_NAME) {
-        day.SetNamedMonth(KeywordTable::GetValue(index));
-        in.Skip('-');  // Ignore suffix '-' for month names
-      } else if (type == TIME_ZONE_NAME && in.HasReadNumber()) {
-        tz.Set(KeywordTable::GetValue(index));
+        day.SetNamedMonth(value);
+        scanner.SkipSymbol('-');
+      } else if (type == TIME_ZONE_NAME && has_read_number) {
+        tz.Set(value);
       } else {
         // Garbage words are illegal if a number has been read.
-        if (in.HasReadNumber()) return false;
+        if (has_read_number) return false;
       }
-    } else if (in.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
+    } else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
       // Parse UTC offset (only after UTC or time).
-      tz.SetSign(in.GetAsciiSignValue());
-      in.Next();
-      int n = in.ReadUnsignedNumber();
-      if (in.Skip(':')) {
+      tz.SetSign(token.ascii_sign());
+      // The following number may be empty.
+      int n = 0;
+      if (scanner.Peek().IsNumber()) {
+        n = scanner.Next().number();
+      }
+      has_read_number = true;
+
+      if (scanner.Peek().IsSymbol(':')) {
         tz.SetAbsoluteHour(n);
         tz.SetAbsoluteMinute(kNone);
       } else {
         tz.SetAbsoluteHour(n / 100);
         tz.SetAbsoluteMinute(n % 100);
       }
-    } else if (in.Is('(')) {
-      // Ignore anything from '(' to a matching ')' or end of string.
-      in.SkipParentheses();
-    } else if ((in.IsAsciiSign() || in.Is(')')) && in.HasReadNumber()) {
+    } else if ((token.IsAsciiSign() || token.IsSymbol(')')) &&
+               has_read_number) {
       // Extra sign or ')' is illegal if a number has been read.
       return false;
     } else {
-      // Ignore other characters.
-      in.Next();
+      // Ignore other characters and whitespace.
     }
   }
+
   return day.Write(out) && time.Write(out) && tz.Write(out);
 }
 
+
+template<typename CharType>
+DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
+  int pre_pos = in_->position();
+  if (in_->IsEnd()) return DateToken::EndOfInput();
+  if (in_->IsAsciiDigit()) {
+    int n = in_->ReadUnsignedNumeral();
+    int length = in_->position() - pre_pos;
+    return DateToken::Number(n, length);
+  }
+  if (in_->Skip(':')) return DateToken::Symbol(':');
+  if (in_->Skip('-')) return DateToken::Symbol('-');
+  if (in_->Skip('+')) return DateToken::Symbol('+');
+  if (in_->Skip('.')) return DateToken::Symbol('.');
+  if (in_->Skip(')')) return DateToken::Symbol(')');
+  if (in_->IsAsciiAlphaOrAbove()) {
+    ASSERT(KeywordTable::kPrefixLength == 3);
+    uint32_t buffer[3] = {0, 0, 0};
+    int length = in_->ReadWord(buffer, 3);
+    int index = KeywordTable::Lookup(buffer, length);
+    return DateToken::Keyword(KeywordTable::GetType(index),
+                              KeywordTable::GetValue(index),
+                              length);
+  }
+  if (in_->SkipWhiteSpace()) {
+    return DateToken::WhiteSpace(in_->position() - pre_pos);
+  }
+  if (in_->SkipParentheses()) {
+    return DateToken::Unknown();
+  }
+  in_->Next();
+  return DateToken::Unknown();
+}
+
+
+template <typename Char>
+DateParser::DateToken DateParser::ParseES5DateTime(
+    DateStringTokenizer<Char>* scanner,
+    DayComposer* day,
+    TimeComposer* time,
+    TimeZoneComposer* tz) {
+  ASSERT(day->IsEmpty());
+  ASSERT(time->IsEmpty());
+  ASSERT(tz->IsEmpty());
+
+  // Parse mandatory date string: [('-'|'+')yy]yyyy['-'MM['-'DD]]
+  if (scanner->Peek().IsAsciiSign()) {
+    // Keep the sign token, so we can pass it back to the legacy
+    // parser if we don't use it.
+    DateToken sign_token = scanner->Next();
+    if (!scanner->Peek().IsFixedLengthNumber(6)) return sign_token;
+    int sign = sign_token.ascii_sign();
+    int year = scanner->Next().number();
+    if (sign < 0 && year == 0) return sign_token;
+    day->Add(sign * year);
+  } else if (scanner->Peek().IsFixedLengthNumber(4)) {
+    day->Add(scanner->Next().number());
+  } else {
+    return scanner->Next();
+  }
+  if (scanner->SkipSymbol('-')) {
+    if (!scanner->Peek().IsFixedLengthNumber(2) ||
+        !DayComposer::IsMonth(scanner->Peek().number())) return scanner->Next();
+    day->Add(scanner->Next().number());
+    if (scanner->SkipSymbol('-')) {
+      if (!scanner->Peek().IsFixedLengthNumber(2) ||
+          !DayComposer::IsDay(scanner->Peek().number())) return scanner->Next();
+      day->Add(scanner->Next().number());
+    }
+  }
+  // Check for optional time string: 'T'HH':'mm[':'ss['.'sss]]Z
+  if (!scanner->Peek().IsKeywordType(TIME_SEPARATOR)) {
+    if (!scanner->Peek().IsEndOfInput()) return scanner->Next();
+  } else {
+    // ES5 Date Time String time part is present.
+    scanner->Next();
+    if (!scanner->Peek().IsFixedLengthNumber(2) ||
+        !Between(scanner->Peek().number(), 0, 24)) {
+      return DateToken::Invalid();
+    }
+    // Allow 24:00[:00[.000]], but no other time starting with 24.
+    bool hour_is_24 = (scanner->Peek().number() == 24);
+    time->Add(scanner->Next().number());
+    if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
+    if (!scanner->Peek().IsFixedLengthNumber(2) ||
+        !TimeComposer::IsMinute(scanner->Peek().number()) ||
+        (hour_is_24 && scanner->Peek().number() > 0)) {
+      return DateToken::Invalid();
+    }
+    time->Add(scanner->Next().number());
+    if (scanner->SkipSymbol(':')) {
+      if (!scanner->Peek().IsFixedLengthNumber(2) ||
+          !TimeComposer::IsSecond(scanner->Peek().number()) ||
+          (hour_is_24 && scanner->Peek().number() > 0)) {
+        return DateToken::Invalid();
+      }
+      time->Add(scanner->Next().number());
+      if (scanner->SkipSymbol('.')) {
+        if (!scanner->Peek().IsNumber() ||
+            (hour_is_24 && scanner->Peek().number() > 0)) {
+          return DateToken::Invalid();
+        }
+        // Allow more or less than the mandated three digits.
+        time->Add(ReadMilliseconds(scanner->Next()));
+      }
+    }
+    // Check for optional timezone designation: 'Z' | ('+'|'-')hh':'mm
+    if (scanner->Peek().IsKeywordZ()) {
+      scanner->Next();
+      tz->Set(0);
+    } else if (scanner->Peek().IsSymbol('+') ||
+               scanner->Peek().IsSymbol('-')) {
+      tz->SetSign(scanner->Next().symbol() == '+' ? 1 : -1);
+      if (scanner->Peek().IsFixedLengthNumber(4)) {
+        // hhmm extension syntax.
+        int hourmin = scanner->Next().number();
+        int hour = hourmin / 100;
+        int min = hourmin % 100;
+        if (!TimeComposer::IsHour(hour) || !TimeComposer::IsMinute(min)) {
+          return DateToken::Invalid();
+        }
+        tz->SetAbsoluteHour(hour);
+        tz->SetAbsoluteMinute(min);
+      } else {
+        // hh:mm standard syntax.
+        if (!scanner->Peek().IsFixedLengthNumber(2) ||
+            !TimeComposer::IsHour(scanner->Peek().number())) {
+          return DateToken::Invalid();
+        }
+        tz->SetAbsoluteHour(scanner->Next().number());
+        if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
+        if (!scanner->Peek().IsFixedLengthNumber(2) ||
+            !TimeComposer::IsMinute(scanner->Peek().number())) {
+          return DateToken::Invalid();
+        }
+        tz->SetAbsoluteMinute(scanner->Next().number());
+      }
+    }
+    if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
+  }
+  // Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
+  if (tz->IsEmpty()) tz->Set(0);
+  day->set_iso_date();
+  return DateToken::EndOfInput();
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_DATEPARSER_INL_H_
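The specification comment in the dateparser-inl.h hunk above describes which strings the combined ES5/legacy parser accepts. A few illustrative expectations (values are what the rules above imply, not captured output):

    // ES5 date-only form: the time zone defaults to Z (UTC).
    print(Date.parse("1970-01-01"));                // expected: 0
    print(Date.parse("1970-01-01T00:00:00.000Z"));  // expected: 0
    // hhmm time-zone extension mentioned in the comment.
    print(Date.parse("1970-01-01T01:00:00+0100"));  // expected: 0
    // Legacy form with an explicit zone still parses.
    print(Date.parse("Jan 1 1970 00:00:00 GMT"));   // expected: 0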
diff --git a/src/dateparser.cc b/src/dateparser.cc
index 6d80488..4a0721f 100644
--- a/src/dateparser.cc
+++ b/src/dateparser.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,7 +44,7 @@
   int day = kNone;
 
   if (named_month_ == kNone) {
-    if (index_ == 3 && !IsDay(comp_[0])) {
+    if (is_iso_date_ || (index_ == 3 && !IsDay(comp_[0]))) {
       // YMD
       year = comp_[0];
       month = comp_[1];
@@ -71,8 +71,10 @@
     }
   }
 
-  if (Between(year, 0, 49)) year += 2000;
-  else if (Between(year, 50, 99)) year += 1900;
+  if (!is_iso_date_) {
+    if (Between(year, 0, 49)) year += 2000;
+    else if (Between(year, 50, 99)) year += 1900;
+  }
 
   if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
 
@@ -151,6 +153,7 @@
   {'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
   {'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
   {'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
+  {'t', '\0', '\0', DateParser::TIME_SEPARATOR, 0},
   {'\0', '\0', '\0', DateParser::INVALID, 0},
 };
 
@@ -175,4 +178,35 @@
 }
 
 
+int DateParser::ReadMilliseconds(DateToken token) {
+  // Read first three significant digits of the original numeral,
+  // as inferred from the value and the number of digits.
+  // I.e., use the number of digits to see if there were
+  // leading zeros.
+  int number = token.number();
+  int length = token.length();
+  if (length < 3) {
+    // Less than three digits. Multiply to put most significant digit
+    // in hundreds position.
+    if (length == 1) {
+      number *= 100;
+    } else if (length == 2) {
+      number *= 10;
+    }
+  } else if (length > 3) {
+    if (length > kMaxSignificantDigits) length = kMaxSignificantDigits;
+    // More than three digits. Divide by 10^(length - 3) to get three
+    // most significant digits.
+    int factor = 1;
+    do {
+      ASSERT(factor <= 100000000);  // factor won't overflow.
+      factor *= 10;
+      length--;
+    } while (length > 3);
+    number /= factor;
+  }
+  return number;
+}
+
+
 } }  // namespace v8::internal
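ReadMilliseconds above normalizes a fractional-seconds numeral of any length to a three-digit millisecond count, using the digit count to account for leading zeros. A JavaScript restatement under the same assumptions (the nine-digit cap applied earlier by ReadUnsignedNumeral is ignored here):

    function readMilliseconds(number, length) {
      if (length == 1) return number * 100;   // ".5"     -> 500
      if (length == 2) return number * 10;    // ".45"    -> 450
      var factor = 1;
      while (length > 3) { factor *= 10; length--; }
      return Math.floor(number / factor);     // ".12345" -> 123
    }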
diff --git a/src/dateparser.h b/src/dateparser.h
index 9d29715..27584ce 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -28,15 +28,14 @@
 #ifndef V8_DATEPARSER_H_
 #define V8_DATEPARSER_H_
 
+#include "allocation.h"
 #include "char-predicates-inl.h"
-#include "scanner-base.h"
 
 namespace v8 {
 namespace internal {
 
 class DateParser : public AllStatic {
  public:
-
   // Parse the string as a date. If parsing succeeds, return true after
   // filling out the output array as follows (all integers are Smis):
   // [0]: year
@@ -60,9 +59,14 @@
   static inline bool Between(int x, int lo, int hi) {
     return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
   }
+
   // Indicates a missing value.
   static const int kNone = kMaxInt;
 
+  // Maximal number of digits used to build the value of a numeral.
+  // Remaining digits are ignored.
+  static const int kMaxSignificantDigits = 9;
+
   // InputReader provides basic string parsing and character classification.
   template <typename Char>
   class InputReader BASE_EMBEDDED {
@@ -70,32 +74,28 @@
     InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
         : index_(0),
           buffer_(s),
-          has_read_number_(false),
           unicode_cache_(unicode_cache) {
       Next();
     }
 
-    // Advance to the next character of the string.
-    void Next() { ch_ = (index_ < buffer_.length()) ? buffer_[index_++] : 0; }
+    int position() { return index_; }
 
-    // Read a string of digits as an unsigned number (cap just below kMaxInt).
-    int ReadUnsignedNumber() {
-      has_read_number_ = true;
-      int n;
-      for (n = 0; IsAsciiDigit() && n < kMaxInt / 10 - 1; Next()) {
-        n = n * 10 + ch_ - '0';
-      }
-      return n;
+    // Advance to the next character of the string.
+    void Next() {
+      ch_ = (index_ < buffer_.length()) ? buffer_[index_] : 0;
+      index_++;
     }
 
-    // Read a string of digits, take the first three or fewer as an unsigned
-    // number of milliseconds, and ignore any digits after the first three.
-    int ReadMilliseconds() {
-      has_read_number_ = true;
+    // Read a string of digits as an unsigned number. Use at most
+    // kMaxSignificantDigits digits to build the value; any further
+    // digits are read and ignored.
+    int ReadUnsignedNumeral() {
       int n = 0;
-      int power;
-      for (power = 100; IsAsciiDigit(); Next(), power = power / 10) {
-        n = n + power * (ch_ - '0');
+      int i = 0;
+      while (IsAsciiDigit()) {
+        if (i < kMaxSignificantDigits) n = n * 10 + ch_ - '0';
+        i++;
+        Next();
       }
       return n;
     }
@@ -150,18 +150,140 @@
     // Return 1 for '+' and -1 for '-'.
     int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
 
-    // Indicates whether any (possibly empty!) numbers have been read.
-    bool HasReadNumber() const { return has_read_number_; }
-
    private:
     int index_;
     Vector<Char> buffer_;
-    bool has_read_number_;
     uint32_t ch_;
     UnicodeCache* unicode_cache_;
   };
 
-  enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
+  enum KeywordType {
+      INVALID, MONTH_NAME, TIME_ZONE_NAME, TIME_SEPARATOR, AM_PM
+  };
+
+  struct DateToken {
+   public:
+    bool IsInvalid() { return tag_ == kInvalidTokenTag; }
+    bool IsUnknown() { return tag_ == kUnknownTokenTag; }
+    bool IsNumber() { return tag_ == kNumberTag; }
+    bool IsSymbol() { return tag_ == kSymbolTag; }
+    bool IsWhiteSpace() { return tag_ == kWhiteSpaceTag; }
+    bool IsEndOfInput() { return tag_ == kEndOfInputTag; }
+    bool IsKeyword() { return tag_ >= kKeywordTagStart; }
+
+    int length() { return length_; }
+
+    int number() {
+      ASSERT(IsNumber());
+      return value_;
+    }
+    KeywordType keyword_type() {
+      ASSERT(IsKeyword());
+      return static_cast<KeywordType>(tag_);
+    }
+    int keyword_value() {
+      ASSERT(IsKeyword());
+      return value_;
+    }
+    char symbol() {
+      ASSERT(IsSymbol());
+      return static_cast<char>(value_);
+    }
+    bool IsSymbol(char symbol) {
+      return IsSymbol() && this->symbol() == symbol;
+    }
+    bool IsKeywordType(KeywordType tag) {
+      return tag_ == tag;
+    }
+    bool IsFixedLengthNumber(int length) {
+      return IsNumber() && length_ == length;
+    }
+    bool IsAsciiSign() {
+      return tag_ == kSymbolTag && (value_ == '-' || value_ == '+');
+    }
+    int ascii_sign() {
+      ASSERT(IsAsciiSign());
+      return 44 - value_;
+    }
+    bool IsKeywordZ() {
+      return IsKeywordType(TIME_ZONE_NAME) && length_ == 1 && value_ == 0;
+    }
+    bool IsUnknown(int character) {
+      return IsUnknown() && value_ == character;
+    }
+    // Factory functions.
+    static DateToken Keyword(KeywordType tag, int value, int length) {
+      return DateToken(tag, length, value);
+    }
+    static DateToken Number(int value, int length) {
+      return DateToken(kNumberTag, length, value);
+    }
+    static DateToken Symbol(char symbol) {
+      return DateToken(kSymbolTag, 1, symbol);
+    }
+    static DateToken EndOfInput() {
+      return DateToken(kEndOfInputTag, 0, -1);
+    }
+    static DateToken WhiteSpace(int length) {
+      return DateToken(kWhiteSpaceTag, length, -1);
+    }
+    static DateToken Unknown() {
+      return DateToken(kUnknownTokenTag, 1, -1);
+    }
+    static DateToken Invalid() {
+      return DateToken(kInvalidTokenTag, 0, -1);
+    }
+
+   private:
+    enum TagType {
+      kInvalidTokenTag = -6,
+      kUnknownTokenTag = -5,
+      kWhiteSpaceTag = -4,
+      kNumberTag = -3,
+      kSymbolTag = -2,
+      kEndOfInputTag = -1,
+      kKeywordTagStart = 0
+    };
+    DateToken(int tag, int length, int value)
+        : tag_(tag),
+          length_(length),
+          value_(value) { }
+
+    int tag_;
+    int length_;  // Number of characters.
+    int value_;
+  };
+
+  template <typename Char>
+  class DateStringTokenizer {
+   public:
+    explicit DateStringTokenizer(InputReader<Char>* in)
+        : in_(in), next_(Scan()) { }
+    DateToken Next() {
+      DateToken result = next_;
+      next_ = Scan();
+      return result;
+    }
+
+    DateToken Peek() {
+      return next_;
+    }
+    bool SkipSymbol(char symbol) {
+      if (next_.IsSymbol(symbol)) {
+        next_ = Scan();
+        return true;
+      }
+      return false;
+    }
+
+   private:
+    DateToken Scan();
+
+    InputReader<Char>* in_;
+    DateToken next_;
+  };
+
+  static int ReadMilliseconds(DateToken number);
 
   // KeywordTable maps names of months, time zones, am/pm to numbers.
   class KeywordTable : public AllStatic {
@@ -200,6 +322,7 @@
     }
     bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
     bool Write(FixedArray* output);
+    bool IsEmpty() { return hour_ == kNone; }
    private:
     int sign_;
     int hour_;
@@ -227,10 +350,11 @@
     bool Write(FixedArray* output);
 
     static bool IsMinute(int x) { return Between(x, 0, 59); }
-   private:
     static bool IsHour(int x) { return Between(x, 0, 23); }
-    static bool IsHour12(int x) { return Between(x, 0, 12); }
     static bool IsSecond(int x) { return Between(x, 0, 59); }
+
+   private:
+    static bool IsHour12(int x) { return Between(x, 0, 12); }
     static bool IsMillisecond(int x) { return Between(x, 0, 999); }
 
     static const int kSize = 4;
@@ -241,22 +365,42 @@
 
   class DayComposer BASE_EMBEDDED {
    public:
-    DayComposer() : index_(0), named_month_(kNone) {}
+    DayComposer() : index_(0), named_month_(kNone), is_iso_date_(false) {}
     bool IsEmpty() const { return index_ == 0; }
     bool Add(int n) {
-      return index_ < kSize ? (comp_[index_++] = n, true) : false;
+      if (index_ < kSize) {
+        comp_[index_] = n;
+        index_++;
+        return true;
+      }
+      return false;
     }
     void SetNamedMonth(int n) { named_month_ = n; }
     bool Write(FixedArray* output);
-   private:
+    void set_iso_date() { is_iso_date_ = true; }
     static bool IsMonth(int x) { return Between(x, 1, 12); }
     static bool IsDay(int x) { return Between(x, 1, 31); }
 
+   private:
     static const int kSize = 3;
     int comp_[kSize];
     int index_;
     int named_month_;
+    // If set, ensures that data is always parsed in year-month-date order.
+    bool is_iso_date_;
   };
+
+  // Tries to parse an ES5 Date Time String. Returns the next token
+  // to continue with in the legacy date string parser. If parsing is
+  // complete, returns DateToken::EndOfInput(). If terminally unsuccessful,
+  // returns DateToken::Invalid(). Otherwise parsing continues in the
+  // legacy parser.
+  template <typename Char>
+  static DateParser::DateToken ParseES5DateTime(
+      DateStringTokenizer<Char>* scanner,
+      DayComposer* day,
+      TimeComposer* time,
+      TimeZoneComposer* tz);
 };
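DateStringTokenizer above is a one-token-lookahead wrapper: Peek() returns the buffered token, Next() returns it and refills the buffer, and SkipSymbol() consumes it only if it matches. A hedged JavaScript re-statement of that pattern (names invented for illustration):

    function Lookahead(scan) {
      this.scan_ = scan;    // scan() produces the next raw token
      this.next_ = scan();  // one token of buffered lookahead
    }
    Lookahead.prototype.peek = function() { return this.next_; };
    Lookahead.prototype.next = function() {
      var result = this.next_;
      this.next_ = this.scan_();
      return result;
    };
    Lookahead.prototype.skipSymbol = function(symbol) {
      if (this.next_.symbol === symbol) { this.next_ = this.scan_(); return true; }
      return false;
    };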
 
 
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 498b88a..591d0b3 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -116,8 +116,8 @@
   }
 
   // Create a new session and hook up the debug message handler.
-  session_ = new DebuggerAgentSession(isolate(), this, client);
-  v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
+  session_ = new DebuggerAgentSession(this, client);
+  isolate_->debugger()->SetMessageHandler(DebuggerAgentMessageHandler);
   session_->Start();
 }
 
@@ -169,7 +169,8 @@
 
   while (true) {
     // Read data from the debugger front end.
-    SmartPointer<char> message = DebuggerAgentUtil::ReceiveMessage(client_);
+    SmartArrayPointer<char> message =
+        DebuggerAgentUtil::ReceiveMessage(client_);
 
     const char* msg = *message;
     bool is_closing_session = (msg == NULL);
@@ -203,7 +204,9 @@
 
     // Send the request received to the debugger.
     v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
-                           len);
+                           len,
+                           NULL,
+                           reinterpret_cast<v8::Isolate*>(agent_->isolate()));
 
     if (is_closing_session) {
       // Session is closed.
@@ -230,7 +233,7 @@
     StrLength(kContentLength);
 
 
-SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
+SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
   int received;
 
   // Read header.
@@ -248,7 +251,7 @@
       received = conn->Receive(&c, 1);
       if (received <= 0) {
         PrintF("Error %d\n", Socket::LastError());
-        return SmartPointer<char>();
+        return SmartArrayPointer<char>();
       }
 
       // Add character to header buffer.
@@ -285,12 +288,12 @@
     if (strcmp(key, kContentLength) == 0) {
       // Get the content length value if present and within a sensible range.
       if (value == NULL || strlen(value) > 7) {
-        return SmartPointer<char>();
+        return SmartArrayPointer<char>();
       }
       for (int i = 0; value[i] != '\0'; i++) {
         // Bail out if illegal data.
         if (value[i] < '0' || value[i] > '9') {
-          return SmartPointer<char>();
+          return SmartArrayPointer<char>();
         }
         content_length = 10 * content_length + (value[i] - '0');
       }
@@ -302,7 +305,7 @@
 
   // Return now if no body.
   if (content_length == 0) {
-    return SmartPointer<char>();
+    return SmartArrayPointer<char>();
   }
 
   // Read body.
@@ -310,11 +313,11 @@
   received = ReceiveAll(conn, buffer, content_length);
   if (received < content_length) {
     PrintF("Error %d\n", Socket::LastError());
-    return SmartPointer<char>();
+    return SmartArrayPointer<char>();
   }
   buffer[content_length] = '\0';
 
-  return SmartPointer<char>(buffer);
+  return SmartArrayPointer<char>(buffer);
 }
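ReceiveMessage above reads an HTTP-style header block terminated by an empty line and uses the Content-Length header to size the JSON body. A sketch of the framing a debugger client would send (the request payload is illustrative):

    var body = '{"seq":1,"type":"request","command":"version"}';
    var framed = 'Content-Length: ' + body.length + '\r\n\r\n' + body;
    // The agent parses the header, reads exactly body.length bytes of body,
    // and forwards the JSON request to the debugger via Debug::SendCommand.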
 
 
diff --git a/src/debug-agent.h b/src/debug-agent.h
index a25002e..a07fb0f 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -43,24 +43,27 @@
 // handles connection from a remote debugger.
 class DebuggerAgent: public Thread {
  public:
-  DebuggerAgent(Isolate* isolate, const char* name, int port)
-      : Thread(isolate, name),
+  DebuggerAgent(const char* name, int port)
+      : Thread(name),
+        isolate_(Isolate::Current()),
         name_(StrDup(name)), port_(port),
         server_(OS::CreateSocket()), terminate_(false),
         session_access_(OS::CreateMutex()), session_(NULL),
         terminate_now_(OS::CreateSemaphore(0)),
         listening_(OS::CreateSemaphore(0)) {
-    ASSERT(Isolate::Current()->debugger_agent_instance() == NULL);
-    Isolate::Current()->set_debugger_agent_instance(this);
+    ASSERT(isolate_->debugger_agent_instance() == NULL);
+    isolate_->set_debugger_agent_instance(this);
   }
   ~DebuggerAgent() {
-     Isolate::Current()->set_debugger_agent_instance(NULL);
+     isolate_->set_debugger_agent_instance(NULL);
      delete server_;
   }
 
   void Shutdown();
   void WaitUntilListening();
 
+  Isolate* isolate() { return isolate_; }
+
  private:
   void Run();
   void CreateSession(Socket* socket);
@@ -68,7 +71,8 @@
   void CloseSession();
   void OnSessionClosed(DebuggerAgentSession* session);
 
-  SmartPointer<const char> name_;  // Name of the embedding application.
+  Isolate* isolate_;
+  SmartArrayPointer<const char> name_;  // Name of the embedding application.
   int port_;  // Port to use for the agent.
   Socket* server_;  // Server socket for listen/accept.
   bool terminate_;  // Termination flag.
@@ -88,8 +92,8 @@
 // debugger and sends debugger events/responses to the remote debugger.
 class DebuggerAgentSession: public Thread {
  public:
-  DebuggerAgentSession(Isolate* isolate, DebuggerAgent* agent, Socket* client)
-      : Thread(isolate, "v8:DbgAgntSessn"),
+  DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
+      : Thread("v8:DbgAgntSessn"),
         agent_(agent), client_(client) {}
 
   void DebuggerMessage(Vector<uint16_t> message);
@@ -113,7 +117,7 @@
   static const char* const kContentLength;
   static const int kContentLengthSize;
 
-  static SmartPointer<char> ReceiveMessage(const Socket* conn);
+  static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
   static bool SendConnectMessage(const Socket* conn,
                                  const char* embedding_host);
   static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index bc0f966..d254ee5 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -68,7 +68,8 @@
 
 // The different script break point types.
 Debug.ScriptBreakPointType = { ScriptId: 0,
-                               ScriptName: 1 };
+                               ScriptName: 1,
+                               ScriptRegExp: 2 };
 
 function ScriptTypeFlag(type) {
   return (1 << type);
@@ -255,8 +256,12 @@
   this.type_ = type;
   if (type == Debug.ScriptBreakPointType.ScriptId) {
     this.script_id_ = script_id_or_name;
-  } else {  // type == Debug.ScriptBreakPointType.ScriptName
+  } else if (type == Debug.ScriptBreakPointType.ScriptName) {
     this.script_name_ = script_id_or_name;
+  } else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
+    this.script_regexp_object_ = new RegExp(script_id_or_name);
+  } else {
+    throw new Error("Unexpected breakpoint type " + type);
   }
   this.line_ = opt_line || 0;
   this.column_ = opt_column;
@@ -309,6 +314,11 @@
 };
 
 
+ScriptBreakPoint.prototype.script_regexp_object = function() {
+  return this.script_regexp_object_;
+};
+
+
 ScriptBreakPoint.prototype.line = function() {
   return this.line_;
 };
@@ -384,10 +394,19 @@
 ScriptBreakPoint.prototype.matchesScript = function(script) {
   if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
     return this.script_id_ == script.id;
-  } else {  // this.type_ == Debug.ScriptBreakPointType.ScriptName
-    return this.script_name_ == script.nameOrSourceURL() &&
-           script.line_offset <= this.line_  &&
-           this.line_ < script.line_offset + script.lineCount();
+  } else {
+    // We might want to account for columns here as well.
+    if (!(script.line_offset <= this.line_  &&
+          this.line_ < script.line_offset + script.lineCount())) {
+      return false;
+    }
+    if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
+      return this.script_name_ == script.nameOrSourceURL();
+    } else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
+      return this.script_regexp_object_.test(script.nameOrSourceURL());
+    } else {
+      throw new Error("Unexpected breakpoint type " + this.type_);
+    }
   }
 };
 
@@ -431,7 +450,8 @@
   }
   var actual_location = script.locationFromPosition(actual_position, true);
   break_point.actual_location = { line: actual_location.line,
-                                  column: actual_location.column };
+                                  column: actual_location.column,
+                                  script_id: script.id };
   this.break_points_.push(break_point);
   return break_point;
 };
@@ -644,7 +664,8 @@
     actual_position += this.sourcePosition(func);
     var actual_location = script.locationFromPosition(actual_position, true);
     break_point.actual_location = { line: actual_location.line,
-                                    column: actual_location.column };
+                                    column: actual_location.column,
+                                    script_id: script.id };
     break_point.setCondition(opt_condition);
     return break_point.number();
   }
@@ -799,6 +820,15 @@
 }
 
 
+Debug.setScriptBreakPointByRegExp = function(script_regexp,
+                                             opt_line, opt_column,
+                                             opt_condition, opt_groupId) {
+  return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
+                                  script_regexp, opt_line, opt_column,
+                                  opt_condition, opt_groupId);
+}
+
+
 Debug.enableScriptBreakPoint = function(break_point_number) {
   var script_break_point = this.findScriptBreakPoint(break_point_number, false);
   script_break_point.enable();
@@ -1335,7 +1365,7 @@
   try {
     try {
       // Convert the JSON string to an object.
-      request = %CompileString('(' + json_request + ')')();
+      request = JSON.parse(json_request);
 
       // Create an initial response.
       response = this.createResponse(request);
@@ -1549,11 +1579,6 @@
     response.failed('Missing argument "type" or "target"');
     return;
   }
-  if (type != 'function' && type != 'handle' &&
-      type != 'script' && type != 'scriptId') {
-    response.failed('Illegal type "' + type + '"');
-    return;
-  }
 
   // Either function or script break point.
   var break_point_number;
@@ -1598,9 +1623,16 @@
     break_point_number =
         Debug.setScriptBreakPointByName(target, line, column, condition,
                                         groupId);
-  } else {  // type == 'scriptId.
+  } else if (type == 'scriptId') {
     break_point_number =
         Debug.setScriptBreakPointById(target, line, column, condition, groupId);
+  } else if (type == 'scriptRegExp') {
+    break_point_number =
+        Debug.setScriptBreakPointByRegExp(target, line, column, condition,
+                                          groupId);
+  } else {
+    response.failed('Illegal type "' + type + '"');
+    return;
   }
 
   // Set additional break point properties.
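The new 'scriptRegExp' branch above lets a single request set a break point in every script whose name matches a regular expression. A hypothetical setbreakpoint payload (field values invented for illustration; the handler routes it to Debug.setScriptBreakPointByRegExp):

  { "seq": 117,
    "type": "request",
    "command": "setbreakpoint",
    "arguments": { "type": "scriptRegExp",
                   "target": "frontend/.*\\.js$",
                   "line": 12 } }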
@@ -1621,9 +1653,14 @@
     if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
       response.body.type = 'scriptId';
       response.body.script_id = break_point.script_id();
-    } else {
+    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
       response.body.type = 'scriptName';
       response.body.script_name = break_point.script_name();
+    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+      response.body.type = 'scriptRegExp';
+      response.body.script_regexp = break_point.script_regexp_object().source;
+    } else {
+      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
     }
     response.body.line = break_point.line();
     response.body.column = break_point.column();
@@ -1753,9 +1790,14 @@
     if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
       description.type = 'scriptId';
       description.script_id = break_point.script_id();
-    } else {
+    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
       description.type = 'scriptName';
       description.script_name = break_point.script_name();
+    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+      description.type = 'scriptRegExp';
+      description.script_regexp = break_point.script_regexp_object().source;
+    } else {
+      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
     }
     array.push(description);
   }
@@ -1796,7 +1838,7 @@
     enabled = !Debug.isBreakOnException();
   } else if (type == 'uncaught') {
     enabled = !Debug.isBreakOnUncaughtException();
-  }  
+  }
 
   // Pull out and check the 'enabled' argument if present:
   if (!IS_UNDEFINED(request.arguments.enabled)) {
@@ -1980,22 +2022,22 @@
   if (!IS_UNDEFINED(frame) && global) {
     return response.failed('Arguments "frame" and "global" are exclusive');
   }
-  
+
   var additional_context_object;
   if (additional_context) {
     additional_context_object = {};
     for (var i = 0; i < additional_context.length; i++) {
       var mapping = additional_context[i];
       if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
-        return response.failed("Context element #" + i + 
+        return response.failed("Context element #" + i +
             " must contain name:string and handle:number");
-      } 
+      }
       var context_value_mirror = LookupMirror(mapping.handle);
       if (!context_value_mirror) {
         return response.failed("Context object '" + mapping.name +
             "' #" + mapping.handle + "# not found");
       }
-      additional_context_object[mapping.name] = context_value_mirror.value(); 
+      additional_context_object[mapping.name] = context_value_mirror.value();
     }
   }
 
@@ -2269,21 +2311,10 @@
 
 
 DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
-  if (!request.arguments) {
-    return response.failed('Missing arguments');
-  }
-  var modules = parseInt(request.arguments.modules);
-  if (isNaN(modules)) {
-    return response.failed('Modules is not an integer');
-  }
-  var tag = parseInt(request.arguments.tag);
-  if (isNaN(tag)) {
-    tag = 0;
-  }
   if (request.arguments.command == 'resume') {
-    %ProfilerResume(modules, tag);
+    %ProfilerResume();
   } else if (request.arguments.command == 'pause') {
-    %ProfilerPause(modules, tag);
+    %ProfilerPause();
   } else {
     return response.failed('Unknown command');
   }
diff --git a/src/debug.cc b/src/debug.cc
index 6f0431c..20cd802 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,6 +40,7 @@
 #include "global-handles.h"
 #include "ic.h"
 #include "ic-inl.h"
+#include "list.h"
 #include "messages.h"
 #include "natives.h"
 #include "stub-cache.h"
@@ -92,7 +93,7 @@
 }
 
 
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc,  Code::Kind kind) {
+static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
   Isolate* isolate = Isolate::Current();
   CALL_HEAP_FUNCTION(
       isolate,
@@ -167,8 +168,10 @@
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
       if ((code->is_inline_cache_stub() &&
-           !code->is_type_recording_binary_op_stub() &&
-           !code->is_compare_ic_stub()) ||
+           !code->is_binary_op_stub() &&
+           !code->is_unary_op_stub() &&
+           !code->is_compare_ic_stub() &&
+           !code->is_to_boolean_ic_stub()) ||
           RelocInfo::IsConstructCall(rmode())) {
         break_point_++;
         return;
@@ -477,21 +480,6 @@
     // calling convention used by the call site.
     Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
     rinfo()->set_target_address(dbgbrk_code->entry());
-
-    // For stubs that refer back to an inlined version clear the cached map for
-    // the inlined case to always go through the IC. As long as the break point
-    // is set the patching performed by the runtime system will take place in
-    // the code copy and will therefore have no effect on the running code
-    // keeping it from using the inlined code.
-    if (code->is_keyed_load_stub()) {
-      KeyedLoadIC::ClearInlinedVersion(pc());
-    } else if (code->is_keyed_store_stub()) {
-      KeyedStoreIC::ClearInlinedVersion(pc());
-    } else if (code->is_load_stub()) {
-      LoadIC::ClearInlinedVersion(pc());
-    } else if (code->is_store_stub()) {
-      StoreIC::ClearInlinedVersion(pc());
-    }
   }
 }
 
@@ -499,20 +487,6 @@
 void BreakLocationIterator::ClearDebugBreakAtIC() {
   // Patch the code to the original invoke.
   rinfo()->set_target_address(original_rinfo()->target_address());
-
-  RelocInfo::Mode mode = rmode();
-  if (RelocInfo::IsCodeTarget(mode)) {
-    AssertNoAllocation nogc;
-    Address target = original_rinfo()->target_address();
-    Code* code = Code::GetCodeFromTargetAddress(target);
-
-    // Restore the inlined version of keyed stores to get back to the
-    // fast case.  We need to patch back the keyed store because no
-    // patching happens when running normally.  For keyed loads, the
-    // map check will get patched back when running normally after ICs
-    // have been cleared at GC.
-    if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
-  }
 }
 
 
@@ -569,6 +543,7 @@
   thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
   thread_local_.step_count_ = 0;
   thread_local_.last_fp_ = 0;
+  thread_local_.queued_step_count_ = 0;
   thread_local_.step_into_fp_ = 0;
   thread_local_.step_out_fp_ = 0;
   thread_local_.after_break_target_ = 0;
@@ -800,9 +775,9 @@
   bool caught_exception = false;
   Handle<JSFunction> function =
       factory->NewFunctionFromSharedFunctionInfo(function_info, context);
-  Handle<Object> result =
-      Execution::TryCall(function, Handle<Object>(context->global()),
-                         0, NULL, &caught_exception);
+
+  Execution::TryCall(function, Handle<Object>(context->global()),
+                     0, NULL, &caught_exception);
 
   // Check for caught exceptions.
   if (caught_exception) {
@@ -824,7 +799,6 @@
   // Return if debugger is already loaded.
   if (IsLoaded()) return true;
 
-  ASSERT(Isolate::Current() == isolate_);
   Debugger* debugger = isolate_->debugger();
 
   // Bail out if we're already in the process of compiling the native
@@ -843,6 +817,7 @@
   HandleScope scope(isolate_);
   Handle<Context> context =
       isolate_->bootstrapper()->CreateEnvironment(
+          isolate_,
           Handle<Object>::null(),
           v8::Handle<ObjectTemplate>(),
           NULL);
@@ -984,14 +959,49 @@
     // Clear all current stepping setup.
     ClearStepping();
 
-    // Notify the debug event listeners.
-    isolate_->debugger()->OnDebugBreak(break_points_hit, false);
+    if (thread_local_.queued_step_count_ > 0) {
+      // Perform queued steps
+      int step_count = thread_local_.queued_step_count_;
+
+      // Clear queue
+      thread_local_.queued_step_count_ = 0;
+
+      PrepareStep(StepNext, step_count);
+    } else {
+      // Notify the debug event listeners.
+      isolate_->debugger()->OnDebugBreak(break_points_hit, false);
+    }
   } else if (thread_local_.last_step_action_ != StepNone) {
     // Hold on to last step action as it is cleared by the call to
     // ClearStepping.
     StepAction step_action = thread_local_.last_step_action_;
     int step_count = thread_local_.step_count_;
 
+    // If StepNext goes deeper in code, StepOut until original frame
+    // and keep step count queued up in the meantime.
+    if (step_action == StepNext && frame->fp() < thread_local_.last_fp_) {
+      // Count frames until target frame
+      int count = 0;
+      JavaScriptFrameIterator it(isolate_);
+      while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
+        count++;
+        it.Advance();
+      }
+
+      // If we found original frame
+      if (it.frame()->fp() == thread_local_.last_fp_) {
+        if (step_count > 1) {
+          // Save old count and action to continue stepping after
+          // StepOut
+          thread_local_.queued_step_count_ = step_count - 1;
+        }
+
+        // Set up for StepOut to reach target frame
+        step_action = StepOut;
+        step_count = count;
+      }
+    }
+
     // Clear all current stepping setup.
     ClearStepping();
 
@@ -1017,6 +1027,11 @@
   } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_DIRECT_CALL) {
     // Nothing to do, after_break_target is not used here.
+  } else if (thread_local_.frame_drop_mode_ ==
+      FRAME_DROPPED_IN_RETURN_CALL) {
+    Code* plain_return = isolate_->builtins()->builtin(
+        Builtins::kFrameDropper_LiveEdit);
+    thread_local_.after_break_target_ = plain_return->entry();
   } else {
     UNREACHABLE();
   }
@@ -1070,7 +1085,6 @@
 
 // Check whether a single break point object is triggered.
 bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
-  ASSERT(Isolate::Current() == isolate_);
   Factory* factory = isolate_->factory();
   HandleScope scope(isolate_);
 
@@ -1128,6 +1142,8 @@
                           int* source_position) {
   HandleScope scope(isolate_);
 
+  PrepareForBreakPoints();
+
   if (!EnsureDebugInfo(shared)) {
     // Return if retrieving debug info failed.
     return;
@@ -1201,6 +1217,7 @@
 
 
 void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+  PrepareForBreakPoints();
   // Make sure the function has setup the debug info.
   if (!EnsureDebugInfo(shared)) {
     // Return if we failed to retrieve the debug info.
@@ -1256,8 +1273,10 @@
 
 
 void Debug::PrepareStep(StepAction step_action, int step_count) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
+
+  PrepareForBreakPoints();
+
   ASSERT(Debug::InDebugger());
 
   // Remember this step action and count.
@@ -1472,6 +1491,13 @@
 // steps before reporting break back to the debugger.
 bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
                              JavaScriptFrame* frame) {
+  // StepNext and StepOut shouldn't bring us deeper in code, so last frame
+  // shouldn't be a parent of current frame.
+  if (thread_local_.last_step_action_ == StepNext ||
+      thread_local_.last_step_action_ == StepOut) {
+    if (frame->fp() < thread_local_.last_fp_) return true;
+  }
+
   // If the step last action was step next or step in make sure that a new
   // statement is hit.
   if (thread_local_.last_step_action_ == StepNext ||
@@ -1700,19 +1726,25 @@
 }
 
 
-// Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
-  // Return if we already have the debug info for shared.
-  if (HasDebugInfo(shared)) return true;
-
-  // Ensure shared in compiled. Return false if this failed.
-  if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
-
+void Debug::PrepareForBreakPoints() {
   // If preparing for the first break point make sure to deoptimize all
   // functions as debugging does not work with optimized code.
   if (!has_break_points_) {
     Deoptimizer::DeoptimizeAll();
   }
+}
+
+
+// Ensures the debug information is present for shared.
+bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
+  // Return if we already have the debug info for shared.
+  if (HasDebugInfo(shared)) {
+    ASSERT(shared->is_compiled());
+    return true;
+  }
+
+  // Ensure shared is compiled. Return false if this failed.
+  if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
 
   // Create the debug info object.
   Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
@@ -1761,9 +1793,10 @@
 
 
 void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
 
+  PrepareForBreakPoints();
+
   // Get the executing function in which the debug break occurred.
   Handle<SharedFunctionInfo> shared =
       Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
@@ -1847,6 +1880,15 @@
 bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
   HandleScope scope(isolate_);
 
+  // If there are no break points this cannot be a break at return, as
+  // the debugger statement and stack guard debug break cannot be at
+  // return.
+  if (!has_break_points_) {
+    return false;
+  }
+
+  PrepareForBreakPoints();
+
   // Get the executing function in which the debug break occurred.
   Handle<SharedFunctionInfo> shared =
       Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
@@ -1894,7 +1936,6 @@
 
 
 void Debug::ClearMirrorCache() {
-  ASSERT(Isolate::Current() == isolate_);
   PostponeInterruptsScope postpone(isolate_);
   HandleScope scope(isolate_);
   ASSERT(isolate_->context() == *Debug::debug_context());
@@ -1906,15 +1947,13 @@
       *function_name));
   ASSERT(fun->IsJSFunction());
   bool caught_exception;
-  Handle<Object> js_object = Execution::TryCall(
-      Handle<JSFunction>::cast(fun),
+  Execution::TryCall(Handle<JSFunction>::cast(fun),
       Handle<JSObject>(Debug::debug_context()->global()),
       0, NULL, &caught_exception);
 }
 
 
 void Debug::CreateScriptCache() {
-  ASSERT(Isolate::Current() == isolate_);
   Heap* heap = isolate_->heap();
   HandleScope scope(isolate_);
 
@@ -1956,7 +1995,6 @@
 
 
 Handle<FixedArray> Debug::GetLoadedScripts() {
-  ASSERT(Isolate::Current() == isolate_);
   // Create and fill the script cache when the loaded scripts is requested for
   // the first time.
   if (script_cache_ == NULL) {
@@ -2001,9 +2039,9 @@
       message_dispatch_helper_thread_(NULL),
       host_dispatch_micros_(100 * 1000),
       agent_(NULL),
-      command_queue_(kQueueInitialSize),
+      command_queue_(isolate->logger(), kQueueInitialSize),
       command_received_(OS::CreateSemaphore(0)),
-      event_command_queue_(kQueueInitialSize),
+      event_command_queue_(isolate->logger(), kQueueInitialSize),
       isolate_(isolate) {
 }
 
@@ -2019,7 +2057,6 @@
 Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
                                       int argc, Object*** argv,
                                       bool* caught_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
 
   // Create the execution state object.
@@ -2041,7 +2078,6 @@
 
 
 Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   // Create the execution state object.
   Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
       isolate_->debug()->break_id());
@@ -2055,7 +2091,6 @@
 Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
                                         Handle<Object> break_points_hit,
                                         bool* caught_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   // Create the new break event object.
   const int argc = 2;
   Object** argv[argc] = { exec_state.location(),
@@ -2071,7 +2106,6 @@
                                             Handle<Object> exception,
                                             bool uncaught,
                                             bool* caught_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   Factory* factory = isolate_->factory();
   // Create the new exception event object.
   const int argc = 3;
@@ -2086,7 +2120,6 @@
 
 Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
                                               bool* caught_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   // Create the new function event object.
   const int argc = 1;
   Object** argv[argc] = { function.location() };
@@ -2098,7 +2131,6 @@
 Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
                                           bool before,
                                           bool* caught_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   Factory* factory = isolate_->factory();
   // Create the compile event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
@@ -2118,7 +2150,6 @@
 
 Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
                                                   bool* caught_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   // Create the script collected event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
   Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
@@ -2133,7 +2164,6 @@
 
 
 void Debugger::OnException(Handle<Object> exception, bool uncaught) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
   Debug* debug = isolate_->debug();
 
@@ -2178,7 +2208,6 @@
 
 void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
                             bool auto_continue) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
 
   // Debugger has already been entered by caller.
@@ -2188,7 +2217,7 @@
   if (!Debugger::EventActive(v8::Break)) return;
 
   // Debugger must be entered in advance.
-  ASSERT(Isolate::Current()->context() == *isolate_->debug()->debug_context());
+  ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
 
   // Create the event data object.
   bool caught_exception = false;
@@ -2211,7 +2240,6 @@
 
 
 void Debugger::OnBeforeCompile(Handle<Script> script) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
 
   // Bail out based on state or if there is no listener for this event
@@ -2241,7 +2269,6 @@
 // Handle debugger actions when a new script is compiled.
 void Debugger::OnAfterCompile(Handle<Script> script,
                               AfterCompileFlags after_compile_flags) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
   Debug* debug = isolate_->debug();
 
@@ -2283,8 +2310,7 @@
   bool caught_exception = false;
   const int argc = 1;
   Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
-  Handle<Object> result = Execution::TryCall(
-      Handle<JSFunction>::cast(update_script_break_points),
+  Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
       Isolate::Current()->js_builtins_object(), argc, argv,
       &caught_exception);
   if (caught_exception) {
@@ -2310,7 +2336,6 @@
 
 
 void Debugger::OnScriptCollected(int id) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
 
   // No more to do if not debugging.
@@ -2340,7 +2365,6 @@
 void Debugger::ProcessDebugEvent(v8::DebugEvent event,
                                  Handle<JSObject> event_data,
                                  bool auto_continue) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
 
   // Clear any pending debug break if this is a real break.
@@ -2387,7 +2411,7 @@
                                  Handle<Object> exec_state,
                                  Handle<Object> event_data,
                                  v8::Debug::ClientData* client_data) {
-  if (event_listener_->IsProxy()) {
+  if (event_listener_->IsForeign()) {
     CallCEventCallback(event, exec_state, event_data, client_data);
   } else {
     CallJSEventCallback(event, exec_state, event_data);
@@ -2399,9 +2423,9 @@
                                   Handle<Object> exec_state,
                                   Handle<Object> event_data,
                                   v8::Debug::ClientData* client_data) {
-  Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
+  Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
   v8::Debug::EventCallback2 callback =
-      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
+      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
   EventDetailsImpl event_details(
       event,
       Handle<JSObject>::cast(exec_state),
@@ -2416,7 +2440,6 @@
                                    Handle<Object> exec_state,
                                    Handle<Object> event_data) {
   ASSERT(event_listener_->IsJSFunction());
-  ASSERT(Isolate::Current() == isolate_);
   Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
 
   // Invoke the JavaScript debug event listener.
@@ -2432,7 +2455,6 @@
 
 
 Handle<Context> Debugger::GetDebugContext() {
-  ASSERT(Isolate::Current() == isolate_);
   never_unload_debugger_ = true;
   EnterDebugger debugger;
   return isolate_->debug()->debug_context();
@@ -2440,7 +2462,6 @@
 
 
 void Debugger::UnloadDebugger() {
-  ASSERT(Isolate::Current() == isolate_);
   Debug* debug = isolate_->debug();
 
   // Make sure that there are no breakpoints left.
@@ -2460,7 +2481,6 @@
                                     Handle<JSObject> exec_state,
                                     Handle<JSObject> event_data,
                                     bool auto_continue) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
 
   if (!isolate_->debug()->Load()) return;
@@ -2556,7 +2576,8 @@
 
     // Get the command from the queue.
     CommandMessage command = command_queue_.Get();
-    LOGGER->DebugTag("Got request from command queue, in interactive loop.");
+    isolate_->logger()->DebugTag(
+        "Got request from command queue, in interactive loop.");
     if (!Debugger::IsDebuggerActive()) {
       // Delete command text and user data.
       command.Dispose();
@@ -2630,7 +2651,6 @@
 
 void Debugger::SetEventListener(Handle<Object> callback,
                                 Handle<Object> data) {
-  ASSERT(Isolate::Current() == isolate_);
   HandleScope scope(isolate_);
   GlobalHandles* global_handles = isolate_->global_handles();
 
@@ -2664,7 +2684,6 @@
 
 
 void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
-  ASSERT(Isolate::Current() == isolate_);
   ScopedLock with(debugger_access_);
 
   message_handler_ = handler;
@@ -2680,7 +2699,6 @@
 
 
 void Debugger::ListenersChanged() {
-  ASSERT(Isolate::Current() == isolate_);
   if (IsDebuggerActive()) {
     // Disable the compilation cache when the debugger is active.
     isolate_->compilation_cache()->Disable();
@@ -2696,7 +2714,6 @@
 
 void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
                                       int period) {
-  ASSERT(Isolate::Current() == isolate_);
   host_dispatch_handler_ = handler;
   host_dispatch_micros_ = period * 1000;
 }
@@ -2704,7 +2721,6 @@
 
 void Debugger::SetDebugMessageDispatchHandler(
     v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
-  ASSERT(Isolate::Current() == isolate_);
   ScopedLock with(dispatch_handler_access_);
   debug_message_dispatch_handler_ = handler;
 
@@ -2718,7 +2734,6 @@
 // Calls the registered debug message handler. This callback is part of the
 // public API.
 void Debugger::InvokeMessageHandler(MessageImpl message) {
-  ASSERT(Isolate::Current() == isolate_);
   ScopedLock with(debugger_access_);
 
   if (message_handler_ != NULL) {
@@ -2733,13 +2748,12 @@
 // by the API client thread.
 void Debugger::ProcessCommand(Vector<const uint16_t> command,
                               v8::Debug::ClientData* client_data) {
-  ASSERT(Isolate::Current() == isolate_);
   // Need to cast away const.
   CommandMessage message = CommandMessage::New(
       Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
                        command.length()),
       client_data);
-  LOGGER->DebugTag("Put command on command_queue.");
+  isolate_->logger()->DebugTag("Put command on command_queue.");
   command_queue_.Put(message);
   command_received_->Signal();
 
@@ -2763,13 +2777,11 @@
 
 
 bool Debugger::HasCommands() {
-  ASSERT(Isolate::Current() == isolate_);
   return !command_queue_.IsEmpty();
 }
 
 
 void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
-  ASSERT(Isolate::Current() == isolate_);
   CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
   event_command_queue_.Put(message);
 
@@ -2781,7 +2793,6 @@
 
 
 bool Debugger::IsDebuggerActive() {
-  ASSERT(Isolate::Current() == isolate_);
   ScopedLock with(debugger_access_);
 
   return message_handler_ != NULL || !event_listener_.is_null();
@@ -2791,7 +2802,6 @@
 Handle<Object> Debugger::Call(Handle<JSFunction> fun,
                               Handle<Object> data,
                               bool* pending_exception) {
-  ASSERT(Isolate::Current() == isolate_);
   // When calling functions in the debugger, prevent it from being unloaded.
   Debugger::never_unload_debugger_ = true;
 
@@ -2841,7 +2851,7 @@
 
   if (Socket::Setup()) {
     if (agent_ == NULL) {
-      agent_ = new DebuggerAgent(isolate_, name, port);
+      agent_ = new DebuggerAgent(name, port);
       agent_->Start();
     }
     return true;
@@ -2870,7 +2880,6 @@
 
 
 void Debugger::CallMessageDispatchHandler() {
-  ASSERT(Isolate::Current() == isolate_);
   v8::Debug::DebugMessageDispatchHandler handler;
   {
     ScopedLock with(dispatch_handler_access_);
@@ -2978,7 +2987,7 @@
   v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
   // Isolate::context() may be NULL when the "script collected" event occurs.
   ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
-  return GetDebugEventContext(isolate);
+  return context;
 }
 
 
@@ -3104,8 +3113,8 @@
 }
 
 
-LockingCommandMessageQueue::LockingCommandMessageQueue(int size)
-    : queue_(size) {
+LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
+    : logger_(logger), queue_(size) {
   lock_ = OS::CreateMutex();
 }
 
@@ -3124,7 +3133,7 @@
 CommandMessage LockingCommandMessageQueue::Get() {
   ScopedLock sl(lock_);
   CommandMessage result = queue_.Get();
-  LOGGER->DebugEvent("Get", result.text());
+  logger_->DebugEvent("Get", result.text());
   return result;
 }
 
@@ -3132,7 +3141,7 @@
 void LockingCommandMessageQueue::Put(const CommandMessage& message) {
   ScopedLock sl(lock_);
   queue_.Put(message);
-  LOGGER->DebugEvent("Put", message.text());
+  logger_->DebugEvent("Put", message.text());
 }
 
 
@@ -3143,7 +3152,7 @@
 
 
 MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
-    : Thread(isolate, "v8:MsgDispHelpr"),
+    : Thread("v8:MsgDispHelpr"),
       sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
       already_signalled_(false) {
 }
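A recurring change in debug.cc above is the retirement of the process-global LOGGER macro in favor of an explicitly passed Logger*: the command queues are constructed with isolate->logger() and log through the stored pointer. An illustrative fragment of the resulting usage, assuming the surrounding Debugger code (not a complete program):

  // Fragment mirroring the constructor and ProcessCommand hunks above;
  // command_vector and client_data stand in for the caller's data.
  LockingCommandMessageQueue command_queue(isolate->logger(), kQueueInitialSize);
  CommandMessage message = CommandMessage::New(command_vector, client_data);
  command_queue.Put(message);  // logged as "Put" through this isolate's logger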
diff --git a/src/debug.h b/src/debug.h
index 6be33a6..a5083eb 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,12 +28,14 @@
 #ifndef V8_DEBUG_H_
 #define V8_DEBUG_H_
 
+#include "allocation.h"
 #include "arguments.h"
 #include "assembler.h"
 #include "debug-agent.h"
 #include "execution.h"
 #include "factory.h"
 #include "flags.h"
+#include "frames-inl.h"
 #include "hashmap.h"
 #include "platform.h"
 #include "string-stream.h"
@@ -176,7 +178,9 @@
 
  private:
   // Calculate the hash value from the key (script id).
-  static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
+  static uint32_t Hash(int key) {
+    return ComputeIntegerHash(key, v8::internal::kZeroHashSeed);
+  }
 
   // Scripts match if their keys (script id) match.
   static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
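The script-cache hash now threads through an explicit seed (kZeroHashSeed), presumably to line up with the seeded hashing used elsewhere in the VM. A standalone illustration of what a seeded integer hash of this shape looks like; the mixing steps are made up for the sketch and are not V8's actual ComputeIntegerHash:

  #include <stdint.h>

  // Illustrative seeded integer hash: fold the seed in first, then mix.
  static uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
    uint32_t hash = key ^ seed;
    hash = ~hash + (hash << 15);
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;
    hash = hash ^ (hash >> 16);
    return hash;
  }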
@@ -245,6 +249,8 @@
   static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
   static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
 
+  void PrepareForBreakPoints();
+
   // Returns whether the operation succeeded.
   bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
 
@@ -422,7 +428,8 @@
     FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
     // The top JS frame had been calling some C++ function. The return address
     // gets patched automatically.
-    FRAME_DROPPED_IN_DIRECT_CALL
+    FRAME_DROPPED_IN_DIRECT_CALL,
+    FRAME_DROPPED_IN_RETURN_CALL
   };
 
   void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
@@ -503,6 +510,9 @@
     // Frame pointer from last step next action.
     Address last_fp_;
 
+    // Number of queued steps left to perform before debug event.
+    int queued_step_count_;
+
     // Frame pointer for frame from which step in was performed.
     Address step_into_fp_;
 
@@ -676,13 +686,14 @@
 // Mutex to CommandMessageQueue.  Includes logging of all puts and gets.
 class LockingCommandMessageQueue BASE_EMBEDDED {
  public:
-  explicit LockingCommandMessageQueue(int size);
+  LockingCommandMessageQueue(Logger* logger, int size);
   ~LockingCommandMessageQueue();
   bool IsEmpty() const;
   CommandMessage Get();
   void Put(const CommandMessage& message);
   void Clear();
  private:
+  Logger* logger_;
   CommandMessageQueue queue_;
   Mutex* lock_;
   DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
@@ -1022,6 +1033,7 @@
         return NULL;
     }
   }
+
  private:
   Debug::AddressId id_;
 };
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 2fc0e47..0ada28b 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,6 +44,9 @@
   lazy_deoptimization_entry_code_ = NULL;
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  deoptimized_frame_info_ = NULL;
+#endif
 }
 
 
@@ -58,6 +61,16 @@
   }
 }
 
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void DeoptimizerData::Iterate(ObjectVisitor* v) {
+  if (deoptimized_frame_info_ != NULL) {
+    deoptimized_frame_info_->Iterate(v);
+  }
+}
+#endif
+
+
 Deoptimizer* Deoptimizer::New(JSFunction* function,
                               BailoutType type,
                               unsigned bailout_id,
@@ -70,7 +83,8 @@
                                              type,
                                              bailout_id,
                                              from,
-                                             fp_to_sp_delta);
+                                             fp_to_sp_delta,
+                                             NULL);
   ASSERT(isolate->deoptimizer_data()->current_ == NULL);
   isolate->deoptimizer_data()->current_ = deoptimizer;
   return deoptimizer;
@@ -86,6 +100,77 @@
   return result;
 }
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
+    JavaScriptFrame* frame,
+    int frame_index,
+    Isolate* isolate) {
+  ASSERT(isolate == Isolate::Current());
+  ASSERT(frame->is_optimized());
+  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
+
+  // Get the function and code from the frame.
+  JSFunction* function = JSFunction::cast(frame->function());
+  Code* code = frame->LookupCode();
+
+  // Locate the deoptimization point in the code. As we are at a call the
+  // return address must be at a place in the code with deoptimization support.
+  SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
+  int deoptimization_index = safepoint_entry.deoptimization_index();
+  ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
+
+  // Always use the actual stack slots when calculating the fp to sp
+  // delta adding two for the function and context.
+  unsigned stack_slots = code->stack_slots();
+  unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
+
+  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
+                                             function,
+                                             Deoptimizer::DEBUGGER,
+                                             deoptimization_index,
+                                             frame->pc(),
+                                             fp_to_sp_delta,
+                                             code);
+  Address tos = frame->fp() - fp_to_sp_delta;
+  deoptimizer->FillInputFrame(tos, frame);
+
+  // Calculate the output frames.
+  Deoptimizer::ComputeOutputFrames(deoptimizer);
+
+  // Create the GC safe output frame information and register it for GC
+  // handling.
+  ASSERT_LT(frame_index, deoptimizer->output_count());
+  DeoptimizedFrameInfo* info =
+      new DeoptimizedFrameInfo(deoptimizer, frame_index);
+  isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
+
+  // Get the "simulated" top and size for the requested frame.
+  Address top =
+      reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
+  uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize();
+
+  // Done with the GC-unsafe frame descriptions. This re-enables allocation.
+  deoptimizer->DeleteFrameDescriptions();
+
+  // Allocate a heap number for the doubles belonging to this frame.
+  deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
+      top, size, info);
+
+  // Finished using the deoptimizer instance.
+  delete deoptimizer;
+
+  return info;
+}
+
+
+void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
+                                                 Isolate* isolate) {
+  ASSERT(isolate == Isolate::Current());
+  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
+  delete info;
+  isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
+}
+#endif
 
 void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                 int count,
@@ -209,18 +294,24 @@
                          BailoutType type,
                          unsigned bailout_id,
                          Address from,
-                         int fp_to_sp_delta)
+                         int fp_to_sp_delta,
+                         Code* optimized_code)
     : isolate_(isolate),
       function_(function),
       bailout_id_(bailout_id),
       bailout_type_(type),
       from_(from),
       fp_to_sp_delta_(fp_to_sp_delta),
+      input_(NULL),
       output_count_(0),
       output_(NULL),
       deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
-    PrintF("**** DEOPT: ");
+    if (type == DEBUGGER) {
+      PrintF("**** DEOPT FOR DEBUGGER: ");
+    } else {
+      PrintF("**** DEOPT: ");
+    }
     function->PrintName();
     PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
            bailout_id,
@@ -248,10 +339,16 @@
     optimized_code_ = function_->code();
     ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
     ASSERT(!optimized_code_->contains(from));
+  } else if (type == DEBUGGER) {
+    optimized_code_ = optimized_code;
+    ASSERT(optimized_code_->contains(from));
   }
   ASSERT(HEAP->allow_allocation(false));
   unsigned size = ComputeInputFrameSize();
   input_ = new(size) FrameDescription(size, function);
+#ifdef DEBUG
+  input_->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 }
 
 
@@ -417,6 +514,7 @@
 
 
 void Deoptimizer::MaterializeHeapNumbers() {
+  ASSERT_NE(DEBUGGER, bailout_type_);
   for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
     HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
     Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@@ -432,6 +530,47 @@
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
+    Address top, uint32_t size, DeoptimizedFrameInfo* info) {
+  ASSERT_EQ(DEBUGGER, bailout_type_);
+  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
+    HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+
+    // Check if the heap number to materialize actually belongs to the frame
+    // being extracted.
+    Address slot = d.slot_address();
+    if (top <= slot && slot < top + size) {
+      Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+      // Calculate the index with the bottom of the expression stack
+      // at index 0, and the fixed part (including incoming arguments)
+      // at negative indexes.
+      int index = static_cast<int>(
+          info->expression_count_ - (slot - top) / kPointerSize - 1);
+      if (FLAG_trace_deopt) {
+        PrintF("Materializing a new heap number %p [%e] in slot %p "
+               "for stack index %d\n",
+               reinterpret_cast<void*>(*num),
+               d.value(),
+               d.slot_address(),
+               index);
+      }
+      if (index >= 0) {
+        info->SetExpression(index, *num);
+      } else {
+        // Calculate parameter index subtracting one for the receiver.
+        int parameter_index =
+            index +
+            static_cast<int>(size) / kPointerSize -
+            info->expression_count_ - 1;
+        info->SetParameter(parameter_index, *num);
+      }
+    }
+  }
+}
+#endif
+
+
 void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                                      int frame_index,
                                      unsigned output_offset) {
@@ -901,6 +1040,9 @@
   ASSERT(desc.reloc_size == 0);
 
   LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  if (chunk == NULL) {
+    V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+  }
   memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
   CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
   return chunk;
@@ -969,18 +1111,48 @@
   if (slot_index >= 0) {
     // Local or spill slots. Skip the fixed part of the frame
     // including all arguments.
-    unsigned base = static_cast<unsigned>(
-        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
+    unsigned base =
+        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
     return base - ((slot_index + 1) * kPointerSize);
   } else {
     // Incoming parameter.
-    unsigned base = static_cast<unsigned>(GetFrameSize() -
-        deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
+    unsigned base = GetFrameSize() -
+        deoptimizer->ComputeIncomingArgumentSize(GetFunction());
     return base - ((slot_index + 1) * kPointerSize);
   }
 }
 
 
+int FrameDescription::ComputeParametersCount() {
+  return function_->shared()->formal_parameter_count();
+}
+
+
+Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
+  ASSERT_EQ(Code::FUNCTION, kind_);
+  ASSERT(index >= 0);
+  ASSERT(index < ComputeParametersCount());
+  // The slot indexes for incoming arguments are negative.
+  unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
+                                           index - ComputeParametersCount());
+  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
+}
+
+
+unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
+  ASSERT_EQ(Code::FUNCTION, kind_);
+  unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+  return size / kPointerSize;
+}
+
+
+Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
+  ASSERT_EQ(Code::FUNCTION, kind_);
+  unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
+  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
+}
+
+
 void TranslationBuffer::Add(int32_t value) {
   // Encode the sign bit in the least significant bit.
   bool is_negative = (value < 0);
@@ -997,11 +1169,11 @@
 
 
 int32_t TranslationIterator::Next() {
-  ASSERT(HasNext());
   // Run through the bytes until we reach one with a least significant
   // bit of zero (marks the end).
   uint32_t bits = 0;
   for (int i = 0; true; i += 7) {
+    ASSERT(HasNext());
     uint8_t next = buffer_->get(index_++);
     bits |= (next >> 1) << i;
     if ((next & 1) == 0) break;
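Moving the HasNext() assert inside the loop in Next() above also documents the stream format: each byte carries seven payload bits shifted up by one, its least significant bit flags that more bytes follow, and the fully decoded value keeps its sign in its own least significant bit. A standalone sketch of that encoding against plain C++ containers (illustrative; it mirrors the Add()/Next() logic but is not V8 code, and it ignores INT32_MIN for brevity):

  #include <stdint.h>
  #include <vector>

  // Encode: sign folded into bit 0 of the magnitude, then 7-bit groups,
  // each byte's bit 0 set while more bytes follow.
  static void EncodeSigned(std::vector<uint8_t>* out, int32_t value) {
    uint32_t bits = (static_cast<uint32_t>(value < 0 ? -value : value) << 1) |
                    (value < 0 ? 1u : 0u);
    do {
      uint32_t next = bits >> 7;
      out->push_back(static_cast<uint8_t>(((bits << 1) & 0xFF) |
                                          (next != 0 ? 1 : 0)));
      bits = next;
    } while (bits != 0);
  }

  // Decode: reassemble the 7-bit groups, then unfold the sign bit.
  static int32_t DecodeSigned(const std::vector<uint8_t>& bytes, size_t* pos) {
    uint32_t bits = 0;
    for (int shift = 0; true; shift += 7) {
      uint8_t next = bytes[(*pos)++];
      bits |= static_cast<uint32_t>(next >> 1) << shift;
      if ((next & 1) == 0) break;
    }
    return (bits & 1) ? -static_cast<int32_t>(bits >> 1)
                      : static_cast<int32_t>(bits >> 1);
  }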
@@ -1104,7 +1276,7 @@
 }
 
 
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
 
 const char* Translation::StringFor(Opcode opcode) {
   switch (opcode) {
@@ -1252,5 +1424,36 @@
   UNREACHABLE();
 }
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(
+    Deoptimizer* deoptimizer, int frame_index) {
+  FrameDescription* output_frame = deoptimizer->output_[frame_index];
+  SetFunction(output_frame->GetFunction());
+  expression_count_ = output_frame->GetExpressionCount(deoptimizer);
+  parameters_count_ = output_frame->ComputeParametersCount();
+  parameters_ = new Object*[parameters_count_];
+  for (int i = 0; i < parameters_count_; i++) {
+    SetParameter(i, output_frame->GetParameter(deoptimizer, i));
+  }
+  expression_stack_ = new Object*[expression_count_];
+  for (int i = 0; i < expression_count_; i++) {
+    SetExpression(i, output_frame->GetExpression(deoptimizer, i));
+  }
+}
+
+
+DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
+  delete[] expression_stack_;
+  delete[] parameters_;
+}
+
+void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
+  v->VisitPointer(BitCast<Object**>(&function_));
+  v->VisitPointers(parameters_, parameters_ + parameters_count_);
+  v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index cb82f44..8641261 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -30,6 +30,7 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "macro-assembler.h"
 #include "zone-inl.h"
 
@@ -40,7 +41,7 @@
 class FrameDescription;
 class TranslationIterator;
 class DeoptimizingCodeListNode;
-
+class DeoptimizedFrameInfo;
 
 class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
  public:
@@ -80,11 +81,19 @@
   DeoptimizerData();
   ~DeoptimizerData();
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  void Iterate(ObjectVisitor* v);
+#endif
+
  private:
   LargeObjectChunk* eager_deoptimization_entry_code_;
   LargeObjectChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  DeoptimizedFrameInfo* deoptimized_frame_info_;
+#endif
+
   // List of deoptimized code which still have references from active stack
   // frames. These code objects are needed by the deoptimizer when deoptimizing
   // a frame for which the code object for the function has been
@@ -102,7 +111,10 @@
   enum BailoutType {
     EAGER,
     LAZY,
-    OSR
+    OSR,
+    // This last bailout type is not really a bailout, but used by the
+    // debugger to deoptimize stack frames to allow inspection.
+    DEBUGGER
   };
 
   int output_count() const { return output_count_; }
@@ -115,6 +127,16 @@
                           Isolate* isolate);
   static Deoptimizer* Grab(Isolate* isolate);
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // The returned object with information on the optimized frame needs to be
+  // freed before another one can be generated.
+  static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
+                                                        int frame_index,
+                                                        Isolate* isolate);
+  static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
+                                             Isolate* isolate);
+#endif
+
   // Makes sure that there is enough room in the relocation
   // information of a code object to perform lazy deoptimization
   // patching. If there is not enough room a new relocation
@@ -170,6 +192,10 @@
   ~Deoptimizer();
 
   void MaterializeHeapNumbers();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  void MaterializeHeapNumbersForDebuggerInspectableFrame(
+      Address top, uint32_t size, DeoptimizedFrameInfo* info);
+#endif
 
   static void ComputeOutputFrames(Deoptimizer* deoptimizer);
 
@@ -232,7 +258,8 @@
               BailoutType type,
               unsigned bailout_id,
               Address from,
-              int fp_to_sp_delta);
+              int fp_to_sp_delta,
+              Code* optimized_code);
   void DeleteFrameDescriptions();
 
   void DoComputeOutputFrames();
@@ -268,6 +295,11 @@
   static Code* FindDeoptimizingCodeFromAddress(Address addr);
   static void RemoveDeoptimizingCode(Code* code);
 
+  // Fill the input frame from a JavaScript frame. This is used when
+  // the debugger needs to inspect an optimized frame. For normal
+  // deoptimizations the input frame is filled in generated code.
+  void FillInputFrame(Address tos, JavaScriptFrame* frame);
+
   Isolate* isolate_;
   JSFunction* function_;
   Code* optimized_code_;
@@ -285,10 +317,11 @@
 
   List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
 
-  static int table_entry_size_;
+  static const int table_entry_size_;
 
   friend class FrameDescription;
   friend class DeoptimizingCodeListNode;
+  friend class DeoptimizedFrameInfo;
 };
 
 
@@ -303,11 +336,18 @@
     return malloc(size + frame_size - kPointerSize);
   }
 
+  void operator delete(void* pointer, uint32_t frame_size) {
+    free(pointer);
+  }
+
   void operator delete(void* description) {
     free(description);
   }
 
-  intptr_t GetFrameSize() const { return frame_size_; }
+  uint32_t GetFrameSize() const {
+    ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
+    return static_cast<uint32_t>(frame_size_);
+  }
 
   JSFunction* GetFunction() const { return function_; }
 
@@ -359,6 +399,23 @@
 
   void SetContinuation(intptr_t pc) { continuation_ = pc; }
 
+#ifdef DEBUG
+  Code::Kind GetKind() const { return kind_; }
+  void SetKind(Code::Kind kind) { kind_ = kind; }
+#endif
+
+  // Get the incoming arguments count.
+  int ComputeParametersCount();
+
+  // Get a parameter value for an unoptimized frame.
+  Object* GetParameter(Deoptimizer* deoptimizer, int index);
+
+  // Get the expression stack height for an unoptimized frame.
+  unsigned GetExpressionCount(Deoptimizer* deoptimizer);
+
+  // Get the expression stack value for an unoptimized frame.
+  Object* GetExpression(Deoptimizer* deoptimizer, int index);
+
   static int registers_offset() {
     return OFFSET_OF(FrameDescription, registers_);
   }
@@ -390,6 +447,9 @@
  private:
   static const uint32_t kZapUint32 = 0xbeeddead;
 
+  // Frame_size_ must hold a uint32_t value.  It is only a uintptr_t to
+  // keep the variable-size array frame_content_ of type intptr_t at
+  // the end of the structure aligned.
   uintptr_t frame_size_;  // Number of bytes.
   JSFunction* function_;
   intptr_t registers_[Register::kNumRegisters];
@@ -398,6 +458,9 @@
   intptr_t pc_;
   intptr_t fp_;
   Smi* state_;
+#ifdef DEBUG
+  Code::Kind kind_;
+#endif
 
   // Continuation is the PC where the execution continues after
   // deoptimizing.
@@ -438,9 +501,7 @@
 
   int32_t Next();
 
-  bool HasNext() const { return index_ >= 0; }
-
-  void Done() { index_ = -1; }
+  bool HasNext() const { return index_ < buffer_->length(); }
 
   void Skip(int n) {
     for (int i = 0; i < n; i++) Next();
@@ -494,7 +555,7 @@
 
   static int NumberOfOperandsFor(Opcode opcode);
 
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
   static const char* StringFor(Opcode opcode);
 #endif
 
@@ -596,6 +657,70 @@
 };
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// Class used to represent an unoptimized frame when the debugger
+// needs to inspect a frame that is part of an optimized frame. The
+// internally used FrameDescription objects are not GC safe so for use
+// by the debugger frame information is copied to an object of this type.
+class DeoptimizedFrameInfo : public Malloced {
+ public:
+  DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
+  virtual ~DeoptimizedFrameInfo();
+
+  // GC support.
+  void Iterate(ObjectVisitor* v);
+
+  // Return the number of incoming arguments.
+  int parameters_count() { return parameters_count_; }
+
+  // Return the height of the expression stack.
+  int expression_count() { return expression_count_; }
+
+  // Get the frame function.
+  JSFunction* GetFunction() {
+    return function_;
+  }
+
+  // Get an incoming argument.
+  Object* GetParameter(int index) {
+    ASSERT(0 <= index && index < parameters_count());
+    return parameters_[index];
+  }
+
+  // Get an expression from the expression stack.
+  Object* GetExpression(int index) {
+    ASSERT(0 <= index && index < expression_count());
+    return expression_stack_[index];
+  }
+
+ private:
+  // Set the frame function.
+  void SetFunction(JSFunction* function) {
+    function_ = function;
+  }
+
+  // Set an incoming argument.
+  void SetParameter(int index, Object* obj) {
+    ASSERT(0 <= index && index < parameters_count());
+    parameters_[index] = obj;
+  }
+
+  // Set an expression on the expression stack.
+  void SetExpression(int index, Object* obj) {
+    ASSERT(0 <= index && index < expression_count());
+    expression_stack_[index] = obj;
+  }
+
+  JSFunction* function_;
+  int parameters_count_;
+  int expression_count_;
+  Object** parameters_;
+  Object** expression_stack_;
+
+  friend class Deoptimizer;
+};
+#endif
+
 } }  // namespace v8::internal
 
 #endif  // V8_DEOPTIMIZER_H_
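Taken together, the declarations above give the debugger a GC-safe snapshot of an optimized frame. A short usage fragment assembled only from these declarations (illustrative; it assumes ENABLE_DEBUGGER_SUPPORT, an Isolate* isolate, and an optimized JavaScriptFrame* frame with a chosen frame_index):

  // Materialize an inspectable copy of one (possibly inlined) frame.
  DeoptimizedFrameInfo* info =
      Deoptimizer::DebuggerInspectableFrame(frame, frame_index, isolate);

  JSFunction* function = info->GetFunction();  // the frame's function
  for (int i = 0; i < info->parameters_count(); i++) {
    Object* argument = info->GetParameter(i);
    // ... hand the incoming argument to the debugger mirrors ...
  }
  for (int i = 0; i < info->expression_count(); i++) {
    Object* value = info->GetExpression(i);
    // ... expression stack values, bottom (index 0) upwards ...
  }

  // Only one inspectable frame may exist at a time; free it when done.
  Deoptimizer::DeleteDebuggerInspectableFrame(info, isolate);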
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 65e1668..1e67b4c 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -97,14 +97,17 @@
 }
 
 
-static void DumpBuffer(FILE* f, char* buff) {
+static void DumpBuffer(FILE* f, StringBuilder* out) {
   if (f == NULL) {
-    PrintF("%s", buff);
+    PrintF("%s\n", out->Finalize());
   } else {
-    fprintf(f, "%s", buff);
+    fprintf(f, "%s\n", out->Finalize());
   }
+  out->Reset();
 }
 
+
+
 static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
 static const int kRelocInfoPosition = 57;
 
@@ -119,6 +122,7 @@
 
   v8::internal::EmbeddedVector<char, 128> decode_buffer;
   v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
+  StringBuilder out(out_buffer.start(), out_buffer.length());
   byte* pc = begin;
   disasm::Disassembler d(converter);
   RelocIterator* it = NULL;
@@ -181,17 +185,12 @@
       }
     }
 
-    StringBuilder out(out_buffer.start(), out_buffer.length());
-
     // Comments.
     for (int i = 0; i < comments.length(); i++) {
-      out.AddFormatted("                  %s\n", comments[i]);
+      out.AddFormatted("                  %s", comments[i]);
+      DumpBuffer(f, &out);
     }
 
-    // Write out comments, resets outp so that we can format the next line.
-    DumpBuffer(f, out.Finalize());
-    out.Reset();
-
     // Instruction address and instruction offset.
     out.AddFormatted("%p  %4d  ", prev_pc, prev_pc - begin);
 
@@ -209,7 +208,7 @@
         out.AddPadding(' ', kRelocInfoPosition - out.position());
       } else {
         // Additional reloc infos are printed on separate lines.
-        out.AddFormatted("\n");
+        DumpBuffer(f, &out);
         out.AddPadding(' ', kRelocInfoPosition);
       }
 
@@ -224,7 +223,7 @@
         HeapStringAllocator allocator;
         StringStream accumulator(&allocator);
         relocinfo.target_object()->ShortPrint(&accumulator);
-        SmartPointer<const char> obj_name = accumulator.ToCString();
+        SmartArrayPointer<const char> obj_name = accumulator.ToCString();
         out.AddFormatted("    ;; object: %s", *obj_name);
       } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
         const char* reference_name =
@@ -248,9 +247,6 @@
             PropertyType type = code->type();
             out.AddFormatted(", %s", Code::PropertyType2String(type));
           }
-          if (code->ic_in_loop() == IN_LOOP) {
-            out.AddFormatted(", in_loop");
-          }
           if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
             out.AddFormatted(", argc = %d", code->arguments_count());
           }
@@ -282,6 +278,9 @@
         } else {
           out.AddFormatted(" %s", Code::Kind2String(kind));
         }
+        if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+          out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
+        }
       } else if (rmode == RelocInfo::RUNTIME_ENTRY &&
                  Isolate::Current()->deoptimizer_data() != NULL) {
         // A runtime entry relocinfo might be a deoptimization bailout.
@@ -296,9 +295,18 @@
         out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
       }
     }
-    out.AddString("\n");
-    DumpBuffer(f, out.Finalize());
-    out.Reset();
+    DumpBuffer(f, &out);
+  }
+
+  // Emit comments following the last instruction (if any).
+  if (it != NULL) {
+    for ( ; !it->done(); it->next()) {
+      if (RelocInfo::IsComment(it->rinfo()->rmode())) {
+        out.AddFormatted("                  %s",
+                         reinterpret_cast<const char*>(it->rinfo()->data()));
+        DumpBuffer(f, &out);
+      }
+    }
   }
 
   delete it;
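
The refactoring above changes DumpBuffer to take the StringBuilder itself, print it with a trailing newline, and reset it, so each output line is flushed as soon as it is complete instead of embedding "\n" into the buffer. A minimal self-contained sketch of that flush-per-line pattern, using std::string in place of V8's StringBuilder (the LineBuffer name is illustrative, not V8's):

// Sketch only: accumulate one output line, then flush it with a trailing
// newline and reset, mirroring the new DumpBuffer(FILE*, StringBuilder*)
// contract in the hunks above.
#include <cstdarg>
#include <cstdio>
#include <string>

class LineBuffer {  // stand-in for v8::internal::StringBuilder
 public:
  void AddFormatted(const char* format, ...) {
    char chunk[256];
    va_list args;
    va_start(args, format);
    vsnprintf(chunk, sizeof(chunk), format, args);
    va_end(args);
    data_ += chunk;
  }
  const char* Finalize() { return data_.c_str(); }
  void Reset() { data_.clear(); }
 private:
  std::string data_;
};

// Print the accumulated line to |f| (stdout when f is NULL) and clear the
// buffer so the caller can start formatting the next line.
static void DumpBuffer(FILE* f, LineBuffer* out) {
  fprintf(f == NULL ? stdout : f, "%s\n", out->Finalize());
  out->Reset();
}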
diff --git a/src/disassembler.h b/src/disassembler.h
index 68a338d..4a87dca 100644
--- a/src/disassembler.h
+++ b/src/disassembler.h
@@ -28,6 +28,8 @@
 #ifndef V8_DISASSEMBLER_H_
 #define V8_DISASSEMBLER_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/diy-fp.cc b/src/diy-fp.cc
index c54bd1d..4913877 100644
--- a/src/diy-fp.cc
+++ b/src/diy-fp.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,8 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
-
+#include "../include/v8stdint.h"
+#include "globals.h"
+#include "checks.h"
 #include "diy-fp.h"
 
 namespace v8 {
diff --git a/src/diy-fp.h b/src/diy-fp.h
index cfe05ef..26ff1a2 100644
--- a/src/diy-fp.h
+++ b/src/diy-fp.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -80,7 +80,7 @@
 
     // This method is mainly called for normalizing boundaries. In general
     // boundaries need to be shifted by 10 bits. We thus optimize for this case.
-    const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
+    const uint64_t k10MSBits = static_cast<uint64_t>(0x3FF) << 54;
     while ((f & k10MSBits) == 0) {
       f <<= 10;
       e -= 10;
@@ -106,7 +106,7 @@
   void set_e(int new_value) { e_ = new_value; }
 
  private:
-  static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);
+  static const uint64_t kUint64MSB = static_cast<uint64_t>(1) << 63;
 
   uint64_t f_;
   int e_;
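
The hunks above replace the V8_2PART_UINT64_C constants with plain shift expressions; both forms denote the same bit patterns (the top ten bits and the most significant bit of a 64-bit word). A standalone check of that equivalence, assuming a C++11 compiler for static_assert:

#include <stdint.h>

static_assert((static_cast<uint64_t>(0x3FF) << 54) == 0xFFC0000000000000ULL,
              "top 10 bits of a 64-bit word");
static_assert((static_cast<uint64_t>(1) << 63) == 0x8000000000000000ULL,
              "most significant bit of a 64-bit word");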
diff --git a/src/dtoa.cc b/src/dtoa.cc
index b857a5d..00233a8 100644
--- a/src/dtoa.cc
+++ b/src/dtoa.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,10 @@
 
 #include <math.h>
 
-#include "v8.h"
+#include "../include/v8stdint.h"
+#include "checks.h"
+#include "utils.h"
+
 #include "dtoa.h"
 
 #include "bignum-dtoa.h"
diff --git a/src/elements.cc b/src/elements.cc
new file mode 100644
index 0000000..0454644
--- /dev/null
+++ b/src/elements.cc
@@ -0,0 +1,635 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "objects.h"
+#include "elements.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+
+ElementsAccessor** ElementsAccessor::elements_accessors_;
+
+
+bool HasKey(FixedArray* array, Object* key) {
+  int len0 = array->length();
+  for (int i = 0; i < len0; i++) {
+    Object* element = array->get(i);
+    if (element->IsSmi() && element == key) return true;
+    if (element->IsString() &&
+        key->IsString() && String::cast(element)->Equals(String::cast(key))) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// Base class for element handler implementations. Contains the common
+// logic for objects with different ElementsKinds. Subclasses must
+// specialize methods for which the element implementation differs from
+// the base class implementation.
+//
+// This class is intended to be used in the following way:
+//
+//   class SomeElementsAccessor :
+//       public ElementsAccessorBase<SomeElementsAccessor,
+//                                   BackingStoreClass> {
+//     ...
+//   }
+//
+// This is an example of the Curiously Recurring Template Pattern (see
+// http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).  We use
+// CRTP to guarantee aggressive compile time optimizations (i.e.  inlining and
+// specialization of SomeElementsAccessor methods).
+template <typename ElementsAccessorSubclass, typename BackingStoreClass>
+class ElementsAccessorBase : public ElementsAccessor {
+ protected:
+  ElementsAccessorBase() { }
+  virtual MaybeObject* Get(FixedArrayBase* backing_store,
+                           uint32_t key,
+                           JSObject* obj,
+                           Object* receiver) {
+    return ElementsAccessorSubclass::Get(
+        BackingStoreClass::cast(backing_store), key, obj, receiver);
+  }
+
+  static MaybeObject* Get(BackingStoreClass* backing_store,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
+    if (key < ElementsAccessorSubclass::GetCapacity(backing_store)) {
+      return backing_store->get(key);
+    }
+    return backing_store->GetHeap()->the_hole_value();
+  }
+
+  virtual MaybeObject* Delete(JSObject* obj,
+                              uint32_t key,
+                              JSReceiver::DeleteMode mode) = 0;
+
+  virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
+                                               FixedArray* to,
+                                               JSObject* holder,
+                                               Object* receiver) {
+    int len0 = to->length();
+#ifdef DEBUG
+    if (FLAG_enable_slow_asserts) {
+      for (int i = 0; i < len0; i++) {
+        ASSERT(!to->get(i)->IsTheHole());
+      }
+    }
+#endif
+    BackingStoreClass* backing_store = BackingStoreClass::cast(from);
+    uint32_t len1 = ElementsAccessorSubclass::GetCapacity(backing_store);
+
+    // Optimize if 'other' is empty.
+    // We cannot optimize if 'this' is empty, as other may have holes.
+    if (len1 == 0) return to;
+
+    // Compute how many elements are not in other.
+    int extra = 0;
+    for (uint32_t y = 0; y < len1; y++) {
+      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
+                                                      y,
+                                                      holder,
+                                                      receiver)) {
+        uint32_t key =
+            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+        MaybeObject* maybe_value =
+            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+        Object* value;
+        if (!maybe_value->ToObject(&value)) return maybe_value;
+        ASSERT(!value->IsTheHole());
+        if (!HasKey(to, value)) {
+          extra++;
+        }
+      }
+    }
+
+    if (extra == 0) return to;
+
+    // Allocate the result
+    FixedArray* result;
+    MaybeObject* maybe_obj =
+        backing_store->GetHeap()->AllocateFixedArray(len0 + extra);
+    if (!maybe_obj->To<FixedArray>(&result)) return maybe_obj;
+
+    // Fill in the content
+    {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+      for (int i = 0; i < len0; i++) {
+        Object* e = to->get(i);
+        ASSERT(e->IsString() || e->IsNumber());
+        result->set(i, e, mode);
+      }
+    }
+    // Fill in the extra values.
+    int index = 0;
+    for (uint32_t y = 0; y < len1; y++) {
+      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
+                                                      y,
+                                                      holder,
+                                                      receiver)) {
+        uint32_t key =
+            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+        MaybeObject* maybe_value =
+            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+        Object* value;
+        if (!maybe_value->ToObject(&value)) return maybe_value;
+        if (!value->IsTheHole() && !HasKey(to, value)) {
+          result->set(len0 + index, value);
+          index++;
+        }
+      }
+    }
+    ASSERT(extra == index);
+    return result;
+  }
+
+ protected:
+  static uint32_t GetCapacity(BackingStoreClass* backing_store) {
+    return backing_store->length();
+  }
+
+  virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
+    return ElementsAccessorSubclass::GetCapacity(
+        BackingStoreClass::cast(backing_store));
+  }
+
+  static bool HasElementAtIndex(BackingStoreClass* backing_store,
+                                uint32_t index,
+                                JSObject* holder,
+                                Object* receiver) {
+    uint32_t key =
+        ElementsAccessorSubclass::GetKeyForIndex(backing_store, index);
+    MaybeObject* element = ElementsAccessorSubclass::Get(backing_store,
+                                                         key,
+                                                         holder,
+                                                         receiver);
+    return !element->IsTheHole();
+  }
+
+  virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
+                                 uint32_t index,
+                                 JSObject* holder,
+                                 Object* receiver) {
+    return ElementsAccessorSubclass::HasElementAtIndex(
+        BackingStoreClass::cast(backing_store), index, holder, receiver);
+  }
+
+  static uint32_t GetKeyForIndex(BackingStoreClass* backing_store,
+                                 uint32_t index) {
+    return index;
+  }
+
+  virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
+                                  uint32_t index) {
+    return ElementsAccessorSubclass::GetKeyForIndex(
+        BackingStoreClass::cast(backing_store), index);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
+};
+
+
+class FastElementsAccessor
+    : public ElementsAccessorBase<FastElementsAccessor, FixedArray> {
+ public:
+  static MaybeObject* DeleteCommon(JSObject* obj,
+                                   uint32_t key) {
+    ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
+    Heap* heap = obj->GetHeap();
+    FixedArray* backing_store = FixedArray::cast(obj->elements());
+    if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
+      backing_store = FixedArray::cast(backing_store->get(1));
+    } else {
+      Object* writable;
+      MaybeObject* maybe = obj->EnsureWritableFastElements();
+      if (!maybe->ToObject(&writable)) return maybe;
+      backing_store = FixedArray::cast(writable);
+    }
+    uint32_t length = static_cast<uint32_t>(
+        obj->IsJSArray()
+        ? Smi::cast(JSArray::cast(obj)->length())->value()
+        : backing_store->length());
+    if (key < length) {
+      backing_store->set_the_hole(key);
+      // If an old space backing store is larger than a certain size and
+      // has too few used values, normalize it.
+      // To avoid doing the check on every delete, we require at least
+      // one hole adjacent to the value being deleted.
+      Object* hole = heap->the_hole_value();
+      const int kMinLengthForSparsenessCheck = 64;
+      if (backing_store->length() >= kMinLengthForSparsenessCheck &&
+          !heap->InNewSpace(backing_store) &&
+          ((key > 0 && backing_store->get(key - 1) == hole) ||
+           (key + 1 < length && backing_store->get(key + 1) == hole))) {
+        int num_used = 0;
+        for (int i = 0; i < backing_store->length(); ++i) {
+          if (backing_store->get(i) != hole) ++num_used;
+          // Bail out early if more than 1/4 is used.
+          if (4 * num_used > backing_store->length()) break;
+        }
+        if (4 * num_used <= backing_store->length()) {
+          MaybeObject* result = obj->NormalizeElements();
+          if (result->IsFailure()) return result;
+        }
+      }
+    }
+    return heap->true_value();
+  }
+
+ protected:
+  virtual MaybeObject* Delete(JSObject* obj,
+                              uint32_t key,
+                              JSReceiver::DeleteMode mode) {
+    return DeleteCommon(obj, key);
+  }
+};
+
+
+class FastDoubleElementsAccessor
+    : public ElementsAccessorBase<FastDoubleElementsAccessor,
+                                  FixedDoubleArray> {
+ protected:
+  friend class ElementsAccessorBase<FastDoubleElementsAccessor,
+                                    FixedDoubleArray>;
+
+  virtual MaybeObject* Delete(JSObject* obj,
+                              uint32_t key,
+                              JSReceiver::DeleteMode mode) {
+    int length = obj->IsJSArray()
+        ? Smi::cast(JSArray::cast(obj)->length())->value()
+        : FixedDoubleArray::cast(obj->elements())->length();
+    if (key < static_cast<uint32_t>(length)) {
+      FixedDoubleArray::cast(obj->elements())->set_the_hole(key);
+    }
+    return obj->GetHeap()->true_value();
+  }
+
+  static bool HasElementAtIndex(FixedDoubleArray* backing_store,
+                                uint32_t index,
+                                JSObject* holder,
+                                Object* receiver) {
+    return !backing_store->is_the_hole(index);
+  }
+};
+
+
+// Super class for all external element arrays.
+template<typename ExternalElementsAccessorSubclass,
+         typename ExternalArray>
+class ExternalElementsAccessor
+    : public ElementsAccessorBase<ExternalElementsAccessorSubclass,
+                                  ExternalArray> {
+ protected:
+  friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
+                                    ExternalArray>;
+
+  static MaybeObject* Get(ExternalArray* backing_store,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
+    if (key < ExternalElementsAccessorSubclass::GetCapacity(backing_store)) {
+      return backing_store->get(key);
+    } else {
+      return backing_store->GetHeap()->undefined_value();
+    }
+  }
+
+  virtual MaybeObject* Delete(JSObject* obj,
+                              uint32_t key,
+                              JSReceiver::DeleteMode mode) {
+    // External arrays always ignore deletes.
+    return obj->GetHeap()->true_value();
+  }
+};
+
+
+class ExternalByteElementsAccessor
+    : public ExternalElementsAccessor<ExternalByteElementsAccessor,
+                                      ExternalByteArray> {
+};
+
+
+class ExternalUnsignedByteElementsAccessor
+    : public ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
+                                      ExternalUnsignedByteArray> {
+};
+
+
+class ExternalShortElementsAccessor
+    : public ExternalElementsAccessor<ExternalShortElementsAccessor,
+                                      ExternalShortArray> {
+};
+
+
+class ExternalUnsignedShortElementsAccessor
+    : public ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
+                                      ExternalUnsignedShortArray> {
+};
+
+
+class ExternalIntElementsAccessor
+    : public ExternalElementsAccessor<ExternalIntElementsAccessor,
+                                      ExternalIntArray> {
+};
+
+
+class ExternalUnsignedIntElementsAccessor
+    : public ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
+                                      ExternalUnsignedIntArray> {
+};
+
+
+class ExternalFloatElementsAccessor
+    : public ExternalElementsAccessor<ExternalFloatElementsAccessor,
+                                      ExternalFloatArray> {
+};
+
+
+class ExternalDoubleElementsAccessor
+    : public ExternalElementsAccessor<ExternalDoubleElementsAccessor,
+                                      ExternalDoubleArray> {
+};
+
+
+class PixelElementsAccessor
+    : public ExternalElementsAccessor<PixelElementsAccessor,
+                                      ExternalPixelArray> {
+};
+
+
+class DictionaryElementsAccessor
+    : public ElementsAccessorBase<DictionaryElementsAccessor,
+                                  SeededNumberDictionary> {
+ public:
+  static MaybeObject* DeleteCommon(JSObject* obj,
+                                   uint32_t key,
+                                   JSReceiver::DeleteMode mode) {
+    Isolate* isolate = obj->GetIsolate();
+    Heap* heap = isolate->heap();
+    FixedArray* backing_store = FixedArray::cast(obj->elements());
+    bool is_arguments =
+        (obj->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS);
+    if (is_arguments) {
+      backing_store = FixedArray::cast(backing_store->get(1));
+    }
+    SeededNumberDictionary* dictionary =
+        SeededNumberDictionary::cast(backing_store);
+    int entry = dictionary->FindEntry(key);
+    if (entry != SeededNumberDictionary::kNotFound) {
+      Object* result = dictionary->DeleteProperty(entry, mode);
+      if (result == heap->true_value()) {
+        MaybeObject* maybe_elements = dictionary->Shrink(key);
+        FixedArray* new_elements = NULL;
+        if (!maybe_elements->To(&new_elements)) {
+          return maybe_elements;
+        }
+        if (is_arguments) {
+          FixedArray::cast(obj->elements())->set(1, new_elements);
+        } else {
+          obj->set_elements(new_elements);
+        }
+      }
+      if (mode == JSObject::STRICT_DELETION &&
+          result == heap->false_value()) {
+        // In strict mode, attempting to delete a non-configurable property
+        // throws an exception.
+        HandleScope scope(isolate);
+        Handle<Object> holder(obj);
+        Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
+        Handle<Object> args[2] = { name, holder };
+        Handle<Object> error =
+            isolate->factory()->NewTypeError("strict_delete_property",
+                                             HandleVector(args, 2));
+        return isolate->Throw(*error);
+      }
+    }
+    return heap->true_value();
+  }
+
+ protected:
+  friend class ElementsAccessorBase<DictionaryElementsAccessor,
+                                    SeededNumberDictionary>;
+
+  virtual MaybeObject* Delete(JSObject* obj,
+                              uint32_t key,
+                              JSReceiver::DeleteMode mode) {
+    return DeleteCommon(obj, key, mode);
+  }
+
+  static MaybeObject* Get(SeededNumberDictionary* backing_store,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
+    int entry = backing_store->FindEntry(key);
+    if (entry != SeededNumberDictionary::kNotFound) {
+      Object* element = backing_store->ValueAt(entry);
+      PropertyDetails details = backing_store->DetailsAt(entry);
+      if (details.type() == CALLBACKS) {
+        return obj->GetElementWithCallback(receiver,
+                                           element,
+                                           key,
+                                           obj);
+      } else {
+        return element;
+      }
+    }
+    return obj->GetHeap()->the_hole_value();
+  }
+
+  static uint32_t GetKeyForIndex(SeededNumberDictionary* dict,
+                                 uint32_t index) {
+    Object* key = dict->KeyAt(index);
+    return Smi::cast(key)->value();
+  }
+};
+
+
+class NonStrictArgumentsElementsAccessor
+    : public ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
+                                  FixedArray> {
+ protected:
+  friend class ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
+                                    FixedArray>;
+
+  static MaybeObject* Get(FixedArray* parameter_map,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
+    Object* probe = GetParameterMapArg(parameter_map, key);
+    if (!probe->IsTheHole()) {
+      Context* context = Context::cast(parameter_map->get(0));
+      int context_index = Smi::cast(probe)->value();
+      ASSERT(!context->get(context_index)->IsTheHole());
+      return context->get(context_index);
+    } else {
+      // Object is not mapped, defer to the arguments.
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      return ElementsAccessor::ForArray(arguments)->Get(arguments,
+                                                        key,
+                                                        obj,
+                                                        receiver);
+    }
+  }
+
+  virtual MaybeObject* Delete(JSObject* obj,
+                              uint32_t key,
+                              JSReceiver::DeleteMode mode) {
+    FixedArray* parameter_map = FixedArray::cast(obj->elements());
+    Object* probe = GetParameterMapArg(parameter_map, key);
+    if (!probe->IsTheHole()) {
+      // TODO(kmillikin): We could check if this was the last aliased
+      // parameter, and revert to normal elements in that case.  That
+      // would enable GC of the context.
+      parameter_map->set_the_hole(key + 2);
+    } else {
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      if (arguments->IsDictionary()) {
+        return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
+      } else {
+        return FastElementsAccessor::DeleteCommon(obj, key);
+      }
+    }
+    return obj->GetHeap()->true_value();
+  }
+
+  static uint32_t GetCapacity(FixedArray* parameter_map) {
+    FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+    return Max(static_cast<uint32_t>(parameter_map->length() - 2),
+               ForArray(arguments)->GetCapacity(arguments));
+  }
+
+  static uint32_t GetKeyForIndex(FixedArray* dict,
+                                 uint32_t index) {
+    return index;
+  }
+
+  static bool HasElementAtIndex(FixedArray* parameter_map,
+                                uint32_t index,
+                                JSObject* holder,
+                                Object* receiver) {
+    Object* probe = GetParameterMapArg(parameter_map, index);
+    if (!probe->IsTheHole()) {
+      return true;
+    } else {
+      FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+      ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
+      return !accessor->Get(arguments, index, holder, receiver)->IsTheHole();
+    }
+  }
+
+ private:
+  static Object* GetParameterMapArg(FixedArray* parameter_map,
+                                    uint32_t key) {
+    uint32_t length = parameter_map->length();
+    return key < (length - 2)
+        ? parameter_map->get(key + 2)
+        : parameter_map->GetHeap()->the_hole_value();
+  }
+};
+
+
+ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
+  switch (array->map()->instance_type()) {
+    case FIXED_ARRAY_TYPE:
+      if (array->IsDictionary()) {
+        return elements_accessors_[DICTIONARY_ELEMENTS];
+      } else {
+        return elements_accessors_[FAST_ELEMENTS];
+      }
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_UNSIGNED_BYTE_ELEMENTS];
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_SHORT_ELEMENTS];
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_UNSIGNED_SHORT_ELEMENTS];
+    case EXTERNAL_INT_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_INT_ELEMENTS];
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_UNSIGNED_INT_ELEMENTS];
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_FLOAT_ELEMENTS];
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_DOUBLE_ELEMENTS];
+    case EXTERNAL_PIXEL_ARRAY_TYPE:
+      return elements_accessors_[EXTERNAL_PIXEL_ELEMENTS];
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void ElementsAccessor::InitializeOncePerProcess() {
+  static struct ConcreteElementsAccessors {
+    FastElementsAccessor fast_elements_handler;
+    FastDoubleElementsAccessor fast_double_elements_handler;
+    DictionaryElementsAccessor dictionary_elements_handler;
+    NonStrictArgumentsElementsAccessor non_strict_arguments_elements_handler;
+    ExternalByteElementsAccessor byte_elements_handler;
+    ExternalUnsignedByteElementsAccessor unsigned_byte_elements_handler;
+    ExternalShortElementsAccessor short_elements_handler;
+    ExternalUnsignedShortElementsAccessor unsigned_short_elements_handler;
+    ExternalIntElementsAccessor int_elements_handler;
+    ExternalUnsignedIntElementsAccessor unsigned_int_elements_handler;
+    ExternalFloatElementsAccessor float_elements_handler;
+    ExternalDoubleElementsAccessor double_elements_handler;
+    PixelElementsAccessor pixel_elements_handler;
+  } element_accessors;
+
+  static ElementsAccessor* accessor_array[] = {
+    &element_accessors.fast_elements_handler,
+    &element_accessors.fast_double_elements_handler,
+    &element_accessors.dictionary_elements_handler,
+    &element_accessors.non_strict_arguments_elements_handler,
+    &element_accessors.byte_elements_handler,
+    &element_accessors.unsigned_byte_elements_handler,
+    &element_accessors.short_elements_handler,
+    &element_accessors.unsigned_short_elements_handler,
+    &element_accessors.int_elements_handler,
+    &element_accessors.unsigned_int_elements_handler,
+    &element_accessors.float_elements_handler,
+    &element_accessors.double_elements_handler,
+    &element_accessors.pixel_elements_handler
+  };
+
+  elements_accessors_ = accessor_array;
+}
+
+
+} }  // namespace v8::internal
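
The CRTP comment near the top of elements.cc describes how ElementsAccessorBase forwards its virtual methods to static methods on the concrete subclass so they can be inlined per ElementsKind. A minimal self-contained sketch of that dispatch pattern, with illustrative names rather than V8's classes:

// Sketch only: the base class's virtual method downcasts the backing store
// once and forwards to the subclass's static implementation.
#include <cstdint>
#include <cstdio>

class AccessorInterface {
 public:
  virtual ~AccessorInterface() {}
  virtual uint32_t GetCapacity(const void* backing_store) = 0;
};

template <typename Subclass, typename BackingStore>
class AccessorBase : public AccessorInterface {
 public:
  virtual uint32_t GetCapacity(const void* backing_store) {
    // Resolved statically; the compiler can inline Subclass::GetCapacity.
    return Subclass::GetCapacity(
        static_cast<const BackingStore*>(backing_store));
  }
};

struct FixedBackingStore { uint32_t length; };

class FixedAccessor : public AccessorBase<FixedAccessor, FixedBackingStore> {
 public:
  static uint32_t GetCapacity(const FixedBackingStore* store) {
    return store->length;
  }
};

int main() {
  FixedBackingStore store = { 16 };
  FixedAccessor accessor;
  AccessorInterface* generic = &accessor;
  std::printf("%u\n", generic->GetCapacity(&store));  // prints 16
  return 0;
}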
diff --git a/src/elements.h b/src/elements.h
new file mode 100644
index 0000000..851c8c3
--- /dev/null
+++ b/src/elements.h
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ELEMENTS_H_
+#define V8_ELEMENTS_H_
+
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Abstract base class for handles that can operate on objects with differing
+// ElementsKinds.
+class ElementsAccessor {
+ public:
+  ElementsAccessor() { }
+  virtual ~ElementsAccessor() { }
+  virtual MaybeObject* Get(FixedArrayBase* backing_store,
+                           uint32_t key,
+                           JSObject* holder,
+                           Object* receiver) = 0;
+
+  virtual MaybeObject* Delete(JSObject* holder,
+                              uint32_t key,
+                              JSReceiver::DeleteMode mode) = 0;
+
+  virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
+                                               FixedArray* to,
+                                               JSObject* holder,
+                                               Object* receiver) = 0;
+
+  // Returns a shared ElementsAccessor for the specified ElementsKind.
+  static ElementsAccessor* ForKind(ElementsKind elements_kind) {
+    ASSERT(elements_kind < kElementsKindCount);
+    return elements_accessors_[elements_kind];
+  }
+
+  static ElementsAccessor* ForArray(FixedArrayBase* array);
+
+  static void InitializeOncePerProcess();
+
+ protected:
+  friend class NonStrictArgumentsElementsAccessor;
+
+  virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
+
+  virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
+                                 uint32_t index,
+                                 JSObject* holder,
+                                 Object* receiver) = 0;
+
+  // Element handlers distinguish between indexes and keys when they
+  // manipulate elements.  Indexes refer to elements in terms of their
+  // location in the underlying storage's backing store representation, and
+  // are between 0 and GetCapacity.  Keys refer to elements in terms of the
+  // value that would be specified in JavaScript to access the element. In
+  // most implementations, keys are equivalent to indexes, and GetKeyForIndex
+  // returns the same value it is passed. In the NumberDictionary
+  // ElementsAccessor, GetKeyForIndex maps the index to a key using the KeyAt
+  // method on the NumberDictionary.
+  virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
+                                  uint32_t index) = 0;
+
+ private:
+  static ElementsAccessor** elements_accessors_;
+
+  DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ELEMENTS_H_
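
The index/key distinction described in the comment above can be illustrated with two toy backing stores (plain standard-library containers, not V8 types): in a dense store the key is the index itself, while a dictionary-style store maps an iteration index to whatever numeric key it has stored, much like SeededNumberDictionary::KeyAt does.

// Sketch only: two stand-in backing stores with the GetKeyForIndex contract.
#include <cstdint>
#include <iterator>
#include <map>
#include <vector>

struct DenseStore {
  std::vector<double> values;
  // Keys and indexes coincide for a dense backing store.
  uint32_t GetKeyForIndex(uint32_t index) const { return index; }
};

struct DictionaryStore {
  // entry index -> (key, value); keys can be sparse, e.g. 5 and 1000000.
  std::map<uint32_t, double> entries;
  uint32_t GetKeyForIndex(uint32_t index) const {
    std::map<uint32_t, double>::const_iterator it = entries.begin();
    std::advance(it, index);
    return it->first;  // the JavaScript-visible element key
  }
};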
diff --git a/src/execution.cc b/src/execution.cc
index 7a2bbc6..f36d4e4 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -145,11 +145,33 @@
 }
 
 
-Handle<Object> Execution::Call(Handle<JSFunction> func,
+Handle<Object> Execution::Call(Handle<Object> callable,
                                Handle<Object> receiver,
                                int argc,
                                Object*** args,
-                               bool* pending_exception) {
+                               bool* pending_exception,
+                               bool convert_receiver) {
+  if (!callable->IsJSFunction()) {
+    callable = TryGetFunctionDelegate(callable, pending_exception);
+    if (*pending_exception) return callable;
+  }
+  Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
+
+  // In non-strict mode, convert receiver.
+  if (convert_receiver && !receiver->IsJSReceiver() &&
+      !func->shared()->native() && !func->shared()->strict_mode()) {
+    if (receiver->IsUndefined() || receiver->IsNull()) {
+      Object* global = func->context()->global()->global_receiver();
+      // Under some circumstances, 'global' can be the JSBuiltinsObject.
+      // In that case, don't rewrite.
+      // (FWIW, the same holds for GetIsolate()->global()->global_receiver().)
+      if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global);
+    } else {
+      receiver = ToObject(receiver, pending_exception);
+    }
+    if (*pending_exception) return callable;
+  }
+
   return Invoke(false, func, receiver, argc, args, pending_exception);
 }
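
Stripped of V8's handle types, the receiver-conversion rule added to Execution::Call above reduces to a small decision function. The sketch below uses plain flags and is not V8 code; the JSBuiltinsObject special case for the global receiver is omitted.

// Sketch only: which receiver a non-strict, non-native callee should see
// when convert_receiver is set.
enum ReceiverAction {
  KEEP_RECEIVER,        // already an object, or callee is strict/native
  USE_GLOBAL_RECEIVER,  // receiver was undefined or null
  CONVERT_TO_OBJECT     // primitive receiver, wrap via ToObject
};

static ReceiverAction ChooseReceiverAction(bool convert_receiver,
                                           bool receiver_is_object,
                                           bool receiver_is_null_or_undefined,
                                           bool callee_is_strict,
                                           bool callee_is_native) {
  if (!convert_receiver || receiver_is_object ||
      callee_is_strict || callee_is_native) {
    return KEEP_RECEIVER;
  }
  return receiver_is_null_or_undefined ? USE_GLOBAL_RECEIVER
                                       : CONVERT_TO_OBJECT;
}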
 
@@ -205,27 +227,17 @@
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a function.
 
-  // Regular expressions can be called as functions in both Firefox
-  // and Safari so we allow it too.
-  if (object->IsJSRegExp()) {
-    Handle<String> exec = factory->exec_symbol();
-    // TODO(lrn): Bug 617.  We should use the default function here, not the
-    // one on the RegExp object.
-    Object* exec_function;
-    { MaybeObject* maybe_exec_function = object->GetProperty(*exec);
-      // This can lose an exception, but the alternative is to put a failure
-      // object in a handle, which is not GC safe.
-      if (!maybe_exec_function->ToObject(&exec_function)) {
-        return factory->undefined_value();
-      }
-    }
-    return Handle<Object>(exec_function);
+  // If object is a function proxy, get its handler. Iterate if necessary.
+  Object* fun = *object;
+  while (fun->IsJSFunctionProxy()) {
+    fun = JSFunctionProxy::cast(fun)->call_trap();
   }
+  if (fun->IsJSFunction()) return Handle<Object>(fun);
 
   // Objects created through the API can have an instance-call handler
   // that should be used when calling the object as a function.
-  if (object->IsHeapObject() &&
-      HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+  if (fun->IsHeapObject() &&
+      HeapObject::cast(fun)->map()->has_instance_call_handler()) {
     return Handle<JSFunction>(
         isolate->global_context()->call_as_function_delegate());
   }
@@ -234,6 +246,37 @@
 }
 
 
+Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
+                                                 bool* has_pending_exception) {
+  ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
+
+  // If object is a function proxy, get its handler. Iterate if necessary.
+  Object* fun = *object;
+  while (fun->IsJSFunctionProxy()) {
+    fun = JSFunctionProxy::cast(fun)->call_trap();
+  }
+  if (fun->IsJSFunction()) return Handle<Object>(fun);
+
+  // Objects created through the API can have an instance-call handler
+  // that should be used when calling the object as a function.
+  if (fun->IsHeapObject() &&
+      HeapObject::cast(fun)->map()->has_instance_call_handler()) {
+    return Handle<JSFunction>(
+        isolate->global_context()->call_as_function_delegate());
+  }
+
+  // If the Object doesn't have an instance-call handler we should
+  // throw a non-callable exception.
+  i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
+      "called_non_callable", i::HandleVector<i::Object>(&object, 1));
+  isolate->Throw(*error_obj);
+  *has_pending_exception = true;
+
+  return isolate->factory()->undefined_value();
+}
+
+
 Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
   ASSERT(!object->IsJSFunction());
   Isolate* isolate = Isolate::Current();
@@ -241,10 +284,17 @@
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a constructor.
 
+  // If object is a function proxy, get its handler. Iterate if necessary.
+  Object* fun = *object;
+  while (fun->IsJSFunctionProxy()) {
+    fun = JSFunctionProxy::cast(fun)->call_trap();
+  }
+  if (fun->IsJSFunction()) return Handle<Object>(fun);
+
   // Objects created through the API can have an instance-call handler
   // that should be used when calling the object as a function.
-  if (object->IsHeapObject() &&
-      HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+  if (fun->IsHeapObject() &&
+      HeapObject::cast(fun)->map()->has_instance_call_handler()) {
     return Handle<JSFunction>(
         isolate->global_context()->call_as_constructor_delegate());
   }
@@ -253,6 +303,41 @@
 }
 
 
+Handle<Object> Execution::TryGetConstructorDelegate(
+    Handle<Object> object,
+    bool* has_pending_exception) {
+  ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
+
+  // If you return a function from here, it will be called when an
+  // attempt is made to call the given object as a constructor.
+
+  // If object is a function proxy, get its handler. Iterate if necessary.
+  Object* fun = *object;
+  while (fun->IsJSFunctionProxy()) {
+    fun = JSFunctionProxy::cast(fun)->call_trap();
+  }
+  if (fun->IsJSFunction()) return Handle<Object>(fun);
+
+  // Objects created through the API can have an instance-call handler
+  // that should be used when calling the object as a function.
+  if (fun->IsHeapObject() &&
+      HeapObject::cast(fun)->map()->has_instance_call_handler()) {
+    return Handle<JSFunction>(
+        isolate->global_context()->call_as_constructor_delegate());
+  }
+
+  // If the Object doesn't have an instance-call handler we should
+  // throw a non-callable exception.
+  i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
+      "called_non_callable", i::HandleVector<i::Object>(&object, 1));
+  isolate->Throw(*error_obj);
+  *has_pending_exception = true;
+
+  return isolate->factory()->undefined_value();
+}
+
+
 bool StackGuard::IsStackOverflow() {
   ExecutionAccess access(isolate_);
   return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -272,7 +357,7 @@
   ExecutionAccess access(isolate_);
   // If the current limits are special (eg due to a pending interrupt) then
   // leave them alone.
-  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
+  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
   if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
     thread_local_.jslimit_ = jslimit;
   }
@@ -412,8 +497,9 @@
 
 
 void StackGuard::FreeThreadResources() {
-  Isolate::CurrentPerIsolateThreadData()->set_stack_limit(
-      thread_local_.real_climit_);
+  Isolate::PerIsolateThreadData* per_thread =
+      isolate_->FindOrAllocatePerThreadDataForThisThread();
+  per_thread->set_stack_limit(thread_local_.real_climit_);
 }
 
 
@@ -428,7 +514,7 @@
 }
 
 
-bool StackGuard::ThreadLocal::Initialize() {
+bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
   bool should_set_stack_limits = false;
   if (real_climit_ == kIllegalLimit) {
     // Takes the address of the limit variable in order to find out where
@@ -436,8 +522,8 @@
     const uintptr_t kLimitSize = FLAG_stack_size * KB;
     uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
     ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
-    real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
-    jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+    real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+    jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
     real_climit_ = limit;
     climit_ = limit;
     should_set_stack_limits = true;
@@ -456,12 +542,13 @@
 
 
 void StackGuard::InitThread(const ExecutionAccess& lock) {
-  if (thread_local_.Initialize()) isolate_->heap()->SetStackLimits();
-  uintptr_t stored_limit =
-      Isolate::CurrentPerIsolateThreadData()->stack_limit();
+  if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+  Isolate::PerIsolateThreadData* per_thread =
+      isolate_->FindOrAllocatePerThreadDataForThisThread();
+  uintptr_t stored_limit = per_thread->stack_limit();
   // You should hold the ExecutionAccess lock when you call this.
   if (stored_limit != 0) {
-    StackGuard::SetStackLimit(stored_limit);
+    SetStackLimit(stored_limit);
   }
 }
 
@@ -511,7 +598,7 @@
 
 
 Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
-  if (obj->IsJSObject()) return obj;
+  if (obj->IsSpecObject()) return obj;
   RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
 }
 
@@ -681,13 +768,13 @@
     isolate->debug()->PreemptionWhileInDebugger();
   } else {
     // Perform preemption.
-    v8::Unlocker unlocker;
+    v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
     Thread::YieldCPU();
   }
 #else
   { // NOLINT
     // Perform preemption.
-    v8::Unlocker unlocker;
+    v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
     Thread::YieldCPU();
   }
 #endif
diff --git a/src/execution.h b/src/execution.h
index d4b80d2..5cd7141 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -28,6 +28,8 @@
 #ifndef V8_EXECUTION_H_
 #define V8_EXECUTION_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -51,11 +53,16 @@
   // *pending_exception tells whether the invoke resulted in
   // a pending exception.
   //
-  static Handle<Object> Call(Handle<JSFunction> func,
+  // When convert_receiver is set, and the receiver is not an object,
+  // and the function called is not in strict mode, receiver is converted to
+  // an object.
+  //
+  static Handle<Object> Call(Handle<Object> callable,
                              Handle<Object> receiver,
                              int argc,
                              Object*** args,
-                             bool* pending_exception);
+                             bool* pending_exception,
+                             bool convert_receiver = false);
 
   // Construct object from function, the caller supplies an array of
   // arguments. Arguments are Object* type. After function returns,
@@ -138,10 +145,14 @@
   // Get a function delegate (or undefined) for the given non-function
   // object. Used to support calling objects as functions.
   static Handle<Object> GetFunctionDelegate(Handle<Object> object);
+  static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
+                                               bool* has_pending_exception);
 
   // Get a function delegate (or undefined) for the given non-function
   // object. Used to support calling objects as constructors.
   static Handle<Object> GetConstructorDelegate(Handle<Object> object);
+  static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
+                                                  bool* has_pending_exception);
 };
 
 
@@ -252,7 +263,7 @@
     void Clear();
 
     // Returns true if the heap's stack limits should be set, false if not.
-    bool Initialize();
+    bool Initialize(Isolate* isolate);
 
     // The stack limit is split into a JavaScript and a C++ stack limit. These
     // two are the same except when running on a simulator where the C++ and
diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc
index e8baea7..e695a3e 100644
--- a/src/extensions/experimental/break-iterator.cc
+++ b/src/extensions/experimental/break-iterator.cc
@@ -25,7 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "break-iterator.h"
+#include "src/extensions/experimental/break-iterator.h"
+
+#include <string.h>
 
 #include "unicode/brkiter.h"
 #include "unicode/locid.h"
diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h
index fac1ed8..73b9bbd 100644
--- a/src/extensions/experimental/break-iterator.h
+++ b/src/extensions/experimental/break-iterator.h
@@ -28,7 +28,7 @@
 #ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
 #define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
 
-#include <v8.h>
+#include "include/v8.h"
 
 #include "unicode/uversion.h"
 
diff --git a/src/extensions/experimental/collator.cc b/src/extensions/experimental/collator.cc
new file mode 100644
index 0000000..5cf2192
--- /dev/null
+++ b/src/extensions/experimental/collator.cc
@@ -0,0 +1,222 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/collator.h"
+
+#include "unicode/coll.h"
+#include "unicode/locid.h"
+#include "unicode/ucol.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> Collator::collator_template_;
+
+icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
+  if (collator_template_->HasInstance(obj)) {
+    return static_cast<icu::Collator*>(obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void Collator::DeleteCollator(v8::Persistent<v8::Value> object, void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a collator.
+  delete UnpackCollator(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("Collator method called on an object "
+                      "that is not a Collator.")));
+}
+
+// Extract the boolean option named by |option| and store its value in
+// |result|. Return true if the option is specified; otherwise, return false.
+static bool ExtractBooleanOption(const v8::Local<v8::Object>& options,
+                                 const char* option,
+                                 bool* result) {
+  v8::HandleScope handle_scope;
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> value = options->Get(v8::String::New(option));
+  if (try_catch.HasCaught()) {
+    return false;
+  }
+  // No need to check if |value| is empty because it's taken care of
+  // by TryCatch above.
+  if (!value->IsUndefined() && !value->IsNull()) {
+    if (value->IsBoolean()) {
+      *result = value->BooleanValue();
+      return true;
+    }
+  }
+  return false;
+}
+
+// When there's an ICU error, throw a JavaScript error with |message|.
+static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
+  return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
+}
+
+v8::Handle<v8::Value> Collator::CollatorCompare(const v8::Arguments& args) {
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Two string arguments are required.")));
+  }
+
+  icu::Collator* collator = UnpackCollator(args.Holder());
+  if (!collator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  v8::String::Value string_value1(args[0]);
+  v8::String::Value string_value2(args[1]);
+  const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
+  const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
+  UErrorCode status = U_ZERO_ERROR;
+  UCollationResult result = collator->compare(
+      string1, string_value1.length(), string2, string_value2.length(), status);
+
+  if (U_FAILURE(status)) {
+    return ThrowExceptionForICUError(
+        "Unexpected failure in Collator.compare.");
+  }
+
+  return v8::Int32::New(result);
+}
+
+v8::Handle<v8::Value> Collator::JSCollator(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale and collation options are required.")));
+  }
+
+  v8::String::AsciiValue locale(args[0]);
+  icu::Locale icu_locale(*locale);
+
+  icu::Collator* collator = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  collator = icu::Collator::createInstance(icu_locale, status);
+
+  if (U_FAILURE(status)) {
+    delete collator;
+    return ThrowExceptionForICUError("Failed to create collator.");
+  }
+
+  v8::Local<v8::Object> options(args[1]->ToObject());
+
+  // Below, we only change collation options that are explicitly specified
+  // by a caller in JavaScript. Otherwise, we leave them alone because we
+  // don't want to change the locale-dependent default values.
+  // The three options below are very likely to have the same default
+  // across locales, but I haven't checked them all. Others we may add in
+  // the future certainly have locale-dependent defaults (e.g. caseFirst
+  // is upperFirst for Danish but off for most other locales).
+
+  bool ignore_case, ignore_accents, numeric;
+
+  if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
+    // We need to explicitly set the level to secondary to get case ignored.
+    // The default L3 ignores UCOL_CASE_LEVEL == UCOL_OFF !
+    if (ignore_case) {
+      collator->setStrength(icu::Collator::SECONDARY);
+    }
+    collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
+                           status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set ignoreCase.");
+    }
+  }
+
+  // Accents are taken into account with strength secondary or higher.
+  if (ExtractBooleanOption(options, "ignoreAccents", &ignore_accents)) {
+    if (!ignore_accents) {
+      collator->setStrength(icu::Collator::SECONDARY);
+    } else {
+      collator->setStrength(icu::Collator::PRIMARY);
+    }
+  }
+
+  if (ExtractBooleanOption(options, "numeric", &numeric)) {
+    collator->setAttribute(UCOL_NUMERIC_COLLATION,
+                           numeric ? UCOL_ON : UCOL_OFF, status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set numeric sort option.");
+    }
+  }
+
+  if (collator_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+    raw_template->SetClassName(v8::String::New("v8Locale.Collator"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal fields for icu collator.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("compare"),
+               v8::FunctionTemplate::New(CollatorCompare));
+
+    collator_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      collator_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set collator as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, collator);
+
+  // Make object handle weak so we can delete iterator once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteCollator);
+
+  return wrapper;
+}
+
+} }  // namespace v8::internal
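
JSCollator and DeleteCollator above follow the usual pattern for tying a C++ object's lifetime to a JS wrapper: store the pointer in an internal field, make the persistent handle weak, and free the native object in the weak callback. A sketch of that pattern for a hypothetical NativeThing type, using only API calls that already appear in this file and assuming |templ| reserves one internal field on its instance template, as collator_template_ does:

// Sketch only: wrap/unwrap lifetime management for a hypothetical type.
#include "include/v8.h"

struct NativeThing { int value; };

static void DeleteNativeThing(v8::Persistent<v8::Value> object, void* param) {
  v8::Persistent<v8::Object> wrapper =
      v8::Persistent<v8::Object>::Cast(object);
  // Free the C++ object stored in the internal field, then drop the handle.
  delete static_cast<NativeThing*>(wrapper->GetPointerFromInternalField(0));
  wrapper.Dispose();
}

static v8::Persistent<v8::Object> WrapNativeThing(
    v8::Handle<v8::FunctionTemplate> templ, NativeThing* thing) {
  v8::Persistent<v8::Object> wrapper =
      v8::Persistent<v8::Object>::New(templ->GetFunction()->NewInstance());
  wrapper->SetPointerInInternalField(0, thing);
  // Weak handle: once only the GC references the wrapper, DeleteNativeThing
  // runs and reclaims the native object.
  wrapper.MakeWeak(NULL, DeleteNativeThing);
  return wrapper;
}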
diff --git a/src/extensions/experimental/collator.h b/src/extensions/experimental/collator.h
new file mode 100644
index 0000000..ca7e4dc
--- /dev/null
+++ b/src/extensions/experimental/collator.h
@@ -0,0 +1,68 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class Collator;
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class Collator {
+ public:
+  static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks collator object from corresponding JavaScript object.
+  static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the Collator once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
+
+  // Compares two strings and returns -1, 0 or 1 depending on
+  // whether string1 is smaller than, equal to, or larger than string2.
+  static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
+
+ private:
+  Collator() {}
+
+  static v8::Persistent<v8::FunctionTemplate> collator_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
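
The compare() binding declared above is reached from scripts through v8Locale.Collator and v8Locale.prototype.createCollator in i18n.js (later in this change). A minimal usage sketch, assuming the v8/i18n experimental extension is registered; the locale and flags are illustrative only:

    var loc = new v8Locale({'localeID': 'sr'});
    var collator = loc.createCollator({'ignoreCase': true});
    // compare() returns -1, 0 or 1, per the contract documented above.
    var sorted = ['b', 'a', 'c'].sort(function(x, y) {
      return collator.compare(x, y);
    });
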
diff --git a/src/extensions/experimental/datetime-format.cc b/src/extensions/experimental/datetime-format.cc
new file mode 100644
index 0000000..94a29ac
--- /dev/null
+++ b/src/extensions/experimental/datetime-format.cc
@@ -0,0 +1,384 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/datetime-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/smpdtfmt.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> DateTimeFormat::datetime_format_template_;
+
+static icu::DateFormat* CreateDateTimeFormat(v8::Handle<v8::String>,
+                                             v8::Handle<v8::Object>);
+static v8::Handle<v8::Value> GetSymbols(
+    const v8::Arguments&,
+    const icu::UnicodeString*, int32_t,
+    const icu::UnicodeString*, int32_t,
+    const icu::UnicodeString*, int32_t);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+static icu::DateFormat::EStyle GetDateTimeStyle(const icu::UnicodeString&);
+
+icu::SimpleDateFormat* DateTimeFormat::UnpackDateTimeFormat(
+    v8::Handle<v8::Object> obj) {
+  if (datetime_format_template_->HasInstance(obj)) {
+    return static_cast<icu::SimpleDateFormat*>(
+        obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void DateTimeFormat::DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+                                          void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a date time formatter.
+  delete UnpackDateTimeFormat(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> DateTimeFormat::Format(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  double millis = 0.0;
+  if (args.Length() != 1 || !args[0]->IsDate()) {
+    // Create a new date.
+    v8::TryCatch try_catch;
+    v8::Local<v8::Script> date_script =
+        v8::Script::Compile(v8::String::New("eval('new Date()')"));
+    millis = date_script->Run()->NumberValue();
+    if (try_catch.HasCaught()) {
+      return try_catch.ReThrow();
+    }
+  } else {
+    millis = v8::Date::Cast(*args[0])->NumberValue();
+  }
+
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  icu::UnicodeString result;
+  date_format->format(millis, result);
+
+  return v8::String::New(
+      reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetMonths(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  int32_t narrow_count;
+  const icu::UnicodeString* narrow = symbols->getMonths(
+      narrow_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::NARROW);
+  int32_t abbrev_count;
+  const icu::UnicodeString* abbrev = symbols->getMonths(
+      abbrev_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::ABBREVIATED);
+  int32_t wide_count;
+  const icu::UnicodeString* wide = symbols->getMonths(
+      wide_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::WIDE);
+
+  return GetSymbols(
+      args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetWeekdays(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  int32_t narrow_count;
+  const icu::UnicodeString* narrow = symbols->getWeekdays(
+      narrow_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::NARROW);
+  int32_t abbrev_count;
+  const icu::UnicodeString* abbrev = symbols->getWeekdays(
+      abbrev_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::ABBREVIATED);
+  int32_t wide_count;
+  const icu::UnicodeString* wide = symbols->getWeekdays(
+      wide_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::WIDE);
+
+  // getXXXWeekdays always returns 8 elements - ICU stable API.
+  // We can't use ASSERT_EQ(8, narrow_count) because ASSERT is internal to v8.
+  if (narrow_count != 8 || abbrev_count != 8 || wide_count != 8) {
+    return v8::ThrowException(v8::Exception::Error(
+        v8::String::New("Failed to get weekday information.")));
+  }
+
+  // ICU documentation says we should ignore element 0 of the returned array.
+  return GetSymbols(args, narrow + 1, narrow_count - 1, abbrev + 1,
+                    abbrev_count - 1, wide + 1, wide_count - 1);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetEras(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  int32_t narrow_count;
+  const icu::UnicodeString* narrow = symbols->getNarrowEras(narrow_count);
+  int32_t abbrev_count;
+  const icu::UnicodeString* abbrev = symbols->getEras(abbrev_count);
+  int32_t wide_count;
+  const icu::UnicodeString* wide = symbols->getEraNames(wide_count);
+
+  return GetSymbols(
+      args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetAmPm(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  // In this case narrow == abbreviated == wide
+  int32_t count;
+  const icu::UnicodeString* wide = symbols->getAmPmStrings(count);
+
+  return GetSymbols(args, wide, count, wide, count, wide, count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::JSDateTimeFormat(
+    const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale and date/time options are required.")));
+  }
+
+  icu::SimpleDateFormat* date_format = static_cast<icu::SimpleDateFormat*>(
+      CreateDateTimeFormat(args[0]->ToString(), args[1]->ToObject()));
+
+  if (datetime_format_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+    raw_template->SetClassName(v8::String::New("v8Locale.DateTimeFormat"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal field for icu date time formatter.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("format"),
+               v8::FunctionTemplate::New(Format));
+    proto->Set(v8::String::New("getMonths"),
+               v8::FunctionTemplate::New(GetMonths));
+    proto->Set(v8::String::New("getWeekdays"),
+               v8::FunctionTemplate::New(GetWeekdays));
+    proto->Set(v8::String::New("getEras"),
+               v8::FunctionTemplate::New(GetEras));
+    proto->Set(v8::String::New("getAmPm"),
+               v8::FunctionTemplate::New(GetAmPm));
+
+    datetime_format_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      datetime_format_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set date time formatter as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, date_format);
+
+  // Set resolved pattern in options.pattern.
+  icu::UnicodeString pattern;
+  date_format->toPattern(pattern);
+  v8::Local<v8::Object> options = v8::Object::New();
+  options->Set(v8::String::New("pattern"),
+               v8::String::New(reinterpret_cast<const uint16_t*>(
+                   pattern.getBuffer()), pattern.length()));
+  wrapper->Set(v8::String::New("options"), options);
+
+  // Make object handle weak so we can delete the formatter once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteDateTimeFormat);
+
+  return wrapper;
+}
+
+// Returns SimpleDateFormat.
+static icu::DateFormat* CreateDateTimeFormat(
+    v8::Handle<v8::String> locale, v8::Handle<v8::Object> settings) {
+  v8::HandleScope handle_scope;
+
+  v8::String::AsciiValue ascii_locale(locale);
+  icu::Locale icu_locale(*ascii_locale);
+
+  // Make formatter from skeleton.
+  icu::SimpleDateFormat* date_format = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  icu::UnicodeString skeleton;
+  if (I18NUtils::ExtractStringSetting(settings, "skeleton", &skeleton)) {
+    icu::DateTimePatternGenerator* generator =
+        icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+    icu::UnicodeString pattern = generator->getBestPattern(skeleton, status);
+    delete generator;
+
+    date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
+    if (U_SUCCESS(status)) {
+      return date_format;
+    } else {
+      delete date_format;
+    }
+  }
+
+  // Extract date style and time style from settings.
+  icu::UnicodeString date_style;
+  icu::DateFormat::EStyle icu_date_style = icu::DateFormat::kNone;
+  if (I18NUtils::ExtractStringSetting(settings, "dateStyle", &date_style)) {
+    icu_date_style = GetDateTimeStyle(date_style);
+  }
+
+  icu::UnicodeString time_style;
+  icu::DateFormat::EStyle icu_time_style = icu::DateFormat::kNone;
+  if (I18NUtils::ExtractStringSetting(settings, "timeStyle", &time_style)) {
+    icu_time_style = GetDateTimeStyle(time_style);
+  }
+
+  // Try all combinations of date/time styles.
+  if (icu_date_style == icu::DateFormat::kNone &&
+      icu_time_style == icu::DateFormat::kNone) {
+    // Return default format: short date, short time.
+    return icu::DateFormat::createDateTimeInstance(
+        icu::DateFormat::kShort, icu::DateFormat::kShort, icu_locale);
+  } else if (icu_date_style != icu::DateFormat::kNone &&
+             icu_time_style != icu::DateFormat::kNone) {
+    return icu::DateFormat::createDateTimeInstance(
+        icu_date_style, icu_time_style, icu_locale);
+  } else if (icu_date_style != icu::DateFormat::kNone) {
+    return icu::DateFormat::createDateInstance(icu_date_style, icu_locale);
+  } else {
+    // icu_time_style != icu::DateFormat::kNone
+    return icu::DateFormat::createTimeInstance(icu_time_style, icu_locale);
+  }
+}
+
+// Creates a v8::Array of narrow, abbrev or wide symbols.
+static v8::Handle<v8::Value> GetSymbols(const v8::Arguments& args,
+                                        const icu::UnicodeString* narrow,
+                                        int32_t narrow_count,
+                                        const icu::UnicodeString* abbrev,
+                                        int32_t abbrev_count,
+                                        const icu::UnicodeString* wide,
+                                        int32_t wide_count) {
+  v8::HandleScope handle_scope;
+
+  // Make wide width default.
+  const icu::UnicodeString* result = wide;
+  int32_t count = wide_count;
+
+  if (args.Length() == 1 && args[0]->IsString()) {
+    v8::String::AsciiValue ascii_value(args[0]);
+    if (strcmp(*ascii_value, "abbreviated") == 0) {
+      result = abbrev;
+      count = abbrev_count;
+    } else if (strcmp(*ascii_value, "narrow") == 0) {
+      result = narrow;
+      count = narrow_count;
+    }
+  }
+
+  v8::Handle<v8::Array> symbols = v8::Array::New();
+  for (int32_t i = 0; i < count; ++i) {
+    symbols->Set(i, v8::String::New(
+        reinterpret_cast<const uint16_t*>(result[i].getBuffer()),
+        result[i].length()));
+  }
+
+  return handle_scope.Close(symbols);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("DateTimeFormat method called on an object "
+                      "that is not a DateTimeFormat.")));
+}
+
+// Returns icu date/time style.
+static icu::DateFormat::EStyle GetDateTimeStyle(
+    const icu::UnicodeString& type) {
+  if (type == UNICODE_STRING_SIMPLE("medium")) {
+    return icu::DateFormat::kMedium;
+  } else if (type == UNICODE_STRING_SIMPLE("long")) {
+    return icu::DateFormat::kLong;
+  } else if (type == UNICODE_STRING_SIMPLE("full")) {
+    return icu::DateFormat::kFull;
+  }
+
+  return icu::DateFormat::kShort;
+}
+
+} }  // namespace v8::internal
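
CreateDateTimeFormat() above accepts either a 'skeleton' or a 'dateStyle'/'timeStyle' pair, and the symbol getters take an optional width argument ('narrow' or 'abbreviated'; anything else falls back to wide). A hedged sketch of both paths through the JavaScript wrapper added in i18n.js; the locale and values are examples only:

    var loc = new v8Locale({'localeID': 'en-US'});
    var styled = loc.createDateTimeFormat({'dateStyle': 'long',
                                           'timeStyle': 'short'});
    styled.format(new Date());    // locale-dependent date/time string
    styled.options.pattern;       // resolved ICU pattern set by the binding

    var skeletal = loc.createDateTimeFormat({'skeleton': 'yMMMMd'});
    skeletal.getMonths('narrow'); // stand-alone narrow month names
    skeletal.getWeekdays();       // wide weekday names, ICU element 0 dropped
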
diff --git a/src/extensions/experimental/datetime-format.h b/src/extensions/experimental/datetime-format.h
new file mode 100644
index 0000000..a6a228c
--- /dev/null
+++ b/src/extensions/experimental/datetime-format.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class SimpleDateFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class DateTimeFormat {
+ public:
+  static v8::Handle<v8::Value> JSDateTimeFormat(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks date format object from corresponding JavaScript object.
+  static icu::SimpleDateFormat* UnpackDateTimeFormat(
+      v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the DateFormat once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+                                   void* param);
+
+  // Formats date and returns corresponding string.
+  static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+  // All date time symbol methods below return stand-alone names in
+  // either narrow, abbreviated or wide width.
+
+  // Get list of months.
+  static v8::Handle<v8::Value> GetMonths(const v8::Arguments& args);
+
+  // Get list of weekdays.
+  static v8::Handle<v8::Value> GetWeekdays(const v8::Arguments& args);
+
+  // Get list of eras.
+  static v8::Handle<v8::Value> GetEras(const v8::Arguments& args);
+
+  // Get list of day periods.
+  static v8::Handle<v8::Value> GetAmPm(const v8::Arguments& args);
+
+ private:
+  DateTimeFormat();
+
+  static v8::Persistent<v8::FunctionTemplate> datetime_format_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
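
The i18n.js wrapper (below) also attaches a derive() method to each formatter so existing settings can be cloned with overrides; per its comment, passing undefined for a key drops that key from the inherited settings. A sketch under those assumptions, with illustrative names and values:

    var loc = new v8Locale({'localeID': 'de'});
    var df = loc.createDateTimeFormat({'dateStyle': 'medium',
                                       'timeStyle': 'short'});
    // Clone with a different date style; timeStyle is inherited.
    var longDate = df.derive({'dateStyle': 'long'});
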
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
index a8585fd..24fb683 100644
--- a/src/extensions/experimental/experimental.gyp
+++ b/src/extensions/experimental/experimental.gyp
@@ -39,48 +39,66 @@
       'sources': [
         'break-iterator.cc',
         'break-iterator.h',
+        'collator.cc',
+        'collator.h',
+        'datetime-format.cc',
+        'datetime-format.h',
         'i18n-extension.cc',
         'i18n-extension.h',
-	'<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+        'i18n-locale.cc',
+        'i18n-locale.h',
+        'i18n-natives.h',
+        'i18n-utils.cc',
+        'i18n-utils.h',
+        'language-matcher.cc',
+        'language-matcher.h',
+        'number-format.cc',
+        'number-format.h',
+        '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
       ],
       'include_dirs': [
         '<(icu_src_dir)/public/common',
-        '../..',
+        # v8/ is root for all includes.
+        '../../..'
       ],
       'dependencies': [
         '<(icu_src_dir)/icu.gyp:*',
-	'js2c_i18n#host',
+        'js2c_i18n#host',
         '../../../tools/gyp/v8.gyp:v8',
       ],
+      'direct_dependent_settings': {
+        # Adds -Iv8 for embedders.
+        'include_dirs': [
+          '../../..'
+        ],
+      },
     },
     {
       'target_name': 'js2c_i18n',
       'type': 'none',
       'toolsets': ['host'],
       'variables': {
-        'library_files': [
-	  'i18n.js'
-	],
+        'js_files': [
+          'i18n.js'
+        ],
       },
       'actions': [
         {
-	  'action_name': 'js2c_i18n',
-	  'inputs': [
-	    '../../../tools/js2c.py',
-	    '<@(library_files)',
-	  ],
-	  'outputs': [
-	    '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
-	    '<(SHARED_INTERMEDIATE_DIR)/i18n-js-empty.cc'
-	  ],
-	  'action': [
-	    'python',
-	    '../../../tools/js2c.py',
-	    '<@(_outputs)',
-	    'I18N',
-	    '<@(library_files)'
-	  ],
-	},
+          'action_name': 'js2c_i18n',
+          'inputs': [
+            'i18n-js2c.py',
+            '<@(js_files)',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+          ],
+          'action': [
+            'python',
+            'i18n-js2c.py',
+            '<@(_outputs)',
+            '<@(js_files)'
+          ],
+        },
       ],
     },
   ],  # targets
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
index 6e3ab15..c5afcf0 100644
--- a/src/extensions/experimental/i18n-extension.cc
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,198 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "i18n-extension.h"
+#include "src/extensions/experimental/i18n-extension.h"
 
-#include <algorithm>
-#include <string>
-
-#include "break-iterator.h"
-#include "natives.h"
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
+#include "src/extensions/experimental/break-iterator.h"
+#include "src/extensions/experimental/collator.h"
+#include "src/extensions/experimental/datetime-format.h"
+#include "src/extensions/experimental/i18n-locale.h"
+#include "src/extensions/experimental/i18n-natives.h"
+#include "src/extensions/experimental/number-format.h"
 
 namespace v8 {
 namespace internal {
 
 I18NExtension* I18NExtension::extension_ = NULL;
 
-// Returns a pointer to static string containing the actual
-// JavaScript code generated from i18n.js file.
-static const char* GetScriptSource() {
-  int index = NativesCollection<I18N>::GetIndex("i18n");
-  Vector<const char> script_data =
-      NativesCollection<I18N>::GetScriptSource(index);
-
-  return script_data.start();
-}
-
 I18NExtension::I18NExtension()
-    : v8::Extension("v8/i18n", GetScriptSource()) {
+    : v8::Extension("v8/i18n", I18Natives::GetScriptSource()) {
 }
 
 v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
     v8::Handle<v8::String> name) {
   if (name->Equals(v8::String::New("NativeJSLocale"))) {
-    return v8::FunctionTemplate::New(JSLocale);
-  } else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
-    return v8::FunctionTemplate::New(JSAvailableLocales);
-  } else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
-    return v8::FunctionTemplate::New(JSMaximizedLocale);
-  } else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
-    return v8::FunctionTemplate::New(JSMinimizedLocale);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
-    return v8::FunctionTemplate::New(JSDisplayLanguage);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
-    return v8::FunctionTemplate::New(JSDisplayScript);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
-    return v8::FunctionTemplate::New(JSDisplayRegion);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
-    return v8::FunctionTemplate::New(JSDisplayName);
+    return v8::FunctionTemplate::New(I18NLocale::JSLocale);
   } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
     return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
+  } else if (name->Equals(v8::String::New("NativeJSCollator"))) {
+    return v8::FunctionTemplate::New(Collator::JSCollator);
+  } else if (name->Equals(v8::String::New("NativeJSDateTimeFormat"))) {
+    return v8::FunctionTemplate::New(DateTimeFormat::JSDateTimeFormat);
+  } else if (name->Equals(v8::String::New("NativeJSNumberFormat"))) {
+    return v8::FunctionTemplate::New(NumberFormat::JSNumberFormat);
   }
 
   return v8::Handle<v8::FunctionTemplate>();
 }
 
-v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
-  // TODO(cira): Fetch browser locale. Accept en-US as good default for now.
-  // We could possibly pass browser locale as a parameter in the constructor.
-  std::string locale_name("en-US");
-  if (args.Length() == 1 && args[0]->IsString()) {
-    locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  }
-
-  v8::Local<v8::Object> locale = v8::Object::New();
-  locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
-
-  icu::Locale icu_locale(locale_name.c_str());
-
-  const char* language = icu_locale.getLanguage();
-  locale->Set(v8::String::New("language"), v8::String::New(language));
-
-  const char* script = icu_locale.getScript();
-  if (strlen(script)) {
-    locale->Set(v8::String::New("script"), v8::String::New(script));
-  }
-
-  const char* region = icu_locale.getCountry();
-  if (strlen(region)) {
-    locale->Set(v8::String::New("region"), v8::String::New(region));
-  }
-
-  return locale;
-}
-
-// TODO(cira): Filter out locales that Chrome doesn't support.
-v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
-    const v8::Arguments& args) {
-  v8::Local<v8::Array> all_locales = v8::Array::New();
-
-  int count = 0;
-  const icu::Locale* icu_locales = icu::Locale::getAvailableLocales(count);
-  for (int i = 0; i < count; ++i) {
-    all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
-  }
-
-  return all_locales;
-}
-
-// Use - as tag separator, not _ that ICU uses.
-static std::string NormalizeLocale(const std::string& locale) {
-  std::string result(locale);
-  // TODO(cira): remove STL dependency.
-  std::replace(result.begin(), result.end(), '_', '-');
-  return result;
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
-    const v8::Arguments& args) {
-  if (!args.Length() || !args[0]->IsString()) {
-    return v8::Undefined();
-  }
-
-  UErrorCode status = U_ZERO_ERROR;
-  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  char max_locale[ULOC_FULLNAME_CAPACITY];
-  uloc_addLikelySubtags(locale_name.c_str(), max_locale,
-                        sizeof(max_locale), &status);
-  if (U_FAILURE(status)) {
-    return v8::Undefined();
-  }
-
-  return v8::String::New(NormalizeLocale(max_locale).c_str());
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
-    const v8::Arguments& args) {
-  if (!args.Length() || !args[0]->IsString()) {
-    return v8::Undefined();
-  }
-
-  UErrorCode status = U_ZERO_ERROR;
-  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  char min_locale[ULOC_FULLNAME_CAPACITY];
-  uloc_minimizeSubtags(locale_name.c_str(), min_locale,
-                       sizeof(min_locale), &status);
-  if (U_FAILURE(status)) {
-    return v8::Undefined();
-  }
-
-  return v8::String::New(NormalizeLocale(min_locale).c_str());
-}
-
-// Common code for JSDisplayXXX methods.
-static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
-                                            const std::string& item) {
-  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
-    return v8::Undefined();
-  }
-
-  std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
-  icu::Locale icu_locale(base_locale.c_str());
-  icu::Locale display_locale =
-      icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
-  icu::UnicodeString result;
-  if (item == "language") {
-    icu_locale.getDisplayLanguage(display_locale, result);
-  } else if (item == "script") {
-    icu_locale.getDisplayScript(display_locale, result);
-  } else if (item == "region") {
-    icu_locale.getDisplayCountry(display_locale, result);
-  } else if (item == "name") {
-    icu_locale.getDisplayName(display_locale, result);
-  } else {
-    return v8::Undefined();
-  }
-
-  if (result.length()) {
-    return v8::String::New(
-        reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
-  }
-
-  return v8::Undefined();
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "language");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "script");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "region");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
-  return GetDisplayItem(args, "name");
-}
-
 I18NExtension* I18NExtension::get() {
   if (!extension_) {
     extension_ = new I18NExtension();
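
GetNativeFunction() above is what resolves the `native function` declarations made by scripts running with the v8/i18n extension; each NativeJS* name is mapped to the corresponding static binding listed in that function. A sketch of the pattern from the script side (the locale string and settings are illustrative):

    native function NativeJSDateTimeFormat();  // resolved via GetNativeFunction
    var formatter = NativeJSDateTimeFormat('en_US', {'dateStyle': 'short'});
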
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
index 54c973f..5401f25 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/src/extensions/experimental/i18n-extension.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,7 @@
 #ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
 #define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
 
-#include <v8.h>
+#include "include/v8.h"
 
 namespace v8 {
 namespace internal {
@@ -41,16 +41,6 @@
   virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
       v8::Handle<v8::String> name);
 
-  // Implementations of window.Locale methods.
-  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
-
   // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
   static void Register();
   static I18NExtension* get();
diff --git a/src/extensions/experimental/i18n-js2c.py b/src/extensions/experimental/i18n-js2c.py
new file mode 100644
index 0000000..9c3128b
--- /dev/null
+++ b/src/extensions/experimental/i18n-js2c.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for converting I18N JavaScript source code into C-style
+# char arrays. It is used for embedded JavaScript code in the V8
+# library.
+# This is a pared-down copy of v8/tools/js2c.py that avoids use of
+# v8/src/natives.h and produces a different cc template.
+
+import re, sys
+
+
+def ToCArray(lines):
+  result = []
+  for ch in lines:
+    value = ord(ch)
+    assert value < 128
+    result.append(str(value))
+  result.append("0")
+  return ", ".join(result)
+
+
+def RemoveCommentsAndTrailingWhitespace(lines):
+  lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
+  lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
+  lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
+  return lines
+
+
+def ReadFile(filename):
+  file = open(filename, "rt")
+  try:
+    lines = file.read()
+  finally:
+    file.close()
+  return lines
+
+
+EVAL_PATTERN = re.compile(r'\beval\s*\(')
+WITH_PATTERN = re.compile(r'\bwith\s*\(')
+
+
+def Validate(lines, file):
+  lines = RemoveCommentsAndTrailingWhitespace(lines)
+  # Because of simplified context setup, eval and with is not
+  # allowed in the natives files.
+  eval_match = EVAL_PATTERN.search(lines)
+  if eval_match:
+    raise ("Eval disallowed in natives: %s" % file)
+  with_match = WITH_PATTERN.search(lines)
+  if with_match:
+    raise ("With statements disallowed in natives: %s" % file)
+
+
+HEADER_TEMPLATE = """\
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+// This file was generated from .js source files by gyp.  If you
+// want to make changes to this file you should either change the
+// javascript source files or the i18n-js2c.py script.
+
+#include "src/extensions/experimental/i18n-natives.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+const char* I18Natives::GetScriptSource() {
+  // JavaScript source gets injected here.
+  static const char i18n_source[] = {%s};
+
+  return i18n_source;
+}
+
+}  // internal
+}  // v8
+"""
+
+
+def JS2C(source, target):
+  filename = str(source)
+
+  lines = ReadFile(filename)
+  Validate(lines, filename)
+  data = ToCArray(lines)
+
+  # Emit result
+  output = open(target, "w")
+  output.write(HEADER_TEMPLATE % data)
+  output.close()
+
+
+def main():
+  target = sys.argv[1]
+  source = sys.argv[2]
+  JS2C(source, target)
+
+
+if __name__ == "__main__":
+  main()
diff --git a/src/extensions/experimental/i18n-locale.cc b/src/extensions/experimental/i18n-locale.cc
new file mode 100644
index 0000000..46a5f87
--- /dev/null
+++ b/src/extensions/experimental/i18n-locale.cc
@@ -0,0 +1,111 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/i18n-locale.h"
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "src/extensions/experimental/language-matcher.h"
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const I18NLocale::kLocaleID = "localeID";
+const char* const I18NLocale::kRegionID = "regionID";
+const char* const I18NLocale::kICULocaleID = "icuLocaleID";
+
+v8::Handle<v8::Value> I18NLocale::JSLocale(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 1 || !args[0]->IsObject()) {
+    return v8::Undefined();
+  }
+
+  v8::Local<v8::Object> settings = args[0]->ToObject();
+
+  // Get best match for locale.
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> locale_id = settings->Get(v8::String::New(kLocaleID));
+  if (try_catch.HasCaught()) {
+    return v8::Undefined();
+  }
+
+  LocaleIDMatch result;
+  if (locale_id->IsArray()) {
+    LanguageMatcher::GetBestMatchForPriorityList(
+        v8::Handle<v8::Array>::Cast(locale_id), &result);
+  } else if (locale_id->IsString()) {
+    LanguageMatcher::GetBestMatchForString(locale_id->ToString(), &result);
+  } else {
+    LanguageMatcher::GetBestMatchForString(v8::String::New(""), &result);
+  }
+
+  // Get best match for region.
+  char region_id[ULOC_COUNTRY_CAPACITY];
+  I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+
+  v8::Handle<v8::Value> region = settings->Get(v8::String::New(kRegionID));
+  if (try_catch.HasCaught()) {
+    return v8::Undefined();
+  }
+
+  if (!GetBestMatchForRegionID(result.icu_id, region, region_id)) {
+    // Set region id to empty string because region couldn't be inferred.
+    I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+  }
+
+  // Build JavaScript object that contains bcp and icu locale ID and region ID.
+  v8::Handle<v8::Object> locale = v8::Object::New();
+  locale->Set(v8::String::New(kLocaleID), v8::String::New(result.bcp47_id));
+  locale->Set(v8::String::New(kICULocaleID), v8::String::New(result.icu_id));
+  locale->Set(v8::String::New(kRegionID), v8::String::New(region_id));
+
+  return handle_scope.Close(locale);
+}
+
+bool I18NLocale::GetBestMatchForRegionID(
+    const char* locale_id, v8::Handle<v8::Value> region_id, char* result) {
+  if (region_id->IsString() && region_id->ToString()->Length() != 0) {
+    icu::Locale user_locale(
+        icu::Locale("und", *v8::String::Utf8Value(region_id->ToString())));
+    I18NUtils::StrNCopy(
+        result, ULOC_COUNTRY_CAPACITY, user_locale.getCountry());
+    return true;
+  }
+  // Maximize locale_id to infer the region (e.g. expand "de" to "de-Latn-DE"
+  // and grab "DE" from the result).
+  UErrorCode status = U_ZERO_ERROR;
+  char maximized_locale[ULOC_FULLNAME_CAPACITY];
+  uloc_addLikelySubtags(
+      locale_id, maximized_locale, ULOC_FULLNAME_CAPACITY, &status);
+  uloc_getCountry(maximized_locale, result, ULOC_COUNTRY_CAPACITY, &status);
+
+  return !U_FAILURE(status);
+}
+
+} }  // namespace v8::internal
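
JSLocale() above resolves the requested localeID (a string or a priority list) together with the region, and i18n.js surfaces the result through v8Locale's options. A brief sketch, assuming the extension is registered; 'sr-Latn' and 'RS' are example inputs:

    var loc = new v8Locale({'localeID': 'sr-Latn', 'regionID': 'RS'});
    loc.options.localeID;   // best-match BCP47 locale ID
    loc.options.regionID;   // canonicalized region, inferred when omitted
    var shorthand = new v8Locale('de');  // same as {'localeID': 'de'}
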
diff --git a/src/frame-element.cc b/src/extensions/experimental/i18n-locale.h
similarity index 62%
copy from src/frame-element.cc
copy to src/extensions/experimental/i18n-locale.h
index f629900..607818c 100644
--- a/src/frame-element.cc
+++ b/src/extensions/experimental/i18n-locale.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,36 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
 
-#include "frame-element.h"
-#include "zone-inl.h"
+#include "include/v8.h"
 
 namespace v8 {
 namespace internal {
 
+class I18NLocale {
+ public:
+  I18NLocale() {}
+
+  // Implementations of window.Locale methods.
+  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
+
+  // Infers region id given the locale id, or uses user specified region id.
+  // Result is canonicalized.
+  // Returns status of ICU operation (maximizing locale or get region call).
+  static bool GetBestMatchForRegionID(
+      const char* locale_id, v8::Handle<v8::Value> regions, char* result);
+
+ private:
+  // Key name for localeID parameter.
+  static const char* const kLocaleID;
+  // Key name for regionID parameter.
+  static const char* const kRegionID;
+  // Key name for the icuLocaleID result.
+  static const char* const kICULocaleID;
+};
 
 } }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
diff --git a/src/frame-element.cc b/src/extensions/experimental/i18n-natives.h
similarity index 80%
copy from src/frame-element.cc
copy to src/extensions/experimental/i18n-natives.h
index f629900..37362d0 100644
--- a/src/frame-element.cc
+++ b/src/extensions/experimental/i18n-natives.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
-
-#include "frame-element.h"
-#include "zone-inl.h"
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
 
 namespace v8 {
 namespace internal {
 
+class I18Natives {
+ public:
+  // Gets script source from generated file.
+  // Source is statically allocated string.
+  static const char* GetScriptSource();
+};
 
 } }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
diff --git a/src/extensions/experimental/i18n-utils.cc b/src/extensions/experimental/i18n-utils.cc
new file mode 100644
index 0000000..dc2be1a
--- /dev/null
+++ b/src/extensions/experimental/i18n-utils.cc
@@ -0,0 +1,87 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/i18n-utils.h"
+
+#include <string.h>
+
+#include "unicode/unistr.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+void I18NUtils::StrNCopy(char* dest, int length, const char* src) {
+  if (!dest || !src) return;
+
+  strncpy(dest, src, length);
+  dest[length - 1] = '\0';
+}
+
+// static
+bool I18NUtils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+                                     const char* setting,
+                                     icu::UnicodeString* result) {
+  if (!setting || !result) return false;
+
+  v8::HandleScope handle_scope;
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
+  if (try_catch.HasCaught()) {
+    return false;
+  }
+  // No need to check if |value| is empty because it's taken care of
+  // by TryCatch above.
+  if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
+    v8::String::Utf8Value utf8_value(value);
+    if (*utf8_value == NULL) return false;
+    result->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
+    return true;
+  }
+  return false;
+}
+
+// static
+void I18NUtils::AsciiToUChar(const char* source,
+                             int32_t source_length,
+                             UChar* target,
+                             int32_t target_length) {
+  int32_t length =
+      source_length < target_length ? source_length : target_length;
+
+  if (length <= 0) {
+    return;
+  }
+
+  for (int32_t i = 0; i < length - 1; ++i) {
+    target[i] = static_cast<UChar>(source[i]);
+  }
+
+  target[length - 1] = 0x0u;
+}
+
+} }  // namespace v8::internal
diff --git a/src/extensions/experimental/i18n-utils.h b/src/extensions/experimental/i18n-utils.h
new file mode 100644
index 0000000..7c31528
--- /dev/null
+++ b/src/extensions/experimental/i18n-utils.h
@@ -0,0 +1,69 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class I18NUtils {
+ public:
+  // Safe string copy. Null terminates the destination. Copies at most
+  // (length - 1) bytes.
+  // We can't use snprintf since it's not supported on all relevant platforms.
+  // We can't use OS::SNPrintF because it's only for internal code.
+  static void StrNCopy(char* dest, int length, const char* src);
+
+  // Extracts the string setting |setting| from |settings| into |result|.
+  // Returns true if the setting is present, false otherwise.
+  static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+                                   const char* setting,
+                                   icu::UnicodeString* result);
+
+  // Converts ASCII array into UChar array.
+  // Target is always \0 terminated.
+  static void AsciiToUChar(const char* source,
+                           int32_t source_length,
+                           UChar* target,
+                           int32_t target_length);
+
+ private:
+  I18NUtils() {}
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
diff --git a/src/extensions/experimental/i18n.js b/src/extensions/experimental/i18n.js
index baf3859..56bcf9e 100644
--- a/src/extensions/experimental/i18n.js
+++ b/src/extensions/experimental/i18n.js
@@ -25,70 +25,72 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// TODO(cira): Remove v8 prefix from v8Locale once we have stable API.
-v8Locale = function(optLocale) {
+// TODO(cira): Rename v8Locale into LocaleInfo once we have stable API.
+/**
+ * LocaleInfo class is an aggregate class of all i18n API calls.
+ * @param {Object} settings - localeID and regionID to create LocaleInfo from.
+ *   {Array.<string>|string} settings.localeID -
+ *     Unicode identifier of the locale.
+ *     See http://unicode.org/reports/tr35/#BCP_47_Conformance
+ *   {string} settings.regionID - ISO3166 region ID with addition of
+ *     invalid, undefined and reserved region codes.
+ * @constructor
+ */
+v8Locale = function(settings) {
   native function NativeJSLocale();
-  var properties = NativeJSLocale(optLocale);
-  this.locale = properties.locale;
-  this.language = properties.language;
-  this.script = properties.script;
-  this.region = properties.region;
-};
 
-v8Locale.availableLocales = function() {
-  native function NativeJSAvailableLocales();
-  return NativeJSAvailableLocales();
-};
-
-v8Locale.prototype.maximizedLocale = function() {
-  native function NativeJSMaximizedLocale();
-  return new v8Locale(NativeJSMaximizedLocale(this.locale));
-};
-
-v8Locale.prototype.minimizedLocale = function() {
-  native function NativeJSMinimizedLocale();
-  return new v8Locale(NativeJSMinimizedLocale(this.locale));
-};
-
-v8Locale.prototype.displayLocale_ = function(displayLocale) {
-  var result = this.locale;
-  if (displayLocale !== undefined) {
-    result = displayLocale.locale;
+  // Assume user wanted to do v8Locale("sr");
+  if (typeof(settings) === "string") {
+    settings = {'localeID': settings};
   }
-  return result;
+
+  var properties = NativeJSLocale(
+      v8Locale.__createSettingsOrDefault(settings, {'localeID': 'root'}));
+
+  // Keep the resolved ICU locale ID around to avoid resolving localeID to
+  // ICU locale ID every time BreakIterator, Collator and so forth are called.
+  this.__icuLocaleID = properties.icuLocaleID;
+  this.options = {'localeID': properties.localeID,
+                  'regionID': properties.regionID};
 };
 
-v8Locale.prototype.displayLanguage = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayLanguage();
-  return NativeJSDisplayLanguage(this.locale, displayLocale);
+/**
+ * Clones existing locale with possible overrides for some of the options.
+ * @param {!Object} settings - overrides for current locale settings.
+ * @returns {Object} - new LocaleInfo object.
+ */
+v8Locale.prototype.derive = function(settings) {
+  return new v8Locale(
+      v8Locale.__createSettingsOrDefault(settings, this.options));
 };
 
-v8Locale.prototype.displayScript = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayScript();
-  return NativeJSDisplayScript(this.locale, displayLocale);
-};
-
-v8Locale.prototype.displayRegion = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayRegion();
-  return NativeJSDisplayRegion(this.locale, displayLocale);
-};
-
-v8Locale.prototype.displayName = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayName();
-  return NativeJSDisplayName(this.locale, displayLocale);
-};
-
+/**
+ * v8BreakIterator class implements locale-aware segmentation.
+ * It is not part of EcmaScript proposal.
+ * @param {Object} locale - locale object to pass to break
+ *   iterator implementation.
+ * @param {string} type - type of segmentation:
+ *   - character
+ *   - word
+ *   - sentence
+ *   - line
+ * @private
+ * @constructor
+ */
 v8Locale.v8BreakIterator = function(locale, type) {
   native function NativeJSBreakIterator();
-  var iterator = NativeJSBreakIterator(locale, type);
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  // BCP47 ID would work in this case, but we use ICU locale for consistency.
+  var iterator = NativeJSBreakIterator(locale.__icuLocaleID, type);
   iterator.type = type;
   return iterator;
 };
 
+/**
+ * Type of the break we encountered during previous iteration.
+ * @type{Enum}
+ */
 v8Locale.v8BreakIterator.BreakType = {
   'unknown': -1,
   'none': 0,
@@ -98,6 +100,281 @@
   'ideo': 400
 };
 
+/**
+ * Creates new v8BreakIterator based on current locale.
+ * @param {string} type - type of segmentation. See constructor.
+ * @returns {Object} - new v8BreakIterator object.
+ */
 v8Locale.prototype.v8CreateBreakIterator = function(type) {
-  return new v8Locale.v8BreakIterator(this.locale, type);
+  return new v8Locale.v8BreakIterator(this, type);
+};
+
+// TODO(jungshik): Set |collator.options| to actually recognized / resolved
+// values.
+/**
+ * Collator class implements locale-aware sort.
+ * @param {Object} locale - locale object to pass to collator implementation.
+ * @param {Object} settings - collation flags:
+ *   - ignoreCase
+ *   - ignoreAccents
+ *   - numeric
+ * @private
+ * @constructor
+ */
+v8Locale.Collator = function(locale, settings) {
+  native function NativeJSCollator();
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  var collator = NativeJSCollator(
+      locale.__icuLocaleID, v8Locale.__createSettingsOrDefault(settings, {}));
+  return collator;
+};
+
+/**
+ * Creates new Collator based on current locale.
+ * @param {Object} settings - collation flags. See constructor.
+ * @returns {Object} - new Collator object.
+ */
+v8Locale.prototype.createCollator = function(settings) {
+  return new v8Locale.Collator(this, settings);
+};
+
+/**
+ * DateTimeFormat class implements locale-aware date and time formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ *   - skeleton
+ *   - dateStyle
+ *   - timeStyle
+ * @private
+ * @constructor
+ */
+v8Locale.__DateTimeFormat = function(locale, settings) {
+  native function NativeJSDateTimeFormat();
+
+  settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+  var cleanSettings = {};
+  if (settings.hasOwnProperty('skeleton')) {
+    cleanSettings['skeleton'] = settings['skeleton'];
+  } else {
+    cleanSettings = {};
+    if (settings.hasOwnProperty('dateStyle')) {
+      var ds = settings['dateStyle'];
+      if (!/^(short|medium|long|full)$/.test(ds)) ds = 'short';
+      cleanSettings['dateStyle'] = ds;
+    } else if (settings.hasOwnProperty('dateType')) {
+      // Obsolete. New spec requires dateStyle, but we'll keep this around
+      // for current users.
+      // TODO(cira): Remove when all internal users switch to dateStyle.
+      var dt = settings['dateType'];
+      if (!/^(short|medium|long|full)$/.test(dt)) dt = 'short';
+      cleanSettings['dateStyle'] = dt;
+    }
+
+    if (settings.hasOwnProperty('timeStyle')) {
+      var ts = settings['timeStyle'];
+      if (!/^(short|medium|long|full)$/.test(ts)) ts = 'short';
+      cleanSettings['timeStyle'] = ts;
+    } else if (settings.hasOwnProperty('timeType')) {
+      // TODO(cira): Remove when all internal users switch to timeStyle.
+      var tt = settings['timeType'];
+      if (!/^(short|medium|long|full)$/.test(tt)) tt = 'short';
+      cleanSettings['timeStyle'] = tt;
+    }
+  }
+
+  // Default is to show short date and time.
+  if (!cleanSettings.hasOwnProperty('skeleton') &&
+      !cleanSettings.hasOwnProperty('dateStyle') &&
+      !cleanSettings.hasOwnProperty('timeStyle')) {
+    cleanSettings = {'dateStyle': 'short',
+                     'timeStyle': 'short'};
+  }
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  var formatter = NativeJSDateTimeFormat(locale.__icuLocaleID, cleanSettings);
+
+  // NativeJSDateTimeFormat creates formatter.options for us; we just need
+  // to append the actual settings to it.
+  for (var key in cleanSettings) {
+    formatter.options[key] = cleanSettings[key];
+  }
+
+  /**
+   * Clones existing date time format with possible overrides for some
+   * of the options.
+   * @param {!Object} overrideSettings - overrides for current format settings.
+   * @returns {Object} - new DateTimeFormat object.
+   * @public
+   */
+  formatter.derive = function(overrideSettings) {
+    // To remove a setting, the user can specify undefined as its value.
+    // We'll remove it from the map in that case.
+    for (var prop in overrideSettings) {
+      if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+        delete settings[prop];
+      }
+    }
+    return new v8Locale.__DateTimeFormat(
+        locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+  };
+
+  return formatter;
+};
+
+/**
+ * Creates new DateTimeFormat based on current locale.
+ * @param {Object} settings - formatting flags. See constructor.
+ * @returns {Object} - new DateTimeFormat object.
+ */
+v8Locale.prototype.createDateTimeFormat = function(settings) {
+  return new v8Locale.__DateTimeFormat(this, settings);
+};
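// Usage sketch (illustration only, not part of this patch): formatting a date
// and deriving a new format. The format method name is assumed from the
// experimental date-time-format binding.
var df = new v8Locale().createDateTimeFormat({dateStyle: 'long',
                                              timeStyle: 'short'});
var text = df.format(new Date());  // assumed binding method
// Overriding an option with undefined removes it from the derived format,
// so this clone formats the date part only.
var dateOnly = df.derive({timeStyle: undefined});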
+
+/**
+ * NumberFormat class implements locale-aware number formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ *   - skeleton
+ *   - pattern
+ *   - style - decimal, currency, percent or scientific
+ *   - currencyCode - ISO 4217 3-letter currency code
+ * @private
+ * @constructor
+ */
+v8Locale.__NumberFormat = function(locale, settings) {
+  native function NativeJSNumberFormat();
+
+  settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+  var cleanSettings = {};
+  if (settings.hasOwnProperty('skeleton')) {
+    // Assign skeleton to cleanSettings and fix an invalid currency pattern
+    // if present - 'ooxo' becomes 'o', where 'o' stands for U+00A4.
+    cleanSettings['skeleton'] =
+        settings['skeleton'].replace(/\u00a4+[^\u00a4]+\u00a4+/g, '\u00a4');
+  } else if (settings.hasOwnProperty('pattern')) {
+    cleanSettings['pattern'] = settings['pattern'];
+  } else if (settings.hasOwnProperty('style')) {
+    var style = settings['style'];
+    if (!/^(decimal|currency|percent|scientific)$/.test(style)) {
+      style = 'decimal';
+    }
+    cleanSettings['style'] = style;
+  }
+
+  // Default is to show decimal style.
+  if (!cleanSettings.hasOwnProperty('skeleton') &&
+      !cleanSettings.hasOwnProperty('pattern') &&
+      !cleanSettings.hasOwnProperty('style')) {
+    cleanSettings = {'style': 'decimal'};
+  }
+
+  // Add currency code if available and valid (3-letter ASCII code).
+  if (settings.hasOwnProperty('currencyCode') &&
+      /^[a-zA-Z]{3}$/.test(settings['currencyCode'])) {
+    cleanSettings['currencyCode'] = settings['currencyCode'].toUpperCase();
+  }
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  // Pass in region ID for proper currency detection. Use ZZ if region is empty.
+  var region = locale.options.regionID !== '' ? locale.options.regionID : 'ZZ';
+  var formatter = NativeJSNumberFormat(
+      locale.__icuLocaleID, 'und_' + region, cleanSettings);
+
+  // ICU doesn't always uppercase the currency code.
+  if (formatter.options.hasOwnProperty('currencyCode')) {
+    formatter.options['currencyCode'] =
+        formatter.options['currencyCode'].toUpperCase();
+  }
+
+  for (var key in cleanSettings) {
+    // Don't overwrite keys that are already there.
+    if (formatter.options.hasOwnProperty(key)) continue;
+
+    formatter.options[key] = cleanSettings[key];
+  }
+
+  /**
+   * Clones existing number format with possible overrides for some
+   * of the options.
+   * @param {!Object} overrideSettings - overrides for current format settings.
+   * @returns {Object} - new or cached NumberFormat object.
+   * @public
+   */
+  formatter.derive = function(overrideSettings) {
+    // To remove a setting, the user can specify undefined as its value.
+    // We'll remove it from the map in that case.
+    for (var prop in overrideSettings) {
+      if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+        delete settings[prop];
+      }
+    }
+    return new v8Locale.__NumberFormat(
+        locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+  };
+
+  return formatter;
+};
+
+/**
+ * Creates new NumberFormat based on current locale.
+ * @param {Object} settings - formatting flags. See constructor.
+ * @returns {Object} - new or cached NumberFormat object.
+ */
+v8Locale.prototype.createNumberFormat = function(settings) {
+  return new v8Locale.__NumberFormat(this, settings);
+};
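// Usage sketch (illustration only, not part of this patch): currency
// formatting. The format method is registered by number-format.cc below.
var nf = new v8Locale().createNumberFormat({style: 'currency',
                                            currencyCode: 'eur'});
var price = nf.format(1234.5);
// nf.options.currencyCode is normalized to 'EUR', and
// nf.options.v8ResolvedPattern shows the pattern ICU actually chose.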
+
+/**
+ * Merges user settings and defaults.
+ * Settings that are not of object type are rejected.
+ * Actual property values are not validated, but whitespace is trimmed if they
+ * are strings.
+ * @param {!Object} settings - user provided settings.
+ * @param {!Object} defaults - default values for this type of settings.
+ * @returns {Object} - valid settings object.
+ * @private
+ */
+v8Locale.__createSettingsOrDefault = function(settings, defaults) {
+  if (!settings || typeof(settings) !== 'object') {
+    return defaults;
+  }
+  for (var key in defaults) {
+    if (!settings.hasOwnProperty(key)) {
+      settings[key] = defaults[key];
+    }
+  }
+  // Clean up settings.
+  for (var key in settings) {
+    // Trim whitespace.
+    if (typeof(settings[key]) === 'string') {
+      settings[key] = settings[key].trim();
+    }
+    // Remove all properties that are set to undefined/null. This allows the
+    // derive method to remove a setting we don't need anymore.
+    if (!settings[key]) {
+      delete settings[key];
+    }
+  }
+
+  return settings;
+};
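// Behavior sketch (illustration only, not part of this patch): defaults fill
// in missing keys, string values are trimmed, and undefined/null values are
// dropped.
var merged = v8Locale.__createSettingsOrDefault(
    {dateStyle: ' long ', timeStyle: undefined},
    {dateStyle: 'short', timeStyle: 'short'});
// merged is {dateStyle: 'long'}: dateStyle was trimmed, and timeStyle was
// already present (as undefined), so the default was not copied and the key
// was then removed.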
+
+/**
+ * If the locale is valid (defined and of v8Locale type) we return it. If
+ * not, we create a default locale and return it.
+ * @param {!Object} locale - user provided locale.
+ * @returns {Object} - v8Locale object.
+ * @private
+ */
+v8Locale.__createLocaleOrDefault = function(locale) {
+  if (!locale || !(locale instanceof v8Locale)) {
+    return new v8Locale();
+  } else {
+    return locale;
+  }
 };
diff --git a/src/extensions/experimental/language-matcher.cc b/src/extensions/experimental/language-matcher.cc
new file mode 100644
index 0000000..127e571
--- /dev/null
+++ b/src/extensions/experimental/language-matcher.cc
@@ -0,0 +1,252 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(cira): Remove LanguageMatcher from v8 when ICU implements
+// language matching API.
+
+#include "src/extensions/experimental/language-matcher.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/datefmt.h"  // For getAvailableLocales
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+const unsigned int LanguageMatcher::kLanguageWeight = 75;
+const unsigned int LanguageMatcher::kScriptWeight = 20;
+const unsigned int LanguageMatcher::kRegionWeight = 5;
+const unsigned int LanguageMatcher::kThreshold = 50;
+const unsigned int LanguageMatcher::kPositionBonus = 1;
+const char* const LanguageMatcher::kDefaultLocale = "root";
+
+static const char* GetLanguageException(const char*);
+static bool BCP47ToICUFormat(const char*, char*);
+static int CompareLocaleSubtags(const char*, const char*);
+static bool BuildLocaleName(const char*, const char*, LocaleIDMatch*);
+
+LocaleIDMatch::LocaleIDMatch()
+    : score(-1) {
+  I18NUtils::StrNCopy(
+      bcp47_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+
+  I18NUtils::StrNCopy(
+      icu_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+}
+
+LocaleIDMatch& LocaleIDMatch::operator=(const LocaleIDMatch& rhs) {
+  I18NUtils::StrNCopy(this->bcp47_id, ULOC_FULLNAME_CAPACITY, rhs.bcp47_id);
+  I18NUtils::StrNCopy(this->icu_id, ULOC_FULLNAME_CAPACITY, rhs.icu_id);
+  this->score = rhs.score;
+
+  return *this;
+}
+
+// static
+void LanguageMatcher::GetBestMatchForPriorityList(
+    v8::Handle<v8::Array> locales, LocaleIDMatch* result) {
+  v8::HandleScope handle_scope;
+
+  unsigned int position_bonus = locales->Length() * kPositionBonus;
+
+  int max_score = 0;
+  LocaleIDMatch match;
+  for (unsigned int i = 0; i < locales->Length(); ++i) {
+    position_bonus -= kPositionBonus;
+
+    v8::TryCatch try_catch;
+    v8::Local<v8::Value> locale_id = locales->Get(v8::Integer::New(i));
+
+    // Return default if exception is raised when reading parameter.
+    if (try_catch.HasCaught()) break;
+
+    // JavaScript arrays can be heterogeneous, so check whether each item
+    // is a string.
+    if (!locale_id->IsString()) continue;
+
+    if (!CompareToSupportedLocaleIDList(locale_id->ToString(), &match)) {
+      continue;
+    }
+
+    // Skip items under threshold.
+    if (match.score < kThreshold) continue;
+
+    match.score += position_bonus;
+    if (match.score > max_score) {
+      *result = match;
+
+      max_score = match.score;
+    }
+  }
+}
+
+// static
+void LanguageMatcher::GetBestMatchForString(
+    v8::Handle<v8::String> locale, LocaleIDMatch* result) {
+  LocaleIDMatch match;
+
+  if (CompareToSupportedLocaleIDList(locale, &match) &&
+      match.score >= kThreshold) {
+    *result = match;
+  }
+}
+
+// static
+bool LanguageMatcher::CompareToSupportedLocaleIDList(
+    v8::Handle<v8::String> locale_id, LocaleIDMatch* result) {
+  static int32_t available_count = 0;
+  // Depending on how ICU data is built, locales returned by
+  // Locale::getAvailableLocales() are not guaranteed to support DateFormat,
+  // Collation and other services.  We can call getAvailableLocales() on all
+  // the services we want to support and take the intersection of them all,
+  // but using DateFormat::getAvailableLocales() should suffice.
+  // TODO(cira): Maybe make this thread-safe?
+  static const icu::Locale* available_locales =
+      icu::DateFormat::getAvailableLocales(available_count);
+
+  // Skip this locale_id if it's not in ASCII.
+  static LocaleIDMatch default_match;
+  v8::String::AsciiValue ascii_value(locale_id);
+  if (*ascii_value == NULL) return false;
+
+  char locale[ULOC_FULLNAME_CAPACITY];
+  if (!BCP47ToICUFormat(*ascii_value, locale)) return false;
+
+  icu::Locale input_locale(locale);
+
+  // Position of the best match locale in list of available locales.
+  int position = -1;
+  const char* language = GetLanguageException(input_locale.getLanguage());
+  const char* script = input_locale.getScript();
+  const char* region = input_locale.getCountry();
+  for (int32_t i = 0; i < available_count; ++i) {
+    int current_score = 0;
+    int sign =
+        CompareLocaleSubtags(language, available_locales[i].getLanguage());
+    current_score += sign * kLanguageWeight;
+
+    sign = CompareLocaleSubtags(script, available_locales[i].getScript());
+    current_score += sign * kScriptWeight;
+
+    sign = CompareLocaleSubtags(region, available_locales[i].getCountry());
+    current_score += sign * kRegionWeight;
+
+    if (current_score >= kThreshold && current_score > result->score) {
+      result->score = current_score;
+      position = i;
+    }
+  }
+
+  // Didn't find any good matches so use defaults.
+  if (position == -1) return false;
+
+  return BuildLocaleName(available_locales[position].getBaseName(),
+                         input_locale.getName(), result);
+}
+
+// For some unsupported language subtags it is better to fall back to a
+// related supported language than to the default.
+static const char* GetLanguageException(const char* language) {
+  // Serbo-Croatian to Serbian.
+  if (!strcmp(language, "sh")) return "sr";
+
+  // Norwegian to Norwegian Bokmal.
+  if (!strcmp(language, "no")) return "nb";
+
+  // Moldavian to Romanian.
+  if (!strcmp(language, "mo")) return "ro";
+
+  // Tagalog to Filipino.
+  if (!strcmp(language, "tl")) return "fil";
+
+  return language;
+}
+
+// Converts user input from BCP47 locale id format to ICU compatible format.
+// Returns false if uloc_forLanguageTag call fails or if extension is too long.
+static bool BCP47ToICUFormat(const char* locale_id, char* result) {
+  UErrorCode status = U_ZERO_ERROR;
+  int32_t locale_size = 0;
+
+  char locale[ULOC_FULLNAME_CAPACITY];
+  I18NUtils::StrNCopy(locale, ULOC_FULLNAME_CAPACITY, locale_id);
+
+  // uloc_forLanguageTag has a bug where a long extension can crash the code.
+  // We need to check that the extension part of the language id conforms to
+  // the length limit.
+  // ICU bug: http://bugs.icu-project.org/trac/ticket/8519
+  const char* extension = strstr(locale_id, "-u-");
+  if (extension != NULL &&
+      strlen(extension) > ULOC_KEYWORD_AND_VALUES_CAPACITY) {
+    // Truncate to get non-crashing string, but still preserve base language.
+    int base_length = strlen(locale_id) - strlen(extension);
+    locale[base_length] = '\0';
+  }
+
+  uloc_forLanguageTag(locale, result, ULOC_FULLNAME_CAPACITY,
+                      &locale_size, &status);
+  return !U_FAILURE(status);
+}
+
+// Compares locale id subtags.
+// Returns 1 for match or -1 for mismatch.
+static int CompareLocaleSubtags(const char* lsubtag, const char* rsubtag) {
+  return strcmp(lsubtag, rsubtag) == 0 ? 1 : -1;
+}
+
+// Builds a BCP47 compliant locale id from the base name of the matched
+// locale and the full user-specified locale.
+// Returns false if uloc_toLanguageTag failed to convert locale id.
+// Example:
+//   base_name of matched locale (ICU ID): de_DE
+//   input_locale_name (ICU ID): de_AT@collation=phonebk
+//   result (ICU ID): de_DE@collation=phonebk
+//   result (BCP47 ID): de-DE-u-co-phonebk
+static bool BuildLocaleName(const char* base_name,
+                            const char* input_locale_name,
+                            LocaleIDMatch* result) {
+  I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
+
+  // Get extensions (if any) from the original locale.
+  const char* extension = strchr(input_locale_name, ULOC_KEYWORD_SEPARATOR);
+  if (extension != NULL) {
+    I18NUtils::StrNCopy(result->icu_id + strlen(base_name),
+                        ULOC_KEYWORD_AND_VALUES_CAPACITY, extension);
+  } else {
+    I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
+  }
+
+  // Convert ICU locale name into BCP47 format.
+  UErrorCode status = U_ZERO_ERROR;
+  uloc_toLanguageTag(result->icu_id, result->bcp47_id,
+                     ULOC_FULLNAME_CAPACITY, false, &status);
+  return !U_FAILURE(status);
+}
+
+} }  // namespace v8::internal
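The matcher above is a weighted comparison of language, script and region subtags. A small JavaScript sketch of the scoring arithmetic, for illustration only (the matchScore helper is hypothetical and not part of the extension):

// +weight on a subtag match, -weight on a mismatch; scores below the
// threshold of 50 are rejected, and a small per-position bonus breaks ties
// in the priority list.
function matchScore(requested, available) {
  var score = 0;
  score += (requested.language === available.language ? 1 : -1) * 75;
  score += (requested.script === available.script ? 1 : -1) * 20;
  score += (requested.region === available.region ? 1 : -1) * 5;
  return score;
}
// 'sr-RS' against an available 'sr': 75 + 20 - 5 = 90 (accepted).
// 'sr-RS' against 'hr': -75 + 20 - 5 = -60 (rejected).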
diff --git a/src/extensions/experimental/language-matcher.h b/src/extensions/experimental/language-matcher.h
new file mode 100644
index 0000000..dd29304
--- /dev/null
+++ b/src/extensions/experimental/language-matcher.h
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+
+#include "include/v8.h"
+
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+struct LocaleIDMatch {
+  LocaleIDMatch();
+
+  LocaleIDMatch& operator=(const LocaleIDMatch& rhs);
+
+  // BCP47 locale id - "de-Latn-DE-u-co-phonebk".
+  char bcp47_id[ULOC_FULLNAME_CAPACITY];
+
+  // ICU locale id - "de_Latn_DE@collation=phonebk".
+  char icu_id[ULOC_FULLNAME_CAPACITY];
+
+  // Score for this locale.
+  int score;
+};
+
+class LanguageMatcher {
+ public:
+  // Default locale.
+  static const char* const kDefaultLocale;
+
+  // Finds the best supported locale for a given list of locale identifiers.
+  // It preserves the extension for the locale id.
+  static void GetBestMatchForPriorityList(
+      v8::Handle<v8::Array> locale_list, LocaleIDMatch* result);
+
+  // Finds best supported locale for a single locale identifier.
+  // It preserves the extension for the locale id.
+  static void GetBestMatchForString(
+      v8::Handle<v8::String> locale_id, LocaleIDMatch* result);
+
+ private:
+  // If language subtags match, add this amount to the score.
+  static const unsigned int kLanguageWeight;
+
+  // If script subtags match add this amount to the score.
+  static const unsigned int kScriptWeight;
+
+  // If region subtags match add this amount to the score.
+  static const unsigned int kRegionWeight;
+
+  // LocaleID match score has to be over this number to accept the match.
+  static const unsigned int kThreshold;
+
+  // For breaking ties in priority queue.
+  static const unsigned int kPositionBonus;
+
+  LanguageMatcher();
+
+  // Compares locale_id to the supported list of locales and returns best
+  // match.
+  // Returns false if it fails to convert locale id from ICU to BCP47 format.
+  static bool CompareToSupportedLocaleIDList(v8::Handle<v8::String> locale_id,
+                                             LocaleIDMatch* result);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
diff --git a/src/extensions/experimental/number-format.cc b/src/extensions/experimental/number-format.cc
new file mode 100644
index 0000000..2932c52
--- /dev/null
+++ b/src/extensions/experimental/number-format.cc
@@ -0,0 +1,374 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/number-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/uchar.h"
+#include "unicode/ucurr.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+
+namespace v8 {
+namespace internal {
+
+const int NumberFormat::kCurrencyCodeLength = 4;
+
+v8::Persistent<v8::FunctionTemplate> NumberFormat::number_format_template_;
+
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String>,
+                                              v8::Handle<v8::String>,
+                                              v8::Handle<v8::Object>);
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+    const icu::Locale&, const icu::UnicodeString&, UErrorCode*);
+static icu::DecimalFormatSymbols* GetFormatSymbols(const icu::Locale&);
+static bool GetCurrencyCode(const icu::Locale&,
+                            const char* const,
+                            v8::Handle<v8::Object>,
+                            UChar*);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
+    v8::Handle<v8::Object> obj) {
+  if (number_format_template_->HasInstance(obj)) {
+    return static_cast<icu::DecimalFormat*>(
+        obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void NumberFormat::DeleteNumberFormat(v8::Persistent<v8::Value> object,
+                                      void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a number formatter.
+  delete UnpackNumberFormat(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> NumberFormat::Format(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 1 || !args[0]->IsNumber()) {
+    // Just return NaN on invalid input.
+    return v8::String::New("NaN");
+  }
+
+  icu::DecimalFormat* number_format = UnpackNumberFormat(args.Holder());
+  if (!number_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  // ICU will handle actual NaN value properly and return NaN string.
+  icu::UnicodeString result;
+  number_format->format(args[0]->NumberValue(), result);
+
+  return v8::String::New(
+      reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> NumberFormat::JSNumberFormat(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  // Expect locale id, region id and settings.
+  if (args.Length() != 3 ||
+      !args[0]->IsString() || !args[1]->IsString() || !args[2]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale, region and number settings are required.")));
+  }
+
+  icu::DecimalFormat* number_format = CreateNumberFormat(
+      args[0]->ToString(), args[1]->ToString(), args[2]->ToObject());
+
+  if (number_format_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+    raw_template->SetClassName(v8::String::New("v8Locale.NumberFormat"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal field for icu number formatter.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("format"),
+               v8::FunctionTemplate::New(Format));
+
+    number_format_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      number_format_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set number formatter as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, number_format);
+
+  // Create options key.
+  v8::Local<v8::Object> options = v8::Object::New();
+
+  // Show what ICU decided to use for easier problem tracking.
+  // Keep it as a v8-specific extension.
+  icu::UnicodeString pattern;
+  number_format->toPattern(pattern);
+  options->Set(v8::String::New("v8ResolvedPattern"),
+               v8::String::New(reinterpret_cast<const uint16_t*>(
+                   pattern.getBuffer()), pattern.length()));
+
+  // Set resolved currency code in options.currency if not empty.
+  icu::UnicodeString currency(number_format->getCurrency());
+  if (!currency.isEmpty()) {
+    options->Set(v8::String::New("currencyCode"),
+                 v8::String::New(reinterpret_cast<const uint16_t*>(
+                     currency.getBuffer()), currency.length()));
+  }
+
+  wrapper->Set(v8::String::New("options"), options);
+
+  // Make object handle weak so we can delete the formatter once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteNumberFormat);
+
+  return wrapper;
+}
+
+// Returns DecimalFormat.
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String> locale,
+                                              v8::Handle<v8::String> region,
+                                              v8::Handle<v8::Object> settings) {
+  v8::HandleScope handle_scope;
+
+  v8::String::AsciiValue ascii_locale(locale);
+  icu::Locale icu_locale(*ascii_locale);
+
+  // Make formatter from skeleton.
+  icu::DecimalFormat* number_format = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  icu::UnicodeString setting;
+
+  if (I18NUtils::ExtractStringSetting(settings, "skeleton", &setting)) {
+    // TODO(cira): Use ICU skeleton once
+    // http://bugs.icu-project.org/trac/ticket/8610 is resolved.
+    number_format = CreateFormatterFromSkeleton(icu_locale, setting, &status);
+  } else if (I18NUtils::ExtractStringSetting(settings, "pattern", &setting)) {
+    number_format =
+        new icu::DecimalFormat(setting, GetFormatSymbols(icu_locale), status);
+  } else if (I18NUtils::ExtractStringSetting(settings, "style", &setting)) {
+    if (setting == UNICODE_STRING_SIMPLE("currency")) {
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createCurrencyInstance(icu_locale, status));
+    } else if (setting == UNICODE_STRING_SIMPLE("percent")) {
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createPercentInstance(icu_locale, status));
+    } else if (setting == UNICODE_STRING_SIMPLE("scientific")) {
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createScientificInstance(icu_locale, status));
+    } else {
+      // Make it decimal in any other case.
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createInstance(icu_locale, status));
+    }
+  }
+
+  if (U_FAILURE(status)) {
+    delete number_format;
+    status = U_ZERO_ERROR;
+    number_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createInstance(icu_locale, status));
+  }
+
+  // Attach appropriate currency code to the formatter.
+  // It affects currency formatters only.
+  // Region is a full language identifier in the form 'und_' + region id.
+  v8::String::AsciiValue ascii_region(region);
+
+  UChar currency_code[NumberFormat::kCurrencyCodeLength];
+  if (GetCurrencyCode(icu_locale, *ascii_region, settings, currency_code)) {
+    number_format->setCurrency(currency_code, status);
+  }
+
+  return number_format;
+}
+
+// Generates ICU number format pattern from given skeleton.
+// TODO(cira): Remove once ICU includes equivalent method
+// (see http://bugs.icu-project.org/trac/ticket/8610).
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+    const icu::Locale& icu_locale,
+    const icu::UnicodeString& skeleton,
+    UErrorCode* status) {
+  icu::DecimalFormat skeleton_format(
+      skeleton, GetFormatSymbols(icu_locale), *status);
+
+  // Find out if skeleton contains currency or percent symbol and create
+  // proper instance to tweak.
+  icu::DecimalFormat* base_format = NULL;
+
+  // UChar representation of U+00A4 currency symbol.
+  const UChar currency_symbol = 0xA4u;
+
+  int32_t index = skeleton.indexOf(currency_symbol);
+  if (index != -1) {
+    // Find how many U+00A4 are there. There is at least one.
+    // Case of non-consecutive U+00A4 is taken care of in i18n.js.
+    int32_t end_index = skeleton.lastIndexOf(currency_symbol, index);
+
+#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
+    icu::NumberFormat::EStyles style;
+    switch (end_index - index) {
+      case 0:
+        style = icu::NumberFormat::kCurrencyStyle;
+        break;
+      case 1:
+        style = icu::NumberFormat::kIsoCurrencyStyle;
+        break;
+      default:
+        style = icu::NumberFormat::kPluralCurrencyStyle;
+    }
+#else  // ICU version is 4.8 or above (we ignore versions below 4.0).
+    UNumberFormatStyle style;
+    switch (end_index - index) {
+      case 0:
+        style = UNUM_CURRENCY;
+        break;
+      case 1:
+        style = UNUM_CURRENCY_ISO;
+        break;
+      default:
+        style = UNUM_CURRENCY_PLURAL;
+    }
+#endif
+
+    base_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createInstance(icu_locale, style, *status));
+  } else if (skeleton.indexOf('%') != -1) {
+    base_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createPercentInstance(icu_locale, *status));
+  } else {
+    // TODO(cira): Handle scientific skeleton.
+    base_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createInstance(icu_locale, *status));
+  }
+
+  if (U_FAILURE(*status)) {
+    delete base_format;
+    return NULL;
+  }
+
+  // Copy important information from skeleton to the new formatter.
+  // TODO(cira): copy rounding information from skeleton?
+  base_format->setGroupingUsed(skeleton_format.isGroupingUsed());
+
+  base_format->setMinimumIntegerDigits(
+      skeleton_format.getMinimumIntegerDigits());
+
+  base_format->setMinimumFractionDigits(
+      skeleton_format.getMinimumFractionDigits());
+
+  base_format->setMaximumFractionDigits(
+      skeleton_format.getMaximumFractionDigits());
+
+  return base_format;
+}
+
+// Gets decimal symbols for a locale.
+static icu::DecimalFormatSymbols* GetFormatSymbols(
+    const icu::Locale& icu_locale) {
+  UErrorCode status = U_ZERO_ERROR;
+  icu::DecimalFormatSymbols* symbols =
+      new icu::DecimalFormatSymbols(icu_locale, status);
+
+  if (U_FAILURE(status)) {
+    delete symbols;
+    // Use symbols from default locale.
+    symbols = new icu::DecimalFormatSymbols(status);
+  }
+
+  return symbols;
+}
+
+// Gets the ISO 4217 3-letter currency code.
+// Checks the currencyCode setting first, then @currency=code, and finally
+// tries to infer the currency code from the locale in the form
+// 'und_' + region id.
+// Returns false in case of error.
+static bool GetCurrencyCode(const icu::Locale& icu_locale,
+                            const char* const und_region_locale,
+                            v8::Handle<v8::Object> settings,
+                            UChar* code) {
+  UErrorCode status = U_ZERO_ERROR;
+
+  // If there is user specified currency code, use it.
+  icu::UnicodeString currency;
+  if (I18NUtils::ExtractStringSetting(settings, "currencyCode", &currency)) {
+    currency.extract(code, NumberFormat::kCurrencyCodeLength, status);
+    return true;
+  }
+
+  // If ICU locale has -cu- currency code use it.
+  char currency_code[NumberFormat::kCurrencyCodeLength];
+  int32_t length = icu_locale.getKeywordValue(
+      "currency", currency_code, NumberFormat::kCurrencyCodeLength, status);
+  if (length != 0) {
+    I18NUtils::AsciiToUChar(currency_code, length + 1,
+                            code, NumberFormat::kCurrencyCodeLength);
+    return true;
+  }
+
+  // Otherwise infer currency code from the region id.
+  ucurr_forLocale(
+      und_region_locale, code, NumberFormat::kCurrencyCodeLength, &status);
+
+  return !!U_SUCCESS(status);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("NumberFormat method called on an object "
+                      "that is not a NumberFormat.")));
+}
+
+} }  // namespace v8::internal
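GetCurrencyCode above resolves the currency in three steps: explicit setting, then the locale's currency keyword, then the region default. A hedged JavaScript-level illustration (actual resolved values depend on the ICU data in use):

// 1. An explicit currencyCode setting wins:
var nf = new v8Locale().createNumberFormat(
    {style: 'currency', currencyCode: 'jpy'});  // likely resolves to JPY
// 2. Otherwise a @currency (-u-cu-) keyword on the ICU locale id is used.
// 3. Failing that, the currency is inferred via ucurr_forLocale from
//    'und_' + region id, e.g. a US-region locale typically yields USD.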
diff --git a/src/extensions/experimental/number-format.h b/src/extensions/experimental/number-format.h
new file mode 100644
index 0000000..bcfaed6
--- /dev/null
+++ b/src/extensions/experimental/number-format.h
@@ -0,0 +1,71 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class DecimalFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class NumberFormat {
+ public:
+  // 3-letter ISO 4217 currency code plus \0.
+  static const int kCurrencyCodeLength;
+
+  static v8::Handle<v8::Value> JSNumberFormat(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks a number format object from the corresponding JavaScript object.
+  static icu::DecimalFormat* UnpackNumberFormat(
+      v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the NumberFormat once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteNumberFormat(v8::Persistent<v8::Value> object,
+                                 void* param);
+
+  // Formats number and returns corresponding string.
+  static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+ private:
+  NumberFormat();
+
+  static v8::Persistent<v8::FunctionTemplate> number_format_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
diff --git a/src/extensions/externalize-string-extension.cc b/src/extensions/externalize-string-extension.cc
index b3f83fe..9fbf329 100644
--- a/src/extensions/externalize-string-extension.cc
+++ b/src/extensions/externalize-string-extension.cc
@@ -133,9 +133,11 @@
 
 
 void ExternalizeStringExtension::Register() {
-  static ExternalizeStringExtension externalize_extension;
+  static ExternalizeStringExtension* externalize_extension = NULL;
+  if (externalize_extension == NULL)
+    externalize_extension = new ExternalizeStringExtension;
   static v8::DeclareExtension externalize_extension_declaration(
-      &externalize_extension);
+      externalize_extension);
 }
 
 } }  // namespace v8::internal
diff --git a/src/factory.cc b/src/factory.cc
index 7dee66f..971f9f9 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,6 +34,7 @@
 #include "macro-assembler.h"
 #include "objects.h"
 #include "objects-visiting.h"
+#include "scopeinfo.h"
 
 namespace v8 {
 namespace internal {
@@ -58,6 +59,16 @@
 }
 
 
+Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
+                                                PretenureFlag pretenure) {
+  ASSERT(0 <= size);
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
+      FixedArray);
+}
+
+
 Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
   ASSERT(0 <= at_least_space_for);
   CALL_HEAP_FUNCTION(isolate(),
@@ -66,11 +77,29 @@
 }
 
 
-Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
+Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary(
+    int at_least_space_for) {
   ASSERT(0 <= at_least_space_for);
   CALL_HEAP_FUNCTION(isolate(),
-                     NumberDictionary::Allocate(at_least_space_for),
-                     NumberDictionary);
+                     SeededNumberDictionary::Allocate(at_least_space_for),
+                     SeededNumberDictionary);
+}
+
+
+Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
+    int at_least_space_for) {
+  ASSERT(0 <= at_least_space_for);
+  CALL_HEAP_FUNCTION(isolate(),
+                     UnseededNumberDictionary::Allocate(at_least_space_for),
+                     UnseededNumberDictionary);
+}
+
+
+Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
+  ASSERT(0 <= at_least_space_for);
+  CALL_HEAP_FUNCTION(isolate(),
+                     ObjectHashTable::Allocate(at_least_space_for),
+                     ObjectHashTable);
 }
 
 
@@ -111,12 +140,31 @@
                      String);
 }
 
+// Symbols are created in the old generation (data space).
+Handle<String> Factory::LookupSymbol(Handle<String> string) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->LookupSymbol(*string),
+                     String);
+}
+
 Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->LookupAsciiSymbol(string),
                      String);
 }
 
+
+Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+                                          int from,
+                                          int length) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->LookupAsciiSymbol(string,
+                                                          from,
+                                                          length),
+                     String);
+}
+
+
 Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->LookupTwoByteSymbol(string),
@@ -150,21 +198,21 @@
 }
 
 
-Handle<String> Factory::NewRawAsciiString(int length,
-                                          PretenureFlag pretenure) {
+Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
+                                                  PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateRawAsciiString(length, pretenure),
-      String);
+      SeqAsciiString);
 }
 
 
-Handle<String> Factory::NewRawTwoByteString(int length,
-                                            PretenureFlag pretenure) {
+Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length,
+                                                      PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
-      String);
+      SeqTwoByteString);
 }
 
 
@@ -185,6 +233,16 @@
 }
 
 
+Handle<String> Factory::NewProperSubString(Handle<String> str,
+                                           int begin,
+                                           int end) {
+  ASSERT(begin > 0 || end < str->length());
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateSubString(*str, begin, end),
+                     String);
+}
+
+
 Handle<String> Factory::NewExternalStringFromAscii(
     ExternalAsciiString::Resource* resource) {
   CALL_HEAP_FUNCTION(
@@ -212,22 +270,47 @@
 
 
 Handle<Context> Factory::NewFunctionContext(int length,
-                                            Handle<JSFunction> closure) {
+                                            Handle<JSFunction> function) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateFunctionContext(length, *closure),
+      isolate()->heap()->AllocateFunctionContext(length, *function),
       Context);
 }
 
 
-Handle<Context> Factory::NewWithContext(Handle<Context> previous,
-                                        Handle<JSObject> extension,
-                                        bool is_catch_context) {
+Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
+                                         Handle<Context> previous,
+                                         Handle<String> name,
+                                         Handle<Object> thrown_object) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateWithContext(*previous,
-                                             *extension,
-                                             is_catch_context),
+      isolate()->heap()->AllocateCatchContext(*function,
+                                              *previous,
+                                              *name,
+                                              *thrown_object),
+      Context);
+}
+
+
+Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
+                                        Handle<Context> previous,
+                                        Handle<JSObject> extension) {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateWithContext(*function, *previous, *extension),
+      Context);
+}
+
+
+Handle<Context> Factory::NewBlockContext(
+    Handle<JSFunction> function,
+    Handle<Context> previous,
+    Handle<SerializedScopeInfo> scope_info) {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateBlockContext(*function,
+                                              *previous,
+                                              *scope_info),
       Context);
 }
 
@@ -266,7 +349,7 @@
   heap->SetLastScriptId(Smi::FromInt(id));
 
   // Create and initialize script object.
-  Handle<Proxy> wrapper = NewProxy(0, TENURED);
+  Handle<Foreign> wrapper = NewForeign(0, TENURED);
   Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
   script->set_source(*source);
   script->set_name(heap->undefined_value());
@@ -286,15 +369,15 @@
 }
 
 
-Handle<Proxy> Factory::NewProxy(Address addr, PretenureFlag pretenure) {
+Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->AllocateProxy(addr, pretenure),
-                     Proxy);
+                     isolate()->heap()->AllocateForeign(addr, pretenure),
+                     Foreign);
 }
 
 
-Handle<Proxy> Factory::NewProxy(const AccessorDescriptor* desc) {
-  return NewProxy((Address) desc, TENURED);
+Handle<Foreign> Factory::NewForeign(const AccessorDescriptor* desc) {
+  return NewForeign((Address) desc, TENURED);
 }
 
 
@@ -392,13 +475,13 @@
 }
 
 
-Handle<Map> Factory::GetExternalArrayElementsMap(
+Handle<Map> Factory::GetElementsTransitionMap(
     Handle<Map> src,
-    ExternalArrayType array_type,
+    ElementsKind elements_kind,
     bool safe_to_add_transition) {
   CALL_HEAP_FUNCTION(isolate(),
-                     src->GetExternalArrayElementsMap(array_type,
-                                                      safe_to_add_transition),
+                     src->GetElementsTransitionMap(elements_kind,
+                                                   safe_to_add_transition),
                      Map);
 }
 
@@ -675,6 +758,14 @@
 }
 
 
+Handle<SerializedScopeInfo> Factory::NewSerializedScopeInfo(int length) {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateSerializedScopeInfo(length),
+      SerializedScopeInfo);
+}
+
+
 Handle<Code> Factory::NewCode(const CodeDesc& desc,
                               Code::Flags flags,
                               Handle<Object> self_ref,
@@ -712,7 +803,7 @@
 
 
 // Allocate the new array.
-Handle<DescriptorArray> Factory::CopyAppendProxyDescriptor(
+Handle<DescriptorArray> Factory::CopyAppendForeignDescriptor(
     Handle<DescriptorArray> array,
     Handle<String> key,
     Handle<Object> value,
@@ -832,6 +923,31 @@
 }
 
 
+Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
+                                    Handle<Object> prototype) {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateJSProxy(*handler, *prototype),
+      JSProxy);
+}
+
+
+void Factory::BecomeJSObject(Handle<JSReceiver> object) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      isolate()->heap()->ReinitializeJSReceiver(
+          *object, JS_OBJECT_TYPE, JSObject::kHeaderSize));
+}
+
+
+void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      isolate()->heap()->ReinitializeJSReceiver(
+          *object, JS_FUNCTION_TYPE, JSFunction::kSize));
+}
+
+
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name,
     int number_of_literals,
@@ -884,13 +1000,23 @@
 }
 
 
-Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
-    Handle<NumberDictionary> dictionary,
+Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
+    Handle<SeededNumberDictionary> dictionary,
     uint32_t key,
     Handle<Object> value) {
   CALL_HEAP_FUNCTION(isolate(),
                      dictionary->AtNumberPut(key, *value),
-                     NumberDictionary);
+                     SeededNumberDictionary);
+}
+
+
+Handle<UnseededNumberDictionary> Factory::DictionaryAtNumberPut(
+    Handle<UnseededNumberDictionary> dictionary,
+    uint32_t key,
+    Handle<Object> value) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     dictionary->AtNumberPut(key, *value),
+                     UnseededNumberDictionary);
 }
 
 
@@ -1161,12 +1287,14 @@
                                     JSRegExp::Flags flags,
                                     int capture_count) {
   Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
-
+  Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
   store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
   store->set(JSRegExp::kSourceIndex, *source);
   store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
-  store->set(JSRegExp::kIrregexpASCIICodeIndex, HEAP->the_hole_value());
-  store->set(JSRegExp::kIrregexpUC16CodeIndex, HEAP->the_hole_value());
+  store->set(JSRegExp::kIrregexpASCIICodeIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpASCIICodeSavedIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
   store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
   store->set(JSRegExp::kIrregexpCaptureCountIndex,
              Smi::FromInt(capture_count));
diff --git a/src/factory.h b/src/factory.h
index 71bfdc4..c9817fe 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,7 +39,7 @@
 
 class Factory {
  public:
-  // Allocate a new fixed array with undefined entries.
+  // Allocate a new uninitialized fixed array.
   Handle<FixedArray> NewFixedArray(
       int size,
       PretenureFlag pretenure = NOT_TENURED);
@@ -49,10 +49,21 @@
       int size,
       PretenureFlag pretenure = NOT_TENURED);
 
-  Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
+  // Allocate a new uninitialized fixed double array.
+  Handle<FixedArray> NewFixedDoubleArray(
+      int size,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  Handle<SeededNumberDictionary> NewSeededNumberDictionary(
+      int at_least_space_for);
+
+  Handle<UnseededNumberDictionary> NewUnseededNumberDictionary(
+      int at_least_space_for);
 
   Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
 
+  Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
+
   Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
   Handle<DeoptimizationInputData> NewDeoptimizationInputData(
       int deopt_entry_count,
@@ -62,7 +73,11 @@
       PretenureFlag pretenure);
 
   Handle<String> LookupSymbol(Vector<const char> str);
+  Handle<String> LookupSymbol(Handle<String> str);
   Handle<String> LookupAsciiSymbol(Vector<const char> str);
+  Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
+                                   int from,
+                                   int length);
   Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
   Handle<String> LookupAsciiSymbol(const char* str) {
     return LookupSymbol(CStrVector(str));
@@ -108,10 +123,10 @@
   // Allocates and partially initializes an ASCII or TwoByte String. The
   // characters of the string are uninitialized. Currently used in regexp code
   // only, where they are pretenured.
-  Handle<String> NewRawAsciiString(
+  Handle<SeqAsciiString> NewRawAsciiString(
       int length,
       PretenureFlag pretenure = NOT_TENURED);
-  Handle<String> NewRawTwoByteString(
+  Handle<SeqTwoByteString> NewRawTwoByteString(
       int length,
       PretenureFlag pretenure = NOT_TENURED);
 
@@ -124,6 +139,11 @@
                               int begin,
                               int end);
 
+  // Create a new string object which holds a proper substring of a string.
+  Handle<String> NewProperSubString(Handle<String> str,
+                                    int begin,
+                                    int end);
+
   // Creates a new external String object.  There are two String encodings
   // in the system: ASCII and two byte.  Unlike other String types, it does
   // not make sense to have a UTF-8 factory function for external strings,
@@ -138,12 +158,23 @@
 
   // Create a function context.
   Handle<Context> NewFunctionContext(int length,
-                                     Handle<JSFunction> closure);
+                                     Handle<JSFunction> function);
+
+  // Create a catch context.
+  Handle<Context> NewCatchContext(Handle<JSFunction> function,
+                                  Handle<Context> previous,
+                                  Handle<String> name,
+                                  Handle<Object> thrown_object);
 
   // Create a 'with' context.
-  Handle<Context> NewWithContext(Handle<Context> previous,
-                                 Handle<JSObject> extension,
-                                 bool is_catch_context);
+  Handle<Context> NewWithContext(Handle<JSFunction> function,
+                                 Handle<Context> previous,
+                                 Handle<JSObject> extension);
+
+  // Create a 'block' context.
+  Handle<Context> NewBlockContext(Handle<JSFunction> function,
+                                  Handle<Context> previous,
+                                  Handle<SerializedScopeInfo> scope_info);
 
   // Return the Symbol matching the passed in string.
   Handle<String> SymbolFromString(Handle<String> value);
@@ -156,13 +187,13 @@
 
   Handle<Script> NewScript(Handle<String> source);
 
-  // Proxies are pretenured when allocated by the bootstrapper.
-  Handle<Proxy> NewProxy(Address addr,
-                         PretenureFlag pretenure = NOT_TENURED);
+  // Foreign objects are pretenured when allocated by the bootstrapper.
+  Handle<Foreign> NewForeign(Address addr,
+                             PretenureFlag pretenure = NOT_TENURED);
 
-  // Allocate a new proxy.  The proxy is pretenured (allocated directly in
-  // the old generation).
-  Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
+  // Allocate a new foreign object.  The foreign is pretenured (allocated
+  // directly in the old generation).
+  Handle<Foreign> NewForeign(const AccessorDescriptor* foreign);
 
   Handle<ByteArray> NewByteArray(int length,
                                  PretenureFlag pretenure = NOT_TENURED);
@@ -192,9 +223,9 @@
 
   Handle<Map> GetSlowElementsMap(Handle<Map> map);
 
-  Handle<Map> GetExternalArrayElementsMap(Handle<Map> map,
-                                          ExternalArrayType array_type,
-                                          bool safe_to_add_transition);
+  Handle<Map> GetElementsTransitionMap(Handle<Map> map,
+                                       ElementsKind elements_kind,
+                                       bool safe_to_add_transition);
 
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
 
@@ -231,6 +262,12 @@
       Handle<FixedArray> elements,
       PretenureFlag pretenure = NOT_TENURED);
 
+  Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+
+  // Change the type of the argument into a JS object/function and reinitialize.
+  void BecomeJSObject(Handle<JSReceiver> object);
+  void BecomeJSFunction(Handle<JSReceiver> object);
+
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Object> prototype);
 
@@ -250,6 +287,8 @@
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
+  Handle<SerializedScopeInfo> NewSerializedScopeInfo(int length);
+
   Handle<Code> NewCode(const CodeDesc& desc,
                        Code::Flags flags,
                        Handle<Object> self_reference,
@@ -314,7 +353,7 @@
   Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
                                                  Handle<Code> code);
 
-  Handle<DescriptorArray> CopyAppendProxyDescriptor(
+  Handle<DescriptorArray> CopyAppendForeignDescriptor(
       Handle<DescriptorArray> array,
       Handle<String> key,
       Handle<Object> value,
@@ -377,8 +416,13 @@
       Handle<Object> stack_trace,
       Handle<Object> stack_frames);
 
-  Handle<NumberDictionary> DictionaryAtNumberPut(
-      Handle<NumberDictionary>,
+  Handle<SeededNumberDictionary> DictionaryAtNumberPut(
+      Handle<SeededNumberDictionary>,
+      uint32_t key,
+      Handle<Object> value);
+
+  Handle<UnseededNumberDictionary> DictionaryAtNumberPut(
+      Handle<UnseededNumberDictionary>,
       uint32_t key,
       Handle<Object> value);
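Editor's note on the factory.h hunk above: the new NewProperSubString declaration only makes sense with the usual meaning of "proper", i.e. the result must be a strict part of the input, never the whole string. A minimal plain-C++ sketch of that contract follows; the function name and the use of std::string are invented for the example and are not V8 API.

#include <cassert>
#include <string>

// Illustrative contract only: begin/end delimit a substring that must not
// cover the entire input, mirroring the "proper substring" wording above.
std::string ProperSubString(const std::string& str, int begin, int end) {
  assert(0 <= begin && begin <= end);
  assert(end <= static_cast<int>(str.size()));
  assert(begin > 0 || end < static_cast<int>(str.size()));  // strictly smaller
  return str.substr(begin, end - begin);
}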
 
diff --git a/src/fast-dtoa.cc b/src/fast-dtoa.cc
index c7f6aa1..e62bd01 100644
--- a/src/fast-dtoa.cc
+++ b/src/fast-dtoa.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#include "../include/v8stdint.h"
+#include "checks.h"
+#include "utils.h"
 
 #include "fast-dtoa.h"
 
diff --git a/src/fixed-dtoa.cc b/src/fixed-dtoa.cc
index 8ad88f6..1fd974c 100644
--- a/src/fixed-dtoa.cc
+++ b/src/fixed-dtoa.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,9 @@
 
 #include <math.h>
 
-#include "v8.h"
+#include "../include/v8stdint.h"
+#include "checks.h"
+#include "utils.h"
 
 #include "double.h"
 #include "fixed-dtoa.h"
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 17e2015..e8f6349 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -96,6 +96,16 @@
 //
 #define FLAG FLAG_FULL
 
+// Flags for experimental language features.
+DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
+DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
+DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
+DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
+
+// Flags for experimental implementation features.
+DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
+DEFINE_bool(string_slices, false, "use string slices")
+
 // Flags for Crankshaft.
 #ifdef V8_TARGET_ARCH_MIPS
   DEFINE_bool(crankshaft, false, "use crankshaft")
@@ -128,12 +138,9 @@
 DEFINE_int(deopt_every_n_times,
            0,
            "deoptimize every n times a deopt point is passed")
-DEFINE_bool(process_arguments_object, true, "try to deal with arguments object")
 DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
 DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
-DEFINE_bool(aggressive_loop_invariant_motion, true,
-            "aggressive motion of instructions out of loops")
 DEFINE_bool(use_osr, true, "use on-stack replacement")
 
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
@@ -144,11 +151,8 @@
 DEFINE_bool(debug_code, false,
             "generate extra code (assertions) for debugging")
 DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(emit_branch_hints, false, "emit branch hints")
 DEFINE_bool(peephole_optimization, true,
             "perform peephole optimizations in assembly code")
-DEFINE_bool(print_peephole_optimization, false,
-            "print peephole optimizations in assembly code")
 DEFINE_bool(enable_sse2, true,
             "enable use of SSE2 instructions if available")
 DEFINE_bool(enable_sse3, true,
@@ -187,7 +191,6 @@
 
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(defer_negation, true, "defer negation operation")
 DEFINE_bool(mask_constants_with_cookie,
             true,
             "use random jit cookie to mask large constants")
@@ -200,33 +203,23 @@
 DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
 DEFINE_bool(always_opt, false, "always try to optimize functions")
 DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(debug_info, true, "add debug information to compiled functions")
 DEFINE_bool(deopt, true, "support deoptimization")
 DEFINE_bool(trace_deopt, false, "trace deoptimization")
 
 // compiler.cc
-DEFINE_bool(strict, false, "strict error checking")
 DEFINE_int(min_preparse_length, 1024,
            "minimum length for automatic enable preparsing")
-DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
 DEFINE_bool(always_full_compiler, false,
             "try to use the dedicated run-once backend for all code")
 DEFINE_bool(trace_bailout, false,
             "print reasons for falling back to using the classic V8 backend")
-DEFINE_bool(safe_int32_compiler, true,
-            "enable optimized side-effect-free int32 expressions.")
-DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
 
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")
 
 DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
 
-// data-flow.cc
-DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.")
-
 // debug.cc
-DEFINE_bool(remote_debugging, false, "enable remote debugging")
 DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
 DEFINE_bool(debugger_auto_break, true,
             "automatically set the debug break flag when debugger commands are "
@@ -285,10 +278,9 @@
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
-DEFINE_bool(cleanup_ics_at_gc, true,
-            "Flush inline caches prior to mark compact collection.")
-DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
-            "Flush code caches in maps during mark compact cycle.")
+DEFINE_bool(cleanup_code_caches_at_gc, true,
+            "Flush inline caches prior to mark compact collection and "
+            "flush code caches in maps during mark compact cycle.")
 DEFINE_int(random_seed, 0,
            "Default seed for initializing random generator "
            "(0, the default, means to use system random).")
@@ -315,9 +307,6 @@
 DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
 DEFINE_bool(strict_mode, true, "allow strict mode directives")
 
-// rewriter.cc
-DEFINE_bool(optimize_ast, true, "optimize the ast")
-
 // simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "Trace simulator execution")
 DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
@@ -325,18 +314,25 @@
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
 
-// top.cc
+// isolate.cc
 DEFINE_bool(trace_exception, false,
             "print stack trace when throwing exceptions")
 DEFINE_bool(preallocate_message_memory, false,
             "preallocate some memory to build stack traces.")
+DEFINE_bool(randomize_hashes,
+            true,
+            "randomize hashes to avoid predictable hash collisions "
+            "(with snapshots this option cannot override the baked-in seed)")
+DEFINE_int(hash_seed,
+           0,
+           "Fixed seed to use to hash property keys (0 means random) "
+           "(with snapshots this option cannot override the baked-in seed)")
 
 // v8.cc
 DEFINE_bool(preemption, false,
             "activate a 100ms timer that switches between V8 threads")
 
 // Regexp
-DEFINE_bool(trace_regexps, false, "trace regexp execution")
 DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
 DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
 
@@ -387,6 +383,8 @@
 DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
 DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
 DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
+DEFINE_string(gdbjit_dump_filter, "",
+              "dump only objects containing this substring")
 
 //
 // Debug only flags
@@ -413,16 +411,12 @@
 DEFINE_bool(print_json_ast, false, "print source AST as JSON")
 DEFINE_bool(print_builtin_json_ast, false,
             "print source AST for builtins as JSON")
-DEFINE_bool(trace_calls, false, "trace calls")
-DEFINE_bool(trace_builtin_calls, false, "trace builtins calls")
 DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
+DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
 
 // compiler.cc
 DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
 DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
-DEFINE_bool(print_graph_text, false,
-            "print a text representation of the flow graph")
 
 // contexts.cc
 DEFINE_bool(trace_contexts, false, "trace contexts operations")
@@ -471,14 +465,10 @@
             "trace regexp macro assembler calls.")
 
 //
-// Logging and profiling only flags
+// Logging and profiling flags
 //
 #undef FLAG
-#ifdef ENABLE_LOGGING_AND_PROFILING
 #define FLAG FLAG_FULL
-#else
-#define FLAG FLAG_READONLY
-#endif
 
 // log.cc
 DEFINE_bool(log, false,
@@ -494,7 +484,6 @@
 DEFINE_bool(log_snapshot_positions, false,
             "log positions of (de)serialized objects in the snapshot.")
 DEFINE_bool(log_suspect, false, "Log suspect operations.")
-DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
 DEFINE_bool(prof, false,
             "Log statistical profiling information (implies --log-code).")
 DEFINE_bool(prof_auto, true,
@@ -511,19 +500,6 @@
 DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
 
 //
-// Heap protection flags
-// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well.
-//
-#ifdef ENABLE_HEAP_PROTECTION
-#undef FLAG
-#define FLAG FLAG_FULL
-
-DEFINE_bool(protect_heap, false,
-            "Protect/unprotect V8's heap when leaving/entring the VM.")
-
-#endif
-
-//
 // Disassembler only flags
 //
 #undef FLAG
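Editor's note: the hunks above lean on this header's definition pattern, in which the same DEFINE_* lines are compiled several times with FLAG redefined (FLAG_FULL, FLAG_READONLY, ...) so that one list of definitions yields declarations, defaults and help text. A toy, self-contained version of that idea is sketched below; the macro and struct names are invented for the example and are not V8's actual machinery.

#include <cstdio>

struct BoolFlag { const char* name; bool value; const char* comment; };

// One expansion of the list builds a metadata table; other expansions of the
// same list could just as well emit extern declarations or default values.
#define DEFINE_BOOL(nam, def, cmt) { #nam, def, cmt },
static const BoolFlag kFlags[] = {
  DEFINE_BOOL(harmony_proxies, false, "enable harmony proxies")
  DEFINE_BOOL(string_slices, false, "use string slices")
};
#undef DEFINE_BOOL

int main() {
  for (const BoolFlag& f : kFlags) {
    std::printf("--%s (%s) default: %s\n", f.name, f.comment,
                f.value ? "true" : "false");
  }
  return 0;
}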
diff --git a/src/flags.cc b/src/flags.cc
index c20f5ee..ab5b57c 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -31,7 +31,7 @@
 #include "v8.h"
 
 #include "platform.h"
-#include "smart-pointer.h"
+#include "smart-array-pointer.h"
 #include "string-stream.h"
 
 
@@ -193,7 +193,7 @@
 }
 
 
-static SmartPointer<const char> ToString(Flag* flag) {
+static SmartArrayPointer<const char> ToString(Flag* flag) {
   HeapStringAllocator string_allocator;
   StringStream buffer(&string_allocator);
   switch (flag->type()) {
@@ -528,7 +528,7 @@
   printf("Options:\n");
   for (size_t i = 0; i < num_flags; ++i) {
     Flag* f = &flags[i];
-    SmartPointer<const char> value = ToString(f);
+    SmartArrayPointer<const char> value = ToString(f);
     printf("  --%s (%s)\n        type: %s  default: %s\n",
            f->name(), f->comment(), Type2String(f->type()), *value);
   }
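Editor's note: the SmartPointer to SmartArrayPointer rename above is not cosmetic. ToString() returns a heap-allocated character array, and an array must be released with delete[] rather than delete, which is what the new name signals. A minimal modern-C++ analogue of that distinction, with unique_ptr<char[]> standing in for SmartArrayPointer (this is not V8 code):

#include <cstring>
#include <memory>

// unique_ptr<char[]> selects the array form of the deleter, so the copy is
// released with delete[] when the pointer goes out of scope.
std::unique_ptr<char[]> CopyCString(const char* s) {
  std::unique_ptr<char[]> copy(new char[std::strlen(s) + 1]);
  std::strcpy(copy.get(), s);
  return copy;
}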
diff --git a/src/frame-element.h b/src/frame-element.h
deleted file mode 100644
index 0c7d010..0000000
--- a/src/frame-element.h
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAME_ELEMENT_H_
-#define V8_FRAME_ELEMENT_H_
-
-#include "type-info.h"
-#include "macro-assembler.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frame elements
-//
-// The internal elements of the virtual frames.  There are several kinds of
-// elements:
-//   * Invalid: elements that are uninitialized or not actually part
-//     of the virtual frame.  They should not be read.
-//   * Memory: an element that resides in the actual frame.  Its address is
-//     given by its position in the virtual frame.
-//   * Register: an element that resides in a register.
-//   * Constant: an element whose value is known at compile time.
-
-class FrameElement BASE_EMBEDDED {
- public:
-  enum SyncFlag {
-    NOT_SYNCED,
-    SYNCED
-  };
-
-  inline TypeInfo type_info() {
-    // Copied elements do not have type info. Instead
-    // we have to inspect their backing element in the frame.
-    ASSERT(!is_copy());
-    return TypeInfo::FromInt(TypeInfoField::decode(value_));
-  }
-
-  inline void set_type_info(TypeInfo info) {
-    // Copied elements do not have type info. Instead
-    // we have to inspect their backing element in the frame.
-    ASSERT(!is_copy());
-    value_ = value_ & ~TypeInfoField::mask();
-    value_ = value_ | TypeInfoField::encode(info.ToInt());
-  }
-
-  // The default constructor creates an invalid frame element.
-  FrameElement() {
-    value_ = TypeField::encode(INVALID)
-        | CopiedField::encode(false)
-        | SyncedField::encode(false)
-        | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt())
-        | DataField::encode(0);
-  }
-
-  // Factory function to construct an invalid frame element.
-  static FrameElement InvalidElement() {
-    FrameElement result;
-    return result;
-  }
-
-  // Factory function to construct an in-memory frame element.
-  static FrameElement MemoryElement(TypeInfo info) {
-    FrameElement result(MEMORY, no_reg, SYNCED, info);
-    return result;
-  }
-
-  // Factory function to construct an in-register frame element.
-  static FrameElement RegisterElement(Register reg,
-                                      SyncFlag is_synced,
-                                      TypeInfo info) {
-    return FrameElement(REGISTER, reg, is_synced, info);
-  }
-
-  // Factory function to construct a frame element whose value is known at
-  // compile time.
-  static FrameElement ConstantElement(Handle<Object> value,
-                                      SyncFlag is_synced) {
-    TypeInfo info = TypeInfo::TypeFromValue(value);
-    FrameElement result(value, is_synced, info);
-    return result;
-  }
-
-  static bool ConstantPoolOverflowed() {
-    return !DataField::is_valid(
-        Isolate::Current()->frame_element_constant_list()->length());
-  }
-
-  bool is_synced() const { return SyncedField::decode(value_); }
-
-  void set_sync() {
-    ASSERT(type() != MEMORY);
-    value_ = value_ | SyncedField::encode(true);
-  }
-
-  void clear_sync() {
-    ASSERT(type() != MEMORY);
-    value_ = value_ & ~SyncedField::mask();
-  }
-
-  bool is_valid() const { return type() != INVALID; }
-  bool is_memory() const { return type() == MEMORY; }
-  bool is_register() const { return type() == REGISTER; }
-  bool is_constant() const { return type() == CONSTANT; }
-  bool is_copy() const { return type() == COPY; }
-
-  bool is_copied() const { return CopiedField::decode(value_); }
-  void set_copied() { value_ = value_ | CopiedField::encode(true); }
-  void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
-
-  // An untagged int32 FrameElement represents a signed int32
-  // on the stack.  These are only allowed in a side-effect-free
-  // int32 calculation, and if a non-int32 input shows up or an overflow
-  // occurs, we bail out and drop all the int32 values.
-  void set_untagged_int32(bool value) {
-    value_ &= ~UntaggedInt32Field::mask();
-    value_ |= UntaggedInt32Field::encode(value);
-  }
-  bool is_untagged_int32() const { return UntaggedInt32Field::decode(value_); }
-
-  Register reg() const {
-    ASSERT(is_register());
-    uint32_t reg = DataField::decode(value_);
-    Register result;
-    result.code_ = reg;
-    return result;
-  }
-
-  Handle<Object> handle() const {
-    ASSERT(is_constant());
-    return Isolate::Current()->frame_element_constant_list()->
-        at(DataField::decode(value_));
-  }
-
-  int index() const {
-    ASSERT(is_copy());
-    return DataField::decode(value_);
-  }
-
-  bool Equals(FrameElement other) {
-    uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
-    if (!masked_difference) {
-      // The elements are equal if they agree exactly except on copied field.
-      return true;
-    } else {
-      // If two constants have the same value, and agree otherwise, return true.
-       return !(masked_difference & ~DataField::mask()) &&
-              is_constant() &&
-              handle().is_identical_to(other.handle());
-    }
-  }
-
-  // Test if two FrameElements refer to the same memory or register location.
-  bool SameLocation(FrameElement* other) {
-    if (type() == other->type()) {
-      if (value_ == other->value_) return true;
-      if (is_constant() && handle().is_identical_to(other->handle())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  // Given a pair of non-null frame element pointers, return one of them
-  // as an entry frame candidate or null if they are incompatible.
-  FrameElement* Combine(FrameElement* other) {
-    // If either is invalid, the result is.
-    if (!is_valid()) return this;
-    if (!other->is_valid()) return other;
-
-    if (!SameLocation(other)) return NULL;
-    // If either is unsynced, the result is.
-    FrameElement* result = is_synced() ? other : this;
-    return result;
-  }
-
- private:
-  enum Type {
-    INVALID,
-    MEMORY,
-    REGISTER,
-    CONSTANT,
-    COPY
-  };
-
-  // Used to construct memory and register elements.
-  FrameElement(Type type,
-               Register reg,
-               SyncFlag is_synced,
-               TypeInfo info) {
-    value_ = TypeField::encode(type)
-        | CopiedField::encode(false)
-        | SyncedField::encode(is_synced != NOT_SYNCED)
-        | TypeInfoField::encode(info.ToInt())
-        | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
-  }
-
-  // Used to construct constant elements.
-  FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
-    ZoneObjectList* constant_list =
-        Isolate::Current()->frame_element_constant_list();
-    value_ = TypeField::encode(CONSTANT)
-        | CopiedField::encode(false)
-        | SyncedField::encode(is_synced != NOT_SYNCED)
-        | TypeInfoField::encode(info.ToInt())
-        | DataField::encode(constant_list->length());
-    constant_list->Add(value);
-  }
-
-  Type type() const { return TypeField::decode(value_); }
-  void set_type(Type type) {
-    value_ = value_ & ~TypeField::mask();
-    value_ = value_ | TypeField::encode(type);
-  }
-
-  void set_index(int new_index) {
-    ASSERT(is_copy());
-    value_ = value_ & ~DataField::mask();
-    value_ = value_ | DataField::encode(new_index);
-  }
-
-  void set_reg(Register new_reg) {
-    ASSERT(is_register());
-    value_ = value_ & ~DataField::mask();
-    value_ = value_ | DataField::encode(new_reg.code_);
-  }
-
-  // Encode type, copied, synced and data in one 32 bit integer.
-  uint32_t value_;
-
-  // Declare BitFields with template parameters <type, start, size>.
-  class TypeField: public BitField<Type, 0, 3> {};
-  class CopiedField: public BitField<bool, 3, 1> {};
-  class SyncedField: public BitField<bool, 4, 1> {};
-  class UntaggedInt32Field: public BitField<bool, 5, 1> {};
-  class TypeInfoField: public BitField<int, 6, 7> {};
-  class DataField: public BitField<uint32_t, 13, 32 - 13> {};
-
-  friend class VirtualFrame;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_FRAME_ELEMENT_H_
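Editor's note: although frame-element.h is deleted above, the bit-packing idiom it used is worth keeping in mind, since the same BitField<type, start, size> style appears elsewhere in V8: several small fields are encoded into a single uint32_t. The standalone re-creation below is written for this note and is not V8's BitField class.

#include <cstdint>

// Each field owns a fixed run of bits inside one 32-bit word.
template <class T, int kStart, int kSize>
struct BitField {
  static uint32_t mask() { return ((1u << kSize) - 1u) << kStart; }
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kStart) & mask();
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & mask()) >> kStart);
  }
};

enum Type { INVALID, MEMORY, REGISTER, CONSTANT, COPY };
typedef BitField<Type, 0, 3> TypeField;    // bits 0..2: element kind
typedef BitField<bool, 3, 1> CopiedField;  // bit 3: copied flag

uint32_t MakeMemoryElement() {
  return TypeField::encode(MEMORY) | CopiedField::encode(false);
}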
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 5951806..7ba79bf 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,6 +67,7 @@
 
 
 inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
+  v->VisitPointer(context_address());
   StackFrame::IteratePc(v, pc_address(), holder);
 }
 
@@ -82,6 +83,12 @@
 }
 
 
+inline Object** StackHandler::context_address() const {
+  const int offset = StackHandlerConstants::kContextOffset;
+  return reinterpret_cast<Object**>(address() + offset);
+}
+
+
 inline Address* StackHandler::pc_address() const {
   const int offset = StackHandlerConstants::kPCOffset;
   return reinterpret_cast<Address*>(address() + offset);
diff --git a/src/frames.cc b/src/frames.cc
index e0517c8..60b1aad 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,6 +36,8 @@
 #include "scopeinfo.h"
 #include "string-stream.h"
 
+#include "allocation-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -346,7 +348,6 @@
 // -------------------------------------------------------------------------
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
     Isolate* isolate,
     Address fp, Address sp, Address low_bound, Address high_bound) :
@@ -362,7 +363,6 @@
     if (frame()->is_java_script()) return;
   }
 }
-#endif
 
 
 Code* StackFrame::GetSafepointData(Isolate* isolate,
@@ -371,7 +371,6 @@
                                    unsigned* stack_slots) {
   PcToCodeCache::PcToCodeCacheEntry* entry =
       isolate->pc_to_code_cache()->GetCacheEntry(pc);
-  SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
   if (!entry->safepoint_entry.is_valid()) {
     entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
     ASSERT(entry->safepoint_entry.is_valid());
@@ -528,6 +527,17 @@
 }
 
 
+Object* StandardFrame::GetExpression(Address fp, int index) {
+  return Memory::Object_at(GetExpressionAddress(fp, index));
+}
+
+
+Address StandardFrame::GetExpressionAddress(Address fp, int n) {
+  const int offset = StandardFrameConstants::kExpressionsOffset;
+  return fp + offset - n * kPointerSize;
+}
+
+
 int StandardFrame::ComputeExpressionsCount() const {
   const int offset =
       StandardFrameConstants::kExpressionsOffset + kPointerSize;
@@ -646,6 +656,16 @@
 }
 
 
+int JavaScriptFrame::GetArgumentsLength() const {
+  // If there is an arguments adaptor frame get the arguments length from it.
+  if (has_adapted_arguments()) {
+    return Smi::cast(GetExpression(caller_fp(), 0))->value();
+  } else {
+    return GetNumberOfIncomingArguments();
+  }
+}
+
+
 Code* JavaScriptFrame::unchecked_code() const {
   JSFunction* function = JSFunction::cast(this->function());
   return function->unchecked_code();
@@ -742,24 +762,30 @@
       // at the first position. Since we are always at a call when we need
       // to construct a stack trace, the receiver is always in a stack slot.
       opcode = static_cast<Translation::Opcode>(it.Next());
-      ASSERT(opcode == Translation::STACK_SLOT);
-      int input_slot_index = it.Next();
+      ASSERT(opcode == Translation::STACK_SLOT ||
+             opcode == Translation::LITERAL);
+      int index = it.Next();
 
       // Get the correct receiver in the optimized frame.
       Object* receiver = NULL;
-      // Positive index means the value is spilled to the locals area. Negative
-      // means it is stored in the incoming parameter area.
-      if (input_slot_index >= 0) {
-        receiver = GetExpression(input_slot_index);
+      if (opcode == Translation::LITERAL) {
+        receiver = data->LiteralArray()->get(index);
       } else {
-        // Index -1 overlaps with last parameter, -n with the first parameter,
-        // (-n - 1) with the receiver with n being the number of parameters
-        // of the outermost, optimized frame.
-        int parameter_count = ComputeParametersCount();
-        int parameter_index = input_slot_index + parameter_count;
-        receiver = (parameter_index == -1)
-            ? this->receiver()
-            : this->GetParameter(parameter_index);
+        // Positive index means the value is spilled to the locals
+        // area. Negative means it is stored in the incoming parameter
+        // area.
+        if (index >= 0) {
+          receiver = GetExpression(index);
+        } else {
+          // Index -1 overlaps with last parameter, -n with the first parameter,
+          // (-n - 1) with the receiver with n being the number of parameters
+          // of the outermost, optimized frame.
+          int parameter_count = ComputeParametersCount();
+          int parameter_index = index + parameter_count;
+          receiver = (parameter_index == -1)
+              ? this->receiver()
+              : this->GetParameter(parameter_index);
+        }
       }
 
       Code* code = function->shared()->code();
@@ -806,6 +832,22 @@
 }
 
 
+int OptimizedFrame::GetInlineCount() {
+  ASSERT(is_optimized());
+
+  int deopt_index = Safepoint::kNoDeoptimizationIndex;
+  DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  USE(opcode);
+  int frame_count = it.Next();
+  return frame_count;
+}
+
+
 void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
   ASSERT(functions->length() == 0);
   ASSERT(is_optimized());
@@ -938,6 +980,10 @@
     accumulator->Add("\n");
     return;
   }
+  if (is_optimized()) {
+    accumulator->Add(" {\n// optimized frame\n}\n");
+    return;
+  }
   accumulator->Add(" {\n");
 
   // Compute the number of locals and expression stack elements.
@@ -1141,7 +1187,8 @@
   isolate_->counters()->pc_to_code()->Increment();
   ASSERT(IsPowerOf2(kPcToCodeCacheSize));
   uint32_t hash = ComputeIntegerHash(
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)),
+      v8::internal::kZeroHashSeed);
   uint32_t index = hash & (kPcToCodeCacheSize - 1);
   PcToCodeCacheEntry* entry = cache(index);
   if (entry->pc == pc) {
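Editor's note: the only behavioural change in the cache lookup above is that the pc hash now takes a seed. The indexing step itself is the familiar "hash, then mask with size - 1" trick, which is why the ASSERT(IsPowerOf2(kPcToCodeCacheSize)) matters. A self-contained sketch of that step is below; the mixing function is a placeholder, not V8's ComputeIntegerHash.

#include <cstdint>

static const uint32_t kCacheSize = 1024;  // must remain a power of two

// Placeholder integer mix; any reasonable avalanche function would do here.
uint32_t HashPc(uintptr_t pc, uint32_t seed) {
  uint32_t h = static_cast<uint32_t>(pc) ^ seed;
  h ^= h >> 16;
  h *= 0x45d9f3bu;
  h ^= h >> 16;
  return h;
}

uint32_t CacheIndex(uintptr_t pc, uint32_t seed) {
  return HashPc(pc, seed) & (kCacheSize - 1);  // valid only for power-of-two sizes
}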
diff --git a/src/frames.h b/src/frames.h
index 6fe6a63..fed11c4 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_FRAMES_H_
 #define V8_FRAMES_H_
 
+#include "allocation.h"
 #include "handles.h"
 #include "safepoint-table.h"
 
@@ -113,6 +114,7 @@
   // Accessors.
   inline State state() const;
 
+  inline Object** context_address() const;
   inline Address* pc_address() const;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
@@ -382,6 +384,7 @@
   inline Object* GetExpression(int index) const;
   inline void SetExpression(int index, Object* value);
   int ComputeExpressionsCount() const;
+  static Object* GetExpression(Address fp, int index);
 
   virtual void SetCallerFp(Address caller_fp);
 
@@ -410,6 +413,7 @@
 
   // Returns the address of the n'th expression stack element.
   Address GetExpressionAddress(int n) const;
+  static Address GetExpressionAddress(Address fp, int n);
 
   // Determines if the n'th expression stack element is in a stack
   // handler or not. Requires traversing all handlers in this frame.
@@ -482,6 +486,7 @@
   // actual passed arguments are available in an arguments adaptor
   // frame below it on the stack.
   inline bool has_adapted_arguments() const;
+  int GetArgumentsLength() const;
 
   // Garbage collection support.
   virtual void Iterate(ObjectVisitor* v) const;
@@ -494,6 +499,9 @@
   // Determine the code for the frame.
   virtual Code* unchecked_code() const;
 
+  // Returns the levels of inlining for this frame.
+  virtual int GetInlineCount() { return 1; }
+
   // Return a list with JSFunctions of this frame.
   virtual void GetFunctions(List<JSFunction*>* functions);
 
@@ -532,6 +540,8 @@
   // GC support.
   virtual void Iterate(ObjectVisitor* v) const;
 
+  virtual int GetInlineCount();
+
   // Return a list with JSFunctions of this frame.
   // The functions are ordered bottom-to-top (i.e. functions.last()
   // is the top-most activation)
@@ -569,6 +579,7 @@
   virtual void Print(StringStream* accumulator,
                      PrintMode mode,
                      int index) const;
+
  protected:
   explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
       : JavaScriptFrame(iterator) { }
@@ -834,7 +845,6 @@
 };
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
     SafeJavaScriptFrameIterator;
 
@@ -846,7 +856,6 @@
                                        Address low_bound, Address high_bound);
   void Advance();
 };
-#endif
 
 
 class StackFrameLocator BASE_EMBEDDED {
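Editor's note: the new static GetExpressionAddress declared above mirrors the arithmetic added in frames.cc, where expression slot n lives at fp + kExpressionsOffset - n * kPointerSize; higher indexes sit at lower addresses because the stack grows downward. A minimal sketch of that computation follows; the offset constant used here is assumed for the example, while the real value comes from StandardFrameConstants.

#include <cstdint>

typedef uint8_t* Address;
static const int kPointerSize = static_cast<int>(sizeof(void*));
static const int kExpressionsOffsetForExample = -4 * kPointerSize;  // assumed value

Address ExpressionAddress(Address fp, int n) {
  // Slot 0 is at fp + offset; each further slot is one pointer lower.
  return fp + kExpressionsOffsetForExample - n * kPointerSize;
}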
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 5f97421..8073874 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -35,6 +35,7 @@
 #include "macro-assembler.h"
 #include "prettyprinter.h"
 #include "scopes.h"
+#include "scopeinfo.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -90,17 +91,11 @@
 }
 
 
-void BreakableStatementChecker::VisitWithEnterStatement(
-    WithEnterStatement* stmt) {
+void BreakableStatementChecker::VisitWithStatement(WithStatement* stmt) {
   Visit(stmt->expression());
 }
 
 
-void BreakableStatementChecker::VisitWithExitStatement(
-    WithExitStatement* stmt) {
-}
-
-
 void BreakableStatementChecker::VisitSwitchStatement(SwitchStatement* stmt) {
   // Switch statements breakable if the tag expression is.
   Visit(stmt->tag());
@@ -187,17 +182,12 @@
 }
 
 
-void BreakableStatementChecker::VisitCatchExtensionObject(
-    CatchExtensionObject* expr) {
-}
-
-
 void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
   // If assigning to a property (including a global property) the assignment is
   // breakable.
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  VariableProxy* proxy = expr->target()->AsVariableProxy();
   Property* prop = expr->target()->AsProperty();
-  if (prop != NULL || (var != NULL && var->is_global())) {
+  if (prop != NULL || (proxy != NULL && proxy->var()->IsUnallocated())) {
     is_breakable_ = true;
     return;
   }
@@ -296,11 +286,13 @@
   }
   unsigned table_offset = cgen.EmitStackCheckTable();
 
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
   code->set_optimizable(info->IsOptimizable());
   cgen.PopulateDeoptimizationData(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+  code->set_has_debug_break_slots(
+      info->isolate()->debugger()->IsDebuggerActive());
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
@@ -322,7 +314,6 @@
   // field, and then a sequence of entries.  Each entry is a pair of AST id
   // and code-relative pc offset.
   masm()->Align(kIntSize);
-  masm()->RecordComment("[ Stack check table");
   unsigned offset = masm()->pc_offset();
   unsigned length = stack_checks_.length();
   __ dd(length);
@@ -330,7 +321,6 @@
     __ dd(stack_checks_[i].id);
     __ dd(stack_checks_[i].pc_and_state);
   }
-  masm()->RecordComment("]");
   return offset;
 }
 
@@ -351,7 +341,7 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailout(AstNode* node, State state) {
+void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
   PrepareForBailoutForId(node->id(), state);
 }
 
@@ -402,26 +392,6 @@
 }
 
 
-int FullCodeGenerator::SlotOffset(Slot* slot) {
-  ASSERT(slot != NULL);
-  // Offset is negative because higher indexes are at lower addresses.
-  int offset = -slot->index() * kPointerSize;
-  // Adjust by a (parameter or local) base offset.
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      offset += (scope()->num_parameters() + 1) * kPointerSize;
-      break;
-    case Slot::LOCAL:
-      offset += JavaScriptFrameConstants::kLocal0Offset;
-      break;
-    case Slot::CONTEXT:
-    case Slot::LOOKUP:
-      UNREACHABLE();
-  }
-  return offset;
-}
-
-
 bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
   // Inline smi case inside loops, but not division and modulo which
   // are too complicated and take up too much space.
@@ -442,6 +412,7 @@
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
   __ push(reg);
+  codegen()->increment_stack_height();
 }
 
 
@@ -449,17 +420,19 @@
   // For simplicity we always test the accumulator register.
   __ Move(result_register(), reg);
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
 }
 
 
 void FullCodeGenerator::EffectContext::PlugTOS() const {
   __ Drop(1);
+  codegen()->decrement_stack_height();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
   __ pop(result_register());
+  codegen()->decrement_stack_height();
 }
 
 
@@ -470,8 +443,9 @@
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
   __ pop(result_register());
+  codegen()->decrement_stack_height();
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
 }
 
 
@@ -521,37 +495,32 @@
 }
 
 
+void FullCodeGenerator::DoTest(const TestContext* context) {
+  DoTest(context->condition(),
+         context->true_label(),
+         context->false_label(),
+         context->fall_through());
+}
+
+
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
   int length = declarations->length();
-  int globals = 0;
+  int global_count = 0;
   for (int i = 0; i < length; i++) {
     Declaration* decl = declarations->at(i);
-    Variable* var = decl->proxy()->var();
-    Slot* slot = var->AsSlot();
-
-    // If it was not possible to allocate the variable at compile
-    // time, we need to "declare" it at runtime to make sure it
-    // actually exists in the local context.
-    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
-      VisitDeclaration(decl);
-    } else {
-      // Count global variables and functions for later processing
-      globals++;
-    }
+    EmitDeclaration(decl->proxy(), decl->mode(), decl->fun(), &global_count);
   }
 
-  // Compute array of global variable and function declarations.
-  // Do nothing in case of no declared global functions or variables.
-  if (globals > 0) {
+  // Batch declare global functions and variables.
+  if (global_count > 0) {
     Handle<FixedArray> array =
-        isolate()->factory()->NewFixedArray(2 * globals, TENURED);
+        isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
     for (int j = 0, i = 0; i < length; i++) {
       Declaration* decl = declarations->at(i);
       Variable* var = decl->proxy()->var();
-      Slot* slot = var->AsSlot();
 
-      if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+      if (var->IsUnallocated()) {
         array->set(j++, *(var->name()));
         if (decl->fun() == NULL) {
           if (var->mode() == Variable::CONST) {
@@ -573,95 +542,94 @@
       }
     }
     // Invoke the platform-dependent code generator to do the actual
-    // declaration the global variables and functions.
+    // declaration of the global functions and variables.
     DeclareGlobals(array);
   }
 }
 
 
+int FullCodeGenerator::DeclareGlobalsFlags() {
+  int flags = 0;
+  if (is_eval()) flags |= kDeclareGlobalsEvalFlag;
+  if (is_strict_mode()) flags |= kDeclareGlobalsStrictModeFlag;
+  if (is_native()) flags |= kDeclareGlobalsNativeFlag;
+  return flags;
+}
+
+
 void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) {
-    CodeGenerator::RecordPositions(masm_, fun->start_position());
-  }
+  CodeGenerator::RecordPositions(masm_, fun->start_position());
 }
 
 
 void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) {
-    CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
-  }
+  CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
 }
 
 
 void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
-  if (FLAG_debug_info) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    if (!isolate()->debugger()->IsDebuggerActive()) {
-      CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
-    } else {
-      // Check if the statement will be breakable without adding a debug break
-      // slot.
-      BreakableStatementChecker checker;
-      checker.Check(stmt);
-      // Record the statement position right here if the statement is not
-      // breakable. For breakable statements the actual recording of the
-      // position will be postponed to the breakable code (typically an IC).
-      bool position_recorded = CodeGenerator::RecordPositions(
-          masm_, stmt->statement_pos(), !checker.is_breakable());
-      // If the position recording did record a new position generate a debug
-      // break slot to make the statement breakable.
-      if (position_recorded) {
-        Debug::GenerateSlot(masm_);
-      }
-    }
-#else
+  if (!isolate()->debugger()->IsDebuggerActive()) {
     CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
-#endif
+  } else {
+    // Check if the statement will be breakable without adding a debug break
+    // slot.
+    BreakableStatementChecker checker;
+    checker.Check(stmt);
+    // Record the statement position right here if the statement is not
+    // breakable. For breakable statements the actual recording of the
+    // position will be postponed to the breakable code (typically an IC).
+    bool position_recorded = CodeGenerator::RecordPositions(
+        masm_, stmt->statement_pos(), !checker.is_breakable());
+    // If the position recording did record a new position generate a debug
+    // break slot to make the statement breakable.
+    if (position_recorded) {
+      Debug::GenerateSlot(masm_);
+    }
   }
+#else
+  CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+#endif
 }
 
 
 void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
-  if (FLAG_debug_info) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    if (!isolate()->debugger()->IsDebuggerActive()) {
-      CodeGenerator::RecordPositions(masm_, pos);
-    } else {
-      // Check if the expression will be breakable without adding a debug break
-      // slot.
-      BreakableStatementChecker checker;
-      checker.Check(expr);
-      // Record a statement position right here if the expression is not
-      // breakable. For breakable expressions the actual recording of the
-      // position will be postponed to the breakable code (typically an IC).
-      // NOTE this will record a statement position for something which might
-      // not be a statement. As stepping in the debugger will only stop at
-      // statement positions this is used for e.g. the condition expression of
-      // a do while loop.
-      bool position_recorded = CodeGenerator::RecordPositions(
-          masm_, pos, !checker.is_breakable());
-      // If the position recording did record a new position generate a debug
-      // break slot to make the statement breakable.
-      if (position_recorded) {
-        Debug::GenerateSlot(masm_);
-      }
-    }
-#else
+  if (!isolate()->debugger()->IsDebuggerActive()) {
     CodeGenerator::RecordPositions(masm_, pos);
-#endif
+  } else {
+    // Check if the expression will be breakable without adding a debug break
+    // slot.
+    BreakableStatementChecker checker;
+    checker.Check(expr);
+    // Record a statement position right here if the expression is not
+    // breakable. For breakable expressions the actual recording of the
+    // position will be postponed to the breakable code (typically an IC).
+    // NOTE this will record a statement position for something which might
+    // not be a statement. As stepping in the debugger will only stop at
+    // statement positions this is used for e.g. the condition expression of
+    // a do while loop.
+    bool position_recorded = CodeGenerator::RecordPositions(
+        masm_, pos, !checker.is_breakable());
+    // If the position recording did record a new position generate a debug
+    // break slot to make the statement breakable.
+    if (position_recorded) {
+      Debug::GenerateSlot(masm_);
+    }
   }
+#else
+  CodeGenerator::RecordPositions(masm_, pos);
+#endif
 }
 
 
 void FullCodeGenerator::SetStatementPosition(int pos) {
-  if (FLAG_debug_info) {
-    CodeGenerator::RecordPositions(masm_, pos);
-  }
+  CodeGenerator::RecordPositions(masm_, pos);
 }
 
 
 void FullCodeGenerator::SetSourcePosition(int pos) {
-  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+  if (pos != RelocInfo::kNoPosition) {
     masm_->positions_recorder()->RecordPosition(pos);
   }
 }
@@ -693,7 +661,6 @@
 
 void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
   ZoneList<Expression*>* args = node->arguments();
-  Handle<String> name = node->name();
   const Runtime::Function* function = node->function();
   ASSERT(function != NULL);
   ASSERT(function->intrinsic_type == Runtime::INLINE);
@@ -704,143 +671,116 @@
 
 
 void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = expr->op();
-  Expression* left = expr->left();
-  Expression* right = expr->right();
-
-  OverwriteMode mode = NO_OVERWRITE;
-  if (left->ResultOverwriteAllowed()) {
-    mode = OVERWRITE_LEFT;
-  } else if (right->ResultOverwriteAllowed()) {
-    mode = OVERWRITE_RIGHT;
-  }
-
-  switch (op) {
+  switch (expr->op()) {
     case Token::COMMA:
-      VisitForEffect(left);
-      if (context()->IsTest()) ForwardBailoutToChild(expr);
-      context()->HandleExpression(right);
-      break;
-
+      return VisitComma(expr);
     case Token::OR:
     case Token::AND:
-      EmitLogicalOperation(expr);
-      break;
-
-    case Token::ADD:
-    case Token::SUB:
-    case Token::DIV:
-    case Token::MOD:
-    case Token::MUL:
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      // Load both operands.
-      VisitForStackValue(left);
-      VisitForAccumulatorValue(right);
-
-      SetSourcePosition(expr->position());
-      if (ShouldInlineSmiCase(op)) {
-        EmitInlineSmiBinaryOp(expr, op, mode, left, right);
-      } else {
-        EmitBinaryOp(op, mode);
-      }
-      break;
-    }
-
+      return VisitLogicalExpression(expr);
     default:
-      UNREACHABLE();
+      return VisitArithmeticExpression(expr);
   }
 }
 
 
-void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
-  Label eval_right, done;
-
-  context()->EmitLogicalLeft(expr, &eval_right, &done);
-
-  PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
-  __ bind(&eval_right);
+void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
+  Comment cmnt(masm_, "[ Comma");
+  VisitForEffect(expr->left());
   if (context()->IsTest()) ForwardBailoutToChild(expr);
-  context()->HandleExpression(expr->right());
+  VisitInCurrentContext(expr->right());
+}
 
+
+void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
+  bool is_logical_and = expr->op() == Token::AND;
+  Comment cmnt(masm_, is_logical_and ? "[ Logical AND" :  "[ Logical OR");
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+  int right_id = expr->RightId();
+  Label done;
+
+  if (context()->IsTest()) {
+    Label eval_right;
+    const TestContext* test = TestContext::cast(context());
+    if (is_logical_and) {
+      VisitForControl(left, &eval_right, test->false_label(), &eval_right);
+    } else {
+      VisitForControl(left, test->true_label(), &eval_right, &eval_right);
+    }
+    PrepareForBailoutForId(right_id, NO_REGISTERS);
+    __ bind(&eval_right);
+    ForwardBailoutToChild(expr);
+
+  } else if (context()->IsAccumulatorValue()) {
+    VisitForAccumulatorValue(left);
+    // We want the value in the accumulator for the test, and on the stack in
+    // case we need it.
+    __ push(result_register());
+    Label discard, restore;
+    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+    if (is_logical_and) {
+      DoTest(left, &discard, &restore, &restore);
+    } else {
+      DoTest(left, &restore, &discard, &restore);
+    }
+    __ bind(&restore);
+    __ pop(result_register());
+    __ jmp(&done);
+    __ bind(&discard);
+    __ Drop(1);
+    PrepareForBailoutForId(right_id, NO_REGISTERS);
+
+  } else if (context()->IsStackValue()) {
+    VisitForAccumulatorValue(left);
+    // We want the value in the accumulator for the test, and on the stack in
+    // case we need it.
+    __ push(result_register());
+    Label discard;
+    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+    if (is_logical_and) {
+      DoTest(left, &discard, &done, &discard);
+    } else {
+      DoTest(left, &done, &discard, &discard);
+    }
+    __ bind(&discard);
+    __ Drop(1);
+    PrepareForBailoutForId(right_id, NO_REGISTERS);
+
+  } else {
+    ASSERT(context()->IsEffect());
+    Label eval_right;
+    if (is_logical_and) {
+      VisitForControl(left, &eval_right, &done, &eval_right);
+    } else {
+      VisitForControl(left, &done, &eval_right, &eval_right);
+    }
+    PrepareForBailoutForId(right_id, NO_REGISTERS);
+    __ bind(&eval_right);
+  }
+
+  VisitInCurrentContext(right);
   __ bind(&done);
 }
 
 
-void FullCodeGenerator::EffectContext::EmitLogicalLeft(BinaryOperation* expr,
-                                                       Label* eval_right,
-                                                       Label* done) const {
-  if (expr->op() == Token::OR) {
-    codegen()->VisitForControl(expr->left(), done, eval_right, eval_right);
+void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+  Token::Value op = expr->op();
+  Comment cmnt(masm_, "[ ArithmeticExpression");
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+  OverwriteMode mode =
+      left->ResultOverwriteAllowed()
+      ? OVERWRITE_LEFT
+      : (right->ResultOverwriteAllowed() ? OVERWRITE_RIGHT : NO_OVERWRITE);
+
+  VisitForStackValue(left);
+  VisitForAccumulatorValue(right);
+
+  SetSourcePosition(expr->position());
+  if (ShouldInlineSmiCase(op)) {
+    EmitInlineSmiBinaryOp(expr, op, mode, left, right);
   } else {
-    ASSERT(expr->op() == Token::AND);
-    codegen()->VisitForControl(expr->left(), eval_right, done, eval_right);
-  }
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
-    BinaryOperation* expr,
-    Label* eval_right,
-    Label* done) const {
-  HandleExpression(expr->left());
-  // We want the value in the accumulator for the test, and on the stack in case
-  // we need it.
-  __ push(result_register());
-  Label discard, restore;
-  if (expr->op() == Token::OR) {
-    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-    codegen()->DoTest(&restore, &discard, &restore);
-  } else {
-    ASSERT(expr->op() == Token::AND);
-    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-    codegen()->DoTest(&discard, &restore, &restore);
-  }
-  __ bind(&restore);
-  __ pop(result_register());
-  __ jmp(done);
-  __ bind(&discard);
-  __ Drop(1);
-}
-
-
-void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
-    BinaryOperation* expr,
-    Label* eval_right,
-    Label* done) const {
-  codegen()->VisitForAccumulatorValue(expr->left());
-  // We want the value in the accumulator for the test, and on the stack in case
-  // we need it.
-  __ push(result_register());
-  Label discard;
-  if (expr->op() == Token::OR) {
-    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-    codegen()->DoTest(done, &discard, &discard);
-  } else {
-    ASSERT(expr->op() == Token::AND);
-    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-    codegen()->DoTest(&discard, done, &discard);
-  }
-  __ bind(&discard);
-  __ Drop(1);
-}
-
-
-void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
-                                                     Label* eval_right,
-                                                     Label* done) const {
-  if (expr->op() == Token::OR) {
-    codegen()->VisitForControl(expr->left(),
-                               true_label_, eval_right, eval_right);
-  } else {
-    ASSERT(expr->op() == Token::AND);
-    codegen()->VisitForControl(expr->left(),
-                               eval_right, false_label_, eval_right);
+    EmitBinaryOp(expr, op, mode);
   }
 }
 
@@ -853,58 +793,59 @@
 }
 
 
-void FullCodeGenerator::EffectContext::HandleExpression(
-    Expression* expr) const {
-  codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::HandleExpression(
-    Expression* expr) const {
-  codegen()->HandleInNonTestContext(expr, TOS_REG);
-}
-
-
-void FullCodeGenerator::StackValueContext::HandleExpression(
-    Expression* expr) const {
-  codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::TestContext::HandleExpression(Expression* expr) const {
-  codegen()->VisitInTestContext(expr);
-}
-
-
-void FullCodeGenerator::HandleInNonTestContext(Expression* expr, State state) {
-  ASSERT(forward_bailout_pending_ == NULL);
-  AstVisitor::Visit(expr);
-  PrepareForBailout(expr, state);
-  // Forwarding bailouts to children is a one shot operation. It
-  // should have been processed at this point.
-  ASSERT(forward_bailout_pending_ == NULL);
-}
-
-
-void FullCodeGenerator::VisitInTestContext(Expression* expr) {
-  ForwardBailoutStack stack(expr, forward_bailout_pending_);
-  ForwardBailoutStack* saved = forward_bailout_stack_;
-  forward_bailout_pending_ = NULL;
-  forward_bailout_stack_ = &stack;
-  AstVisitor::Visit(expr);
-  forward_bailout_stack_ = saved;
+void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
+  if (context()->IsTest()) {
+    ForwardBailoutStack stack(expr, forward_bailout_pending_);
+    ForwardBailoutStack* saved = forward_bailout_stack_;
+    forward_bailout_pending_ = NULL;
+    forward_bailout_stack_ = &stack;
+    Visit(expr);
+    forward_bailout_stack_ = saved;
+  } else {
+    ASSERT(forward_bailout_pending_ == NULL);
+    Visit(expr);
+    State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
+    PrepareForBailout(expr, state);
+    // Forwarding bailouts to children is a one shot operation. It should have
+    // been processed at this point.
+    ASSERT(forward_bailout_pending_ == NULL);
+  }
 }
 
 
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
-  Breakable nested_statement(this, stmt);
+  NestedBlock nested_block(this, stmt);
   SetStatementPosition(stmt);
 
+  Scope* saved_scope = scope();
+  // Push a block context when entering a block with block scoped variables.
+  if (stmt->block_scope() != NULL) {
+    { Comment cmnt(masm_, "[ Extend block context");
+      scope_ = stmt->block_scope();
+      __ Push(scope_->GetSerializedScopeInfo());
+      PushFunctionArgumentForContextAllocation();
+      __ CallRuntime(Runtime::kPushBlockContext, 2);
+      StoreToFrameField(StandardFrameConstants::kContextOffset,
+                        context_register());
+    }
+    { Comment cmnt(masm_, "[ Declarations");
+      VisitDeclarations(scope_->declarations());
+    }
+  }
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   VisitStatements(stmt->statements());
-  __ bind(nested_statement.break_target());
+  scope_ = saved_scope;
+  __ bind(nested_block.break_label());
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+
+  // Pop block context if necessary.
+  if (stmt->block_scope() != NULL) {
+    LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+    // Update local stack frame context field.
+    StoreToFrameField(StandardFrameConstants::kContextOffset,
+                      context_register());
+  }
 }
 
 
@@ -945,7 +886,7 @@
     PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
   }
   __ bind(&done);
-  PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
 }
 
 
@@ -954,19 +895,26 @@
   SetStatementPosition(stmt);
   NestedStatement* current = nesting_stack_;
   int stack_depth = 0;
+  int context_length = 0;
   // When continuing, we clobber the unpredictable value in the accumulator
   // with one that's safe for GC.  If we hit an exit from the try block of
   // try...finally on our way out, we will unconditionally preserve the
   // accumulator on the stack.
   ClearAccumulator();
   while (!current->IsContinueTarget(stmt->target())) {
-    stack_depth = current->Exit(stack_depth);
-    current = current->outer();
+    current = current->Exit(&stack_depth, &context_length);
   }
   __ Drop(stack_depth);
+  if (context_length > 0) {
+    while (context_length > 0) {
+      LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+      --context_length;
+    }
+    StoreToFrameField(StandardFrameConstants::kContextOffset,
+                      context_register());
+  }
 
-  Iteration* loop = current->AsIteration();
-  __ jmp(loop->continue_target());
+  __ jmp(current->AsIteration()->continue_label());
 }
 
 
@@ -975,19 +923,26 @@
   SetStatementPosition(stmt);
   NestedStatement* current = nesting_stack_;
   int stack_depth = 0;
+  int context_length = 0;
   // When breaking, we clobber the unpredictable value in the accumulator
   // with one that's safe for GC.  If we hit an exit from the try block of
   // try...finally on our way out, we will unconditionally preserve the
   // accumulator on the stack.
   ClearAccumulator();
   while (!current->IsBreakTarget(stmt->target())) {
-    stack_depth = current->Exit(stack_depth);
-    current = current->outer();
+    current = current->Exit(&stack_depth, &context_length);
   }
   __ Drop(stack_depth);
+  if (context_length > 0) {
+    while (context_length > 0) {
+      LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+      --context_length;
+    }
+    StoreToFrameField(StandardFrameConstants::kContextOffset,
+                      context_register());
+  }
 
-  Breakable* target = current->AsBreakable();
-  __ jmp(target->break_target());
+  __ jmp(current->AsBreakable()->break_label());
 }
 
 
@@ -1000,9 +955,9 @@
   // Exit all nested statements.
   NestedStatement* current = nesting_stack_;
   int stack_depth = 0;
+  int context_length = 0;
   while (current != NULL) {
-    stack_depth = current->Exit(stack_depth);
-    current = current->outer();
+    current = current->Exit(&stack_depth, &context_length);
   }
   __ Drop(stack_depth);
 
@@ -1010,27 +965,19 @@
 }
 
 
-void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  Comment cmnt(masm_, "[ WithEnterStatement");
+void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
+  Comment cmnt(masm_, "[ WithStatement");
   SetStatementPosition(stmt);
 
   VisitForStackValue(stmt->expression());
-  if (stmt->is_catch_block()) {
-    __ CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    __ CallRuntime(Runtime::kPushContext, 1);
-  }
-  // Both runtime calls return the new context in both the context and the
-  // result registers.
-
-  // Update local stack frame context field.
+  PushFunctionArgumentForContextAllocation();
+  __ CallRuntime(Runtime::kPushWithContext, 2);
+  decrement_stack_height();
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-}
 
-
-void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
-  Comment cmnt(masm_, "[ WithExitStatement");
-  SetStatementPosition(stmt);
+  { WithOrCatch body(this);
+    Visit(stmt->statement());
+  }
 
   // Pop context.
   LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -1052,12 +999,12 @@
 
   // Record the position of the do while condition and make sure it is
   // possible to break on the condition.
-  __ bind(loop_statement.continue_target());
+  __ bind(loop_statement.continue_label());
   PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
   SetExpressionPosition(stmt->cond(), stmt->condition_position());
   VisitForControl(stmt->cond(),
                   &stack_check,
-                  loop_statement.break_target(),
+                  loop_statement.break_label(),
                   &stack_check);
 
   // Check stack before looping.
@@ -1067,7 +1014,7 @@
   __ jmp(&body);
 
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_target());
+  __ bind(loop_statement.break_label());
   decrement_loop_depth();
 }
 
@@ -1088,7 +1035,7 @@
 
   // Emit the statement position here as this is where the while
   // statement code starts.
-  __ bind(loop_statement.continue_target());
+  __ bind(loop_statement.continue_label());
   SetStatementPosition(stmt);
 
   // Check stack before looping.
@@ -1097,11 +1044,11 @@
   __ bind(&test);
   VisitForControl(stmt->cond(),
                   &body,
-                  loop_statement.break_target(),
-                  loop_statement.break_target());
+                  loop_statement.break_label(),
+                  loop_statement.break_label());
 
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_target());
+  __ bind(loop_statement.break_label());
   decrement_loop_depth();
 }
 
@@ -1124,7 +1071,7 @@
   Visit(stmt->body());
 
   PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
-  __ bind(loop_statement.continue_target());
+  __ bind(loop_statement.continue_label());
   SetStatementPosition(stmt);
   if (stmt->next() != NULL) {
     Visit(stmt->next());
@@ -1141,14 +1088,14 @@
   if (stmt->cond() != NULL) {
     VisitForControl(stmt->cond(),
                     &body,
-                    loop_statement.break_target(),
-                    loop_statement.break_target());
+                    loop_statement.break_label(),
+                    loop_statement.break_label());
   } else {
     __ jmp(&body);
   }
 
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_target());
+  __ bind(loop_statement.break_label());
   decrement_loop_depth();
 }
 
@@ -1166,31 +1113,42 @@
   // to introduce a new scope to bind the catch variable and to remove
   // that scope again afterwards.
 
-  Label try_handler_setup, catch_entry, done;
+  Label try_handler_setup, done;
   __ Call(&try_handler_setup);
   // Try handler code, exception in result register.
 
-  // Store exception in local .catch variable before executing catch block.
-  {
-    // The catch variable is *always* a variable proxy for a local variable.
-    Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
-    ASSERT_NOT_NULL(catch_var);
-    Slot* variable_slot = catch_var->AsSlot();
-    ASSERT_NOT_NULL(variable_slot);
-    ASSERT_EQ(Slot::LOCAL, variable_slot->type());
-    StoreToFrameField(SlotOffset(variable_slot), result_register());
+  // Extend the context before executing the catch block.
+  { Comment cmnt(masm_, "[ Extend catch context");
+    __ Push(stmt->variable()->name());
+    __ push(result_register());
+    PushFunctionArgumentForContextAllocation();
+    __ CallRuntime(Runtime::kPushCatchContext, 3);
+    StoreToFrameField(StandardFrameConstants::kContextOffset,
+                      context_register());
   }
 
-  Visit(stmt->catch_block());
+  Scope* saved_scope = scope();
+  scope_ = stmt->scope();
+  ASSERT(scope_->declarations()->is_empty());
+  { WithOrCatch body(this);
+    Visit(stmt->catch_block());
+  }
+  // Restore the context.
+  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+  scope_ = saved_scope;
   __ jmp(&done);
 
   // Try block code. Sets up the exception handler chain.
   __ bind(&try_handler_setup);
   {
-    TryCatch try_block(this, &catch_entry);
+    const int delta = StackHandlerConstants::kSize / kPointerSize;
+    TryCatch try_block(this);
     __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+    increment_stack_height(delta);
     Visit(stmt->try_block());
     __ PopTryHandler();
+    decrement_stack_height(delta);
   }
   __ bind(&done);
 }
@@ -1222,6 +1180,7 @@
   // cooked before GC.
   Label finally_entry;
   Label try_handler_setup;
+  const int original_stack_height = stack_height();
 
   // Set up the try-handler chain.
   // Jump to try-handler setup and try-block code. Use call to put try-handler
@@ -1230,9 +1189,9 @@
   // Try handler code. Return address of call is pushed on handler stack.
   {
     // This code is only executed during stack-handler traversal when an
-    // exception is thrown. The execption is in the result register, which
+    // exception is thrown. The exception is in the result register, which
     // is retained by the finally block.
-    // Call the finally block and then rethrow the exception.
+    // Call the finally block and then rethrow the exception if it returns.
     __ Call(&finally_entry);
     __ push(result_register());
     __ CallRuntime(Runtime::kReThrow, 1);
@@ -1243,6 +1202,7 @@
     // Finally block implementation.
     Finally finally_block(this);
     EnterFinallyBlock();
+    set_stack_height(original_stack_height + Finally::kElementCount);
     Visit(stmt->finally_block());
     ExitFinallyBlock();  // Return to the calling code.
   }
@@ -1250,10 +1210,13 @@
   __ bind(&try_handler_setup);
   {
     // Set up the try handler (stack pointer registers).
+    const int delta = StackHandlerConstants::kSize / kPointerSize;
     TryFinally try_block(this, &finally_entry);
     __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+    set_stack_height(original_stack_height + delta);
     Visit(stmt->try_block());
     __ PopTryHandler();
+    set_stack_height(original_stack_height);
   }
   // Execute the finally block on the way out.  Clobber the unpredictable
   // value in the accumulator with one that's safe for GC.  The finally
@@ -1283,6 +1246,7 @@
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
                         expr->then_expression_position());
+  int start_stack_height = stack_height();
   if (context()->IsTest()) {
     const TestContext* for_test = TestContext::cast(context());
     VisitForControl(expr->then_expression(),
@@ -1290,16 +1254,17 @@
                     for_test->false_label(),
                     NULL);
   } else {
-    context()->HandleExpression(expr->then_expression());
+    VisitInCurrentContext(expr->then_expression());
     __ jmp(&done);
   }
 
   PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
+  set_stack_height(start_stack_height);
   if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
                         expr->else_expression_position());
-  context()->HandleExpression(expr->else_expression());
+  VisitInCurrentContext(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
   if (!context()->IsTest()) {
     __ bind(&done);
@@ -1334,40 +1299,45 @@
 }
 
 
-void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  VisitForStackValue(expr->key());
-  VisitForStackValue(expr->value());
-  // Create catch extension object.
-  __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  context()->Plug(result_register());
-}
-
-
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
+  // Throw has no effect on the stack height or the current expression context.
+  // Usually the expression context is null, because throw is a statement.
   VisitForStackValue(expr->exception());
   __ CallRuntime(Runtime::kThrow, 1);
+  decrement_stack_height();
   // Never returns here.
 }
 
 
-int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
+    int* stack_depth,
+    int* context_length) {
   // The macros used here must preserve the result register.
-  __ Drop(stack_depth);
+  __ Drop(*stack_depth);
   __ PopTryHandler();
-  __ Call(finally_entry_);
-  return 0;
+  *stack_depth = 0;
+  return previous_;
 }
 
 
-int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
-  // The macros used here must preserve the result register.
-  __ Drop(stack_depth);
-  __ PopTryHandler();
-  return 0;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
+                                          Label* if_true,
+                                          Label* if_false,
+                                          Label* fall_through) {
+  Expression* expr;
+  Handle<String> check;
+  if (compare->IsLiteralCompareTypeof(&expr, &check)) {
+    EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
+    return true;
+  }
+
+  if (compare->IsLiteralCompareUndefined(&expr)) {
+    EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
+    return true;
+  }
+
+  return false;
 }
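
The two Emit* helpers this dispatcher calls are declared in the full-codegen.h hunk below. As a hedged illustration (the actual matching lives in CompareOperation::IsLiteralCompareTypeof and IsLiteralCompareUndefined, which are not part of this diff), these are the kinds of comparisons the fast paths target:

    // Illustrative examples only, inferred from the helper names and the
    // header comments below:
    //
    //   typeof x == 'function'    // EmitLiteralCompareTypeof
    //   x === undefined           // EmitLiteralCompareUndefined
    //
    // Any other CompareOperation falls through to the generic compare code,
    // since TryLiteralCompare() returns false.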
 
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index d6ed1b9..803c618 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "ast.h"
 #include "code-stubs.h"
 #include "codegen.h"
@@ -79,8 +80,10 @@
   explicit FullCodeGenerator(MacroAssembler* masm)
       : masm_(masm),
         info_(NULL),
+        scope_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
+        stack_height_(0),
         context_(NULL),
         bailout_entries_(0),
         stack_checks_(2),  // There's always at least one.
@@ -108,10 +111,8 @@
  private:
   class Breakable;
   class Iteration;
-  class TryCatch;
-  class TryFinally;
-  class Finally;
-  class ForIn;
+
+  class TestContext;
 
   class NestedStatement BASE_EMBEDDED {
    public:
@@ -128,132 +129,151 @@
 
     virtual Breakable* AsBreakable() { return NULL; }
     virtual Iteration* AsIteration() { return NULL; }
-    virtual TryCatch* AsTryCatch() { return NULL; }
-    virtual TryFinally* AsTryFinally() { return NULL; }
-    virtual Finally* AsFinally() { return NULL; }
-    virtual ForIn* AsForIn() { return NULL; }
 
     virtual bool IsContinueTarget(Statement* target) { return false; }
     virtual bool IsBreakTarget(Statement* target) { return false; }
 
-    // Generate code to leave the nested statement. This includes
-    // cleaning up any stack elements in use and restoring the
-    // stack to the expectations of the surrounding statements.
-    // Takes a number of stack elements currently on top of the
-    // nested statement's stack, and returns a number of stack
-    // elements left on top of the surrounding statement's stack.
-    // The generated code must preserve the result register (which
-    // contains the value in case of a return).
-    virtual int Exit(int stack_depth) {
-      // Default implementation for the case where there is
-      // nothing to clean up.
-      return stack_depth;
+    // Notify the statement that we are exiting it via break, continue, or
+    // return and give it a chance to generate cleanup code.  Return the
+    // next outer statement in the nesting stack.  We accumulate in
+    // *stack_depth the amount to drop the stack and in *context_length the
+    // number of context chain links to unwind as we traverse the nesting
+    // stack from an exit to its target.
+    virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+      return previous_;
     }
-    NestedStatement* outer() { return previous_; }
-   protected:
+
+   protected:
     MacroAssembler* masm() { return codegen_->masm(); }
-   private:
+
     FullCodeGenerator* codegen_;
     NestedStatement* previous_;
     DISALLOW_COPY_AND_ASSIGN(NestedStatement);
   };
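
The Exit() contract described above is easiest to read next to its consumer. The rewritten break/continue visitors earlier in this diff reduce to the following loop (condensed here for reference only):

    NestedStatement* current = nesting_stack_;
    int stack_depth = 0;
    int context_length = 0;
    while (!current->IsBreakTarget(target)) {  // or IsContinueTarget(target)
      current = current->Exit(&stack_depth, &context_length);
    }
    __ Drop(stack_depth);                      // discard accumulated slots
    if (context_length > 0) {                  // unwind accumulated contexts
      while (context_length-- > 0) {
        LoadContextField(context_register(), Context::PREVIOUS_INDEX);
      }
      StoreToFrameField(StandardFrameConstants::kContextOffset,
                        context_register());
    }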
 
+  // A breakable statement such as a block.
   class Breakable : public NestedStatement {
    public:
-    Breakable(FullCodeGenerator* codegen,
-              BreakableStatement* break_target)
-        : NestedStatement(codegen),
-          target_(break_target) {}
-    virtual ~Breakable() {}
-    virtual Breakable* AsBreakable() { return this; }
-    virtual bool IsBreakTarget(Statement* statement) {
-      return target_ == statement;
+    Breakable(FullCodeGenerator* codegen, BreakableStatement* statement)
+        : NestedStatement(codegen), statement_(statement) {
     }
-    BreakableStatement* statement() { return target_; }
-    Label* break_target() { return &break_target_label_; }
+    virtual ~Breakable() {}
+
+    virtual Breakable* AsBreakable() { return this; }
+    virtual bool IsBreakTarget(Statement* target) {
+      return statement() == target;
+    }
+
+    BreakableStatement* statement() { return statement_; }
+    Label* break_label() { return &break_label_; }
+
    private:
-    BreakableStatement* target_;
-    Label break_target_label_;
-    DISALLOW_COPY_AND_ASSIGN(Breakable);
+    BreakableStatement* statement_;
+    Label break_label_;
   };
 
+  // An iteration statement such as a while, for, or do loop.
   class Iteration : public Breakable {
    public:
-    Iteration(FullCodeGenerator* codegen,
-              IterationStatement* iteration_statement)
-        : Breakable(codegen, iteration_statement) {}
-    virtual ~Iteration() {}
-    virtual Iteration* AsIteration() { return this; }
-    virtual bool IsContinueTarget(Statement* statement) {
-      return this->statement() == statement;
+    Iteration(FullCodeGenerator* codegen, IterationStatement* statement)
+        : Breakable(codegen, statement) {
     }
-    Label* continue_target() { return &continue_target_label_; }
+    virtual ~Iteration() {}
+
+    virtual Iteration* AsIteration() { return this; }
+    virtual bool IsContinueTarget(Statement* target) {
+      return statement() == target;
+    }
+
+    Label* continue_label() { return &continue_label_; }
+
    private:
-    Label continue_target_label_;
-    DISALLOW_COPY_AND_ASSIGN(Iteration);
+    Label continue_label_;
   };
 
-  // The environment inside the try block of a try/catch statement.
+  // A nested block statement.
+  class NestedBlock : public Breakable {
+   public:
+    NestedBlock(FullCodeGenerator* codegen, Block* block)
+        : Breakable(codegen, block) {
+    }
+    virtual ~NestedBlock() {}
+
+    virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+      if (statement()->AsBlock()->block_scope() != NULL) {
+        ++(*context_length);
+      }
+      return previous_;
+    }
+  };
+
+  // The try block of a try/catch statement.
   class TryCatch : public NestedStatement {
    public:
-    explicit TryCatch(FullCodeGenerator* codegen, Label* catch_entry)
-        : NestedStatement(codegen), catch_entry_(catch_entry) { }
+    explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {
+    }
     virtual ~TryCatch() {}
-    virtual TryCatch* AsTryCatch() { return this; }
-    Label* catch_entry() { return catch_entry_; }
-    virtual int Exit(int stack_depth);
-   private:
-    Label* catch_entry_;
-    DISALLOW_COPY_AND_ASSIGN(TryCatch);
+
+    virtual NestedStatement* Exit(int* stack_depth, int* context_length);
   };
 
-  // The environment inside the try block of a try/finally statement.
+  // The try block of a try/finally statement.
   class TryFinally : public NestedStatement {
    public:
-    explicit TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
-        : NestedStatement(codegen), finally_entry_(finally_entry) { }
+    TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
+        : NestedStatement(codegen), finally_entry_(finally_entry) {
+    }
     virtual ~TryFinally() {}
-    virtual TryFinally* AsTryFinally() { return this; }
-    Label* finally_entry() { return finally_entry_; }
-    virtual int Exit(int stack_depth);
+
+    virtual NestedStatement* Exit(int* stack_depth, int* context_length);
+
    private:
     Label* finally_entry_;
-    DISALLOW_COPY_AND_ASSIGN(TryFinally);
   };
 
-  // A FinallyEnvironment represents being inside a finally block.
-  // Abnormal termination of the finally block needs to clean up
-  // the block's parameters from the stack.
+  // The finally block of a try/finally statement.
   class Finally : public NestedStatement {
    public:
+    static const int kElementCount = 2;
+
     explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
     virtual ~Finally() {}
-    virtual Finally* AsFinally() { return this; }
-    virtual int Exit(int stack_depth) {
-      return stack_depth + kFinallyStackElementCount;
+
+    virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+      *stack_depth += kElementCount;
+      return previous_;
     }
-   private:
-    // Number of extra stack slots occupied during a finally block.
-    static const int kFinallyStackElementCount = 2;
-    DISALLOW_COPY_AND_ASSIGN(Finally);
   };
 
-  // A ForInEnvironment represents being inside a for-in loop.
-  // Abnormal termination of the for-in block needs to clean up
-  // the block's temporary storage from the stack.
+  // The body of a for/in loop.
   class ForIn : public Iteration {
    public:
-    ForIn(FullCodeGenerator* codegen,
-          ForInStatement* statement)
-        : Iteration(codegen, statement) { }
-    virtual ~ForIn() {}
-    virtual ForIn* AsForIn() { return this; }
-    virtual int Exit(int stack_depth) {
-      return stack_depth + kForInStackElementCount;
+    static const int kElementCount = 5;
+
+    ForIn(FullCodeGenerator* codegen, ForInStatement* statement)
+        : Iteration(codegen, statement) {
     }
-   private:
-    static const int kForInStackElementCount = 5;
-    DISALLOW_COPY_AND_ASSIGN(ForIn);
+    virtual ~ForIn() {}
+
+    virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+      *stack_depth += kElementCount;
+      return previous_;
+    }
+  };
+
+
+  // The body of a with or catch.
+  class WithOrCatch : public NestedStatement {
+   public:
+    explicit WithOrCatch(FullCodeGenerator* codegen)
+        : NestedStatement(codegen) {
+    }
+    virtual ~WithOrCatch() {}
+
+    virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+      ++(*context_length);
+      return previous_;
+    }
   };
 
   // The forward bailout stack keeps track of the expressions that can
@@ -284,10 +304,6 @@
   // with a GC-safe value.
   void ClearAccumulator();
 
-  // Compute the frame pointer relative offset for a given local or
-  // parameter slot.
-  int SlotOffset(Slot* slot);
-
   // Determine whether or not to inline the smi case for the given
   // operation.
   bool ShouldInlineSmiCase(Token::Value op);
@@ -295,22 +311,51 @@
   // Helper function to convert a pure value into a test context.  The value
   // is expected on the stack or the accumulator, depending on the platform.
   // See the platform-specific implementation for details.
-  void DoTest(Label* if_true, Label* if_false, Label* fall_through);
+  void DoTest(Expression* condition,
+              Label* if_true,
+              Label* if_false,
+              Label* fall_through);
+  void DoTest(const TestContext* context);
 
   // Helper function to split control flow and avoid a branch to the
   // fall-through label if it is set up.
+#ifdef V8_TARGET_ARCH_MIPS
+  void Split(Condition cc,
+             Register lhs,
+             const Operand& rhs,
+             Label* if_true,
+             Label* if_false,
+             Label* fall_through);
+#else  // All non-mips arch.
   void Split(Condition cc,
              Label* if_true,
              Label* if_false,
              Label* fall_through);
+#endif  // V8_TARGET_ARCH_MIPS
 
-  void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
-  void Move(Register dst, Slot* source);
+  // Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into
+  // a register.  Emits a context chain walk if necessary (so does SetVar),
+  // so avoid calling both on the same variable.
+  void GetVar(Register destination, Variable* var);
 
-  // Return an operand used to read/write to a known (ie, non-LOOKUP) slot.
-  // May emit code to traverse the context chain, destroying the scratch
-  // register.
-  MemOperand EmitSlotSearch(Slot* slot, Register scratch);
+  // Assign to a known (PARAMETER, LOCAL, or CONTEXT) variable.  If it's in
+  // the context, the write barrier will be emitted and source, scratch0,
+  // scratch1 will be clobbered.  Emits a context chain walk if necessary
+  // (so does GetVar), so avoid calling both on the same variable.
+  void SetVar(Variable* var,
+              Register source,
+              Register scratch0,
+              Register scratch1);
+
+  // An operand used to read/write a stack-allocated (PARAMETER or LOCAL)
+  // variable.  Writing does not need the write barrier.
+  MemOperand StackOperand(Variable* var);
+
+  // An operand used to read/write a known (PARAMETER, LOCAL, or CONTEXT)
+  // variable.  May emit code to traverse the context chain, loading the
+  // found context into the scratch register.  Writing to this operand will
+  // need the write barrier if location is CONTEXT.
+  MemOperand VarOperand(Variable* var, Register scratch);
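
A hypothetical usage sketch of the new accessors (not taken from the patch; the real callers are the platform-specific backends, and `var`, `scratch0`, `scratch1` are placeholders here):

    GetVar(result_register(), var);          // may walk the context chain
    // ... compute a new value into result_register() ...
    SetVar(var, result_register(), scratch0, scratch1);  // write barrier if
                                                         // var is in a context

Each call emits its own chain walk, which is why the comments above advise against calling both on the same variable back to back.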
 
   // Forward the bailout responsibility for the given expression to
   // the next child visited (which must be in a test context).
@@ -318,48 +363,56 @@
 
   void VisitForEffect(Expression* expr) {
     EffectContext context(this);
-    HandleInNonTestContext(expr, NO_REGISTERS);
+    VisitInCurrentContext(expr);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     AccumulatorValueContext context(this);
-    HandleInNonTestContext(expr, TOS_REG);
+    VisitInCurrentContext(expr);
   }
 
   void VisitForStackValue(Expression* expr) {
     StackValueContext context(this);
-    HandleInNonTestContext(expr, NO_REGISTERS);
+    VisitInCurrentContext(expr);
   }
 
   void VisitForControl(Expression* expr,
                        Label* if_true,
                        Label* if_false,
                        Label* fall_through) {
-    TestContext context(this, if_true, if_false, fall_through);
-    VisitInTestContext(expr);
-    // Forwarding bailouts to children is a one shot operation. It
-    // should have been processed at this point.
-    ASSERT(forward_bailout_pending_ == NULL);
+    TestContext context(this, expr, if_true, if_false, fall_through);
+    VisitInCurrentContext(expr);
   }
 
-  void HandleInNonTestContext(Expression* expr, State state);
-  void VisitInTestContext(Expression* expr);
-
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
+  int DeclareGlobalsFlags();
 
   // Try to perform a comparison as a fast inlined literal compare if
   // the operands allow it.  Returns true if the compare operations
   // has been matched and all code generated; false otherwise.
-  bool TryLiteralCompare(Token::Value op,
-                         Expression* left,
-                         Expression* right,
+  bool TryLiteralCompare(CompareOperation* compare,
                          Label* if_true,
                          Label* if_false,
                          Label* fall_through);
 
+  // Platform-specific code for comparing the type of a value with
+  // a given literal string.
+  void EmitLiteralCompareTypeof(Expression* expr,
+                                Handle<String> check,
+                                Label* if_true,
+                                Label* if_false,
+                                Label* fall_through);
+
+  // Platform-specific code for strict equality comparison with
+  // the undefined value.
+  void EmitLiteralCompareUndefined(Expression* expr,
+                                   Label* if_true,
+                                   Label* if_false,
+                                   Label* fall_through);
+
   // Bailout support.
-  void PrepareForBailout(AstNode* node, State state);
+  void PrepareForBailout(Expression* node, State state);
   void PrepareForBailoutForId(int id, State state);
 
   // Record a call's return site offset, used to rebuild the frame if the
@@ -378,9 +431,10 @@
 
   // Platform-specific code for a variable, constant, or function
   // declaration.  Functions have an initial value.
-  void EmitDeclaration(Variable* variable,
+  void EmitDeclaration(VariableProxy* proxy,
                        Variable::Mode mode,
-                       FunctionLiteral* function);
+                       FunctionLiteral* function,
+                       int* global_count);
 
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
@@ -395,9 +449,9 @@
   void EmitReturnSequence();
 
   // Platform-specific code sequences for calls
-  void EmitCallWithStub(Call* expr);
+  void EmitCallWithStub(Call* expr, CallFunctionFlags flags);
   void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
-  void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
+  void EmitKeyedCallWithIC(Call* expr, Expression* key);
 
   // Platform-specific code for inline runtime calls.
   InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
@@ -411,15 +465,15 @@
 #undef EMIT_INLINE_RUNTIME_CALL
 
   // Platform-specific code for loading variables.
-  void EmitLoadGlobalSlotCheckExtensions(Slot* slot,
-                                         TypeofState typeof_state,
-                                         Label* slow);
-  MemOperand ContextSlotOperandCheckExtensions(Slot* slot, Label* slow);
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       Label* slow,
-                                       Label* done);
-  void EmitVariableLoad(Variable* expr);
+  void EmitLoadGlobalCheckExtensions(Variable* var,
+                                     TypeofState typeof_state,
+                                     Label* slow);
+  MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow);
+  void EmitDynamicLookupFastCase(Variable* var,
+                                 TypeofState typeof_state,
+                                 Label* slow,
+                                 Label* done);
+  void EmitVariableLoad(VariableProxy* proxy);
 
   enum ResolveEvalFlag {
     SKIP_CONTEXT_LOOKUP,
@@ -445,12 +499,13 @@
 
   // Apply the compound assignment operator. Expects the left operand on top
   // of the stack and the right one in the accumulator.
-  void EmitBinaryOp(Token::Value op,
+  void EmitBinaryOp(BinaryOperation* expr,
+                    Token::Value op,
                     OverwriteMode mode);
 
   // Helper functions for generating inlined smi code for certain
   // binary operations.
-  void EmitInlineSmiBinaryOp(Expression* expr,
+  void EmitInlineSmiBinaryOp(BinaryOperation* expr,
                              Token::Value op,
                              OverwriteMode mode,
                              Expression* left,
@@ -493,6 +548,35 @@
     loop_depth_--;
   }
 
+#if defined(V8_TARGET_ARCH_IA32)
+  int stack_height() { return stack_height_; }
+  void set_stack_height(int depth) { stack_height_ = depth; }
+  void increment_stack_height() { stack_height_++; }
+  void increment_stack_height(int delta) { stack_height_ += delta; }
+  void decrement_stack_height() {
+    if (FLAG_verify_stack_height) {
+      ASSERT(stack_height_ > 0);
+    }
+    stack_height_--;
+  }
+  void decrement_stack_height(int delta) {
+    stack_height_-= delta;
+    if (FLAG_verify_stack_height) {
+      ASSERT(stack_height_ >= 0);
+    }
+  }
+  // Call this function only if FLAG_verify_stack_height is true.
+  void verify_stack_height();  // Generates a runtime check of esp - ebp.
+#else
+  int stack_height() { return 0; }
+  void set_stack_height(int depth) {}
+  void increment_stack_height() {}
+  void increment_stack_height(int delta) {}
+  void decrement_stack_height() {}
+  void decrement_stack_height(int delta) {}
+  void verify_stack_height() {}
+#endif  // V8_TARGET_ARCH_IA32
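
A sketch of the invariant this IA-32-only bookkeeping appears to enforce. This is an assumption on my part: the diff only says that verify_stack_height() emits a runtime check of esp - ebp, and `fixed_frame_size` below is a stand-in for whatever the frame setup pushes, not a constant named in this patch:

    // Assumed invariant, in pseudo-assertion form:
    //   ebp - esp == fixed_frame_size + stack_height_ * kPointerSize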
+
   MacroAssembler* masm() { return masm_; }
 
   class ExpressionContext;
@@ -501,24 +585,17 @@
 
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
+  bool is_native() { return info_->is_native(); }
   bool is_strict_mode() { return function()->strict_mode(); }
   StrictModeFlag strict_mode_flag() {
     return is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
   FunctionLiteral* function() { return info_->function(); }
-  Scope* scope() { return info_->scope(); }
+  Scope* scope() { return scope_; }
 
   static Register result_register();
   static Register context_register();
 
-  // Helper for calling an IC stub.
-  void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
-
-  // Calling an IC stub with a patch site. Passing NULL for patch_site
-  // or non NULL patch_site which is not activated indicates no inlined smi code
-  // and emits a nop after the IC call.
-  void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
-
   // Set fields in the stack frame. Offsets are the frame pointer relative
   // offsets defined in, e.g., StandardFrameConstants.
   void StoreToFrameField(int frame_offset, Register value);
@@ -527,12 +604,21 @@
   // in v8::internal::Context.
   void LoadContextField(Register dst, int context_index);
 
+  // Push the function argument for the runtime functions PushWithContext
+  // and PushCatchContext.
+  void PushFunctionArgumentForContextAllocation();
+
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
-  // Handles the shortcutted logical binary operations in VisitBinaryOperation.
-  void EmitLogicalOperation(BinaryOperation* expr);
+
+  void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
+
+  void VisitComma(BinaryOperation* expr);
+  void VisitLogicalExpression(BinaryOperation* expr);
+  void VisitArithmeticExpression(BinaryOperation* expr);
+  void VisitInCurrentContext(Expression* expr);
 
   void VisitForTypeofValue(Expression* expr);
 
@@ -551,6 +637,10 @@
 
     virtual ~ExpressionContext() {
       codegen_->set_new_context(old_);
+      if (FLAG_verify_stack_height) {
+        ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
+        codegen()->verify_stack_height();
+      }
     }
 
     Isolate* isolate() const { return codegen_->isolate(); }
@@ -559,11 +649,11 @@
     // this expression context.
     virtual void Plug(bool flag) const = 0;
 
-    // Emit code to convert a pure value (in a register, slot, as a literal,
-    // or on top of the stack) into the result expected according to this
-    // expression context.
+    // Emit code to convert a pure value (in a register, known variable
+    // location, as a literal, or on top of the stack) into the result
+    // expected according to this expression context.
     virtual void Plug(Register reg) const = 0;
-    virtual void Plug(Slot* slot) const = 0;
+    virtual void Plug(Variable* var) const = 0;
     virtual void Plug(Handle<Object> lit) const = 0;
     virtual void Plug(Heap::RootListIndex index) const = 0;
     virtual void PlugTOS() const = 0;
@@ -580,11 +670,6 @@
     // context.
     virtual void DropAndPlug(int count, Register reg) const = 0;
 
-    // For shortcutting operations || and &&.
-    virtual void EmitLogicalLeft(BinaryOperation* expr,
-                                 Label* eval_right,
-                                 Label* done) const = 0;
-
     // Set up branch labels for a test expression.  The three Label** parameters
     // are output parameters.
     virtual void PrepareTest(Label* materialize_true,
@@ -593,12 +678,14 @@
                              Label** if_false,
                              Label** fall_through) const = 0;
 
-    virtual void HandleExpression(Expression* expr) const = 0;
-
     // Returns true if we are evaluating only for side effects (i.e. if the
     // result will be discarded).
     virtual bool IsEffect() const { return false; }
 
+    // Returns true if we are evaluating for the value (in the accumulator or
+    // on the stack).
+    virtual bool IsAccumulatorValue() const { return false; }
+    virtual bool IsStackValue() const { return false; }
+
     // Returns true if we are branching on the value rather than materializing
     // it.  Only used for asserts.
     virtual bool IsTest() const { return false; }
@@ -607,6 +694,7 @@
     FullCodeGenerator* codegen() const { return codegen_; }
     MacroAssembler* masm() const { return masm_; }
     MacroAssembler* masm_;
+    int expected_stack_height_;  // The expected stack height esp - ebp on exit.
 
    private:
     const ExpressionContext* old_;
@@ -616,67 +704,70 @@
   class AccumulatorValueContext : public ExpressionContext {
    public:
     explicit AccumulatorValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) { }
+        : ExpressionContext(codegen) {
+      expected_stack_height_ = codegen->stack_height();
+    }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
     virtual void Plug(Label* materialize_true, Label* materialize_false) const;
-    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Variable* var) const;
     virtual void Plug(Handle<Object> lit) const;
     virtual void Plug(Heap::RootListIndex) const;
     virtual void PlugTOS() const;
     virtual void DropAndPlug(int count, Register reg) const;
-    virtual void EmitLogicalLeft(BinaryOperation* expr,
-                                 Label* eval_right,
-                                 Label* done) const;
     virtual void PrepareTest(Label* materialize_true,
                              Label* materialize_false,
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
-    virtual void HandleExpression(Expression* expr) const;
+    virtual bool IsAccumulatorValue() const { return true; }
   };
 
   class StackValueContext : public ExpressionContext {
    public:
     explicit StackValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) { }
+        : ExpressionContext(codegen) {
+      expected_stack_height_ = codegen->stack_height() + 1;
+    }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
     virtual void Plug(Label* materialize_true, Label* materialize_false) const;
-    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Variable* var) const;
     virtual void Plug(Handle<Object> lit) const;
     virtual void Plug(Heap::RootListIndex) const;
     virtual void PlugTOS() const;
     virtual void DropAndPlug(int count, Register reg) const;
-    virtual void EmitLogicalLeft(BinaryOperation* expr,
-                                 Label* eval_right,
-                                 Label* done) const;
     virtual void PrepareTest(Label* materialize_true,
                              Label* materialize_false,
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
-    virtual void HandleExpression(Expression* expr) const;
+    virtual bool IsStackValue() const { return true; }
   };
 
   class TestContext : public ExpressionContext {
    public:
-    explicit TestContext(FullCodeGenerator* codegen,
-                         Label* true_label,
-                         Label* false_label,
-                         Label* fall_through)
+    TestContext(FullCodeGenerator* codegen,
+                Expression* condition,
+                Label* true_label,
+                Label* false_label,
+                Label* fall_through)
         : ExpressionContext(codegen),
+          condition_(condition),
           true_label_(true_label),
           false_label_(false_label),
-          fall_through_(fall_through) { }
+          fall_through_(fall_through) {
+      expected_stack_height_ = codegen->stack_height();
+    }
 
     static const TestContext* cast(const ExpressionContext* context) {
       ASSERT(context->IsTest());
       return reinterpret_cast<const TestContext*>(context);
     }
 
+    Expression* condition() const { return condition_; }
     Label* true_label() const { return true_label_; }
     Label* false_label() const { return false_label_; }
     Label* fall_through() const { return fall_through_; }
@@ -684,23 +775,20 @@
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
     virtual void Plug(Label* materialize_true, Label* materialize_false) const;
-    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Variable* var) const;
     virtual void Plug(Handle<Object> lit) const;
     virtual void Plug(Heap::RootListIndex) const;
     virtual void PlugTOS() const;
     virtual void DropAndPlug(int count, Register reg) const;
-    virtual void EmitLogicalLeft(BinaryOperation* expr,
-                                 Label* eval_right,
-                                 Label* done) const;
     virtual void PrepareTest(Label* materialize_true,
                              Label* materialize_false,
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
-    virtual void HandleExpression(Expression* expr) const;
     virtual bool IsTest() const { return true; }
 
    private:
+    Expression* condition_;
     Label* true_label_;
     Label* false_label_;
     Label* fall_through_;
@@ -709,33 +797,34 @@
   class EffectContext : public ExpressionContext {
    public:
     explicit EffectContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) { }
+        : ExpressionContext(codegen) {
+      expected_stack_height_ = codegen->stack_height();
+    }
+
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
     virtual void Plug(Label* materialize_true, Label* materialize_false) const;
-    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Variable* var) const;
     virtual void Plug(Handle<Object> lit) const;
     virtual void Plug(Heap::RootListIndex) const;
     virtual void PlugTOS() const;
     virtual void DropAndPlug(int count, Register reg) const;
-    virtual void EmitLogicalLeft(BinaryOperation* expr,
-                                 Label* eval_right,
-                                 Label* done) const;
     virtual void PrepareTest(Label* materialize_true,
                              Label* materialize_false,
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
-    virtual void HandleExpression(Expression* expr) const;
     virtual bool IsEffect() const { return true; }
   };
 
   MacroAssembler* masm_;
   CompilationInfo* info_;
+  Scope* scope_;
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
+  int stack_height_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
index c094251..239358d 100644
--- a/src/func-name-inferrer.cc
+++ b/src/func-name-inferrer.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,52 +29,67 @@
 
 #include "ast.h"
 #include "func-name-inferrer.h"
+#include "list-inl.h"
 
 namespace v8 {
 namespace internal {
 
+FuncNameInferrer::FuncNameInferrer(Isolate* isolate)
+    : isolate_(isolate),
+      entries_stack_(10),
+      names_stack_(5),
+      funcs_to_infer_(4) {
+}
+
 
 void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
   // Enclosing name is a name of a constructor function. To check
   // that it is really a constructor, we check that it is not empty
   // and starts with a capital letter.
   if (name->length() > 0 && Runtime::IsUpperCaseChar(
-      Isolate::Current()->runtime_state(), name->Get(0))) {
-    names_stack_.Add(name);
+          isolate()->runtime_state(), name->Get(0))) {
+    names_stack_.Add(Name(name, kEnclosingConstructorName));
   }
 }
 
 
 void FuncNameInferrer::PushLiteralName(Handle<String> name) {
-  if (IsOpen() && !HEAP->prototype_symbol()->Equals(*name)) {
-    names_stack_.Add(name);
+  if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
+    names_stack_.Add(Name(name, kLiteralName));
   }
 }
 
 
 void FuncNameInferrer::PushVariableName(Handle<String> name) {
-  if (IsOpen() && !HEAP->result_symbol()->Equals(*name)) {
-    names_stack_.Add(name);
+  if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
+    names_stack_.Add(Name(name, kVariableName));
   }
 }
 
 
 Handle<String> FuncNameInferrer::MakeNameFromStack() {
-  if (names_stack_.is_empty()) {
-    return FACTORY->empty_string();
-  } else {
-    return MakeNameFromStackHelper(1, names_stack_.at(0));
-  }
+  return MakeNameFromStackHelper(0, isolate()->factory()->empty_string());
 }
 
 
 Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
                                                          Handle<String> prev) {
-  if (pos >= names_stack_.length()) {
-    return prev;
+  if (pos >= names_stack_.length()) return prev;
+  if (pos < names_stack_.length() - 1 &&
+      names_stack_.at(pos).type == kVariableName &&
+      names_stack_.at(pos + 1).type == kVariableName) {
+    // Skip consecutive variable declarations.
+    return MakeNameFromStackHelper(pos + 1, prev);
   } else {
-    Handle<String> curr = FACTORY->NewConsString(dot_, names_stack_.at(pos));
-    return MakeNameFromStackHelper(pos + 1, FACTORY->NewConsString(prev, curr));
+    if (prev->length() > 0) {
+      Factory* factory = isolate()->factory();
+      Handle<String> curr = factory->NewConsString(
+          factory->dot_symbol(), names_stack_.at(pos).name);
+      return MakeNameFromStackHelper(pos + 1,
+                                     factory->NewConsString(prev, curr));
+    } else {
+      return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
+    }
   }
 }
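
A worked example of the skip rule, constructed from the code above (not part of the patch):

    // names_stack_, bottom to top:
    //   { "Ctor", kEnclosingConstructorName }
    //   { "a",    kVariableName }   <- skipped: followed by another variable
    //   { "b",    kVariableName }
    //   { "c",    kLiteralName }
    //
    // MakeNameFromStack() builds "Ctor.b.c" rather than "Ctor.a.b.c".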
 
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
index 5aa2b35..1a57268 100644
--- a/src/func-name-inferrer.h
+++ b/src/func-name-inferrer.h
@@ -31,6 +31,8 @@
 namespace v8 {
 namespace internal {
 
+class Isolate;
+
 // FuncNameInferrer is a stateful class that is used to perform name
 // inference for anonymous functions during static analysis of source code.
 // Inference is performed in cases when an anonymous function is assigned
@@ -43,12 +45,7 @@
 // a name.
 class FuncNameInferrer : public ZoneObject {
  public:
-  FuncNameInferrer()
-      : entries_stack_(10),
-        names_stack_(5),
-        funcs_to_infer_(4),
-        dot_(FACTORY->NewStringFromAscii(CStrVector("."))) {
-  }
+  explicit FuncNameInferrer(Isolate* isolate);
 
   // Returns whether we have entered name collection state.
   bool IsOpen() const { return !entries_stack_.is_empty(); }
@@ -73,6 +70,12 @@
     }
   }
 
+  void RemoveLastFunction() {
+    if (IsOpen() && !funcs_to_infer_.is_empty()) {
+      funcs_to_infer_.RemoveLast();
+    }
+  }
+
   // Infers a function name and leaves names collection state.
   void Infer() {
     ASSERT(IsOpen());
@@ -81,13 +84,26 @@
     }
   }
 
-  // Infers a function name and leaves names collection state.
+  // Leaves names collection state.
   void Leave() {
     ASSERT(IsOpen());
     names_stack_.Rewind(entries_stack_.RemoveLast());
   }
 
  private:
+  enum NameType {
+    kEnclosingConstructorName,
+    kLiteralName,
+    kVariableName
+  };
+  struct Name {
+    Name(Handle<String> name, NameType type) : name(name), type(type) { }
+    Handle<String> name;
+    NameType type;
+  };
+
+  Isolate* isolate() { return isolate_; }
+
   // Constructs a full name in dotted notation from gathered names.
   Handle<String> MakeNameFromStack();
 
@@ -97,10 +113,10 @@
   // Performs name inferring for added functions.
   void InferFunctionsNames();
 
+  Isolate* isolate_;
   ZoneList<int> entries_stack_;
-  ZoneList<Handle<String> > names_stack_;
+  ZoneList<Name> names_stack_;
   ZoneList<FunctionLiteral*> funcs_to_infer_;
-  Handle<String> dot_;
 
   DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
 };
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index bf8ac19..68cb053 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -34,16 +34,29 @@
 #include "global-handles.h"
 #include "messages.h"
 #include "natives.h"
+#include "scopeinfo.h"
 
 namespace v8 {
 namespace internal {
 
+#ifdef __APPLE__
+#define __MACH_O
+class MachO;
+class MachOSection;
+typedef MachO DebugObject;
+typedef MachOSection DebugSection;
+#else
+#define __ELF
 class ELF;
+class ELFSection;
+typedef ELF DebugObject;
+typedef ELFSection DebugSection;
+#endif
 
 class Writer BASE_EMBEDDED {
  public:
-  explicit Writer(ELF* elf)
-      : elf_(elf),
+  explicit Writer(DebugObject* debug_object)
+      : debug_object_(debug_object),
         position_(0),
         capacity_(1024),
         buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
@@ -112,7 +125,7 @@
     }
   }
 
-  ELF* elf() { return elf_; }
+  DebugObject* debug_object() { return debug_object_; }
 
   byte* buffer() { return buffer_; }
 
@@ -165,7 +178,7 @@
     return reinterpret_cast<T*>(&buffer_[offset]);
   }
 
-  ELF* elf_;
+  DebugObject* debug_object_;
   uintptr_t position_;
   uintptr_t capacity_;
   byte* buffer_;
@@ -173,21 +186,120 @@
 
 class StringTable;
 
-class ELFSection : public ZoneObject {
+template<typename THeader>
+class DebugSectionBase : public ZoneObject {
  public:
-  struct Header {
-    uint32_t name;
-    uint32_t type;
-    uintptr_t flags;
-    uintptr_t address;
-    uintptr_t offset;
-    uintptr_t size;
-    uint32_t link;
-    uint32_t info;
-    uintptr_t alignment;
-    uintptr_t entry_size;
+  virtual ~DebugSectionBase() { }
+
+  virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) {
+    uintptr_t start = writer->position();
+    if (WriteBody(writer)) {
+      uintptr_t end = writer->position();
+      header->offset = start;
+#if defined(__MACH_O)
+      header->addr = 0;
+#endif
+      header->size = end - start;
+    }
+  }
+
+  virtual bool WriteBody(Writer* writer) {
+    return false;
+  }
+
+  typedef THeader Header;
+};
+
+
+struct MachOSectionHeader {
+  char sectname[16];
+  char segname[16];
+#if defined(V8_TARGET_ARCH_IA32)
+  uint32_t addr;
+  uint32_t size;
+#else
+  uint64_t addr;
+  uint64_t size;
+#endif
+  uint32_t offset;
+  uint32_t align;
+  uint32_t reloff;
+  uint32_t nreloc;
+  uint32_t flags;
+  uint32_t reserved1;
+  uint32_t reserved2;
+};
+
+
+class MachOSection : public DebugSectionBase<MachOSectionHeader> {
+ public:
+  enum Type {
+    S_REGULAR = 0x0u,
+    S_ATTR_COALESCED = 0xbu,
+    S_ATTR_SOME_INSTRUCTIONS = 0x400u,
+    S_ATTR_DEBUG = 0x02000000u,
+    S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
   };
 
+  MachOSection(const char* name,
+               const char* segment,
+               uintptr_t align,
+               uint32_t flags)
+    : name_(name),
+      segment_(segment),
+      align_(align),
+      flags_(flags) {
+    ASSERT(IsPowerOf2(align));
+    if (align_ != 0) {
+      align_ = WhichPowerOf2(align_);
+    }
+  }
+
+  virtual ~MachOSection() { }
+
+  virtual void PopulateHeader(Writer::Slot<Header> header) {
+    header->addr = 0;
+    header->size = 0;
+    header->offset = 0;
+    header->align = align_;
+    header->reloff = 0;
+    header->nreloc = 0;
+    header->flags = flags_;
+    header->reserved1 = 0;
+    header->reserved2 = 0;
+    memset(header->sectname, 0, sizeof(header->sectname));
+    memset(header->segname, 0, sizeof(header->segname));
+    ASSERT(strlen(name_) < sizeof(header->sectname));
+    ASSERT(strlen(segment_) < sizeof(header->segname));
+    strncpy(header->sectname, name_, sizeof(header->sectname));
+    strncpy(header->segname, segment_, sizeof(header->segname));
+  }
+
+ private:
+  const char* name_;
+  const char* segment_;
+  uintptr_t align_;
+  uint32_t flags_;
+};
+
+
+struct ELFSectionHeader {
+  uint32_t name;
+  uint32_t type;
+  uintptr_t flags;
+  uintptr_t address;
+  uintptr_t offset;
+  uintptr_t size;
+  uint32_t link;
+  uint32_t info;
+  uintptr_t alignment;
+  uintptr_t entry_size;
+};
+
+
+#if defined(__ELF)
+class ELFSection : public DebugSectionBase<ELFSectionHeader> {
+ public:
   enum Type {
     TYPE_NULL = 0,
     TYPE_PROGBITS = 1,
@@ -252,15 +364,45 @@
     header->entry_size = 0;
   }
 
-
  private:
   const char* name_;
   Type type_;
   uintptr_t align_;
   uint16_t index_;
 };
+#endif  // defined(__ELF)
 
 
+#if defined(__MACH_O)
+class MachOTextSection : public MachOSection {
+ public:
+  MachOTextSection(uintptr_t align,
+                   uintptr_t addr,
+                   uintptr_t size)
+      : MachOSection("__text",
+                     "__TEXT",
+                     align,
+                     MachOSection::S_REGULAR |
+                         MachOSection::S_ATTR_SOME_INSTRUCTIONS |
+                         MachOSection::S_ATTR_PURE_INSTRUCTIONS),
+        addr_(addr),
+        size_(size) { }
+
+ protected:
+  virtual void PopulateHeader(Writer::Slot<Header> header) {
+    MachOSection::PopulateHeader(header);
+    header->addr = addr_;
+    header->size = size_;
+  }
+
+ private:
+  uintptr_t addr_;
+  uintptr_t size_;
+};
+#endif  // defined(__MACH_O)
+
+
+#if defined(__ELF)
 class FullHeaderELFSection : public ELFSection {
  public:
   FullHeaderELFSection(const char* name,
@@ -349,8 +491,139 @@
   header->alignment = align_;
   PopulateHeader(header);
 }
+#endif  // defined(__ELF)
 
 
+#if defined(__MACH_O)
+class MachO BASE_EMBEDDED {
+ public:
+  MachO() : sections_(6) { }
+
+  uint32_t AddSection(MachOSection* section) {
+    sections_.Add(section);
+    return sections_.length() - 1;
+  }
+
+  void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
+    Writer::Slot<MachOHeader> header = WriteHeader(w);
+    uintptr_t load_command_start = w->position();
+    Writer::Slot<MachOSegmentCommand> cmd = WriteSegmentCommand(w,
+                                                                code_start,
+                                                                code_size);
+    WriteSections(w, cmd, header, load_command_start);
+  }
+
+ private:
+  struct MachOHeader {
+    uint32_t magic;
+    uint32_t cputype;
+    uint32_t cpusubtype;
+    uint32_t filetype;
+    uint32_t ncmds;
+    uint32_t sizeofcmds;
+    uint32_t flags;
+#if defined(V8_TARGET_ARCH_X64)
+    uint32_t reserved;
+#endif
+  };
+
+  struct MachOSegmentCommand {
+    uint32_t cmd;
+    uint32_t cmdsize;
+    char segname[16];
+#if defined(V8_TARGET_ARCH_IA32)
+    uint32_t vmaddr;
+    uint32_t vmsize;
+    uint32_t fileoff;
+    uint32_t filesize;
+#else
+    uint64_t vmaddr;
+    uint64_t vmsize;
+    uint64_t fileoff;
+    uint64_t filesize;
+#endif
+    uint32_t maxprot;
+    uint32_t initprot;
+    uint32_t nsects;
+    uint32_t flags;
+  };
+
+  enum MachOLoadCommandCmd {
+    LC_SEGMENT_32 = 0x00000001u,
+    LC_SEGMENT_64 = 0x00000019u
+  };
+
+
+  Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
+    ASSERT(w->position() == 0);
+    Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
+#if defined(V8_TARGET_ARCH_IA32)
+    header->magic = 0xFEEDFACEu;
+    header->cputype = 7;  // i386
+    header->cpusubtype = 3;  // CPU_SUBTYPE_I386_ALL
+#elif defined(V8_TARGET_ARCH_X64)
+    header->magic = 0xFEEDFACFu;
+    header->cputype = 7 | 0x01000000;  // i386 | 64-bit ABI
+    header->cpusubtype = 3;  // CPU_SUBTYPE_I386_ALL
+    header->reserved = 0;
+#else
+#error Unsupported target architecture.
+#endif
+    header->filetype = 0x1;  // MH_OBJECT
+    header->ncmds = 1;
+    header->sizeofcmds = 0;
+    header->flags = 0;
+    return header;
+  }
+
+
+  Writer::Slot<MachOSegmentCommand> WriteSegmentCommand(Writer* w,
+                                                        uintptr_t code_start,
+                                                        uintptr_t code_size) {
+    Writer::Slot<MachOSegmentCommand> cmd =
+        w->CreateSlotHere<MachOSegmentCommand>();
+#if defined(V8_TARGET_ARCH_IA32)
+    cmd->cmd = LC_SEGMENT_32;
+#else
+    cmd->cmd = LC_SEGMENT_64;
+#endif
+    cmd->vmaddr = code_start;
+    cmd->vmsize = code_size;
+    cmd->fileoff = 0;
+    cmd->filesize = 0;
+    cmd->maxprot = 7;
+    cmd->initprot = 7;
+    cmd->flags = 0;
+    cmd->nsects = sections_.length();
+    memset(cmd->segname, 0, 16);
+    cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
+        cmd->nsects;
+    return cmd;
+  }
+
+
+  void WriteSections(Writer* w,
+                     Writer::Slot<MachOSegmentCommand> cmd,
+                     Writer::Slot<MachOHeader> header,
+                     uintptr_t load_command_start) {
+    Writer::Slot<MachOSection::Header> headers =
+        w->CreateSlotsHere<MachOSection::Header>(sections_.length());
+    cmd->fileoff = w->position();
+    header->sizeofcmds = w->position() - load_command_start;
+    for (int section = 0; section < sections_.length(); ++section) {
+      sections_[section]->PopulateHeader(headers.at(section));
+      sections_[section]->WriteBody(headers.at(section), w);
+    }
+    cmd->filesize = w->position() - static_cast<uintptr_t>(cmd->fileoff);
+  }
+
+
+  ZoneList<MachOSection*> sections_;
+};
+#endif  // defined(__MACH_O)
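
// The MachO writer above serializes these structs byte-for-byte, so their
// layout has to match the Mach-O ABI exactly: mach_header is 28 bytes,
// mach_header_64 is 32, segment_command is 56 and segment_command_64 is 72.
// A minimal stand-alone check of that assumption (local stand-in structs,
// not the <mach-o/loader.h> definitions):
#include <assert.h>
#include <stdint.h>

struct MachHeader32 {
  uint32_t magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags;
};
struct MachHeader64 {
  uint32_t magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags,
      reserved;
};
struct SegmentCommand32 {
  uint32_t cmd, cmdsize;
  char segname[16];
  uint32_t vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags;
};
struct SegmentCommand64 {
  uint32_t cmd, cmdsize;
  char segname[16];
  uint64_t vmaddr, vmsize, fileoff, filesize;
  uint32_t maxprot, initprot, nsects, flags;
};

int main() {
  assert(sizeof(MachHeader32) == 28);      // Written when magic is 0xFEEDFACE.
  assert(sizeof(MachHeader64) == 32);      // Written when magic is 0xFEEDFACF.
  assert(sizeof(SegmentCommand32) == 56);  // LC_SEGMENT_32 payload.
  assert(sizeof(SegmentCommand64) == 72);  // LC_SEGMENT_64 payload.
  return 0;
}
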
+
+
+#if defined(__ELF)
 class ELF BASE_EMBEDDED {
  public:
   ELF() : sections_(6) {
@@ -596,7 +869,7 @@
 
     // String table for this symbol table should follow it in the section table.
     StringTable* strtab =
-        static_cast<StringTable*>(w->elf()->SectionAt(index() + 1));
+        static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
     strtab->AttachWriter(w);
     symbols.at(0).set(ELFSymbol::SerializedLayout(0,
                                                   0,
@@ -640,11 +913,11 @@
   ZoneList<ELFSymbol> locals_;
   ZoneList<ELFSymbol> globals_;
 };
+#endif  // defined(__ELF)
 
 
 class CodeDescription BASE_EMBEDDED {
  public:
-
 #ifdef V8_TARGET_ARCH_X64
   enum StackState {
     POST_RBP_PUSH,
@@ -658,12 +931,14 @@
                   Code* code,
                   Handle<Script> script,
                   GDBJITLineInfo* lineinfo,
-                  GDBJITInterface::CodeTag tag)
+                  GDBJITInterface::CodeTag tag,
+                  CompilationInfo* info)
       : name_(name),
         code_(code),
         script_(script),
         lineinfo_(lineinfo),
-        tag_(tag) {
+        tag_(tag),
+        info_(info) {
   }
 
   const char* name() const {
@@ -678,6 +953,14 @@
     return tag_;
   }
 
+  CompilationInfo* info() const {
+    return info_;
+  }
+
+  bool IsInfoAvailable() const {
+    return info_ != NULL;
+  }
+
   uintptr_t CodeStart() const {
     return reinterpret_cast<uintptr_t>(code_->instruction_start());
   }
@@ -710,7 +993,7 @@
   }
 #endif
 
-  SmartPointer<char> GetFilename() {
+  SmartArrayPointer<char> GetFilename() {
     return String::cast(script_->name())->ToCString();
   }
 
@@ -725,12 +1008,13 @@
   Handle<Script> script_;
   GDBJITLineInfo* lineinfo_;
   GDBJITInterface::CodeTag tag_;
+  CompilationInfo* info_;
 #ifdef V8_TARGET_ARCH_X64
   uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
 #endif
 };
 
-
+#if defined(__ELF)
 static void CreateSymbolsTable(CodeDescription* desc,
                                ELF* elf,
                                int text_section_index) {
@@ -755,14 +1039,42 @@
                         ELFSymbol::TYPE_FUNC,
                         text_section_index));
 }
+#endif  // defined(__ELF)
 
 
-class DebugInfoSection : public ELFSection {
+class DebugInfoSection : public DebugSection {
  public:
   explicit DebugInfoSection(CodeDescription* desc)
-      : ELFSection(".debug_info", TYPE_PROGBITS, 1), desc_(desc) { }
+#if defined(__ELF)
+      : ELFSection(".debug_info", TYPE_PROGBITS, 1),
+#else
+      : MachOSection("__debug_info",
+                     "__DWARF",
+                     1,
+                     MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
+        desc_(desc) { }
+
+  // DWARF2 standard
+  enum DWARF2LocationOp {
+    DW_OP_reg0 = 0x50,
+    DW_OP_reg1 = 0x51,
+    DW_OP_reg2 = 0x52,
+    DW_OP_reg3 = 0x53,
+    DW_OP_reg4 = 0x54,
+    DW_OP_reg5 = 0x55,
+    DW_OP_reg6 = 0x56,
+    DW_OP_reg7 = 0x57,
+    DW_OP_fbreg = 0x91  // 1 param: SLEB128 offset
+  };
+
+  enum DWARF2Encoding {
+    DW_ATE_ADDRESS = 0x1,
+    DW_ATE_SIGNED = 0x5
+  };
 
   bool WriteBody(Writer* w) {
+    uintptr_t cu_start = w->position();
     Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
     uintptr_t start = w->position();
     w->Write<uint16_t>(2);  // DWARF version.
@@ -774,6 +1086,123 @@
     w->Write<intptr_t>(desc_->CodeStart());
     w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
     w->Write<uint32_t>(0);
+
+    uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
+    w->WriteULEB128(3);
+    w->Write<uint8_t>(kPointerSize);
+    w->WriteString("v8value");
+
+    if (desc_->IsInfoAvailable()) {
+      CompilationInfo* info = desc_->info();
+      ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
+      w->WriteULEB128(2);
+      w->WriteString(desc_->name());
+      w->Write<intptr_t>(desc_->CodeStart());
+      w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
+      Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
+      uintptr_t fb_block_start = w->position();
+#if defined(V8_TARGET_ARCH_IA32)
+      w->Write<uint8_t>(DW_OP_reg5);  // ebp: the frame pointer on ia32.
+#elif defined(V8_TARGET_ARCH_X64)
+      w->Write<uint8_t>(DW_OP_reg6);  // rbp: the frame pointer on x64.
+#else
+#error Unsupported target architecture.
+#endif
+      fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
+
+      int params = scope_info.number_of_parameters();
+      int slots = scope_info.number_of_stack_slots();
+      int context_slots = scope_info.number_of_context_slots();
+      // The real slot ID is internal_slots + context_slot_id.
+      int internal_slots = Context::MIN_CONTEXT_SLOTS;
+      int locals = scope_info.NumberOfLocals();
+      int current_abbreviation = 4;
+
+      for (int param = 0; param < params; ++param) {
+        w->WriteULEB128(current_abbreviation++);
+        w->WriteString(
+            *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
+        w->Write<uint32_t>(ty_offset);
+        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+        uintptr_t block_start = w->position();
+        w->Write<uint8_t>(DW_OP_fbreg);
+        w->WriteSLEB128(
+          JavaScriptFrameConstants::kLastParameterOffset +
+              kPointerSize * (params - param - 1));
+        block_size.set(static_cast<uint32_t>(w->position() - block_start));
+      }
+
+      EmbeddedVector<char, 256> buffer;
+      StringBuilder builder(buffer.start(), buffer.length());
+
+      for (int slot = 0; slot < slots; ++slot) {
+        w->WriteULEB128(current_abbreviation++);
+        builder.Reset();
+        builder.AddFormatted("slot%d", slot);
+        w->WriteString(builder.Finalize());
+      }
+
+      // See contexts.h for more information.
+      ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
+      ASSERT(Context::CLOSURE_INDEX == 0);
+      ASSERT(Context::PREVIOUS_INDEX == 1);
+      ASSERT(Context::EXTENSION_INDEX == 2);
+      ASSERT(Context::GLOBAL_INDEX == 3);
+      w->WriteULEB128(current_abbreviation++);
+      w->WriteString(".closure");
+      w->WriteULEB128(current_abbreviation++);
+      w->WriteString(".previous");
+      w->WriteULEB128(current_abbreviation++);
+      w->WriteString(".extension");
+      w->WriteULEB128(current_abbreviation++);
+      w->WriteString(".global");
+
+      for (int context_slot = 0;
+           context_slot < context_slots;
+           ++context_slot) {
+        w->WriteULEB128(current_abbreviation++);
+        builder.Reset();
+        builder.AddFormatted("context_slot%d", context_slot + internal_slots);
+        w->WriteString(builder.Finalize());
+      }
+
+      for (int local = 0; local < locals; ++local) {
+        w->WriteULEB128(current_abbreviation++);
+        w->WriteString(
+            *scope_info.LocalName(local)->ToCString(DISALLOW_NULLS));
+        w->Write<uint32_t>(ty_offset);
+        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+        uintptr_t block_start = w->position();
+        w->Write<uint8_t>(DW_OP_fbreg);
+        w->WriteSLEB128(
+          JavaScriptFrameConstants::kLocal0Offset -
+              kPointerSize * local);
+        block_size.set(static_cast<uint32_t>(w->position() - block_start));
+      }
+
+      {
+        w->WriteULEB128(current_abbreviation++);
+        w->WriteString("__function");
+        w->Write<uint32_t>(ty_offset);
+        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+        uintptr_t block_start = w->position();
+        w->Write<uint8_t>(DW_OP_fbreg);
+        w->WriteSLEB128(JavaScriptFrameConstants::kFunctionOffset);
+        block_size.set(static_cast<uint32_t>(w->position() - block_start));
+      }
+
+      {
+        w->WriteULEB128(current_abbreviation++);
+        w->WriteString("__context");
+        w->Write<uint32_t>(ty_offset);
+        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+        uintptr_t block_start = w->position();
+        w->Write<uint8_t>(DW_OP_fbreg);
+        w->WriteSLEB128(StandardFrameConstants::kContextOffset);
+        block_size.set(static_cast<uint32_t>(w->position() - block_start));
+      }
+    }
+
     size.set(static_cast<uint32_t>(w->position() - start));
     return true;
   }
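
// WriteULEB128() / WriteSLEB128() above emit DWARF's LEB128 variable-length
// integers: seven payload bits per byte, high bit set on every byte except
// the last, and sign extension for the signed variant (used for the
// DW_OP_fbreg offsets). A minimal stand-alone sketch of the encoding; the
// helper names here are illustrative, not V8's.
#include <assert.h>
#include <stdint.h>
#include <vector>

void AppendULEB128(std::vector<uint8_t>* out, uint64_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // More bytes follow.
    out->push_back(byte);
  } while (value != 0);
}

void AppendSLEB128(std::vector<uint8_t>* out, int64_t value) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    bool sign_bit = (byte & 0x40) != 0;
    value >>= 7;  // Relies on arithmetic shift for negative values.
    if ((value == 0 && !sign_bit) || (value == -1 && sign_bit)) {
      more = false;
    } else {
      byte |= 0x80;
    }
    out->push_back(byte);
  }
}

int main() {
  // Classic LEB128 reference values.
  std::vector<uint8_t> u;
  AppendULEB128(&u, 624485);
  assert(u.size() == 3 && u[0] == 0xe5 && u[1] == 0x8e && u[2] == 0x26);

  std::vector<uint8_t> s;
  AppendSLEB128(&s, -127);  // e.g. a frame-base-relative local slot offset.
  assert(s.size() == 2 && s[0] == 0x81 && s[1] == 0x7f);
  return 0;
}
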
@@ -783,13 +1212,28 @@
 };
 
 
-class DebugAbbrevSection : public ELFSection {
+class DebugAbbrevSection : public DebugSection {
  public:
-  DebugAbbrevSection() : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1) { }
+  explicit DebugAbbrevSection(CodeDescription* desc)
+#ifdef __ELF
+      : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1),
+#else
+      : MachOSection("__debug_abbrev",
+                     "__DWARF",
+                     1,
+                     MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
+        desc_(desc) { }
 
   // DWARF2 standard, figure 14.
   enum DWARF2Tags {
-    DW_TAG_COMPILE_UNIT = 0x11
+    DW_TAG_FORMAL_PARAMETER = 0x05,
+    DW_TAG_POINTER_TYPE = 0xf,
+    DW_TAG_COMPILE_UNIT = 0x11,
+    DW_TAG_STRUCTURE_TYPE = 0x13,
+    DW_TAG_BASE_TYPE = 0x24,
+    DW_TAG_SUBPROGRAM = 0x2e,
+    DW_TAG_VARIABLE = 0x34
   };
 
   // DWARF2 standard, figure 16.
@@ -800,25 +1244,57 @@
 
   // DWARF standard, figure 17.
   enum DWARF2Attribute {
+    DW_AT_LOCATION = 0x2,
     DW_AT_NAME = 0x3,
+    DW_AT_BYTE_SIZE = 0xb,
     DW_AT_STMT_LIST = 0x10,
     DW_AT_LOW_PC = 0x11,
-    DW_AT_HIGH_PC = 0x12
+    DW_AT_HIGH_PC = 0x12,
+    DW_AT_ENCODING = 0x3e,
+    DW_AT_FRAME_BASE = 0x40,
+    DW_AT_TYPE = 0x49
   };
 
   // DWARF2 standard, figure 19.
   enum DWARF2AttributeForm {
     DW_FORM_ADDR = 0x1,
+    DW_FORM_BLOCK4 = 0x4,
     DW_FORM_STRING = 0x8,
-    DW_FORM_DATA4 = 0x6
+    DW_FORM_DATA4 = 0x6,
+    DW_FORM_BLOCK = 0x9,
+    DW_FORM_DATA1 = 0xb,
+    DW_FORM_FLAG = 0xc,
+    DW_FORM_REF4 = 0x13
   };
 
-  bool WriteBody(Writer* w) {
-    w->WriteULEB128(1);
-    w->WriteULEB128(DW_TAG_COMPILE_UNIT);
+  void WriteVariableAbbreviation(Writer* w,
+                                 int abbreviation_code,
+                                 bool has_value,
+                                 bool is_parameter) {
+    w->WriteULEB128(abbreviation_code);
+    w->WriteULEB128(is_parameter ? DW_TAG_FORMAL_PARAMETER : DW_TAG_VARIABLE);
     w->Write<uint8_t>(DW_CHILDREN_NO);
     w->WriteULEB128(DW_AT_NAME);
     w->WriteULEB128(DW_FORM_STRING);
+    if (has_value) {
+      w->WriteULEB128(DW_AT_TYPE);
+      w->WriteULEB128(DW_FORM_REF4);
+      w->WriteULEB128(DW_AT_LOCATION);
+      w->WriteULEB128(DW_FORM_BLOCK4);
+    }
+    w->WriteULEB128(0);
+    w->WriteULEB128(0);
+  }
+
+  bool WriteBody(Writer* w) {
+    int current_abbreviation = 1;
+    bool extra_info = desc_->IsInfoAvailable();
+    ASSERT(desc_->IsLineInfoAvailable());
+    w->WriteULEB128(current_abbreviation++);
+    w->WriteULEB128(DW_TAG_COMPILE_UNIT);
+    w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO);
+    w->WriteULEB128(DW_AT_NAME);
+    w->WriteULEB128(DW_FORM_STRING);
     w->WriteULEB128(DW_AT_LOW_PC);
     w->WriteULEB128(DW_FORM_ADDR);
     w->WriteULEB128(DW_AT_HIGH_PC);
@@ -827,16 +1303,101 @@
     w->WriteULEB128(DW_FORM_DATA4);
     w->WriteULEB128(0);
     w->WriteULEB128(0);
-    w->WriteULEB128(0);
+
+    if (extra_info) {
+      CompilationInfo* info = desc_->info();
+      ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
+      int params = scope_info.number_of_parameters();
+      int slots = scope_info.number_of_stack_slots();
+      int context_slots = scope_info.number_of_context_slots();
+      // The real slot ID is internal_slots + context_slot_id.
+      int internal_slots = Context::MIN_CONTEXT_SLOTS;
+      int locals = scope_info.NumberOfLocals();
+      int total_children =
+          params + slots + context_slots + internal_slots + locals + 2;
+
+      // The extra duplication below seems to be necessary to keep
+      // gdb from getting upset on OSX.
+      w->WriteULEB128(current_abbreviation++);  // Abbreviation code.
+      w->WriteULEB128(DW_TAG_SUBPROGRAM);
+      w->Write<uint8_t>(
+          total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
+      w->WriteULEB128(DW_AT_NAME);
+      w->WriteULEB128(DW_FORM_STRING);
+      w->WriteULEB128(DW_AT_LOW_PC);
+      w->WriteULEB128(DW_FORM_ADDR);
+      w->WriteULEB128(DW_AT_HIGH_PC);
+      w->WriteULEB128(DW_FORM_ADDR);
+      w->WriteULEB128(DW_AT_FRAME_BASE);
+      w->WriteULEB128(DW_FORM_BLOCK4);
+      w->WriteULEB128(0);
+      w->WriteULEB128(0);
+
+      w->WriteULEB128(current_abbreviation++);
+      w->WriteULEB128(DW_TAG_STRUCTURE_TYPE);
+      w->Write<uint8_t>(DW_CHILDREN_NO);
+      w->WriteULEB128(DW_AT_BYTE_SIZE);
+      w->WriteULEB128(DW_FORM_DATA1);
+      w->WriteULEB128(DW_AT_NAME);
+      w->WriteULEB128(DW_FORM_STRING);
+      w->WriteULEB128(0);
+      w->WriteULEB128(0);
+
+      for (int param = 0; param < params; ++param) {
+        WriteVariableAbbreviation(w, current_abbreviation++, true, true);
+      }
+
+      for (int slot = 0; slot < slots; ++slot) {
+        WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+      }
+
+      for (int internal_slot = 0;
+           internal_slot < internal_slots;
+           ++internal_slot) {
+        WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+      }
+
+      for (int context_slot = 0;
+           context_slot < context_slots;
+           ++context_slot) {
+        WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+      }
+
+      for (int local = 0; local < locals; ++local) {
+        WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+      }
+
+      // The function.
+      WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+
+      // The context.
+      WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+
+      if (total_children != 0) {
+        w->WriteULEB128(0);  // Terminate the sibling list.
+      }
+    }
+
+    w->WriteULEB128(0);  // Terminate the table.
     return true;
   }
+
+ private:
+  CodeDescription* desc_;
 };
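
// The abbreviation codes declared here have to line up one-to-one with the
// codes consumed by DebugInfoSection::WriteBody(): both sides walk the
// parameters, stack slots, internal/context slots, locals and finally
// __function/__context in the same order, bumping current_abbreviation in
// lock-step. A small stand-alone sketch (local helper, hypothetical code
// value 7) of how a single DW_TAG_VARIABLE entry is split between the
// .debug_abbrev "shape" and the .debug_info values:
#include <stdint.h>
#include <string.h>
#include <vector>

static void AppendULEB128(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;
    out->push_back(byte);
  } while (value != 0);
}

void EmitVariablePair(std::vector<uint8_t>* abbrev,
                      std::vector<uint8_t>* info,
                      const char* name,
                      uint32_t type_ref) {
  const uint32_t code = 7;  // Hypothetical; must match on both sides.

  // .debug_abbrev entry: code, tag, children flag, attribute/form pairs, 0/0.
  AppendULEB128(abbrev, code);
  AppendULEB128(abbrev, 0x34);  // DW_TAG_VARIABLE
  abbrev->push_back(0);         // DW_CHILDREN_NO
  AppendULEB128(abbrev, 0x3);   // DW_AT_NAME
  AppendULEB128(abbrev, 0x8);   // DW_FORM_STRING
  AppendULEB128(abbrev, 0x49);  // DW_AT_TYPE
  AppendULEB128(abbrev, 0x13);  // DW_FORM_REF4
  AppendULEB128(abbrev, 0x2);   // DW_AT_LOCATION
  AppendULEB128(abbrev, 0x4);   // DW_FORM_BLOCK4
  AppendULEB128(abbrev, 0);
  AppendULEB128(abbrev, 0);

  // .debug_info DIE: the same code, then the values in the declared order:
  // NUL-terminated name, 4-byte type reference, then (not shown here) the
  // DW_FORM_BLOCK4 location expression, exactly as WriteBody() emits it.
  AppendULEB128(info, code);
  info->insert(info->end(), name, name + strlen(name) + 1);
  const uint8_t* p = reinterpret_cast<const uint8_t*>(&type_ref);
  info->insert(info->end(), p, p + sizeof(type_ref));
}

int main() {
  std::vector<uint8_t> abbrev, info;
  EmitVariablePair(&abbrev, &info, "slot0", 0x25);  // 0x25: ty_offset stand-in.
  return 0;
}
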
 
 
-class DebugLineSection : public ELFSection {
+class DebugLineSection : public DebugSection {
  public:
   explicit DebugLineSection(CodeDescription* desc)
+#ifdef __ELF
       : ELFSection(".debug_line", TYPE_PROGBITS, 1),
+#else
+      : MachOSection("__debug_line",
+                     "__DWARF",
+                     1,
+                     MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
         desc_(desc) { }
 
   // DWARF2 standard, figure 34.
@@ -993,8 +1554,7 @@
 
 #ifdef V8_TARGET_ARCH_X64
 
-
-class UnwindInfoSection : public ELFSection {
+class UnwindInfoSection : public DebugSection {
  public:
   explicit UnwindInfoSection(CodeDescription *desc);
   virtual bool WriteBody(Writer *w);
@@ -1080,8 +1640,13 @@
 
 
 UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
-    : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), desc_(desc)
-{ }
+#ifdef __ELF
+    : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
+#else
+    : MachOSection("__eh_frame", "__TEXT", sizeof(uintptr_t),
+                   MachOSection::S_REGULAR),
+#endif
+      desc_(desc) { }
 
 int UnwindInfoSection::WriteCIE(Writer *w) {
   Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
@@ -1213,15 +1778,14 @@
 
 #endif  // V8_TARGET_ARCH_X64
 
-
-static void CreateDWARFSections(CodeDescription* desc, ELF* elf) {
+static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
   if (desc->IsLineInfoAvailable()) {
-    elf->AddSection(new DebugInfoSection(desc));
-    elf->AddSection(new DebugAbbrevSection);
-    elf->AddSection(new DebugLineSection(desc));
+    obj->AddSection(new DebugInfoSection(desc));
+    obj->AddSection(new DebugAbbrevSection(desc));
+    obj->AddSection(new DebugLineSection(desc));
   }
 #ifdef V8_TARGET_ARCH_X64
-  elf->AddSection(new UnwindInfoSection(desc));
+  obj->AddSection(new UnwindInfoSection(desc));
 #endif
 }
 
@@ -1261,6 +1825,13 @@
   // Static initialization is necessary to prevent GDB from seeing
   // uninitialized descriptor.
   JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+
+#ifdef OBJECT_PRINT
+  void __gdb_print_v8_object(MaybeObject* object) {
+    object->Print();
+    fprintf(stdout, "\n");
+  }
+#endif
 }
 
 
@@ -1284,17 +1855,23 @@
 }
 
 
-static void RegisterCodeEntry(JITCodeEntry* entry) {
+static void RegisterCodeEntry(JITCodeEntry* entry,
+                              bool dump_if_enabled,
+                              const char* name_hint) {
 #if defined(DEBUG) && !defined(WIN32)
   static int file_num = 0;
-  if (FLAG_gdbjit_dump) {
+  if (FLAG_gdbjit_dump && dump_if_enabled) {
     static const int kMaxFileNameSize = 64;
     static const char* kElfFilePrefix = "/tmp/elfdump";
     static const char* kObjFileExt = ".o";
     char file_name[64];
 
-    OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
-                 kElfFilePrefix, file_num++, kObjFileExt);
+    OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
+                 "%s%s%d%s",
+                 kElfFilePrefix,
+                 (name_hint != NULL) ? name_hint : "",
+                 file_num++,
+                 kObjFileExt);
     WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
   }
 #endif
@@ -1327,8 +1904,19 @@
 
 
 static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+#ifdef __MACH_O
+  MachO mach_o;
+  Writer w(&mach_o);
 
+  mach_o.AddSection(new MachOTextSection(kCodeAlignment,
+                                         desc->CodeStart(),
+                                         desc->CodeSize()));
+
+  CreateDWARFSections(desc, &mach_o);
+
+  mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
+#else
   ELF elf;
   Writer w(&elf);
 
@@ -1346,6 +1934,7 @@
   CreateDWARFSections(desc, &elf);
 
   elf.Write(&w);
+#endif
 
   return CreateCodeEntry(w.buffer(), w.position());
 }
@@ -1394,17 +1983,18 @@
 
 void GDBJITInterface::AddCode(Handle<String> name,
                               Handle<Script> script,
-                              Handle<Code> code) {
+                              Handle<Code> code,
+                              CompilationInfo* info) {
   if (!FLAG_gdbjit) return;
 
   // Force initialization of line_ends array.
   GetScriptLineNumber(script, 0);
 
   if (!name.is_null()) {
-    SmartPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
-    AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script);
+    SmartArrayPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
+    AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
   } else {
-    AddCode("", *code, GDBJITInterface::FUNCTION, *script);
+    AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
   }
 }
 
@@ -1451,7 +2041,8 @@
 void GDBJITInterface::AddCode(const char* name,
                               Code* code,
                               GDBJITInterface::CodeTag tag,
-                              Script* script) {
+                              Script* script,
+                              CompilationInfo* info) {
   if (!FLAG_gdbjit) return;
 
   ScopedLock lock(mutex_);
@@ -1466,7 +2057,8 @@
                             script != NULL ? Handle<Script>(script)
                                            : Handle<Script>(),
                             lineinfo,
-                            tag);
+                            tag,
+                            info);
 
   if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
     delete lineinfo;
@@ -1481,7 +2073,18 @@
   delete lineinfo;
   e->value = entry;
 
-  RegisterCodeEntry(entry);
+  const char* name_hint = NULL;
+  bool should_dump = false;
+  if (FLAG_gdbjit_dump) {
+    if (strlen(FLAG_gdbjit_dump_filter) == 0) {
+      name_hint = name;
+      should_dump = true;
+    } else if (name != NULL) {
+      name_hint = strstr(name, FLAG_gdbjit_dump_filter);
+      should_dump = (name_hint != NULL);
+    }
+  }
+  RegisterCodeEntry(entry, should_dump, name_hint);
 }
 
 
@@ -1501,7 +2104,7 @@
     builder.AddFormatted(": code object %p", static_cast<void*>(code));
   }
 
-  AddCode(builder.Finalize(), code, tag);
+  AddCode(builder.Finalize(), code, tag, NULL, NULL);
 }
 
 
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index de6928f..2cf15bc 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -28,6 +28,8 @@
 #ifndef V8_GDB_JIT_H_
 #define V8_GDB_JIT_H_
 
+#include "allocation.h"
+
 //
 // Basic implementation of GDB JIT Interface client.
 // GDB JIT Interface is supported in GDB 7.0 and above.
@@ -41,6 +43,8 @@
 namespace v8 {
 namespace internal {
 
+class CompilationInfo;
+
 #define CODE_TAGS_LIST(V)                       \
   V(LOAD_IC)                                    \
   V(KEYED_LOAD_IC)                              \
@@ -111,11 +115,13 @@
   static void AddCode(const char* name,
                       Code* code,
                       CodeTag tag,
-                      Script* script = NULL);
+                      Script* script,
+                      CompilationInfo* info);
 
   static void AddCode(Handle<String> name,
                       Handle<Script> script,
-                      Handle<Code> code);
+                      Handle<Code> code,
+                      CompilationInfo* info);
 
   static void AddCode(CodeTag tag, String* name, Code* code);
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index c4e8f13..87066fa 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -41,79 +41,151 @@
 }
 
 
-class GlobalHandles::Node : public Malloced {
+class GlobalHandles::Node {
  public:
+  // State transition diagram:
+  // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
+  enum State {
+    FREE,
+    NORMAL,     // Normal global handle.
+    WEAK,       // Flagged as weak but not yet finalized.
+    PENDING,    // Has been recognized as only reachable by weak handles.
+    NEAR_DEATH  // Callback has informed the handle is near death.
+  };
 
-  void Initialize(Object* object) {
-    // Set the initial value of the handle.
-    object_ = object;
-    class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
-    state_  = NORMAL;
-    parameter_or_next_free_.parameter = NULL;
-    callback_ = NULL;
-  }
-
-  Node() {
-    state_ = DESTROYED;
-  }
-
-  explicit Node(Object* object) {
-    Initialize(object);
-    // Initialize link structure.
-    next_ = NULL;
-  }
-
-  ~Node() {
-    if (state_ != DESTROYED) Destroy(Isolate::Current()->global_handles());
-#ifdef DEBUG
-    // Zap the values for eager trapping.
-    object_ = NULL;
-    next_ = NULL;
-    parameter_or_next_free_.next_free = NULL;
-#endif
-  }
-
-  void Destroy(GlobalHandles* global_handles) {
-    if (state_ == WEAK || IsNearDeath()) {
-      global_handles->number_of_weak_handles_--;
-      if (object_->IsJSGlobalObject()) {
-        global_handles->number_of_global_object_weak_handles_--;
-      }
-    }
-    state_ = DESTROYED;
-  }
-
-  // Accessors for next_.
-  Node* next() { return next_; }
-  void set_next(Node* value) { next_ = value; }
-  Node** next_addr() { return &next_; }
-
-  // Accessors for next free node in the free list.
-  Node* next_free() {
-    ASSERT(state_ == DESTROYED);
-    return parameter_or_next_free_.next_free;
-  }
-  void set_next_free(Node* value) {
-    ASSERT(state_ == DESTROYED);
-    parameter_or_next_free_.next_free = value;
-  }
-
-  // Returns a link from the handle.
+  // Maps handle location (slot) to the containing node.
   static Node* FromLocation(Object** location) {
     ASSERT(OFFSET_OF(Node, object_) == 0);
     return reinterpret_cast<Node*>(location);
   }
 
-  // Returns the handle.
-  Handle<Object> handle() { return Handle<Object>(&object_); }
+  Node() {}
 
-  // Make this handle weak.
-  void MakeWeak(GlobalHandles* global_handles, void* parameter,
+#ifdef DEBUG
+  ~Node() {
+    // TODO(1428): if it's a weak handle we should have invoked its callback.
+    // Zap the values for eager trapping.
+    object_ = NULL;
+    class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+    index_ = 0;
+    independent_ = false;
+    in_new_space_list_ = false;
+    parameter_or_next_free_.next_free = NULL;
+    callback_ = NULL;
+  }
+#endif
+
+  void Initialize(int index, Node** first_free) {
+    index_ = static_cast<uint8_t>(index);
+    ASSERT(static_cast<int>(index_) == index);
+    state_ = FREE;
+    in_new_space_list_ = false;
+    parameter_or_next_free_.next_free = *first_free;
+    *first_free = this;
+  }
+
+  void Acquire(Object* object, GlobalHandles* global_handles) {
+    ASSERT(state_ == FREE);
+    object_ = object;
+    class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+    independent_ = false;
+    state_  = NORMAL;
+    parameter_or_next_free_.parameter = NULL;
+    callback_ = NULL;
+    IncreaseBlockUses(global_handles);
+  }
+
+  void Release(GlobalHandles* global_handles) {
+    ASSERT(state_ != FREE);
+    if (IsWeakRetainer()) {
+      global_handles->number_of_weak_handles_--;
+      if (object_->IsJSGlobalObject()) {
+        global_handles->number_of_global_object_weak_handles_--;
+      }
+    }
+    state_ = FREE;
+    parameter_or_next_free_.next_free = global_handles->first_free_;
+    global_handles->first_free_ = this;
+    DecreaseBlockUses(global_handles);
+  }
+
+  // Object slot accessors.
+  Object* object() const { return object_; }
+  Object** location() { return &object_; }
+  Handle<Object> handle() { return Handle<Object>(location()); }
+
+  // Wrapper class ID accessors.
+  bool has_wrapper_class_id() const {
+    return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
+  }
+  uint16_t wrapper_class_id() const { return class_id_; }
+  void set_wrapper_class_id(uint16_t class_id) {
+    class_id_ = class_id;
+  }
+
+  // State accessors.
+
+  State state() const { return state_; }
+
+  bool IsNearDeath() const {
+    // Check for PENDING to ensure correct answer when processing callbacks.
+    return state_ == PENDING || state_ == NEAR_DEATH;
+  }
+
+  bool IsWeak() const { return state_ == WEAK; }
+
+  bool IsRetainer() const { return state_ != FREE; }
+
+  bool IsStrongRetainer() const { return state_ == NORMAL; }
+
+  bool IsWeakRetainer() const {
+    return state_ == WEAK || state_ == PENDING || state_ == NEAR_DEATH;
+  }
+
+  void MarkPending() {
+    ASSERT(state_ == WEAK);
+    state_ = PENDING;
+  }
+
+  // Independent flag accessors.
+  void MarkIndependent() {
+    ASSERT(state_ != FREE);
+    independent_ = true;
+  }
+  bool is_independent() const { return independent_; }
+
+  // In-new-space-list flag accessors.
+  void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
+  bool is_in_new_space_list() const { return in_new_space_list_; }
+
+  // Callback accessor.
+  WeakReferenceCallback callback() { return callback_; }
+
+  // Callback parameter accessors.
+  void set_parameter(void* parameter) {
+    ASSERT(state_ != FREE);
+    parameter_or_next_free_.parameter = parameter;
+  }
+  void* parameter() const {
+    ASSERT(state_ != FREE);
+    return parameter_or_next_free_.parameter;
+  }
+
+  // Accessors for next free node in the free list.
+  Node* next_free() {
+    ASSERT(state_ == FREE);
+    return parameter_or_next_free_.next_free;
+  }
+  void set_next_free(Node* value) {
+    ASSERT(state_ == FREE);
+    parameter_or_next_free_.next_free = value;
+  }
+
+  void MakeWeak(GlobalHandles* global_handles,
+                void* parameter,
                 WeakReferenceCallback callback) {
-    LOG(global_handles->isolate(),
-        HandleEvent("GlobalHandle::MakeWeak", handle().location()));
-    ASSERT(state_ != DESTROYED);
-    if (state_ != WEAK && !IsNearDeath()) {
+    ASSERT(state_ != FREE);
+    if (!IsWeakRetainer()) {
       global_handles->number_of_weak_handles_++;
       if (object_->IsJSGlobalObject()) {
         global_handles->number_of_global_object_weak_handles_++;
@@ -125,10 +197,8 @@
   }
 
   void ClearWeakness(GlobalHandles* global_handles) {
-    LOG(global_handles->isolate(),
-        HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
-    ASSERT(state_ != DESTROYED);
-    if (state_ == WEAK || IsNearDeath()) {
+    ASSERT(state_ != FREE);
+    if (IsWeakRetainer()) {
       global_handles->number_of_weak_handles_--;
       if (object_->IsJSGlobalObject()) {
         global_handles->number_of_global_object_weak_handles_--;
@@ -138,43 +208,12 @@
     set_parameter(NULL);
   }
 
-  bool IsNearDeath() {
-    // Check for PENDING to ensure correct answer when processing callbacks.
-    return state_ == PENDING || state_ == NEAR_DEATH;
-  }
-
-  bool IsWeak() {
-    return state_ == WEAK;
-  }
-
-  bool CanBeRetainer() {
-    return state_ != DESTROYED && state_ != NEAR_DEATH;
-  }
-
-  void SetWrapperClassId(uint16_t class_id) {
-    class_id_ = class_id;
-  }
-
-  // Returns the id for this weak handle.
-  void set_parameter(void* parameter) {
-    ASSERT(state_ != DESTROYED);
-    parameter_or_next_free_.parameter = parameter;
-  }
-  void* parameter() {
-    ASSERT(state_ != DESTROYED);
-    return parameter_or_next_free_.parameter;
-  }
-
-  // Returns the callback for this weak handle.
-  WeakReferenceCallback callback() { return callback_; }
-
   bool PostGarbageCollectionProcessing(Isolate* isolate,
                                        GlobalHandles* global_handles) {
     if (state_ != Node::PENDING) return false;
-    LOG(isolate, HandleEvent("GlobalHandle::Processing", handle().location()));
     WeakReferenceCallback func = callback();
     if (func == NULL) {
-      Destroy(global_handles);
+      Release(global_handles);
       return false;
     }
     void* par = parameter();
@@ -183,13 +222,6 @@
 
     v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
     {
-      // Forbid reuse of destroyed nodes as they might be already deallocated.
-      // It's fine though to reuse nodes that were destroyed in weak callback
-      // as those cannot be deallocated until we are back from the callback.
-      global_handles->set_first_free(NULL);
-      if (global_handles->first_deallocated()) {
-        global_handles->first_deallocated()->set_next(global_handles->head());
-      }
       // Check that we are not passing a finalized external string to
       // the callback.
       ASSERT(!object_->IsExternalAsciiString() ||
@@ -206,95 +238,145 @@
     return true;
   }
 
-  // Place the handle address first to avoid offset computation.
-  Object* object_;  // Storage for object pointer.
+ private:
+  inline NodeBlock* FindBlock();
+  inline void IncreaseBlockUses(GlobalHandles* global_handles);
+  inline void DecreaseBlockUses(GlobalHandles* global_handles);
 
+  // Storage for object pointer.
+  // Placed first to avoid offset computation.
+  Object* object_;
+
+  // Next word stores class_id, index, state, and independent.
+  // Note: the most aligned fields should go first.
+
+  // Wrapper class ID.
   uint16_t class_id_;
 
-  // Transition diagram:
-  // NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, DESTROYED }
-  enum State {
-    NORMAL,      // Normal global handle.
-    WEAK,        // Flagged as weak but not yet finalized.
-    PENDING,     // Has been recognized as only reachable by weak handles.
-    NEAR_DEATH,  // Callback has informed the handle is near death.
-    DESTROYED
-  };
-  State state_ : 4;  // Need one more bit for MSVC as it treats enums as signed.
+  // Index in the containing handle block.
+  uint8_t index_;
 
- private:
+  // Need one more bit for MSVC as it treats enums as signed.
+  State state_ : 4;
+
+  bool independent_ : 1;
+  bool in_new_space_list_ : 1;
+
   // Handle specific callback.
   WeakReferenceCallback callback_;
-  // Provided data for callback.  In DESTROYED state, this is used for
+
+  // Provided data for callback.  In FREE state, this is used for
   // the free list link.
   union {
     void* parameter;
     Node* next_free;
   } parameter_or_next_free_;
 
-  // Linkage for the list.
-  Node* next_;
-
- public:
-  TRACK_MEMORY("GlobalHandles::Node")
+  DISALLOW_COPY_AND_ASSIGN(Node);
 };
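
// The embedder-facing counterpart of the state machine above, using the
// public V8 API of this period (reproduced from memory, so treat the exact
// names and signatures as approximate rather than authoritative).
// Persistent::New() lands in GlobalHandles::Create() (FREE -> NORMAL),
// MakeWeak() moves the node to WEAK, the weak callback runs once the object
// is only weakly reachable (PENDING -> NEAR_DEATH), and Dispose() returns
// the node to FREE.
#include <v8.h>

static void OnWeak(v8::Persistent<v8::Value> object, void* parameter) {
  // At this point only weak handles keep the object alive; failing to
  // Dispose() here would leak the global handle.
  object.Dispose();
}

void WeakHandleExample() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  v8::Persistent<v8::Object> handle =
      v8::Persistent<v8::Object>::New(v8::Object::New());  // -> NORMAL
  handle.MakeWeak(NULL, OnWeak);                           // NORMAL -> WEAK
  // handle.ClearWeak() would go back to NORMAL; an explicit handle.Dispose()
  // releases the node immediately (-> FREE).
}
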
 
 
-class GlobalHandles::Pool {
-  public:
-    Pool() {
-      current_ = new Chunk();
-      current_->previous = NULL;
-      next_ = current_->nodes;
-      limit_ = current_->nodes + kNodesPerChunk;
-    }
+class GlobalHandles::NodeBlock {
+ public:
+  static const int kSize = 256;
 
-    ~Pool() {
-      if (current_ != NULL) {
-        Release();
+  explicit NodeBlock(NodeBlock* next)
+      : next_(next), used_nodes_(0), next_used_(NULL), prev_used_(NULL) {}
+
+  void PutNodesOnFreeList(Node** first_free) {
+    for (int i = kSize - 1; i >= 0; --i) {
+      nodes_[i].Initialize(i, first_free);
+    }
+  }
+
+  Node* node_at(int index) {
+    ASSERT(0 <= index && index < kSize);
+    return &nodes_[index];
+  }
+
+  void IncreaseUses(GlobalHandles* global_handles) {
+    ASSERT(used_nodes_ < kSize);
+    if (used_nodes_++ == 0) {
+      NodeBlock* old_first = global_handles->first_used_block_;
+      global_handles->first_used_block_ = this;
+      next_used_ = old_first;
+      prev_used_ = NULL;
+      if (old_first == NULL) return;
+      old_first->prev_used_ = this;
+    }
+  }
+
+  void DecreaseUses(GlobalHandles* global_handles) {
+    ASSERT(used_nodes_ > 0);
+    if (--used_nodes_ == 0) {
+      if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
+      if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
+      if (this == global_handles->first_used_block_) {
+        global_handles->first_used_block_ = next_used_;
       }
     }
+  }
 
-    Node* Allocate() {
-      if (next_ < limit_) {
-        return next_++;
-      }
-      return SlowAllocate();
-    }
+  // Next block in the list of all blocks.
+  NodeBlock* next() const { return next_; }
 
-    void Release() {
-      Chunk* current = current_;
-      ASSERT(current != NULL);  // At least a single block must by allocated
-      do {
-        Chunk* previous = current->previous;
-        delete current;
-        current = previous;
-      } while (current != NULL);
-      current_ = NULL;
-      next_ = limit_ = NULL;
-    }
+  // Next/previous block in the list of blocks with used nodes.
+  NodeBlock* next_used() const { return next_used_; }
+  NodeBlock* prev_used() const { return prev_used_; }
 
-  private:
-    static const int kNodesPerChunk = (1 << 12) - 1;
-    struct Chunk : public Malloced {
-      Chunk* previous;
-      Node nodes[kNodesPerChunk];
-    };
+ private:
+  Node nodes_[kSize];
+  NodeBlock* const next_;
+  int used_nodes_;
+  NodeBlock* next_used_;
+  NodeBlock* prev_used_;
+};
 
-    Node* SlowAllocate() {
-      Chunk* chunk = new Chunk();
-      chunk->previous = current_;
-      current_ = chunk;
 
-      Node* new_nodes = current_->nodes;
-      next_ = new_nodes + 1;
-      limit_ = new_nodes + kNodesPerChunk;
-      return new_nodes;
-    }
+GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
+  intptr_t ptr = reinterpret_cast<intptr_t>(this);
+  ptr = ptr - index_ * sizeof(Node);
+  NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
+  ASSERT(block->node_at(index_) == this);
+  return block;
+}
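
// A stand-alone sketch of the container-recovery trick FindBlock() relies on:
// a node stores only its index, and the owning block is recovered by stepping
// back index_ * sizeof(Node) bytes. This works only because the node array is
// the first member of NodeBlock. The types below are simplified stand-ins.
#include <assert.h>
#include <stdint.h>

struct MiniNode {
  void* object_;   // Stand-in for the real node state.
  uint8_t index_;  // Position of this node inside its block.
};

struct MiniBlock {
  MiniNode nodes_[256];  // Must stay the first member.
  MiniBlock* next_;
};

MiniBlock* FindMiniBlock(MiniNode* node) {
  intptr_t ptr = reinterpret_cast<intptr_t>(node);
  ptr = ptr - node->index_ * sizeof(MiniNode);
  MiniBlock* block = reinterpret_cast<MiniBlock*>(ptr);
  assert(&block->nodes_[node->index_] == node);  // Same check as FindBlock().
  return block;
}

int main() {
  static MiniBlock block;
  for (int i = 0; i < 256; ++i) {
    block.nodes_[i].index_ = static_cast<uint8_t>(i);
  }
  assert(FindMiniBlock(&block.nodes_[200]) == &block);
  return 0;
}
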
 
-    Chunk* current_;
-    Node* next_;
-    Node* limit_;
+
+void GlobalHandles::Node::IncreaseBlockUses(GlobalHandles* global_handles) {
+  FindBlock()->IncreaseUses(global_handles);
+}
+
+
+void GlobalHandles::Node::DecreaseBlockUses(GlobalHandles* global_handles) {
+  FindBlock()->DecreaseUses(global_handles);
+}
+
+
+class GlobalHandles::NodeIterator {
+ public:
+  explicit NodeIterator(GlobalHandles* global_handles)
+      : block_(global_handles->first_used_block_),
+        index_(0) {}
+
+  bool done() const { return block_ == NULL; }
+
+  Node* node() const {
+    ASSERT(!done());
+    return block_->node_at(index_);
+  }
+
+  void Advance() {
+    ASSERT(!done());
+    if (++index_ < NodeBlock::kSize) return;
+    index_ = 0;
+    block_ = block_->next_used();
+  }
+
+ private:
+  NodeBlock* block_;
+  int index_;
+
+  DISALLOW_COPY_AND_ASSIGN(NodeIterator);
 };
 
 
@@ -302,41 +384,39 @@
     : isolate_(isolate),
       number_of_weak_handles_(0),
       number_of_global_object_weak_handles_(0),
-      head_(NULL),
+      first_block_(NULL),
+      first_used_block_(NULL),
       first_free_(NULL),
-      first_deallocated_(NULL),
-      pool_(new Pool()),
-      post_gc_processing_count_(0),
-      object_groups_(4) {
-}
+      post_gc_processing_count_(0) {}
 
 
 GlobalHandles::~GlobalHandles() {
-  delete pool_;
-  pool_ = 0;
+  NodeBlock* block = first_block_;
+  while (block != NULL) {
+    NodeBlock* tmp = block->next();
+    delete block;
+    block = tmp;
+  }
+  first_block_ = NULL;
 }
 
 
 Handle<Object> GlobalHandles::Create(Object* value) {
   isolate_->counters()->global_handles()->Increment();
-  Node* result;
-  if (first_free()) {
-    // Take the first node in the free list.
-    result = first_free();
-    set_first_free(result->next_free());
-  } else if (first_deallocated()) {
-    // Next try deallocated list
-    result = first_deallocated();
-    set_first_deallocated(result->next_free());
-    ASSERT(result->next() == head());
-    set_head(result);
-  } else {
-    // Allocate a new node.
-    result = pool_->Allocate();
-    result->set_next(head());
-    set_head(result);
+  if (first_free_ == NULL) {
+    first_block_ = new NodeBlock(first_block_);
+    first_block_->PutNodesOnFreeList(&first_free_);
   }
-  result->Initialize(value);
+  ASSERT(first_free_ != NULL);
+  // Take the first node in the free list.
+  Node* result = first_free_;
+  first_free_ = result->next_free();
+  result->Acquire(value, this);
+  if (isolate_->heap()->InNewSpace(value) &&
+      !result->is_in_new_space_list()) {
+    new_space_nodes_.Add(result);
+    result->set_in_new_space_list(true);
+  }
   return result->handle();
 }
 
@@ -344,11 +424,7 @@
 void GlobalHandles::Destroy(Object** location) {
   isolate_->counters()->global_handles()->Decrement();
   if (location == NULL) return;
-  Node* node = Node::FromLocation(location);
-  node->Destroy(this);
-  // Link the destroyed.
-  node->set_next_free(first_free());
-  set_first_free(node);
+  Node::FromLocation(location)->Release(this);
 }
 
 
@@ -364,6 +440,11 @@
 }
 
 
+void GlobalHandles::MarkIndependent(Object** location) {
+  Node::FromLocation(location)->MarkIndependent();
+}
+
+
 bool GlobalHandles::IsNearDeath(Object** location) {
   return Node::FromLocation(location)->IsNearDeath();
 }
@@ -375,142 +456,173 @@
 
 
 void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
-  Node::FromLocation(location)->SetWrapperClassId(class_id);
+  Node::FromLocation(location)->set_wrapper_class_id(class_id);
 }
 
 
 void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
-  // Traversal of GC roots in the global handle list that are marked as
-  // WEAK or PENDING.
-  for (Node* current = head_; current != NULL; current = current->next()) {
-    if (current->state_ == Node::WEAK
-      || current->state_ == Node::PENDING
-      || current->state_ == Node::NEAR_DEATH) {
-      v->VisitPointer(&current->object_);
-    }
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    if (it.node()->IsWeakRetainer()) v->VisitPointer(it.node()->location());
   }
 }
 
 
 void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
                                      WeakReferenceCallback callback) {
-  for (Node* current = head_; current != NULL; current = current->next()) {
-    if (current->IsWeak() && current->callback() == callback) {
-      f(current->object_, current->parameter());
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    if (it.node()->IsWeak() && it.node()->callback() == callback) {
+      f(it.node()->object(), it.node()->parameter());
     }
   }
 }
 
 
 void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
-  for (Node* current = head_; current != NULL; current = current->next()) {
-    if (current->state_ == Node::WEAK) {
-      if (f(&current->object_)) {
-        current->state_ = Node::PENDING;
-        LOG(isolate_,
-            HandleEvent("GlobalHandle::Pending", current->handle().location()));
-      }
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    if (it.node()->IsWeak() && f(it.node()->location())) {
+      it.node()->MarkPending();
     }
   }
 }
 
 
-bool GlobalHandles::PostGarbageCollectionProcessing() {
+void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
+  for (int i = 0; i < new_space_nodes_.length(); ++i) {
+    Node* node = new_space_nodes_[i];
+    if (node->IsStrongRetainer() ||
+        (node->IsWeakRetainer() && !node->is_independent())) {
+      v->VisitPointer(node->location());
+    }
+  }
+}
+
+
+void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
+    WeakSlotCallbackWithHeap f) {
+  for (int i = 0; i < new_space_nodes_.length(); ++i) {
+    Node* node = new_space_nodes_[i];
+    ASSERT(node->is_in_new_space_list());
+    if (node->is_independent() && node->IsWeak() &&
+        f(isolate_->heap(), node->location())) {
+      node->MarkPending();
+    }
+  }
+}
+
+
+void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
+  for (int i = 0; i < new_space_nodes_.length(); ++i) {
+    Node* node = new_space_nodes_[i];
+    ASSERT(node->is_in_new_space_list());
+    if (node->is_independent() && node->IsWeakRetainer()) {
+      v->VisitPointer(node->location());
+    }
+  }
+}
+
+
+bool GlobalHandles::PostGarbageCollectionProcessing(
+    GarbageCollector collector) {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
-  // At the same time deallocate all DESTROYED nodes.
   ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count_;
   bool next_gc_likely_to_collect_more = false;
-  Node** p = &head_;
-  while (*p != NULL) {
-    if ((*p)->PostGarbageCollectionProcessing(isolate_, this)) {
-      if (initial_post_gc_processing_count != post_gc_processing_count_) {
-        // Weak callback triggered another GC and another round of
-        // PostGarbageCollection processing.  The current node might
-        // have been deleted in that round, so we need to bail out (or
-        // restart the processing).
-        break;
+  if (collector == SCAVENGER) {
+    for (int i = 0; i < new_space_nodes_.length(); ++i) {
+      Node* node = new_space_nodes_[i];
+      ASSERT(node->is_in_new_space_list());
+      // Skip dependent handles. Their weak callbacks might expect to be
+      // called between two global garbage collection callbacks which
+      // are not called for minor collections.
+      if (!node->is_independent()) continue;
+      if (node->PostGarbageCollectionProcessing(isolate_, this)) {
+        if (initial_post_gc_processing_count != post_gc_processing_count_) {
+          // Weak callback triggered another GC and another round of
+          // PostGarbageCollection processing.  The current node might
+          // have been deleted in that round, so we need to bail out (or
+          // restart the processing).
+          return next_gc_likely_to_collect_more;
+        }
+      }
+      if (!node->IsRetainer()) {
+        next_gc_likely_to_collect_more = true;
       }
     }
-    if ((*p)->state_ == Node::DESTROYED) {
-      // Delete the link.
-      Node* node = *p;
-      *p = node->next();  // Update the link.
-      if (first_deallocated()) {
-        first_deallocated()->set_next(node);
+  } else {
+    for (NodeIterator it(this); !it.done(); it.Advance()) {
+      if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
+        if (initial_post_gc_processing_count != post_gc_processing_count_) {
+          // See the comment above.
+          return next_gc_likely_to_collect_more;
+        }
       }
-      node->set_next_free(first_deallocated());
-      set_first_deallocated(node);
-      next_gc_likely_to_collect_more = true;
+      if (!it.node()->IsRetainer()) {
+        next_gc_likely_to_collect_more = true;
+      }
+    }
+  }
+  // Update the list of new space nodes.
+  int last = 0;
+  for (int i = 0; i < new_space_nodes_.length(); ++i) {
+    Node* node = new_space_nodes_[i];
+    ASSERT(node->is_in_new_space_list());
+    if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) {
+      new_space_nodes_[last++] = node;
     } else {
-      p = (*p)->next_addr();
+      node->set_in_new_space_list(false);
     }
   }
-  set_first_free(NULL);
-  if (first_deallocated()) {
-    first_deallocated()->set_next(head());
-  }
-
+  new_space_nodes_.Rewind(last);
   return next_gc_likely_to_collect_more;
 }
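
// The new-space list update above uses a compact-in-place idiom: surviving
// entries are copied toward the front and List::Rewind(last) drops the tail
// (the real loop also clears the in-new-space flag on dropped nodes). A
// generic sketch of the same idiom with std::vector; names are illustrative.
#include <stddef.h>
#include <vector>

template <typename T, typename Predicate>
void CompactInPlace(std::vector<T>* list, Predicate keep) {
  size_t last = 0;
  for (size_t i = 0; i < list->size(); ++i) {
    if (keep((*list)[i])) (*list)[last++] = (*list)[i];
  }
  list->resize(last);  // std::vector's equivalent of Rewind(last).
}

static bool IsEven(int x) { return (x & 1) == 0; }

int main() {
  std::vector<int> v;
  for (int i = 0; i < 10; ++i) v.push_back(i);
  CompactInPlace(&v, IsEven);  // v now holds 0, 2, 4, 6, 8.
  return 0;
}
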
 
 
 void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
-  // Traversal of global handles marked as NORMAL.
-  for (Node* current = head_; current != NULL; current = current->next()) {
-    if (current->state_ == Node::NORMAL) {
-      v->VisitPointer(&current->object_);
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    if (it.node()->IsStrongRetainer()) {
+      v->VisitPointer(it.node()->location());
     }
   }
 }
 
 
 void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
-  for (Node* current = head_; current != NULL; current = current->next()) {
-    if (current->state_ != Node::DESTROYED) {
-      v->VisitPointer(&current->object_);
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    if (it.node()->IsRetainer()) {
+      v->VisitPointer(it.node()->location());
     }
   }
 }
 
 
 void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
-  for (Node* current = head_; current != NULL; current = current->next()) {
-    if (current->class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId &&
-        current->CanBeRetainer()) {
-      v->VisitEmbedderReference(&current->object_, current->class_id_);
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) {
+      v->VisitEmbedderReference(it.node()->location(),
+                                it.node()->wrapper_class_id());
     }
   }
 }
 
 
-void GlobalHandles::TearDown() {
-  // Reset all the lists.
-  set_head(NULL);
-  set_first_free(NULL);
-  set_first_deallocated(NULL);
-  pool_->Release();
-}
-
-
 void GlobalHandles::RecordStats(HeapStats* stats) {
   *stats->global_handle_count = 0;
   *stats->weak_global_handle_count = 0;
   *stats->pending_global_handle_count = 0;
   *stats->near_death_global_handle_count = 0;
-  *stats->destroyed_global_handle_count = 0;
-  for (Node* current = head_; current != NULL; current = current->next()) {
+  *stats->free_global_handle_count = 0;
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
     *stats->global_handle_count += 1;
-    if (current->state_ == Node::WEAK) {
+    if (it.node()->state() == Node::WEAK) {
       *stats->weak_global_handle_count += 1;
-    } else if (current->state_ == Node::PENDING) {
+    } else if (it.node()->state() == Node::PENDING) {
       *stats->pending_global_handle_count += 1;
-    } else if (current->state_ == Node::NEAR_DEATH) {
+    } else if (it.node()->state() == Node::NEAR_DEATH) {
       *stats->near_death_global_handle_count += 1;
-    } else if (current->state_ == Node::DESTROYED) {
-      *stats->destroyed_global_handle_count += 1;
+    } else if (it.node()->state() == Node::FREE) {
+      *stats->free_global_handle_count += 1;
     }
   }
 }
@@ -524,12 +636,12 @@
   int near_death = 0;
   int destroyed = 0;
 
-  for (Node* current = head_; current != NULL; current = current->next()) {
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
     total++;
-    if (current->state_ == Node::WEAK) weak++;
-    if (current->state_ == Node::PENDING) pending++;
-    if (current->state_ == Node::NEAR_DEATH) near_death++;
-    if (current->state_ == Node::DESTROYED) destroyed++;
+    if (it.node()->state() == Node::WEAK) weak++;
+    if (it.node()->state() == Node::PENDING) pending++;
+    if (it.node()->state() == Node::NEAR_DEATH) near_death++;
+    if (it.node()->state() == Node::FREE) destroyed++;
   }
 
   PrintF("Global Handle Statistics:\n");
@@ -537,17 +649,17 @@
   PrintF("  # weak       = %d\n", weak);
   PrintF("  # pending    = %d\n", pending);
   PrintF("  # near_death = %d\n", near_death);
-  PrintF("  # destroyed  = %d\n", destroyed);
+  PrintF("  # free       = %d\n", destroyed);
   PrintF("  # total      = %d\n", total);
 }
 
 void GlobalHandles::Print() {
   PrintF("Global handles:\n");
-  for (Node* current = head_; current != NULL; current = current->next()) {
-    PrintF("  handle %p to %p (weak=%d)\n",
-           reinterpret_cast<void*>(current->handle().location()),
-           reinterpret_cast<void*>(*current->handle()),
-           current->state_ == Node::WEAK);
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    PrintF("  handle %p to %p%s\n",
+           reinterpret_cast<void*>(it.node()->location()),
+           reinterpret_cast<void*>(it.node()->object()),
+           it.node()->IsWeak() ? " (weak)" : "");
   }
 }
 
@@ -558,6 +670,11 @@
 void GlobalHandles::AddObjectGroup(Object*** handles,
                                    size_t length,
                                    v8::RetainedObjectInfo* info) {
+#ifdef DEBUG
+  for (size_t i = 0; i < length; ++i) {
+    ASSERT(!Node::FromLocation(handles[i])->is_independent());
+  }
+#endif
   if (length == 0) {
     if (info != NULL) info->Dispose();
     return;
@@ -569,6 +686,12 @@
 void GlobalHandles::AddImplicitReferences(HeapObject** parent,
                                           Object*** children,
                                           size_t length) {
+#ifdef DEBUG
+  ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->is_independent());
+  for (size_t i = 0; i < length; ++i) {
+    ASSERT(!Node::FromLocation(children[i])->is_independent());
+  }
+#endif
   if (length == 0) return;
   implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
 }
@@ -590,4 +713,9 @@
 }
 
 
+void GlobalHandles::TearDown() {
+  // TODO(1428): invoke weak callbacks.
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/global-handles.h b/src/global-handles.h
index 2171b2c..153d4da 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 
 #include "../include/v8-profiler.h"
 
-#include "list-inl.h"
+#include "list.h"
 
 namespace v8 {
 namespace internal {
@@ -146,6 +146,9 @@
   // Clear the weakness of a global handle.
   void ClearWeakness(Object** location);
 
+  // Mark a global handle as independent of any object group.
+  void MarkIndependent(Object** location);
+
   // Tells whether global handle is near death.
   static bool IsNearDeath(Object** location);
 
@@ -154,7 +157,7 @@
 
   // Process pending weak handles.
   // Returns true if next major GC is likely to collect more garbage.
-  bool PostGarbageCollectionProcessing();
+  bool PostGarbageCollectionProcessing(GarbageCollector collector);
 
   // Iterates over all strong handles.
   void IterateStrongRoots(ObjectVisitor* v);
@@ -176,6 +179,21 @@
   // them as pending.
   void IdentifyWeakHandles(WeakSlotCallback f);
 
+  // NOTE: Three ...NewSpace... functions below are used during
+  // scavenge collections and iterate over sets of handles that are
+  // guaranteed to contain all handles holding new space objects (but
+  // may also include old space objects).
+
+  // Iterates over strong and dependent handles. See the note above.
+  void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
+
+  // Finds weak independent handles satisfying the callback predicate
+  // and marks them as pending. See the note above.
+  void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
+
+  // Iterates over weak independent handles. See the note above.
+  void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
+
   // Add an object group.
   // Should be only used in GC callback function before a collection.
   // All groups are destroyed after a mark-compact collection.
@@ -211,12 +229,14 @@
   void PrintStats();
   void Print();
 #endif
-  class Pool;
+
  private:
   explicit GlobalHandles(Isolate* isolate);
 
-  // Internal node structure, one for each global handle.
+  // Internal node structures.
   class Node;
+  class NodeBlock;
+  class NodeIterator;
 
   Isolate* isolate_;
 
@@ -228,35 +248,21 @@
   // number_of_weak_handles_.
   int number_of_global_object_weak_handles_;
 
-  // Global handles are kept in a single linked list pointed to by head_.
-  Node* head_;
-  Node* head() { return head_; }
-  void set_head(Node* value) { head_ = value; }
+  // List of all allocated node blocks.
+  NodeBlock* first_block_;
 
-  // Free list for DESTROYED global handles not yet deallocated.
+  // List of node blocks with used nodes.
+  NodeBlock* first_used_block_;
+
+  // Free list of nodes.
   Node* first_free_;
-  Node* first_free() { return first_free_; }
-  void set_first_free(Node* value) { first_free_ = value; }
 
-  // List of deallocated nodes.
-  // Deallocated nodes form a prefix of all the nodes and
-  // |first_deallocated| points to last deallocated node before
-  // |head|.  Those deallocated nodes are additionally linked
-  // by |next_free|:
-  //                                    1st deallocated  head
-  //                                           |          |
-  //                                           V          V
-  //    node          node        ...         node       node
-  //      .next      -> .next ->                .next ->
-  //   <- .next_free <- .next_free           <- .next_free
-  Node* first_deallocated_;
-  Node* first_deallocated() { return first_deallocated_; }
-  void set_first_deallocated(Node* value) {
-    first_deallocated_ = value;
-  }
+  // Contains all nodes holding new space objects. Note: when the list
+  // is accessed, some of the objects may have been promoted already.
+  List<Node*> new_space_nodes_;
 
-  Pool* pool_;
   int post_gc_processing_count_;
+
   List<ObjectGroup*> object_groups_;
   List<ImplicitRefGroup*> implicit_ref_groups_;
 
diff --git a/src/globals.h b/src/globals.h
index 5ab9806..6c6966a 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,35 @@
 #ifndef V8_GLOBALS_H_
 #define V8_GLOBALS_H_
 
+// Define V8_INFINITY
+#define V8_INFINITY INFINITY
+
+// GCC-specific handling of V8_INFINITY
+#ifdef __GNUC__
+
+#define __GNUC_VERSION_FOR_INFTY__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
+
+// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
+// warning flag and certain versions of GCC due to a bug:
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
+// For now, we use the more involved template-based version from <limits>, but
+// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
+// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
+#if __GNUC_VERSION_FOR_INFTY__ >= 29600 && __GNUC_VERSION_FOR_INFTY__ < 40100
+#include <limits>
+#undef V8_INFINITY
+#define V8_INFINITY std::numeric_limits<double>::infinity()
+#endif
+#undef __GNUC_VERSION_FOR_INFTY__
+
+#endif  // __GNUC__
+
+#ifdef _MSC_VER
+#undef V8_INFINITY
+#define V8_INFINITY HUGE_VAL
+#endif
+
+
 #include "../include/v8stdint.h"
 
 namespace v8 {
@@ -199,6 +228,8 @@
 const int kIntptrSize   = sizeof(intptr_t);  // NOLINT
 const int kPointerSize  = sizeof(void*);     // NOLINT
 
+const int kDoubleSizeLog2 = 3;
+
 #if V8_HOST_ARCH_64_BIT
 const int kPointerSizeLog2 = 3;
 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
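
The new preamble selects an infinity constant per toolchain and adds kDoubleSizeLog2. The following standalone sketch mirrors that selection logic and checks the size constant; MY_INFINITY and MY_GCC_VERSION are illustrative names, not part of the V8 build:

// Standalone sketch of the per-compiler infinity selection above, plus a
// sanity check for the new kDoubleSizeLog2 constant.
#include <cmath>    // INFINITY, HUGE_VAL, std::isinf
#include <cstdio>

#define MY_INFINITY INFINITY

#ifdef __GNUC__
// 2.96.0 maps to 29600 and 4.1.0 to 40100, so the affected range becomes
// a simple numeric comparison.
#define MY_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
#if MY_GCC_VERSION >= 29600 && MY_GCC_VERSION < 40100
// Affected GCCs reject the INFINITY macro under -pedantic, so fall back
// to the template-based value from <limits>.
#include <limits>
#undef MY_INFINITY
#define MY_INFINITY std::numeric_limits<double>::infinity()
#endif
#undef MY_GCC_VERSION
#endif  // __GNUC__

#ifdef _MSC_VER
#undef MY_INFINITY
#define MY_INFINITY HUGE_VAL  // overflows to +infinity on MSVC
#endif

const int kDoubleSizeLog2 = 3;
static_assert(sizeof(double) == (1u << kDoubleSizeLog2),
              "kDoubleSizeLog2 must agree with sizeof(double)");

int main() {
  const double inf = MY_INFINITY;
  std::printf("is infinite: %d\n", std::isinf(inf) ? 1 : 0);
  return 0;
}
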
diff --git a/src/handles.cc b/src/handles.cc
index 326de86..c482fa6 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -214,9 +214,10 @@
 }
 
 
-void NormalizeElements(Handle<JSObject> object) {
-  CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
-                          object->NormalizeElements());
+Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->NormalizeElements(),
+                     SeededNumberDictionary);
 }
 
 
@@ -228,12 +229,14 @@
 }
 
 
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
-                         uint32_t index,
-                         Handle<Object> value,
-                         PropertyDetails details) {
-  CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
-                          dictionary->Set(index, *value, details));
+Handle<SeededNumberDictionary> SeededNumberDictionarySet(
+    Handle<SeededNumberDictionary> dictionary,
+    uint32_t index,
+    Handle<Object> value,
+    PropertyDetails details) {
+  CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+                     dictionary->Set(index, *value, details),
+                     SeededNumberDictionary);
 }
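
SeededNumberDictionarySet now returns the dictionary handle because Set may grow the dictionary and hand back a different backing object; the header later in this change marks the declaration MUST_USE_RESULT so callers keep using the returned handle. A miniature version of that contract, with a made-up GrowableDict type in place of the V8 dictionary:

// Miniature version of the "Set may reallocate, continue with the
// returned container" contract. GrowableDict and the pretend capacity
// are stand-ins, not V8 types.
#include <cstdint>
#include <cstdio>
#include <memory>
#include <unordered_map>

struct GrowableDict {
  std::unordered_map<uint32_t, int> entries;
};

// When the (pretend) capacity is exceeded, a new backing object is
// allocated, so the caller must continue with the returned dictionary.
[[nodiscard]] std::shared_ptr<GrowableDict> DictSet(
    std::shared_ptr<GrowableDict> dict, uint32_t index, int value) {
  const size_t kPretendCapacity = 4;
  if (dict->entries.size() >= kPretendCapacity) {
    auto grown = std::make_shared<GrowableDict>(*dict);  // "reallocation"
    grown->entries[index] = value;
    return grown;
  }
  dict->entries[index] = value;
  return dict;
}

int main() {
  auto dict = std::make_shared<GrowableDict>();
  for (uint32_t i = 0; i < 6; ++i) {
    dict = DictSet(dict, i, static_cast<int>(i) * 10);  // always rebind
  }
  std::printf("entries: %zu\n", dict->entries.size());
  return 0;
}

Rebinding dict on every call is the analogue of writing dictionary = SeededNumberDictionarySet(dictionary, ...) at the call sites.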
 
 
@@ -258,7 +261,7 @@
 }
 
 
-Handle<Object> SetProperty(Handle<JSObject> object,
+Handle<Object> SetProperty(Handle<JSReceiver> object,
                            Handle<String> key,
                            Handle<Object> value,
                            PropertyAttributes attributes,
@@ -353,7 +356,7 @@
 }
 
 
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            const char* name) {
   Isolate* isolate = obj->GetIsolate();
   Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
@@ -369,7 +372,7 @@
 }
 
 
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            Handle<String> name,
                            LookupResult* result) {
   PropertyAttributes attributes;
@@ -419,43 +422,18 @@
 
 
 Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
-                                   bool create_if_needed) {
-  Isolate* isolate = obj->GetIsolate();
-  Object* holder = obj->BypassGlobalProxy();
-  if (holder->IsUndefined()) return isolate->factory()->undefined_value();
-  obj = Handle<JSObject>(JSObject::cast(holder), isolate);
+                                   JSObject::HiddenPropertiesFlag flag) {
+  CALL_HEAP_FUNCTION(obj->GetIsolate(),
+                     obj->GetHiddenProperties(flag),
+                     Object);
+}
 
-  if (obj->HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden symbol. Since the
-    // hidden symbols hash code is zero (and no other string has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = obj->map()->instance_descriptors();
-    if ((descriptors->number_of_descriptors() > 0) &&
-        (descriptors->GetKey(0) == isolate->heap()->hidden_symbol()) &&
-        descriptors->IsProperty(0)) {
-      ASSERT(descriptors->GetType(0) == FIELD);
-      return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)),
-                            isolate);
-    }
-  }
 
-  // Only attempt to find the hidden properties in the local object and not
-  // in the prototype chain.  Note that HasLocalProperty() can cause a GC in
-  // the general case in the presence of interceptors.
-  if (!obj->HasHiddenPropertiesObject()) {
-    // Hidden properties object not found. Allocate a new hidden properties
-    // object if requested. Otherwise return the undefined value.
-    if (create_if_needed) {
-      Handle<Object> hidden_obj =
-          isolate->factory()->NewJSObject(isolate->object_function());
-      CALL_HEAP_FUNCTION(isolate,
-                         obj->SetHiddenPropertiesObject(*hidden_obj), Object);
-    } else {
-      return isolate->factory()->undefined_value();
-    }
-  }
-  return Handle<Object>(obj->GetHiddenPropertiesObject(), isolate);
+int GetIdentityHash(Handle<JSObject> obj) {
+  CALL_AND_RETRY(obj->GetIsolate(),
+                 obj->GetIdentityHash(JSObject::ALLOW_CREATION),
+                 return Smi::cast(__object__)->value(),
+                 return 0);
 }
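
GetIdentityHash above leans on the allocate-then-retry shape of CALL_AND_RETRY: try the heap call, and if allocation fails, collect garbage and try again, falling back to a default (here return 0) if the retry also fails. A self-contained sketch of that control flow, with MaybeResult and CollectGarbage stand-ins rather than the real macro machinery:

// Sketch of the allocate/retry/fallback flow behind CALL_AND_RETRY-style
// helpers. MaybeResult, CollectGarbage and the lambda below are stand-ins.
#include <cstdio>
#include <functional>

struct MaybeResult {
  bool ok;
  int value;
};

static bool g_gc_ran = false;

static void CollectGarbage() {
  // In the real VM this frees memory; here it just flips a flag so the
  // retried allocation can succeed.
  g_gc_ran = true;
}

// Calls `attempt`; on failure triggers a collection and retries once,
// returning `fallback` if the retry also fails.
int CallAndRetry(const std::function<MaybeResult()>& attempt, int fallback) {
  MaybeResult result = attempt();
  if (result.ok) return result.value;
  CollectGarbage();
  result = attempt();
  return result.ok ? result.value : fallback;
}

int main() {
  // The first attempt "fails to allocate"; the retry after GC succeeds.
  int hash = CallAndRetry(
      [] { return g_gc_ran ? MaybeResult{true, 42} : MaybeResult{false, 0}; },
      /*fallback=*/0);
  std::printf("identity hash: %d\n", hash);
  return 0;
}

The real macro does more work around the retry; the sketch keeps only the retry-then-fallback shape that the return 0 fallback above depends on.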
 
 
@@ -505,7 +483,8 @@
     }
   }
   CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetElement(index, *value, strict_mode), Object);
+                     object->SetElement(index, *value, strict_mode, true),
+                     Object);
 }
 
 
@@ -533,22 +512,17 @@
 
 
 // Wrappers for scripts are kept alive and cached in weak global
-// handles referred from proxy objects held by the scripts as long as
+// handles referenced from foreign objects held by the scripts as long as
 // they are used. When they are not used anymore, the garbage
 // collector will call the weak callback on the global handle
 // associated with the wrapper and get rid of both the wrapper and the
 // handle.
 static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
-#ifdef ENABLE_HEAP_PROTECTION
-  // Weak reference callbacks are called as if from outside V8.  We
-  // need to reeenter to unprotect the heap.
-  VMState state(OTHER);
-#endif
   Handle<Object> cache = Utils::OpenHandle(*handle);
   JSValue* wrapper = JSValue::cast(*cache);
-  Proxy* proxy = Script::cast(wrapper->value())->wrapper();
-  ASSERT(proxy->proxy() == reinterpret_cast<Address>(cache.location()));
-  proxy->set_proxy(0);
+  Foreign* foreign = Script::cast(wrapper->value())->wrapper();
+  ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
+  foreign->set_address(0);
   Isolate* isolate = Isolate::Current();
   isolate->global_handles()->Destroy(cache.location());
   isolate->counters()->script_wrappers()->Decrement();
@@ -556,10 +530,10 @@
 
 
 Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
-  if (script->wrapper()->proxy() != NULL) {
+  if (script->wrapper()->address() != NULL) {
     // Return the script wrapper directly from the cache.
     return Handle<JSValue>(
-        reinterpret_cast<JSValue**>(script->wrapper()->proxy()));
+        reinterpret_cast<JSValue**>(script->wrapper()->address()));
   }
   Isolate* isolate = Isolate::Current();
   // Construct a new script wrapper.
@@ -575,7 +549,7 @@
   Handle<Object> handle = isolate->global_handles()->Create(*result);
   isolate->global_handles()->MakeWeak(handle.location(), NULL,
                                       &ClearWrapperCache);
-  script->wrapper()->set_proxy(reinterpret_cast<Address>(handle.location()));
+  script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
   return result;
 }
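
The wrapper cache above holds the cached wrapper only weakly: the Foreign stored in the script records the handle's address, and the weak callback clears that address once the wrapper is collected, so the next GetScriptWrapper call rebuilds it. A rough analogue of that weakly-held cache using std::weak_ptr; V8's Persistent/Foreign machinery works differently, this only shows the lifetime pattern:

// Rough analogue of a weakly-cached wrapper: the cache does not keep the
// wrapper alive, and a stale entry is repopulated on the next lookup.
#include <cstdio>
#include <memory>
#include <unordered_map>

struct Wrapper {
  explicit Wrapper(int id) : script_id(id) {}
  int script_id;
};

class WrapperCache {
 public:
  std::shared_ptr<Wrapper> Get(int script_id) {
    auto it = cache_.find(script_id);
    if (it != cache_.end()) {
      // lock() yields null once the wrapper has been destroyed, which is
      // the analogue of the weak callback clearing the Foreign address.
      if (std::shared_ptr<Wrapper> cached = it->second.lock()) return cached;
    }
    std::shared_ptr<Wrapper> fresh = std::make_shared<Wrapper>(script_id);
    cache_[script_id] = fresh;  // store a non-owning reference
    return fresh;
  }

 private:
  std::unordered_map<int, std::weak_ptr<Wrapper>> cache_;
};

int main() {
  WrapperCache cache;
  std::shared_ptr<Wrapper> w1 = cache.Get(7);
  std::shared_ptr<Wrapper> w2 = cache.Get(7);
  std::printf("same wrapper while alive: %d\n", w1 == w2 ? 1 : 0);
  w1.reset();
  w2.reset();  // last owner gone; the cache entry is now stale
  std::shared_ptr<Wrapper> w3 = cache.Get(7);
  std::printf("rebuilt after collection: %d\n", w3 != nullptr ? 1 : 0);
  return 0;
}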
 
@@ -643,15 +617,17 @@
   {
     AssertNoAllocation no_heap_allocation;  // ensure vectors stay valid.
     // Dispatch on type of strings.
-    if (src->IsAsciiRepresentation()) {
+    String::FlatContent content = src->GetFlatContent();
+    ASSERT(content.IsFlat());
+    if (content.IsAscii()) {
       CalculateLineEnds(isolate,
                         &line_ends,
-                        src->ToAsciiVector(),
+                        content.ToAsciiVector(),
                         with_last_line);
     } else {
       CalculateLineEnds(isolate,
                         &line_ends,
-                        src->ToUC16Vector(),
+                        content.ToUC16Vector(),
                         with_last_line);
     }
   }
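
After flattening, the code now asks the string for its content once and dispatches on the encoding instead of re-querying the string. A small sketch of that one-byte/two-byte dispatch, with a hypothetical FlatSpan standing in for String::FlatContent and a templated scanner shared by both encodings:

// Hypothetical stand-in for String::FlatContent: a flat string exposes
// either a one-byte (ASCII) or a two-byte (UC16) view of its characters.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct FlatSpan {
  const char* ascii;     // non-null for one-byte strings
  const uint16_t* uc16;  // non-null for two-byte strings
  size_t length;
  bool IsAscii() const { return ascii != nullptr; }
};

// One scanner works for both encodings; only the character type differs.
template <typename Char>
void CalculateLineEnds(const Char* chars, size_t length,
                       std::vector<size_t>* line_ends) {
  for (size_t i = 0; i < length; ++i) {
    if (chars[i] == '\n') line_ends->push_back(i);
  }
}

std::vector<size_t> LineEnds(const FlatSpan& content) {
  std::vector<size_t> line_ends;
  if (content.IsAscii()) {
    CalculateLineEnds(content.ascii, content.length, &line_ends);
  } else {
    CalculateLineEnds(content.uc16, content.length, &line_ends);
  }
  return line_ends;
}

int main() {
  const char* src = "a\nbc\n";
  FlatSpan span = {src, nullptr, 5};
  std::vector<size_t> ends = LineEnds(span);
  std::printf("line ends: %zu (at %zu, %zu)\n", ends.size(), ends[0], ends[1]);
  return 0;
}
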
@@ -909,6 +885,15 @@
 }
 
 
+Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
+                                               Handle<JSObject> key,
+                                               Handle<Object> value) {
+  CALL_HEAP_FUNCTION(table->GetIsolate(),
+                     table->Put(*key, *value),
+                     ObjectHashTable);
+}
+
+
 bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
                     ClearExceptionFlag flag) {
   return shared->is_compiled() || CompileLazyShared(shared, flag);
@@ -936,16 +921,13 @@
 }
 
 
-static bool CompileLazyFunction(Handle<JSFunction> function,
-                                ClearExceptionFlag flag,
-                                InLoopFlag in_loop_flag) {
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
   bool result = true;
   if (function->shared()->is_compiled()) {
     function->ReplaceCode(function->shared()->code());
     function->shared()->set_code_age(0);
   } else {
     CompilationInfo info(function);
-    if (in_loop_flag == IN_LOOP) info.MarkAsInLoop();
     result = CompileLazyHelper(&info, flag);
     ASSERT(!result || function->is_compiled());
   }
@@ -953,18 +935,6 @@
 }
 
 
-bool CompileLazy(Handle<JSFunction> function,
-                 ClearExceptionFlag flag) {
-  return CompileLazyFunction(function, flag, NOT_IN_LOOP);
-}
-
-
-bool CompileLazyInLoop(Handle<JSFunction> function,
-                       ClearExceptionFlag flag) {
-  return CompileLazyFunction(function, flag, IN_LOOP);
-}
-
-
 bool CompileOptimized(Handle<JSFunction> function,
                       int osr_ast_id,
                       ClearExceptionFlag flag) {
diff --git a/src/handles.h b/src/handles.h
index 3839f37..5674120 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_HANDLES_H_
 #define V8_HANDLES_H_
 
+#include "allocation.h"
 #include "apiutils.h"
 
 namespace v8 {
@@ -169,13 +170,14 @@
 void NormalizeProperties(Handle<JSObject> object,
                          PropertyNormalizationMode mode,
                          int expected_additional_properties);
-void NormalizeElements(Handle<JSObject> object);
+Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object);
 void TransformToFastProperties(Handle<JSObject> object,
                                int unused_property_fields);
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
-                         uint32_t index,
-                         Handle<Object> value,
-                         PropertyDetails details);
+MUST_USE_RESULT Handle<SeededNumberDictionary> SeededNumberDictionarySet(
+    Handle<SeededNumberDictionary> dictionary,
+    uint32_t index,
+    Handle<Object> value,
+    PropertyDetails details);
 
 // Flattens a string.
 void FlattenString(Handle<String> str);
@@ -184,7 +186,7 @@
 // string.
 Handle<String> FlattenGetString(Handle<String> str);
 
-Handle<Object> SetProperty(Handle<JSObject> object,
+Handle<Object> SetProperty(Handle<JSReceiver> object,
                            Handle<String> key,
                            Handle<Object> value,
                            PropertyAttributes attributes,
@@ -238,13 +240,13 @@
                              Handle<Object> value,
                              StrictModeFlag strict_mode);
 
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            const char* name);
 
 Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            Handle<String> name,
                            LookupResult* result);
 
@@ -262,9 +264,13 @@
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
 
 // Return the object's hidden properties object. If the object has no hidden
-// properties and create_if_needed is true, then a new hidden property object
-// will be allocated. Otherwise the Heap::undefined_value is returned.
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj, bool create_if_needed);
+// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
+// hidden property object will be allocated. Otherwise Heap::undefined_value
+// is returned.
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
+                                   JSObject::HiddenPropertiesFlag flag);
+
+int GetIdentityHash(Handle<JSObject> obj);
 
 Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
 Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@@ -341,6 +347,10 @@
 
 Handle<Object> PreventExtensions(Handle<JSObject> object);
 
+Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
+                                               Handle<JSObject> key,
+                                               Handle<Object> value);
+
 // Does lazy compilation of the given function. Returns true on success and
 // false if the compilation resulted in a stack overflow.
 enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
@@ -353,8 +363,6 @@
 
 bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
 
-bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
-
 bool CompileOptimized(Handle<JSFunction> function,
                       int osr_ast_id,
                       ClearExceptionFlag flag);
diff --git a/src/hashmap.h b/src/hashmap.h
index bb3e3ce..5c13212 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_HASHMAP_H_
 #define V8_HASHMAP_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 296cb05..7b666af 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,8 +29,9 @@
 #define V8_HEAP_INL_H_
 
 #include "heap.h"
-#include "objects.h"
 #include "isolate.h"
+#include "list-inl.h"
+#include "objects.h"
 #include "v8-counters.h"
 
 namespace v8 {
@@ -141,6 +142,11 @@
 }
 
 
+MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
+  return CopyFixedDoubleArrayWithMap(src, src->map());
+}
+
+
 MaybeObject* Heap::AllocateRaw(int size_in_bytes,
                                AllocationSpace space,
                                AllocationSpace retry_space) {
@@ -317,10 +323,10 @@
   ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
 
   if (type < FIRST_NONSTRING_TYPE) {
-    // There are three string representations: sequential strings, cons
-    // strings, and external strings.  Only cons strings contain
-    // non-map-word pointers to heap objects.
-    return ((type & kStringRepresentationMask) == kConsStringTag)
+    // There are four string representations: sequential strings, external
+    // strings, cons strings, and sliced strings.
+    // Only the latter two contain non-map-word pointers to heap objects.
+    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
         ? OLD_POINTER_SPACE
         : OLD_DATA_SPACE;
   } else {
@@ -525,8 +531,6 @@
   } while (false)
 
 
-// TODO(isolates): cache isolate: either accept as a parameter or
-//                 set to some known symbol (__CUR_ISOLATE__?)
 #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)       \
   CALL_AND_RETRY(ISOLATE,                                      \
                  FUNCTION_CALL,                                \
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 4815f82..7e613e9 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -28,294 +28,12 @@
 #include "v8.h"
 
 #include "heap-profiler.h"
-#include "frames-inl.h"
-#include "global-handles.h"
 #include "profile-generator.h"
-#include "string-stream.h"
 
 namespace v8 {
 namespace internal {
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-namespace {
-
-// Clusterizer is a set of helper functions for converting
-// object references into clusters.
-class Clusterizer : public AllStatic {
- public:
-  static JSObjectsCluster Clusterize(HeapObject* obj) {
-    return Clusterize(obj, true);
-  }
-  static void InsertIntoTree(JSObjectsClusterTree* tree,
-                             HeapObject* obj, bool fine_grain);
-  static void InsertReferenceIntoTree(JSObjectsClusterTree* tree,
-                                      const JSObjectsCluster& cluster) {
-    InsertIntoTree(tree, cluster, 0);
-  }
-
- private:
-  static JSObjectsCluster Clusterize(HeapObject* obj, bool fine_grain);
-  static int CalculateNetworkSize(JSObject* obj);
-  static int GetObjectSize(HeapObject* obj) {
-    return obj->IsJSObject() ?
-        CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
-  }
-  static void InsertIntoTree(JSObjectsClusterTree* tree,
-                             const JSObjectsCluster& cluster, int size);
-};
-
-
-JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
-  if (obj->IsJSObject()) {
-    JSObject* js_obj = JSObject::cast(obj);
-    String* constructor = GetConstructorNameForHeapProfile(
-        JSObject::cast(js_obj));
-    // Differentiate Object and Array instances.
-    if (fine_grain && (constructor == HEAP->Object_symbol() ||
-                       constructor == HEAP->Array_symbol())) {
-      return JSObjectsCluster(constructor, obj);
-    } else {
-      return JSObjectsCluster(constructor);
-    }
-  } else if (obj->IsString()) {
-    return JSObjectsCluster(HEAP->String_symbol());
-  } else if (obj->IsJSGlobalPropertyCell()) {
-    return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
-  } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
-    return JSObjectsCluster(JSObjectsCluster::CODE);
-  }
-  return JSObjectsCluster();
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
-                                 HeapObject* obj, bool fine_grain) {
-  JSObjectsCluster cluster = Clusterize(obj, fine_grain);
-  if (cluster.is_null()) return;
-  InsertIntoTree(tree, cluster, GetObjectSize(obj));
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
-                                 const JSObjectsCluster& cluster, int size) {
-  JSObjectsClusterTree::Locator loc;
-  tree->Insert(cluster, &loc);
-  NumberAndSizeInfo number_and_size = loc.value();
-  number_and_size.increment_number(1);
-  number_and_size.increment_bytes(size);
-  loc.set_value(number_and_size);
-}
-
-
-int Clusterizer::CalculateNetworkSize(JSObject* obj) {
-  int size = obj->Size();
-  // If 'properties' and 'elements' are non-empty (thus, non-shared),
-  // take their size into account.
-  if (obj->properties() != HEAP->empty_fixed_array()) {
-    size += obj->properties()->Size();
-  }
-  if (obj->elements() != HEAP->empty_fixed_array()) {
-    size += obj->elements()->Size();
-  }
-  // For functions, also account non-empty context and literals sizes.
-  if (obj->IsJSFunction()) {
-    JSFunction* f = JSFunction::cast(obj);
-    if (f->unchecked_context()->IsContext()) {
-      size += f->context()->Size();
-    }
-    if (f->literals()->length() != 0) {
-      size += f->literals()->Size();
-    }
-  }
-  return size;
-}
-
-
-// A helper class for recording back references.
-class ReferencesExtractor : public ObjectVisitor {
- public:
-  ReferencesExtractor(const JSObjectsCluster& cluster,
-                      RetainerHeapProfile* profile)
-      : cluster_(cluster),
-        profile_(profile),
-        inside_array_(false) {
-  }
-
-  void VisitPointer(Object** o) {
-    if ((*o)->IsFixedArray() && !inside_array_) {
-      // Traverse one level deep for data members that are fixed arrays.
-      // This covers the case of 'elements' and 'properties' of JSObject,
-      // and function contexts.
-      inside_array_ = true;
-      FixedArray::cast(*o)->Iterate(this);
-      inside_array_ = false;
-    } else if ((*o)->IsHeapObject()) {
-      profile_->StoreReference(cluster_, HeapObject::cast(*o));
-    }
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) VisitPointer(p);
-  }
-
- private:
-  const JSObjectsCluster& cluster_;
-  RetainerHeapProfile* profile_;
-  bool inside_array_;
-};
-
-
-// A printer interface implementation for the Retainers profile.
-class RetainersPrinter : public RetainerHeapProfile::Printer {
- public:
-  void PrintRetainers(const JSObjectsCluster& cluster,
-                      const StringStream& retainers) {
-    HeapStringAllocator allocator;
-    StringStream stream(&allocator);
-    cluster.Print(&stream);
-    LOG(ISOLATE,
-        HeapSampleJSRetainersEvent(
-        *(stream.ToCString()), *(retainers.ToCString())));
-  }
-};
-
-
-// Visitor for printing a cluster tree.
-class ClusterTreePrinter BASE_EMBEDDED {
- public:
-  explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
-  void Call(const JSObjectsCluster& cluster,
-            const NumberAndSizeInfo& number_and_size) {
-    Print(stream_, cluster, number_and_size);
-  }
-  static void Print(StringStream* stream,
-                    const JSObjectsCluster& cluster,
-                    const NumberAndSizeInfo& number_and_size);
-
- private:
-  StringStream* stream_;
-};
-
-
-void ClusterTreePrinter::Print(StringStream* stream,
-                               const JSObjectsCluster& cluster,
-                               const NumberAndSizeInfo& number_and_size) {
-  stream->Put(',');
-  cluster.Print(stream);
-  stream->Add(";%d", number_and_size.number());
-}
-
-
-// Visitor for printing a retainer tree.
-class SimpleRetainerTreePrinter BASE_EMBEDDED {
- public:
-  explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
-      : printer_(printer) {}
-  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
-  RetainerHeapProfile::Printer* printer_;
-};
-
-
-void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
-                                     JSObjectsClusterTree* tree) {
-  HeapStringAllocator allocator;
-  StringStream stream(&allocator);
-  ClusterTreePrinter retainers_printer(&stream);
-  tree->ForEach(&retainers_printer);
-  printer_->PrintRetainers(cluster, stream);
-}
-
-
-// Visitor for aggregating references count of equivalent clusters.
-class RetainersAggregator BASE_EMBEDDED {
- public:
-  RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
-      : coarser_(coarser), dest_tree_(dest_tree) {}
-  void Call(const JSObjectsCluster& cluster,
-            const NumberAndSizeInfo& number_and_size);
-
- private:
-  ClustersCoarser* coarser_;
-  JSObjectsClusterTree* dest_tree_;
-};
-
-
-void RetainersAggregator::Call(const JSObjectsCluster& cluster,
-                               const NumberAndSizeInfo& number_and_size) {
-  JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
-  if (eq.is_null()) eq = cluster;
-  JSObjectsClusterTree::Locator loc;
-  dest_tree_->Insert(eq, &loc);
-  NumberAndSizeInfo aggregated_number = loc.value();
-  aggregated_number.increment_number(number_and_size.number());
-  loc.set_value(aggregated_number);
-}
-
-
-// Visitor for printing retainers tree. Aggregates equivalent retainer clusters.
-class AggregatingRetainerTreePrinter BASE_EMBEDDED {
- public:
-  AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
-                                 RetainerHeapProfile::Printer* printer)
-      : coarser_(coarser), printer_(printer) {}
-  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
-  ClustersCoarser* coarser_;
-  RetainerHeapProfile::Printer* printer_;
-};
-
-
-void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
-                                          JSObjectsClusterTree* tree) {
-  if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
-  JSObjectsClusterTree dest_tree_;
-  RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
-  tree->ForEach(&retainers_aggregator);
-  HeapStringAllocator allocator;
-  StringStream stream(&allocator);
-  ClusterTreePrinter retainers_printer(&stream);
-  dest_tree_.ForEach(&retainers_printer);
-  printer_->PrintRetainers(cluster, stream);
-}
-
-}  // namespace
-
-
-// A helper class for building a retainers tree, that aggregates
-// all equivalent clusters.
-class RetainerTreeAggregator {
- public:
-  explicit RetainerTreeAggregator(ClustersCoarser* coarser)
-      : coarser_(coarser) {}
-  void Process(JSObjectsRetainerTree* input_tree) {
-    input_tree->ForEach(this);
-  }
-  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-  JSObjectsRetainerTree& output_tree() { return output_tree_; }
-
- private:
-  ClustersCoarser* coarser_;
-  JSObjectsRetainerTree output_tree_;
-};
-
-
-void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
-                                  JSObjectsClusterTree* tree) {
-  JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
-  if (eq.is_null()) return;
-  JSObjectsRetainerTree::Locator loc;
-  if (output_tree_.Insert(eq, &loc)) {
-    loc.set_value(new JSObjectsClusterTree());
-  }
-  RetainersAggregator retainers_aggregator(coarser_, loc.value());
-  tree->ForEach(&retainers_aggregator);
-}
-
-
 HeapProfiler::HeapProfiler()
     : snapshots_(new HeapSnapshotsCollection()),
       next_snapshot_uid_(1) {
@@ -333,29 +51,21 @@
 }
 
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 void HeapProfiler::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Isolate* isolate = Isolate::Current();
   if (isolate->heap_profiler() == NULL) {
     isolate->set_heap_profiler(new HeapProfiler());
   }
-#endif
 }
 
 
 void HeapProfiler::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Isolate* isolate = Isolate::Current();
   delete isolate->heap_profiler();
   isolate->set_heap_profiler(NULL);
-#endif
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
                                          int type,
                                          v8::ActivityControl* control) {
@@ -409,14 +119,6 @@
       generation_completed = generator.GenerateSnapshot();
       break;
     }
-    case HeapSnapshot::kAggregated: {
-      HEAP->CollectAllGarbage(true);
-      AggregatedHeapSnapshot agg_snapshot;
-      AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
-      generator.GenerateSnapshot();
-      generator.FillHeapSnapshot(result);
-      break;
-    }
     default:
       UNREACHABLE();
   }
@@ -469,705 +171,4 @@
 }
 
 
-const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
-const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
-
-
-ConstructorHeapProfile::ConstructorHeapProfile()
-    : zscope_(DELETE_ON_EXIT) {
-}
-
-
-void ConstructorHeapProfile::Call(const JSObjectsCluster& cluster,
-                                  const NumberAndSizeInfo& number_and_size) {
-  HeapStringAllocator allocator;
-  StringStream stream(&allocator);
-  cluster.Print(&stream);
-  LOG(ISOLATE,
-      HeapSampleJSConstructorEvent(*(stream.ToCString()),
-                                   number_and_size.number(),
-                                   number_and_size.bytes()));
-}
-
-
-void ConstructorHeapProfile::CollectStats(HeapObject* obj) {
-  Clusterizer::InsertIntoTree(&js_objects_info_tree_, obj, false);
-}
-
-
-void ConstructorHeapProfile::PrintStats() {
-  js_objects_info_tree_.ForEach(this);
-}
-
-
-static const char* GetConstructorName(const char* name) {
-  return name[0] != '\0' ? name : "(anonymous)";
-}
-
-
-const char* JSObjectsCluster::GetSpecialCaseName() const {
-  if (constructor_ == FromSpecialCase(ROOTS)) {
-    return "(roots)";
-  } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
-    return "(global property)";
-  } else if (constructor_ == FromSpecialCase(CODE)) {
-    return "(code)";
-  } else if (constructor_ == FromSpecialCase(SELF)) {
-    return "(self)";
-  }
-  return NULL;
-}
-
-
-void JSObjectsCluster::Print(StringStream* accumulator) const {
-  ASSERT(!is_null());
-  const char* special_case_name = GetSpecialCaseName();
-  if (special_case_name != NULL) {
-    accumulator->Add(special_case_name);
-  } else {
-    SmartPointer<char> s_name(
-        constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
-    accumulator->Add("%s", GetConstructorName(*s_name));
-    if (instance_ != NULL) {
-      accumulator->Add(":%p", static_cast<void*>(instance_));
-    }
-  }
-}
-
-
-void JSObjectsCluster::DebugPrint(StringStream* accumulator) const {
-  if (!is_null()) {
-    Print(accumulator);
-  } else {
-    accumulator->Add("(null cluster)");
-  }
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
-    const JSObjectsCluster& cluster_)
-    : cluster(cluster_), refs(kInitialBackrefsListCapacity) {
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
-    const ClustersCoarser::ClusterBackRefs& src)
-    : cluster(src.cluster), refs(src.refs.capacity()) {
-  refs.AddAll(src.refs);
-}
-
-
-inline ClustersCoarser::ClusterBackRefs&
-    ClustersCoarser::ClusterBackRefs::operator=(
-    const ClustersCoarser::ClusterBackRefs& src) {
-  if (this == &src) return *this;
-  cluster = src.cluster;
-  refs.Clear();
-  refs.AddAll(src.refs);
-  return *this;
-}
-
-
-inline int ClustersCoarser::ClusterBackRefs::Compare(
-    const ClustersCoarser::ClusterBackRefs& a,
-    const ClustersCoarser::ClusterBackRefs& b) {
-  int cmp = JSObjectsCluster::CompareConstructors(a.cluster, b.cluster);
-  if (cmp != 0) return cmp;
-  if (a.refs.length() < b.refs.length()) return -1;
-  if (a.refs.length() > b.refs.length()) return 1;
-  for (int i = 0; i < a.refs.length(); ++i) {
-    int cmp = JSObjectsCluster::Compare(a.refs[i], b.refs[i]);
-    if (cmp != 0) return cmp;
-  }
-  return 0;
-}
-
-
-ClustersCoarser::ClustersCoarser()
-    : zscope_(DELETE_ON_EXIT),
-      sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
-      current_pair_(NULL),
-      current_set_(NULL),
-      self_(NULL) {
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
-                           JSObjectsClusterTree* tree) {
-  if (!cluster.can_be_coarsed()) return;
-  ClusterBackRefs pair(cluster);
-  ASSERT(current_pair_ == NULL);
-  current_pair_ = &pair;
-  current_set_ = new JSObjectsRetainerTree();
-  self_ = &cluster;
-  tree->ForEach(this);
-  sim_list_.Add(pair);
-  current_pair_ = NULL;
-  current_set_ = NULL;
-  self_ = NULL;
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
-                           const NumberAndSizeInfo& number_and_size) {
-  ASSERT(current_pair_ != NULL);
-  ASSERT(current_set_ != NULL);
-  ASSERT(self_ != NULL);
-  JSObjectsRetainerTree::Locator loc;
-  if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
-    current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
-    return;
-  }
-  JSObjectsCluster eq = GetCoarseEquivalent(cluster);
-  if (!eq.is_null()) {
-    if (current_set_->Find(eq, &loc)) return;
-    current_pair_->refs.Add(eq);
-    current_set_->Insert(eq, &loc);
-  } else {
-    current_pair_->refs.Add(cluster);
-  }
-}
-
-
-void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
-  int last_eq_clusters = -1;
-  for (int i = 0; i < kMaxPassesCount; ++i) {
-    sim_list_.Clear();
-    const int curr_eq_clusters = DoProcess(tree);
-    // If no new cluster equivalents discovered, abort processing.
-    if (last_eq_clusters == curr_eq_clusters) break;
-    last_eq_clusters = curr_eq_clusters;
-  }
-}
-
-
-int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
-  tree->ForEach(this);
-  sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
-  sim_list_.Sort(ClusterBackRefsCmp);
-  return FillEqualityTree();
-}
-
-
-JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
-    const JSObjectsCluster& cluster) {
-  if (!cluster.can_be_coarsed()) return JSObjectsCluster();
-  EqualityTree::Locator loc;
-  return eq_tree_.Find(cluster, &loc) ? loc.value() : JSObjectsCluster();
-}
-
-
-bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
-  // Return true for coarsible clusters that have a non-identical equivalent.
-  if (!cluster.can_be_coarsed()) return false;
-  JSObjectsCluster eq = GetCoarseEquivalent(cluster);
-  return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
-}
-
-
-int ClustersCoarser::FillEqualityTree() {
-  int eq_clusters_count = 0;
-  int eq_to = 0;
-  bool first_added = false;
-  for (int i = 1; i < sim_list_.length(); ++i) {
-    if (ClusterBackRefs::Compare(sim_list_[i], sim_list_[eq_to]) == 0) {
-      EqualityTree::Locator loc;
-      if (!first_added) {
-        // Add self-equivalence, if we have more than one item in this
-        // equivalence class.
-        eq_tree_.Insert(sim_list_[eq_to].cluster, &loc);
-        loc.set_value(sim_list_[eq_to].cluster);
-        first_added = true;
-      }
-      eq_tree_.Insert(sim_list_[i].cluster, &loc);
-      loc.set_value(sim_list_[eq_to].cluster);
-      ++eq_clusters_count;
-    } else {
-      eq_to = i;
-      first_added = false;
-    }
-  }
-  return eq_clusters_count;
-}
-
-
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoKey;
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoValue;
-const JSObjectsRetainerTreeConfig::Key JSObjectsRetainerTreeConfig::kNoKey;
-const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
-    NULL;
-
-
-RetainerHeapProfile::RetainerHeapProfile()
-    : zscope_(DELETE_ON_EXIT),
-      aggregator_(NULL) {
-  JSObjectsCluster roots(JSObjectsCluster::ROOTS);
-  ReferencesExtractor extractor(roots, this);
-  HEAP->IterateRoots(&extractor, VISIT_ONLY_STRONG);
-}
-
-
-RetainerHeapProfile::~RetainerHeapProfile() {
-  delete aggregator_;
-}
-
-
-void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
-                                         HeapObject* ref) {
-  JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
-  if (ref_cluster.is_null()) return;
-  JSObjectsRetainerTree::Locator ref_loc;
-  if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
-    ref_loc.set_value(new JSObjectsClusterTree());
-  }
-  JSObjectsClusterTree* referenced_by = ref_loc.value();
-  Clusterizer::InsertReferenceIntoTree(referenced_by, cluster);
-}
-
-
-void RetainerHeapProfile::CollectStats(HeapObject* obj) {
-  const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
-  if (cluster.is_null()) return;
-  ReferencesExtractor extractor(cluster, this);
-  obj->Iterate(&extractor);
-}
-
-
-void RetainerHeapProfile::CoarseAndAggregate() {
-  coarser_.Process(&retainers_tree_);
-  ASSERT(aggregator_ == NULL);
-  aggregator_ = new RetainerTreeAggregator(&coarser_);
-  aggregator_->Process(&retainers_tree_);
-}
-
-
-void RetainerHeapProfile::DebugPrintStats(
-    RetainerHeapProfile::Printer* printer) {
-  // Print clusters that have no equivalents, aggregating their retainers.
-  AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
-  retainers_tree_.ForEach(&agg_printer);
-  // Print clusters that have equivalents.
-  SimpleRetainerTreePrinter s_printer(printer);
-  aggregator_->output_tree().ForEach(&s_printer);
-}
-
-
-void RetainerHeapProfile::PrintStats() {
-  RetainersPrinter printer;
-  DebugPrintStats(&printer);
-}
-
-
-//
-// HeapProfiler class implementation.
-//
-static void StackWeakReferenceCallback(Persistent<Value> object,
-                                       void* trace) {
-  DeleteArray(static_cast<Address*>(trace));
-  object.Dispose();
-}
-
-
-static void PrintProducerStackTrace(Object* obj, void* trace) {
-  if (!obj->IsJSObject()) return;
-  String* constructor = GetConstructorNameForHeapProfile(JSObject::cast(obj));
-  SmartPointer<char> s_name(
-      constructor->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
-  LOG(ISOLATE,
-      HeapSampleJSProducerEvent(GetConstructorName(*s_name),
-                                reinterpret_cast<Address*>(trace)));
-}
-
-
-void HeapProfiler::WriteSample() {
-  Isolate* isolate = Isolate::Current();
-  LOG(isolate, HeapSampleBeginEvent("Heap", "allocated"));
-  LOG(isolate,
-      HeapSampleStats(
-          "Heap", "allocated", HEAP->CommittedMemory(), HEAP->SizeOfObjects()));
-
-  AggregatedHeapSnapshot snapshot;
-  AggregatedHeapSnapshotGenerator generator(&snapshot);
-  generator.GenerateSnapshot();
-
-  HistogramInfo* info = snapshot.info();
-  for (int i = FIRST_NONSTRING_TYPE;
-       i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
-       ++i) {
-    if (info[i].bytes() > 0) {
-      LOG(isolate,
-          HeapSampleItemEvent(info[i].name(), info[i].number(),
-                              info[i].bytes()));
-    }
-  }
-
-  snapshot.js_cons_profile()->PrintStats();
-  snapshot.js_retainer_profile()->PrintStats();
-
-  isolate->global_handles()->IterateWeakRoots(PrintProducerStackTrace,
-                                              StackWeakReferenceCallback);
-
-  LOG(isolate, HeapSampleEndEvent("Heap", "allocated"));
-}
-
-
-AggregatedHeapSnapshot::AggregatedHeapSnapshot()
-    : info_(NewArray<HistogramInfo>(
-        AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) {
-#define DEF_TYPE_NAME(name) info_[name].set_name(#name);
-  INSTANCE_TYPE_LIST(DEF_TYPE_NAME);
-#undef DEF_TYPE_NAME
-  info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name(
-      "STRING_TYPE");
-}
-
-
-AggregatedHeapSnapshot::~AggregatedHeapSnapshot() {
-  DeleteArray(info_);
-}
-
-
-AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator(
-    AggregatedHeapSnapshot* agg_snapshot)
-    : agg_snapshot_(agg_snapshot) {
-}
-
-
-void AggregatedHeapSnapshotGenerator::CalculateStringsStats() {
-  HistogramInfo* info = agg_snapshot_->info();
-  HistogramInfo& strings = info[kAllStringsType];
-  // Lump all the string types together.
-#define INCREMENT_SIZE(type, size, name, camel_name)   \
-  strings.increment_number(info[type].number());       \
-  strings.increment_bytes(info[type].bytes());
-  STRING_TYPE_LIST(INCREMENT_SIZE);
-#undef INCREMENT_SIZE
-}
-
-
-void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
-  InstanceType type = obj->map()->instance_type();
-  ASSERT(0 <= type && type <= LAST_TYPE);
-  agg_snapshot_->info()[type].increment_number(1);
-  agg_snapshot_->info()[type].increment_bytes(obj->Size());
-}
-
-
-void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    CollectStats(obj);
-    agg_snapshot_->js_cons_profile()->CollectStats(obj);
-    agg_snapshot_->js_retainer_profile()->CollectStats(obj);
-  }
-  CalculateStringsStats();
-  agg_snapshot_->js_retainer_profile()->CoarseAndAggregate();
-}
-
-
-class CountingConstructorHeapProfileIterator {
- public:
-  CountingConstructorHeapProfileIterator()
-      : entities_count_(0), children_count_(0) {
-  }
-
-  void Call(const JSObjectsCluster& cluster,
-            const NumberAndSizeInfo& number_and_size) {
-    ++entities_count_;
-    children_count_ += number_and_size.number();
-  }
-
-  int entities_count() { return entities_count_; }
-  int children_count() { return children_count_; }
-
- private:
-  int entities_count_;
-  int children_count_;
-};
-
-
-static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot,
-                                                 int* root_child_index,
-                                                 HeapEntry::Type type,
-                                                 const char* name,
-                                                 int count,
-                                                 int size,
-                                                 int children_count,
-                                                 int retainers_count) {
-  HeapEntry* entry = snapshot->AddEntry(
-      type, name, count, size, children_count, retainers_count);
-  ASSERT(entry != NULL);
-  snapshot->root()->SetUnidirElementReference(*root_child_index,
-                                              *root_child_index + 1,
-                                              entry);
-  *root_child_index = *root_child_index + 1;
-  return entry;
-}
-
-
-class AllocatingConstructorHeapProfileIterator {
- public:
-  AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot,
-                                  int* root_child_index)
-      : snapshot_(snapshot),
-        root_child_index_(root_child_index) {
-  }
-
-  void Call(const JSObjectsCluster& cluster,
-            const NumberAndSizeInfo& number_and_size) {
-    const char* name = cluster.GetSpecialCaseName();
-    if (name == NULL) {
-      name = snapshot_->collection()->names()->GetFunctionName(
-          cluster.constructor());
-    }
-    AddEntryFromAggregatedSnapshot(snapshot_,
-                                   root_child_index_,
-                                   HeapEntry::kObject,
-                                   name,
-                                   number_and_size.number(),
-                                   number_and_size.bytes(),
-                                   0,
-                                   0);
-  }
-
- private:
-  HeapSnapshot* snapshot_;
-  int* root_child_index_;
-};
-
-
-static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) {
-  return cluster.can_be_coarsed() ?
-      reinterpret_cast<HeapObject*>(cluster.instance()) : cluster.constructor();
-}
-
-
-static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
-  if (object->IsString()) {
-    return JSObjectsCluster(String::cast(object));
-  } else {
-    JSObject* js_obj = JSObject::cast(object);
-    String* constructor = GetConstructorNameForHeapProfile(
-        JSObject::cast(js_obj));
-    return JSObjectsCluster(constructor, object);
-  }
-}
-
-
-class CountingRetainersIterator {
- public:
-  CountingRetainersIterator(const JSObjectsCluster& child_cluster,
-                            HeapEntriesAllocator* allocator,
-                            HeapEntriesMap* map)
-      : child_(ClusterAsHeapObject(child_cluster)),
-        allocator_(allocator),
-        map_(map) {
-    if (map_->Map(child_) == NULL)
-      map_->Pair(child_, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
-  }
-
-  void Call(const JSObjectsCluster& cluster,
-            const NumberAndSizeInfo& number_and_size) {
-    if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
-      map_->Pair(ClusterAsHeapObject(cluster),
-                 allocator_,
-                 HeapEntriesMap::kHeapEntryPlaceholder);
-    map_->CountReference(ClusterAsHeapObject(cluster), child_);
-  }
-
- private:
-  HeapObject* child_;
-  HeapEntriesAllocator* allocator_;
-  HeapEntriesMap* map_;
-};
-
-
-class AllocatingRetainersIterator {
- public:
-  AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
-                              HeapEntriesAllocator*,
-                              HeapEntriesMap* map)
-      : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
-    child_entry_ = map_->Map(child_);
-    ASSERT(child_entry_ != NULL);
-  }
-
-  void Call(const JSObjectsCluster& cluster,
-            const NumberAndSizeInfo& number_and_size) {
-    int child_index, retainer_index;
-    map_->CountReference(ClusterAsHeapObject(cluster),
-                         child_,
-                         &child_index,
-                         &retainer_index);
-    map_->Map(ClusterAsHeapObject(cluster))->SetIndexedReference(
-        HeapGraphEdge::kElement,
-        child_index,
-        number_and_size.number(),
-        child_entry_,
-        retainer_index);
-  }
-
- private:
-  HeapObject* child_;
-  HeapEntriesMap* map_;
-  HeapEntry* child_entry_;
-};
-
-
-template<class RetainersIterator>
-class AggregatingRetainerTreeIterator {
- public:
-  explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
-                                           HeapEntriesAllocator* allocator,
-                                           HeapEntriesMap* map)
-      : coarser_(coarser), allocator_(allocator), map_(map) {
-  }
-
-  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
-    if (coarser_ != NULL &&
-        !coarser_->GetCoarseEquivalent(cluster).is_null()) return;
-    JSObjectsClusterTree* tree_to_iterate = tree;
-    ZoneScope zs(DELETE_ON_EXIT);
-    JSObjectsClusterTree dest_tree_;
-    if (coarser_ != NULL) {
-      RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
-      tree->ForEach(&retainers_aggregator);
-      tree_to_iterate = &dest_tree_;
-    }
-    RetainersIterator iterator(cluster, allocator_, map_);
-    tree_to_iterate->ForEach(&iterator);
-  }
-
- private:
-  ClustersCoarser* coarser_;
-  HeapEntriesAllocator* allocator_;
-  HeapEntriesMap* map_;
-};
-
-
-class AggregatedRetainerTreeAllocator : public HeapEntriesAllocator {
- public:
-  AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
-                                  int* root_child_index)
-      : snapshot_(snapshot), root_child_index_(root_child_index) {
-  }
-  ~AggregatedRetainerTreeAllocator() { }
-
-  HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count) {
-    HeapObject* obj = reinterpret_cast<HeapObject*>(ptr);
-    JSObjectsCluster cluster = HeapObjectAsCluster(obj);
-    const char* name = cluster.GetSpecialCaseName();
-    if (name == NULL) {
-      name = snapshot_->collection()->names()->GetFunctionName(
-          cluster.constructor());
-    }
-    return AddEntryFromAggregatedSnapshot(
-        snapshot_, root_child_index_, HeapEntry::kObject, name,
-        0, 0, children_count, retainers_count);
-  }
-
- private:
-  HeapSnapshot* snapshot_;
-  int* root_child_index_;
-};
-
-
-template<class Iterator>
-void AggregatedHeapSnapshotGenerator::IterateRetainers(
-    HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map) {
-  RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
-  AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
-      p->coarser(), allocator, entries_map);
-  p->retainers_tree()->ForEach(&agg_ret_iter_1);
-  AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(
-      NULL, allocator, entries_map);
-  p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
-}
-
-
-void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
-  // Count the number of entities.
-  int histogram_entities_count = 0;
-  int histogram_children_count = 0;
-  int histogram_retainers_count = 0;
-  for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
-    if (agg_snapshot_->info()[i].bytes() > 0) {
-      ++histogram_entities_count;
-    }
-  }
-  CountingConstructorHeapProfileIterator counting_cons_iter;
-  agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
-  histogram_entities_count += counting_cons_iter.entities_count();
-  HeapEntriesMap entries_map;
-  int root_child_index = 0;
-  AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
-  IterateRetainers<CountingRetainersIterator>(&allocator, &entries_map);
-  histogram_entities_count += entries_map.entries_count();
-  histogram_children_count += entries_map.total_children_count();
-  histogram_retainers_count += entries_map.total_retainers_count();
-
-  // Root entry references all other entries.
-  histogram_children_count += histogram_entities_count;
-  int root_children_count = histogram_entities_count;
-  ++histogram_entities_count;
-
-  // Allocate and fill entries in the snapshot, allocate references.
-  snapshot->AllocateEntries(histogram_entities_count,
-                            histogram_children_count,
-                            histogram_retainers_count);
-  snapshot->AddRootEntry(root_children_count);
-  for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
-    if (agg_snapshot_->info()[i].bytes() > 0) {
-      AddEntryFromAggregatedSnapshot(snapshot,
-                                     &root_child_index,
-                                     HeapEntry::kHidden,
-                                     agg_snapshot_->info()[i].name(),
-                                     agg_snapshot_->info()[i].number(),
-                                     agg_snapshot_->info()[i].bytes(),
-                                     0,
-                                     0);
-    }
-  }
-  AllocatingConstructorHeapProfileIterator alloc_cons_iter(
-      snapshot, &root_child_index);
-  agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
-  entries_map.AllocateEntries();
-
-  // Fill up references.
-  IterateRetainers<AllocatingRetainersIterator>(&allocator, &entries_map);
-
-  snapshot->SetDominatorsToSelf();
-}
-
-
-void ProducerHeapProfile::Setup() {
-  can_log_ = true;
-}
-
-void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
-  ASSERT(FLAG_log_producers);
-  if (!can_log_) return;
-  int framesCount = 0;
-  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
-    ++framesCount;
-  }
-  if (framesCount == 0) return;
-  ++framesCount;  // Reserve place for the terminator item.
-  Vector<Address> stack(NewArray<Address>(framesCount), framesCount);
-  int i = 0;
-  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
-    stack[i++] = it.frame()->pc();
-  }
-  stack[i] = NULL;
-  Handle<Object> handle = isolate_->global_handles()->Create(obj);
-  isolate_->global_handles()->MakeWeak(handle.location(),
-                                       static_cast<void*>(stack.start()),
-                                       StackWeakReferenceCallback);
-}
-
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
-
 } }  // namespace v8::internal
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 89a2e8a..b1bc91c 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -29,13 +29,10 @@
 #define V8_HEAP_PROFILER_H_
 
 #include "isolate.h"
-#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 class HeapSnapshot;
 class HeapSnapshotsCollection;
 
@@ -46,9 +43,6 @@
       profiler->call;                                                        \
     }                                                                        \
   } while (false)
-#else
-#define HEAP_PROFILE(heap, call) ((void) 0)
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 // The HeapProfiler writes data to the log files, which can be postprocessed
 // to generate .hp files for use by the GHC/Valgrind tool hp2ps.
@@ -57,7 +51,6 @@
   static void Setup();
   static void TearDown();
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static HeapSnapshot* TakeSnapshot(const char* name,
                                     int type,
                                     v8::ActivityControl* control);
@@ -80,10 +73,6 @@
     return snapshots_->is_tracking_objects();
   }
 
-  // Obsolete interface.
-  // Write a single heap sample to the log file.
-  static void WriteSample();
-
  private:
   HeapProfiler();
   ~HeapProfiler();
@@ -98,299 +87,8 @@
   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
   List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
 };
 
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// JSObjectsCluster describes a group of JS objects that are
-// considered equivalent in terms of a particular profile.
-class JSObjectsCluster BASE_EMBEDDED {
- public:
-  // These special cases are used in retainer profile.
-  enum SpecialCase {
-    ROOTS = 1,
-    GLOBAL_PROPERTY = 2,
-    CODE = 3,
-    SELF = 100  // This case is used in ClustersCoarser only.
-  };
-
-  JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
-  explicit JSObjectsCluster(String* constructor)
-      : constructor_(constructor), instance_(NULL) {}
-  explicit JSObjectsCluster(SpecialCase special)
-      : constructor_(FromSpecialCase(special)), instance_(NULL) {}
-  JSObjectsCluster(String* constructor, Object* instance)
-      : constructor_(constructor), instance_(instance) {}
-
-  static int CompareConstructors(const JSObjectsCluster& a,
-                                 const JSObjectsCluster& b) {
-    // Strings are unique, so it is sufficient to compare their pointers.
-    return a.constructor_ == b.constructor_ ? 0
-        : (a.constructor_ < b.constructor_ ? -1 : 1);
-  }
-  static int Compare(const JSObjectsCluster& a, const JSObjectsCluster& b) {
-    // Strings are unique, so it is sufficient to compare their pointers.
-    const int cons_cmp = CompareConstructors(a, b);
-    return cons_cmp == 0 ?
-        (a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
-        : cons_cmp;
-  }
-  static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
-    return Compare(*a, *b);
-  }
-
-  bool is_null() const { return constructor_ == NULL; }
-  bool can_be_coarsed() const { return instance_ != NULL; }
-  String* constructor() const { return constructor_; }
-  Object* instance() const { return instance_; }
-
-  const char* GetSpecialCaseName() const;
-  void Print(StringStream* accumulator) const;
-  // Allows null clusters to be printed.
-  void DebugPrint(StringStream* accumulator) const;
-
- private:
-  static String* FromSpecialCase(SpecialCase special) {
-    // We use symbols that are illegal JS identifiers to identify special cases.
-    // Their actual value is irrelevant for us.
-    switch (special) {
-      case ROOTS: return HEAP->result_symbol();
-      case GLOBAL_PROPERTY: return HEAP->code_symbol();
-      case CODE: return HEAP->arguments_shadow_symbol();
-      case SELF: return HEAP->catch_var_symbol();
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-
-  String* constructor_;
-  Object* instance_;
-};
-
-
-struct JSObjectsClusterTreeConfig {
-  typedef JSObjectsCluster Key;
-  typedef NumberAndSizeInfo Value;
-  static const Key kNoKey;
-  static const Value kNoValue;
-  static int Compare(const Key& a, const Key& b) {
-    return Key::Compare(a, b);
-  }
-};
-typedef ZoneSplayTree<JSObjectsClusterTreeConfig> JSObjectsClusterTree;
-
-
-// ConstructorHeapProfile is responsible for gathering and logging
-// "constructor profile" of JS objects allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class ConstructorHeapProfile BASE_EMBEDDED {
- public:
-  ConstructorHeapProfile();
-  virtual ~ConstructorHeapProfile() {}
-  void CollectStats(HeapObject* obj);
-  void PrintStats();
-
-  template<class Callback>
-  void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); }
-  // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
-  virtual void Call(const JSObjectsCluster& cluster,
-                    const NumberAndSizeInfo& number_and_size);
-
- private:
-  ZoneScope zscope_;
-  JSObjectsClusterTree js_objects_info_tree_;
-};
-
-
-// JSObjectsRetainerTree is used to represent retainer graphs using
-// adjacency list form:
-//
-//   Cluster -> (Cluster -> NumberAndSizeInfo)
-//
-// Subordinate splay trees are stored by pointer. They are zone-allocated,
-// so it isn't needed to manage their lifetime.
-//
-struct JSObjectsRetainerTreeConfig {
-  typedef JSObjectsCluster Key;
-  typedef JSObjectsClusterTree* Value;
-  static const Key kNoKey;
-  static const Value kNoValue;
-  static int Compare(const Key& a, const Key& b) {
-    return Key::Compare(a, b);
-  }
-};
-typedef ZoneSplayTree<JSObjectsRetainerTreeConfig> JSObjectsRetainerTree;
-
-
-class ClustersCoarser BASE_EMBEDDED {
- public:
-  ClustersCoarser();
-
-  // Processes a given retainer graph.
-  void Process(JSObjectsRetainerTree* tree);
-
-  // Returns an equivalent cluster (can be the cluster itself).
-  // If the given cluster doesn't have an equivalent, returns null cluster.
-  JSObjectsCluster GetCoarseEquivalent(const JSObjectsCluster& cluster);
-  // Returns whether a cluster can be substitued with an equivalent and thus,
-  // skipped in some cases.
-  bool HasAnEquivalent(const JSObjectsCluster& cluster);
-
-  // Used by JSObjectsRetainerTree::ForEach.
-  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-  void Call(const JSObjectsCluster& cluster,
-            const NumberAndSizeInfo& number_and_size);
-
- private:
-  // Stores a list of back references for a cluster.
-  struct ClusterBackRefs {
-    explicit ClusterBackRefs(const JSObjectsCluster& cluster_);
-    ClusterBackRefs(const ClusterBackRefs& src);
-    ClusterBackRefs& operator=(const ClusterBackRefs& src);
-
-    static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
-    void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
-    static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
-
-    JSObjectsCluster cluster;
-    ZoneList<JSObjectsCluster> refs;
-  };
-  typedef ZoneList<ClusterBackRefs> SimilarityList;
-
-  // A tree for storing a list of equivalents for a cluster.
-  struct ClusterEqualityConfig {
-    typedef JSObjectsCluster Key;
-    typedef JSObjectsCluster Value;
-    static const Key kNoKey;
-    static const Value kNoValue;
-    static int Compare(const Key& a, const Key& b) {
-      return Key::Compare(a, b);
-    }
-  };
-  typedef ZoneSplayTree<ClusterEqualityConfig> EqualityTree;
-
-  static int ClusterBackRefsCmp(const ClusterBackRefs* a,
-                                const ClusterBackRefs* b) {
-    return ClusterBackRefs::Compare(*a, *b);
-  }
-  int DoProcess(JSObjectsRetainerTree* tree);
-  int FillEqualityTree();
-
-  static const int kInitialBackrefsListCapacity = 2;
-  static const int kInitialSimilarityListCapacity = 2000;
-  // Number of passes for finding equivalents. Limits the length of paths
-  // that can be considered equivalent.
-  static const int kMaxPassesCount = 10;
-
-  ZoneScope zscope_;
-  SimilarityList sim_list_;
-  EqualityTree eq_tree_;
-  ClusterBackRefs* current_pair_;
-  JSObjectsRetainerTree* current_set_;
-  const JSObjectsCluster* self_;
-};
-
-
-// RetainerHeapProfile is responsible for gathering and logging
-// "retainer profile" of JS objects allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class RetainerTreeAggregator;
-
-class RetainerHeapProfile BASE_EMBEDDED {
- public:
-  class Printer {
-   public:
-    virtual ~Printer() {}
-    virtual void PrintRetainers(const JSObjectsCluster& cluster,
-                                const StringStream& retainers) = 0;
-  };
-
-  RetainerHeapProfile();
-  ~RetainerHeapProfile();
-
-  RetainerTreeAggregator* aggregator() { return aggregator_; }
-  ClustersCoarser* coarser() { return &coarser_; }
-  JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; }
-
-  void CollectStats(HeapObject* obj);
-  void CoarseAndAggregate();
-  void PrintStats();
-  void DebugPrintStats(Printer* printer);
-  void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
-
- private:
-  ZoneScope zscope_;
-  JSObjectsRetainerTree retainers_tree_;
-  ClustersCoarser coarser_;
-  RetainerTreeAggregator* aggregator_;
-};
-
-
-class AggregatedHeapSnapshot {
- public:
-  AggregatedHeapSnapshot();
-  ~AggregatedHeapSnapshot();
-
-  HistogramInfo* info() { return info_; }
-  ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; }
-  RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; }
-
- private:
-  HistogramInfo* info_;
-  ConstructorHeapProfile js_cons_profile_;
-  RetainerHeapProfile js_retainer_profile_;
-};
-
-
-class HeapEntriesMap;
-class HeapEntriesAllocator;
-
-class AggregatedHeapSnapshotGenerator {
- public:
-  explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot);
-  void GenerateSnapshot();
-  void FillHeapSnapshot(HeapSnapshot* snapshot);
-
-  static const int kAllStringsType = LAST_TYPE + 1;
-
- private:
-  void CalculateStringsStats();
-  void CollectStats(HeapObject* obj);
-  template<class Iterator>
-  void IterateRetainers(
-      HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map);
-
-  AggregatedHeapSnapshot* agg_snapshot_;
-};
-
-
-class ProducerHeapProfile {
- public:
-  void Setup();
-  void RecordJSObjectAllocation(Object* obj) {
-    if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
-  }
-
- private:
-  ProducerHeapProfile() : can_log_(false) { }
-
-  void DoRecordJSObjectAllocation(Object* obj);
-  Isolate* isolate_;
-  bool can_log_;
-
-  friend class Isolate;
-
-  DISALLOW_COPY_AND_ASSIGN(ProducerHeapProfile);
-};
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_PROFILER_H_
diff --git a/src/heap.cc b/src/heap.cc
index 2b6c11f..c91f769 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -33,14 +33,14 @@
 #include "codegen.h"
 #include "compilation-cache.h"
 #include "debug.h"
-#include "heap-profiler.h"
+#include "deoptimizer.h"
 #include "global-handles.h"
+#include "heap-profiler.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
 #include "runtime-profiler.h"
-#include "scanner-base.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
 #include "v8threads.h"
@@ -80,14 +80,14 @@
       reserved_semispace_size_(16*MB),
       max_semispace_size_(16*MB),
       initial_semispace_size_(1*MB),
-      max_old_generation_size_(1*GB),
+      max_old_generation_size_(1400*MB),
       max_executable_size_(256*MB),
       code_range_size_(512*MB),
 #else
       reserved_semispace_size_(8*MB),
       max_semispace_size_(8*MB),
       initial_semispace_size_(512*KB),
-      max_old_generation_size_(512*MB),
+      max_old_generation_size_(700*MB),
       max_executable_size_(128*MB),
       code_range_size_(0),
 #endif
@@ -96,6 +96,7 @@
 // Will be 4 * reserved_semispace_size_ to ensure that young
 // generation can be aligned to its size.
       survived_since_last_expansion_(0),
+      sweep_generation_(0),
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
@@ -107,6 +108,7 @@
       cell_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
+      gc_post_processing_depth_(0),
       mc_count_(0),
       ms_count_(0),
       gc_count_(0),
@@ -152,6 +154,15 @@
   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
 #endif
 
+  intptr_t max_virtual = OS::MaxVirtualMemory();
+
+  if (max_virtual > 0) {
+    if (code_range_size_ > 0) {
+      // Reserve no more than 1/8 of the memory for the code range.
+      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
+    }
+  }
+
   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
   global_contexts_list_ = NULL;
   mark_compact_collector_.heap_ = this;
@@ -281,12 +292,11 @@
 
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 void Heap::ReportStatisticsBeforeGC() {
   // Heap::ReportHeapStatistics will also log NewSpace statistics when
-  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
-  // following logic is used to avoid double logging.
-#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+  // --log-gc is set.  The following logic is used to avoid double
+  // logging.
+#ifdef DEBUG
   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
   if (FLAG_heap_stats) {
     ReportHeapStatistics("Before GC");
@@ -294,23 +304,16 @@
     new_space_.ReportStatistics();
   }
   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
-#elif defined(DEBUG)
-  if (FLAG_heap_stats) {
-    new_space_.CollectStatistics();
-    ReportHeapStatistics("Before GC");
-    new_space_.ClearHistograms();
-  }
-#elif defined(ENABLE_LOGGING_AND_PROFILING)
+#else
   if (FLAG_log_gc) {
     new_space_.CollectStatistics();
     new_space_.ReportStatistics();
     new_space_.ClearHistograms();
   }
-#endif
+#endif  // DEBUG
 }
 
 
-#if defined(ENABLE_LOGGING_AND_PROFILING)
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
   PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
@@ -356,7 +359,6 @@
          lo_space_->Size(),
          lo_space_->Available());
 }
-#endif
 
 
 // TODO(1238405): Combine the infrastructure for --heap-stats and
@@ -364,20 +366,17 @@
 void Heap::ReportStatisticsAfterGC() {
   // Similar to the before GC, we use some complicated logic to ensure that
   // NewSpace statistics are logged exactly once when --log-gc is turned on.
-#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+#if defined(DEBUG)
   if (FLAG_heap_stats) {
     new_space_.CollectStatistics();
     ReportHeapStatistics("After GC");
   } else if (FLAG_log_gc) {
     new_space_.ReportStatistics();
   }
-#elif defined(DEBUG)
-  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
-#elif defined(ENABLE_LOGGING_AND_PROFILING)
+#else
   if (FLAG_log_gc) new_space_.ReportStatistics();
-#endif
+#endif  // DEBUG
 }
-#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 
 
 void Heap::GarbageCollectionPrologue() {
@@ -394,11 +393,11 @@
   }
 
   if (FLAG_gc_verbose) Print();
-#endif
+#endif  // DEBUG
 
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+#if defined(DEBUG)
   ReportStatisticsBeforeGC();
-#endif
+#endif  // DEBUG
 
   LiveObjectList::GCPrologue();
 }
@@ -435,12 +434,12 @@
       symbol_table()->Capacity());
   isolate_->counters()->number_of_symbols()->Set(
       symbol_table()->NumberOfElements());
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+#if defined(DEBUG)
   ReportStatisticsAfterGC();
-#endif
+#endif  // DEBUG
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->AfterGarbageCollection();
-#endif
+#endif  // ENABLE_DEBUGGER_SUPPORT
 }
 
 
@@ -513,11 +512,6 @@
     GarbageCollectionEpilogue();
   }
 
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (FLAG_log_gc) HeapProfiler::WriteSample();
-#endif
-
   return next_gc_likely_to_collect_more;
 }
 
@@ -736,7 +730,7 @@
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
-
+    sweep_generation_++;
     bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
         IsStableOrIncreasingSurvivalTrend();
 
@@ -771,12 +765,13 @@
 
   isolate_->counters()->objs_since_last_young()->Set(0);
 
-  if (collector == MARK_COMPACTOR) {
-    DisableAssertNoAllocation allow_allocation;
+  gc_post_processing_depth_++;
+  { DisableAssertNoAllocation allow_allocation;
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     next_gc_likely_to_collect_more =
-        isolate_->global_handles()->PostGarbageCollectionProcessing();
+        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
   }
+  gc_post_processing_depth_--;
 
   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing();
@@ -846,12 +841,16 @@
   isolate_->keyed_lookup_cache()->Clear();
   isolate_->context_slot_cache()->Clear();
   isolate_->descriptor_lookup_cache()->Clear();
+  StringSplitCache::Clear(string_split_cache());
 
   isolate_->compilation_cache()->MarkCompactPrologue();
 
   CompletelyClearInstanceofCache();
 
   if (is_compacting) FlushNumberStringCache();
+  if (FLAG_cleanup_code_caches_at_gc) {
+    polymorphic_code_cache()->set_cache(undefined_value());
+  }
 
   ClearNormalizedMapCaches();
 }
@@ -935,6 +934,12 @@
 }
 
 
+static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+  return heap->InNewSpace(*p) &&
+      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+}
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -1029,6 +1034,12 @@
   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
 
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+      &IsUnscavengedHeapObject);
+  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
+      &scavenge_visitor);
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
 
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1269,6 +1280,7 @@
     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
     table_.Register(kVisitByteArray, &EvacuateByteArray);
     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
 
     table_.Register(kVisitGlobalContext,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
@@ -1278,10 +1290,22 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<ConsString::kSize>);
 
+    table_.Register(kVisitSlicedString,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                        template VisitSpecialized<SlicedString::kSize>);
+
     table_.Register(kVisitSharedFunctionInfo,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<SharedFunctionInfo::kSize>);
 
+    table_.Register(kVisitJSWeakMap,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                    Visit);
+
+    table_.Register(kVisitJSRegExp,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                    Visit);
+
     table_.Register(kVisitJSFunction,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<JSFunction::kSize>);
@@ -1307,15 +1331,12 @@
   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
 
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
     bool should_record = false;
 #ifdef DEBUG
     should_record = FLAG_heap_stats;
 #endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
     should_record = should_record || FLAG_log_gc;
-#endif
     if (should_record) {
       if (heap->new_space()->Contains(obj)) {
         heap->new_space()->RecordAllocation(obj);
@@ -1324,7 +1345,6 @@
       }
     }
   }
-#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 
   // Helper function used by CopyObject to copy a source object to an
   // allocated target object and update the forwarding pointer in the source
@@ -1340,21 +1360,17 @@
     source->set_map_word(MapWord::FromForwardingAddress(target));
 
     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
       // Update NewSpace stats if necessary.
       RecordCopiedObject(heap, target);
-#endif
       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
-#if defined(ENABLE_LOGGING_AND_PROFILING)
       Isolate* isolate = heap->isolate();
       if (isolate->logger()->is_logging() ||
-          isolate->cpu_profiler()->is_profiling()) {
+          CpuProfiler::is_profiling(isolate)) {
         if (target->IsSharedFunctionInfo()) {
           PROFILE(isolate, SharedFunctionInfoMoveEvent(
               source->address(), target->address()));
         }
       }
-#endif
     }
 
     return target;
@@ -1416,6 +1432,18 @@
   }
 
 
+  static inline void EvacuateFixedDoubleArray(Map* map,
+                                              HeapObject** slot,
+                                              HeapObject* object) {
+    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+    int object_size = FixedDoubleArray::SizeFor(length);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
+                                              slot,
+                                              object,
+                                              object_size);
+  }
+
+
   static inline void EvacuateByteArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
@@ -1523,8 +1551,8 @@
     return;
   }
 
-  if (isolate()->logger()->is_logging() ||
-      isolate()->cpu_profiler()->is_profiling() ||
+  if (isolate()->logger()->is_logging() ||
+      CpuProfiler::is_profiling(isolate()) ||
       (isolate()->heap_profiler() != NULL &&
        isolate()->heap_profiler()->is_profiling())) {
     // If one of the isolates is doing scavenge at this moment of time
@@ -1593,12 +1621,13 @@
   map->set_instance_size(instance_size);
   map->set_inobject_properties(0);
   map->set_pre_allocated_property_fields(0);
-  map->set_instance_descriptors(empty_descriptor_array());
+  map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array());
   map->set_prototype_transitions(empty_fixed_array());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
-  map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
+  map->set_bit_field2(1 << Map::kIsExtensible);
+  map->set_elements_kind(FAST_ELEMENTS);
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -1622,6 +1651,11 @@
 }
 
 
+MaybeObject* Heap::AllocatePolymorphicCodeCache() {
+  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
+}
+
+
 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
   {type, size, k##camel_name##MapRootIndex},
@@ -1686,15 +1720,15 @@
   set_empty_descriptor_array(DescriptorArray::cast(obj));
 
   // Fix the instance_descriptors for the existing maps.
-  meta_map()->set_instance_descriptors(empty_descriptor_array());
+  meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
   meta_map()->set_prototype_transitions(empty_fixed_array());
 
-  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
+  fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
   fixed_array_map()->set_prototype_transitions(empty_fixed_array());
 
-  oddball_map()->set_instance_descriptors(empty_descriptor_array());
+  oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
   oddball_map()->set_prototype_transitions(empty_fixed_array());
 
@@ -1715,15 +1749,21 @@
   set_fixed_cow_array_map(Map::cast(obj));
   ASSERT(fixed_array_map() != fixed_cow_array_map());
 
+  { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_serialized_scope_info_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_heap_number_map(Map::cast(obj));
 
-  { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
+  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_proxy_map(Map::cast(obj));
+  set_foreign_map(Map::cast(obj));
 
   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
     const StringTypeTable& entry = string_type_table[i];
@@ -1747,6 +1787,12 @@
   Map::cast(obj)->set_is_undetectable();
 
   { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_fixed_double_array_map(Map::cast(obj));
+
+  { MaybeObject* maybe_obj =
         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -1805,6 +1851,18 @@
   }
   set_external_float_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_non_strict_arguments_elements_map(Map::cast(obj));
+
+  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
+                                         ExternalArray::kAlignedSize);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_external_double_array_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -1844,7 +1902,7 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_context_map(Map::cast(obj));
+  set_function_context_map(Map::cast(obj));
 
   { MaybeObject* maybe_obj =
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
@@ -1856,6 +1914,18 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
+  set_with_context_map(Map::cast(obj));
+
+  { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_block_context_map(Map::cast(obj));
+
+  { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
   Map* global_context_map = Map::cast(obj);
   global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
   set_global_context_map(global_context_map);
@@ -2102,26 +2172,31 @@
   }
   hidden_symbol_ = String::cast(obj);
 
-  // Allocate the proxy for __proto__.
+  // Allocate the foreign for __proto__.
   { MaybeObject* maybe_obj =
-        AllocateProxy((Address) &Accessors::ObjectPrototype);
+        AllocateForeign((Address) &Accessors::ObjectPrototype);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_prototype_accessors(Proxy::cast(obj));
+  set_prototype_accessors(Foreign::cast(obj));
 
   // Allocate the code_stubs dictionary. The initial size is set to avoid
   // expanding the dictionary during bootstrapping.
-  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
+  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_code_stubs(NumberDictionary::cast(obj));
+  set_code_stubs(UnseededNumberDictionary::cast(obj));
 
   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
   // is set to avoid expanding the dictionary during bootstrapping.
-  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
+  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_non_monomorphic_cache(NumberDictionary::cast(obj));
+  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
+
+  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
 
   set_instanceof_cache_function(Smi::FromInt(0));
   set_instanceof_cache_map(Smi::FromInt(0));
@@ -2148,6 +2223,13 @@
   }
   set_single_character_string_cache(FixedArray::cast(obj));
 
+  // Allocate cache for string split.
+  { MaybeObject* maybe_obj =
+        AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_string_split_cache(FixedArray::cast(obj));
+
   // Allocate cache for external strings pointing to native source code.
   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2173,6 +2255,75 @@
 }
 
 
+Object* StringSplitCache::Lookup(
+    FixedArray* cache, String* string, String* pattern) {
+  if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
+  uint32_t hash = string->Hash();
+  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+      ~(kArrayEntriesPerCacheEntry - 1));
+  if (cache->get(index + kStringOffset) == string &&
+      cache->get(index + kPatternOffset) == pattern) {
+    return cache->get(index + kArrayOffset);
+  }
+  index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
+  if (cache->get(index + kStringOffset) == string &&
+      cache->get(index + kPatternOffset) == pattern) {
+    return cache->get(index + kArrayOffset);
+  }
+  return Smi::FromInt(0);
+}
+
+
+void StringSplitCache::Enter(Heap* heap,
+                             FixedArray* cache,
+                             String* string,
+                             String* pattern,
+                             FixedArray* array) {
+  if (!string->IsSymbol() || !pattern->IsSymbol()) return;
+  uint32_t hash = string->Hash();
+  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+      ~(kArrayEntriesPerCacheEntry - 1));
+  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
+    cache->set(index + kStringOffset, string);
+    cache->set(index + kPatternOffset, pattern);
+    cache->set(index + kArrayOffset, array);
+  } else {
+    uint32_t index2 =
+        ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
+    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
+      cache->set(index2 + kStringOffset, string);
+      cache->set(index2 + kPatternOffset, pattern);
+      cache->set(index2 + kArrayOffset, array);
+    } else {
+      cache->set(index2 + kStringOffset, Smi::FromInt(0));
+      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
+      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
+      cache->set(index + kStringOffset, string);
+      cache->set(index + kPatternOffset, pattern);
+      cache->set(index + kArrayOffset, array);
+    }
+  }
+  if (array->length() < 100) {  // Limit how many new symbols we want to make.
+    for (int i = 0; i < array->length(); i++) {
+      String* str = String::cast(array->get(i));
+      Object* symbol;
+      MaybeObject* maybe_symbol = heap->LookupSymbol(str);
+      if (maybe_symbol->ToObject(&symbol)) {
+        array->set(i, symbol);
+      }
+    }
+  }
+  array->set_map(heap->fixed_cow_array_map());
+}
+
+
+void StringSplitCache::Clear(FixedArray* cache) {
+  for (int i = 0; i < kStringSplitCacheSize; i++) {
+    cache->set(i, Smi::FromInt(0));
+  }
+}
+
+
 MaybeObject* Heap::InitializeNumberStringCache() {
   // Compute the size of the number string cache based on the max heap size.
   // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
@@ -2293,6 +2444,8 @@
       return kExternalUnsignedIntArrayMapRootIndex;
     case kExternalFloatArray:
       return kExternalFloatArrayMapRootIndex;
+    case kExternalDoubleArray:
+      return kExternalDoubleArrayMapRootIndex;
     case kExternalPixelArray:
       return kExternalPixelArrayMapRootIndex;
     default:
@@ -2323,54 +2476,56 @@
 }
 
 
-MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
-  // Statically ensure that it is safe to allocate proxies in paged spaces.
-  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
+MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate foreigns in paged spaces.
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   Object* result;
-  { MaybeObject* maybe_result = Allocate(proxy_map(), space);
+  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  Proxy::cast(result)->set_proxy(proxy);
+  Foreign::cast(result)->set_address(address);
   return result;
 }
 
 
 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
-  Object* result;
-  { MaybeObject* maybe_result =
-        Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  SharedFunctionInfo* share;
+  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
+  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
 
-  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
+  // Set pointer fields.
   share->set_name(name);
   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   share->set_code(illegal);
   share->set_scope_info(SerializedScopeInfo::Empty());
-  Code* construct_stub = isolate_->builtins()->builtin(
-      Builtins::kJSConstructStubGeneric);
+  Code* construct_stub =
+      isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
   share->set_construct_stub(construct_stub);
-  share->set_expected_nof_properties(0);
-  share->set_length(0);
-  share->set_formal_parameter_count(0);
   share->set_instance_class_name(Object_symbol());
   share->set_function_data(undefined_value());
   share->set_script(undefined_value());
-  share->set_start_position_and_type(0);
   share->set_debug_info(undefined_value());
   share->set_inferred_name(empty_string());
-  share->set_compiler_hints(0);
-  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
   share->set_initial_map(undefined_value());
-  share->set_this_property_assignments_count(0);
   share->set_this_property_assignments(undefined_value());
-  share->set_opt_count(0);
+  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
+
+  // Set integer fields (smi or int, depending on the architecture).
+  share->set_length(0);
+  share->set_formal_parameter_count(0);
+  share->set_expected_nof_properties(0);
   share->set_num_literals(0);
+  share->set_start_position_and_type(0);
   share->set_end_position(0);
   share->set_function_token_position(0);
-  return result;
+  // All compiler hints default to false or 0.
+  share->set_compiler_hints(0);
+  share->set_this_property_assignments_count(0);
+  share->set_opt_count(0);
+
+  return share;
 }
 
 
@@ -2489,6 +2644,8 @@
 
   // If the resulting string is small make a flat string.
   if (length < String::kMinNonFlatLength) {
+    // Note that neither of the two inputs can be a slice because:
+    STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
     ASSERT(first->IsFlat());
     ASSERT(second->IsFlat());
     if (is_ascii) {
@@ -2560,12 +2717,13 @@
 
 
 MaybeObject* Heap::AllocateSubString(String* buffer,
-                                int start,
-                                int end,
-                                PretenureFlag pretenure) {
+                                     int start,
+                                     int end,
+                                     PretenureFlag pretenure) {
   int length = end - start;
-
-  if (length == 1) {
+  if (length == 0) {
+    return empty_string();
+  } else if (length == 1) {
     return LookupSingleCharacterStringFromCode(buffer->Get(start));
   } else if (length == 2) {
     // Optimization for 2-byte strings often used as keys in a decompression
@@ -2579,24 +2737,69 @@
   // Make an attempt to flatten the buffer to reduce access time.
   buffer = buffer->TryFlattenGetString();
 
-  Object* result;
-  { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
-                   ? AllocateRawAsciiString(length, pretenure )
-                   : AllocateRawTwoByteString(length, pretenure);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  String* string_result = String::cast(result);
-  // Copy the characters into the new object.
-  if (buffer->IsAsciiRepresentation()) {
-    ASSERT(string_result->IsAsciiRepresentation());
-    char* dest = SeqAsciiString::cast(string_result)->GetChars();
-    String::WriteToFlat(buffer, dest, start, end);
-  } else {
-    ASSERT(string_result->IsTwoByteRepresentation());
-    uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
-    String::WriteToFlat(buffer, dest, start, end);
+  // TODO(1626): For now slicing external strings is not supported.  However,
+  // a flat cons string can have an external string as first part in some cases.
+  // Therefore we have to single out this case as well.
+  if (!FLAG_string_slices ||
+      (buffer->IsConsString() &&
+        (!buffer->IsFlat() ||
+         !ConsString::cast(buffer)->first()->IsSeqString())) ||
+      buffer->IsExternalString() ||
+      length < SlicedString::kMinLength ||
+      pretenure == TENURED) {
+    Object* result;
+    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
+                     ? AllocateRawAsciiString(length, pretenure)
+                     : AllocateRawTwoByteString(length, pretenure);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+    String* string_result = String::cast(result);
+    // Copy the characters into the new object.
+    if (buffer->IsAsciiRepresentation()) {
+      ASSERT(string_result->IsAsciiRepresentation());
+      char* dest = SeqAsciiString::cast(string_result)->GetChars();
+      String::WriteToFlat(buffer, dest, start, end);
+    } else {
+      ASSERT(string_result->IsTwoByteRepresentation());
+      uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
+      String::WriteToFlat(buffer, dest, start, end);
+    }
+    return result;
   }
 
+  ASSERT(buffer->IsFlat());
+  ASSERT(!buffer->IsExternalString());
+#ifdef DEBUG
+  buffer->StringVerify();
+#endif
+
+  Object* result;
+  { Map* map = buffer->IsAsciiRepresentation()
+                 ? sliced_ascii_string_map()
+                 : sliced_string_map();
+    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+
+  AssertNoAllocation no_gc;
+  SlicedString* sliced_string = SlicedString::cast(result);
+  sliced_string->set_length(length);
+  sliced_string->set_hash_field(String::kEmptyHashField);
+  if (buffer->IsConsString()) {
+    ConsString* cons = ConsString::cast(buffer);
+    ASSERT(cons->second()->length() == 0);
+    sliced_string->set_parent(cons->first());
+    sliced_string->set_offset(start);
+  } else if (buffer->IsSlicedString()) {
+    // Prevent nesting sliced strings.
+    SlicedString* parent_slice = SlicedString::cast(buffer);
+    sliced_string->set_parent(parent_slice->parent());
+    sliced_string->set_offset(start + parent_slice->offset());
+  } else {
+    sliced_string->set_parent(buffer);
+    sliced_string->set_offset(start);
+  }
+  ASSERT(sliced_string->parent()->IsSeqString());
   return result;
 }
 
@@ -2792,6 +2995,7 @@
     code->set_check_type(RECEIVER_MAP_CHECK);
   }
   code->set_deoptimization_data(empty_fixed_array());
+  code->set_next_code_flushing_candidate(undefined_value());
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -2904,9 +3108,6 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   HeapObject::cast(result)->set_map(map);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
-#endif
   return result;
 }
 
@@ -3204,6 +3405,50 @@
 }
 
 
+MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
+  // Allocate map.
+  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
+  // maps. Will probably depend on the identity of the handler object, too.
+  Map* map;
+  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
+  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+  map->set_prototype(prototype);
+
+  // Allocate the proxy object.
+  JSProxy* result;
+  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
+  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
+  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
+  result->set_handler(handler);
+  return result;
+}
+
+
+MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
+                                           Object* call_trap,
+                                           Object* construct_trap,
+                                           Object* prototype) {
+  // Allocate map.
+  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
+  // maps. Will probably depend on the identity of the handler object, too.
+  Map* map;
+  MaybeObject* maybe_map_obj =
+      AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
+  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+  map->set_prototype(prototype);
+
+  // Allocate the proxy object.
+  JSFunctionProxy* result;
+  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
+  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
+  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
+  result->set_handler(handler);
+  result->set_call_trap(call_trap);
+  result->set_construct_trap(construct_trap);
+  return result;
+}
+
+
 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   ASSERT(constructor->has_initial_map());
   Map* map = constructor->initial_map();
@@ -3267,7 +3512,7 @@
 
   // Setup the global object as a normalized object.
   global->set_map(new_map);
-  global->map()->set_instance_descriptors(empty_descriptor_array());
+  global->map()->clear_instance_descriptors();
   global->set_properties(dictionary);
 
   // Make sure result is a global object with properties in dictionary.
@@ -3314,17 +3559,22 @@
               object_size);
   }
 
-  FixedArray* elements = FixedArray::cast(source->elements());
+  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
   if (elements->length() > 0) {
     Object* elem;
-    { MaybeObject* maybe_elem =
-          (elements->map() == fixed_cow_array_map()) ?
-          elements : CopyFixedArray(elements);
+    { MaybeObject* maybe_elem;
+      if (elements->map() == fixed_cow_array_map()) {
+        maybe_elem = FixedArray::cast(elements);
+      } else if (source->HasFastDoubleElements()) {
+        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+      } else {
+        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
+      }
       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
     }
-    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
+    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
   }
   // Update properties if necessary.
   if (properties->length() > 0) {
@@ -3335,13 +3585,65 @@
     JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
   }
   // Return the new clone.
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
-#endif
   return clone;
 }
 
 
+MaybeObject* Heap::ReinitializeJSReceiver(
+    JSReceiver* object, InstanceType type, int size) {
+  ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
+
+  // Allocate fresh map.
+  // TODO(rossberg): Once we optimize proxies, cache these maps.
+  Map* map;
+  MaybeObject* maybe_map_obj = AllocateMap(type, size);
+  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+
+  // Check that the receiver has at least the size of the fresh object.
+  int size_difference = object->map()->instance_size() - map->instance_size();
+  ASSERT(size_difference >= 0);
+
+  map->set_prototype(object->map()->prototype());
+
+  // Allocate the backing storage for the properties.
+  int prop_size = map->unused_property_fields() - map->inobject_properties();
+  Object* properties;
+  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
+    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+  }
+
+  // Reset the map for the object.
+  object->set_map(map);
+
+  // Reinitialize the object from the constructor map.
+  InitializeJSObjectFromMap(JSObject::cast(object),
+                            FixedArray::cast(properties), map);
+
+  // Functions require some minimal initialization.
+  if (type == JS_FUNCTION_TYPE) {
+    String* name;
+    MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
+    if (!maybe_name->To<String>(&name)) return maybe_name;
+    SharedFunctionInfo* shared;
+    MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
+    if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
+    JSFunction* func;
+    MaybeObject* maybe_func =
+        InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+    if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
+    func->set_context(isolate()->context()->global_context());
+  }
+
+  // Put in filler if the new object is smaller than the old.
+  if (size_difference > 0) {
+    CreateFillerObjectAt(
+        object->address() + map->instance_size(), size_difference);
+  }
+
+  return object;
+}
+
+
 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                              JSGlobalProxy* object) {
   ASSERT(constructor->has_initial_map());
@@ -3370,6 +3672,9 @@
 
 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
                                            PretenureFlag pretenure) {
+  if (string.length() == 1) {
+    return Heap::LookupSingleCharacterStringFromCode(string[0]);
+  }
   Object* result;
   { MaybeObject* maybe_result =
         AllocateRawAsciiString(string.length(), pretenure);
@@ -3656,6 +3961,23 @@
 }
 
 
+MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
+                                               Map* map) {
+  int len = src->length();
+  Object* obj;
+  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  HeapObject* dst = HeapObject::cast(obj);
+  dst->set_map(map);
+  CopyBlock(
+      dst->address() + FixedDoubleArray::kLengthOffset,
+      src->address() + FixedDoubleArray::kLengthOffset,
+      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+  return obj;
+}
+
+
 MaybeObject* Heap::AllocateFixedArray(int length) {
   ASSERT(length >= 0);
   if (length == 0) return empty_fixed_array();
@@ -3752,6 +4074,62 @@
 }
 
 
+MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
+  int size = FixedDoubleArray::SizeFor(0);
+  Object* result;
+  { MaybeObject* maybe_result =
+        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  // Initialize the object.
+  reinterpret_cast<FixedDoubleArray*>(result)->set_map(
+      fixed_double_array_map());
+  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
+  return result;
+}
+
+
+MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
+    int length,
+    PretenureFlag pretenure) {
+  if (length == 0) return empty_fixed_double_array();
+
+  Object* obj;
+  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+
+  reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
+  FixedDoubleArray::cast(obj)->set_length(length);
+  return obj;
+}
+
+
+MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
+                                               PretenureFlag pretenure) {
+  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
+    return Failure::OutOfMemoryException();
+  }
+
+  AllocationSpace space =
+      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+  int size = FixedDoubleArray::SizeFor(length);
+  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
+    // Too big for new space.
+    space = LO_SPACE;
+  } else if (space == OLD_DATA_SPACE &&
+             size > MaxObjectSizeInPagedSpace()) {
+    // Too big for old data space.
+    space = LO_SPACE;
+  }
+
+  AllocationSpace retry_space =
+      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+
+  return AllocateRaw(size, space, retry_space);
+}
+
+
 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
   Object* result;
   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
@@ -3784,38 +4162,80 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(context_map());
+  context->set_map(function_context_map());
   context->set_closure(function);
-  context->set_fcontext(context);
-  context->set_previous(NULL);
+  context->set_previous(function->context());
   context->set_extension(NULL);
   context->set_global(function->context()->global());
-  ASSERT(!context->IsGlobalContext());
-  ASSERT(context->is_function_context());
-  ASSERT(result->IsContext());
-  return result;
+  return context;
 }
 
 
-MaybeObject* Heap::AllocateWithContext(Context* previous,
-                                       JSObject* extension,
-                                       bool is_catch_context) {
+MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
+                                        Context* previous,
+                                        String* name,
+                                        Object* thrown_object) {
+  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
+  Object* result;
+  { MaybeObject* maybe_result =
+        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(catch_context_map());
+  context->set_closure(function);
+  context->set_previous(previous);
+  context->set_extension(name);
+  context->set_global(previous->global());
+  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
+  return context;
+}
+
+
+MaybeObject* Heap::AllocateWithContext(JSFunction* function,
+                                       Context* previous,
+                                       JSObject* extension) {
   Object* result;
   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(is_catch_context ? catch_context_map() :
-      context_map());
-  context->set_closure(previous->closure());
-  context->set_fcontext(previous->fcontext());
+  context->set_map(with_context_map());
+  context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(extension);
   context->set_global(previous->global());
-  ASSERT(!context->IsGlobalContext());
-  ASSERT(!context->is_function_context());
-  ASSERT(result->IsContext());
-  return result;
+  return context;
+}
+
+
+MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
+                                        Context* previous,
+                                        SerializedScopeInfo* scope_info) {
+  Object* result;
+  { MaybeObject* maybe_result =
+        AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(block_context_map());
+  context->set_closure(function);
+  context->set_previous(previous);
+  context->set_extension(scope_info);
+  context->set_global(previous->global());
+  return context;
+}
+
+
+MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
+  Object* result;
+  { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  SerializedScopeInfo* scope_info =
+      reinterpret_cast<SerializedScopeInfo*>(result);
+  scope_info->set_map(serialized_scope_info_map());
+  return scope_info;
 }
 
 
@@ -4139,6 +4559,26 @@
 }
 
 
+MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+                                     int from,
+                                     int length) {
+  Object* symbol = NULL;
+  Object* new_table;
+  { MaybeObject* maybe_new_table =
+        symbol_table()->LookupSubStringAsciiSymbol(string,
+                                                   from,
+                                                   length,
+                                                   &symbol);
+    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
+  }
+  // Can't use set_symbol_table because SymbolTable::cast knows that
+  // SymbolTable is a singleton and checks for identity.
+  roots_[kSymbolTableRootIndex] = new_table;
+  ASSERT(symbol != NULL);
+  return symbol;
+}
+
+
 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
   Object* symbol = NULL;
   Object* new_table;
@@ -4461,7 +4901,8 @@
 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
   v->Synchronize("symbol_table");
-  if (mode != VISIT_ALL_IN_SCAVENGE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE &&
+      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
   }
@@ -4485,6 +4926,9 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->Iterate(v);
+  if (isolate_->deoptimizer_data() != NULL) {
+    isolate_->deoptimizer_data()->Iterate(v);
+  }
 #endif
   v->Synchronize("debug");
   isolate_->compilation_cache()->Iterate(v);
@@ -4497,16 +4941,24 @@
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
   // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE &&
+      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
   v->Synchronize("builtins");
 
   // Iterate over global handles.
-  if (mode == VISIT_ONLY_STRONG) {
-    isolate_->global_handles()->IterateStrongRoots(v);
-  } else {
-    isolate_->global_handles()->IterateAllRoots(v);
+  switch (mode) {
+    case VISIT_ONLY_STRONG:
+      isolate_->global_handles()->IterateStrongRoots(v);
+      break;
+    case VISIT_ALL_IN_SCAVENGE:
+      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
+      break;
+    case VISIT_ALL_IN_SWEEP_NEWSPACE:
+    case VISIT_ALL:
+      isolate_->global_handles()->IterateAllRoots(v);
+      break;
   }
   v->Synchronize("globalhandles");
 
@@ -4910,6 +5362,17 @@
   if (lo_space_ == NULL) return false;
   if (!lo_space_->Setup()) return false;
 
+  // Setup the seed that is used to randomize the string hash function.
+  ASSERT(hash_seed() == 0);
+  if (FLAG_randomize_hashes) {
+    if (FLAG_hash_seed == 0) {
+      set_hash_seed(
+          Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
+    } else {
+      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
+    }
+  }
+
   if (create_heap_objects) {
     // Create initial maps.
     if (!CreateInitialMaps()) return false;
@@ -4924,11 +5387,6 @@
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  // This should be called only after initial objects have been created.
-  isolate_->producer_heap_profile()->Setup();
-#endif
-
   return true;
 }
 
@@ -5022,28 +5480,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void Heap::Protect() {
-  if (HasBeenSetup()) {
-    AllSpaces spaces;
-    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
-      space->Protect();
-  }
-}
-
-
-void Heap::Unprotect() {
-  if (HasBeenSetup()) {
-    AllSpaces spaces;
-    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
-      space->Unprotect();
-  }
-}
-
-#endif
-
-
 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
   ASSERT(callback != NULL);
   GCPrologueCallbackPair pair(callback, gc_type);
@@ -5739,9 +6175,7 @@
     PrintF("\n");
   }
 
-#if defined(ENABLE_LOGGING_AND_PROFILING)
   heap_->PrintShortHeapStatistics();
-#endif
 }
 
 
diff --git a/src/heap.h b/src/heap.h
index ae4e9e7..b1948a9 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include <math.h>
 
+#include "allocation.h"
 #include "globals.h"
 #include "list.h"
 #include "mark-compact.h"
@@ -64,7 +65,9 @@
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, global_context_map, GlobalContextMap)                                 \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
+  V(Map, serialized_scope_info_map, SerializedScopeInfoMap)                    \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
+  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
   V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel)       \
   V(Map, meta_map, MetaMap)                                                    \
   V(Map, hash_table_map, HashTableMap)                                         \
@@ -74,9 +77,12 @@
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
   V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
   V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
+  V(FixedArray, string_split_cache, StringSplitCache)                          \
   V(Object, termination_exception, TerminationException)                       \
+  V(Smi, hash_seed, HashSeed)                                                  \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
   V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray)         \
   V(String, empty_string, EmptyString)                                         \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   V(Map, string_map, StringMap)                                                \
@@ -84,6 +90,8 @@
   V(Map, symbol_map, SymbolMap)                                                \
   V(Map, cons_string_map, ConsStringMap)                                       \
   V(Map, cons_ascii_string_map, ConsAsciiStringMap)                            \
+  V(Map, sliced_string_map, SlicedStringMap)                                   \
+  V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                        \
   V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
   V(Map, cons_symbol_map, ConsSymbolMap)                                       \
   V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap)                            \
@@ -103,21 +111,26 @@
   V(Map, external_int_array_map, ExternalIntArrayMap)                          \
   V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap)         \
   V(Map, external_float_array_map, ExternalFloatArrayMap)                      \
-  V(Map, context_map, ContextMap)                                              \
+  V(Map, external_double_array_map, ExternalDoubleArrayMap)                    \
+  V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap)     \
+  V(Map, function_context_map, FunctionContextMap)                             \
   V(Map, catch_context_map, CatchContextMap)                                   \
+  V(Map, with_context_map, WithContextMap)                                     \
+  V(Map, block_context_map, BlockContextMap)                                   \
   V(Map, code_map, CodeMap)                                                    \
   V(Map, oddball_map, OddballMap)                                              \
   V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, message_object_map, JSMessageObjectMap)                               \
-  V(Map, proxy_map, ProxyMap)                                                  \
+  V(Map, foreign_map, ForeignMap)                                              \
   V(Object, nan_value, NanValue)                                               \
   V(Object, minus_zero_value, MinusZeroValue)                                  \
   V(Map, neander_map, NeanderMap)                                              \
   V(JSObject, message_listeners, MessageListeners)                             \
-  V(Proxy, prototype_accessors, PrototypeAccessors)                            \
-  V(NumberDictionary, code_stubs, CodeStubs)                                   \
-  V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache)              \
+  V(Foreign, prototype_accessors, PrototypeAccessors)                          \
+  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
+  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
+  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)        \
   V(Code, js_entry_code, JsEntryCode)                                          \
   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
   V(FixedArray, natives_source_cache, NativesSourceCache)                      \
@@ -137,7 +150,6 @@
   V(StringImpl_symbol, "StringImpl")                                     \
   V(arguments_symbol, "arguments")                                       \
   V(Arguments_symbol, "Arguments")                                       \
-  V(arguments_shadow_symbol, ".arguments")                               \
   V(call_symbol, "call")                                                 \
   V(apply_symbol, "apply")                                               \
   V(caller_symbol, "caller")                                             \
@@ -153,6 +165,8 @@
   V(function_symbol, "function")                                         \
   V(length_symbol, "length")                                             \
   V(name_symbol, "name")                                                 \
+  V(native_symbol, "native")                                             \
+  V(null_symbol, "null")                                                 \
   V(number_symbol, "number")                                             \
   V(Number_symbol, "Number")                                             \
   V(nan_symbol, "NaN")                                                   \
@@ -176,8 +190,14 @@
   V(value_of_symbol, "valueOf")                                          \
   V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
-  V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized")                 \
-  V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized")               \
+  V(KeyedLoadElementMonomorphic_symbol,                                  \
+    "KeyedLoadElementMonomorphic")                                       \
+  V(KeyedLoadElementPolymorphic_symbol,                                  \
+    "KeyedLoadElementPolymorphic")                                       \
+  V(KeyedStoreElementMonomorphic_symbol,                                 \
+    "KeyedStoreElementMonomorphic")                                      \
+  V(KeyedStoreElementPolymorphic_symbol,                                 \
+    "KeyedStoreElementPolymorphic")                                      \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
@@ -206,29 +226,8 @@
   V(identity_hash_symbol, "v8::IdentityHash")                            \
   V(closure_symbol, "(closure)")                                         \
   V(use_strict, "use strict")                                            \
-  V(KeyedLoadExternalByteArray_symbol, "KeyedLoadExternalByteArray")     \
-  V(KeyedLoadExternalUnsignedByteArray_symbol,                           \
-      "KeyedLoadExternalUnsignedByteArray")                              \
-  V(KeyedLoadExternalShortArray_symbol,                                  \
-      "KeyedLoadExternalShortArray")                                     \
-  V(KeyedLoadExternalUnsignedShortArray_symbol,                          \
-      "KeyedLoadExternalUnsignedShortArray")                             \
-  V(KeyedLoadExternalIntArray_symbol, "KeyedLoadExternalIntArray")       \
-  V(KeyedLoadExternalUnsignedIntArray_symbol,                            \
-       "KeyedLoadExternalUnsignedIntArray")                              \
-  V(KeyedLoadExternalFloatArray_symbol, "KeyedLoadExternalFloatArray")   \
-  V(KeyedLoadExternalPixelArray_symbol, "KeyedLoadExternalPixelArray")   \
-  V(KeyedStoreExternalByteArray_symbol, "KeyedStoreExternalByteArray")   \
-  V(KeyedStoreExternalUnsignedByteArray_symbol,                          \
-        "KeyedStoreExternalUnsignedByteArray")                           \
-  V(KeyedStoreExternalShortArray_symbol, "KeyedStoreExternalShortArray") \
-  V(KeyedStoreExternalUnsignedShortArray_symbol,                         \
-        "KeyedStoreExternalUnsignedShortArray")                          \
-  V(KeyedStoreExternalIntArray_symbol, "KeyedStoreExternalIntArray")     \
-  V(KeyedStoreExternalUnsignedIntArray_symbol,                           \
-        "KeyedStoreExternalUnsignedIntArray")                            \
-  V(KeyedStoreExternalFloatArray_symbol, "KeyedStoreExternalFloatArray") \
-  V(KeyedStoreExternalPixelArray_symbol, "KeyedStoreExternalPixelArray")
+  V(dot_symbol, ".")                                                     \
+  V(anonymous_function_symbol, "(anonymous function)")
 
 // Forward declarations.
 class GCTracer;
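
The root and symbol lists above are X-macros: each V(...) entry is expanded several times with different definitions of V to generate fields, accessors, and index constants. A minimal, self-contained sketch of that pattern, with illustrative names only (not V8's generated code):

#include <cstdio>

#define DEMO_ROOT_LIST(V)     \
  V(int, answer, Answer)      \
  V(double, ratio, Ratio)

struct DemoRoots {
  // First expansion: one field per list entry.
#define DECLARE_FIELD(type, name, camel_name) type name##_;
  DEMO_ROOT_LIST(DECLARE_FIELD)
#undef DECLARE_FIELD

  // Second expansion: one accessor per list entry.
#define DECLARE_GETTER(type, name, camel_name) \
  type name() const { return name##_; }
  DEMO_ROOT_LIST(DECLARE_GETTER)
#undef DECLARE_GETTER
};

int main() {
  DemoRoots roots = {42, 0.5};
  std::printf("%d %.1f\n", roots.answer(), roots.ratio());
  return 0;
}
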
@@ -417,12 +416,6 @@
   // Uncommit unused semi space.
   bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect the heap by marking all spaces read-only/writable.
-  void Protect();
-  void Unprotect();
-#endif
-
   // Allocates and initializes a new JavaScript object based on a
   // constructor.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -448,6 +441,26 @@
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
 
+  // Allocates a Harmony proxy or function proxy.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
+                                               Object* prototype);
+
+  MUST_USE_RESULT MaybeObject* AllocateJSFunctionProxy(Object* handler,
+                                                       Object* call_trap,
+                                                       Object* construct_trap,
+                                                       Object* prototype);
+
+  // Reinitialize a JSReceiver into an (empty) JS object of respective type and
+  // size, but keeping the original prototype.  The receiver must have at least
+  // the size of the new object.  The object is reinitialized and behaves as an
+  // object that has been freshly allocated.
+  MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
+                                                      InstanceType type,
+                                                      int size);
+
   // Reinitialize a JSGlobalProxy based on a constructor.  The object
   // must have the same size as objects allocated using the
   // constructor.  The object is reinitialized and behaves as an
@@ -485,6 +498,12 @@
   // Allocates an empty code cache.
   MUST_USE_RESULT MaybeObject* AllocateCodeCache();
 
+  // Allocates a serialized scope info.
+  MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length);
+
+  // Allocates an empty PolymorphicCodeCache.
+  MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
+
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
@@ -616,6 +635,16 @@
   // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
   MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
 
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT inline MaybeObject* CopyFixedDoubleArray(
+      FixedDoubleArray* src);
+
+  // Make a copy of src, set the map, and return the copy. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
+      FixedDoubleArray* src, Map* map);
+
   // Allocates a fixed array initialized with the hole values.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -624,6 +653,17 @@
       int length,
       PretenureFlag pretenure = NOT_TENURED);
 
+  MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
+      int length,
+      PretenureFlag pretenure);
+
+  // Allocates a fixed double array with uninitialized values. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  // Please note this does not perform a garbage collection.
+  MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedDoubleArray(
+      int length,
+      PretenureFlag pretenure = NOT_TENURED);
+
   // AllocateHashTable is identical to AllocateFixedArray except
   // that the resulting object has hash_table_map as map.
   MUST_USE_RESULT MaybeObject* AllocateHashTable(
@@ -634,12 +674,22 @@
 
   // Allocate a function context.
   MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
-                                                       JSFunction* closure);
+                                                       JSFunction* function);
 
+  // Allocate a catch context.
+  MUST_USE_RESULT MaybeObject* AllocateCatchContext(JSFunction* function,
+                                                    Context* previous,
+                                                    String* name,
+                                                    Object* thrown_object);
   // Allocate a 'with' context.
-  MUST_USE_RESULT MaybeObject* AllocateWithContext(Context* previous,
-                                                   JSObject* extension,
-                                                   bool is_catch_context);
+  MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
+                                                   Context* previous,
+                                                   JSObject* extension);
+
+  // Allocate a block context.
+  MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
+                                                    Context* previous,
+                                                    SerializedScopeInfo* info);
 
   // Allocates a new utility object in the old generation.
   MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
@@ -696,12 +746,12 @@
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
 
-  // Allocates a new proxy object.
+  // Allocates a new foreign object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateProxy(
-      Address proxy, PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT MaybeObject* AllocateForeign(
+      Address address, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates a new SharedFunctionInfo object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -792,12 +842,15 @@
   // Please note this function does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
   MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
-  MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(
-      Vector<const uc16> str);
+  MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
   MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
     return LookupSymbol(CStrVector(str));
   }
   MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
+  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
+                                                 int from,
+                                                 int length);
+
   bool LookupSymbolIfExists(String* str, String** symbol);
   bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
 
@@ -984,7 +1037,7 @@
   inline AllocationSpace TargetSpaceId(InstanceType type);
 
   // Sets the stub_cache_ (only used when expanding the dictionary).
-  void public_set_code_stubs(NumberDictionary* value) {
+  void public_set_code_stubs(UnseededNumberDictionary* value) {
     roots_[kCodeStubsRootIndex] = value;
   }
 
@@ -996,7 +1049,7 @@
   }
 
   // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
-  void public_set_non_monomorphic_cache(NumberDictionary* value) {
+  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
     roots_[kNonMonomorphicCacheRootIndex] = value;
   }
 
@@ -1030,10 +1083,8 @@
   void ZapFromSpace();
 #endif
 
-#if defined(ENABLE_LOGGING_AND_PROFILING)
   // Print short heap statistics.
   void PrintShortHeapStatistics();
-#endif
 
   // Makes a new symbol object
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1058,6 +1109,8 @@
   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
   inline HeapState gc_state() { return gc_state_; }
 
+  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+
 #ifdef DEBUG
   bool IsAllocationAllowed() { return allocation_allowed_; }
   inline bool allow_allocation(bool enable);
@@ -1232,6 +1285,11 @@
     return &external_string_table_;
   }
 
+  // Returns the current sweep generation.
+  int sweep_generation() {
+    return sweep_generation_;
+  }
+
   inline Isolate* isolate();
   bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
 
@@ -1243,6 +1301,12 @@
     if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
   }
 
+  uint32_t HashSeed() {
+    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
+    ASSERT(FLAG_randomize_hashes || seed == 0);
+    return seed;
+  }
+
  private:
   Heap();
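
HashSeed() above exposes the per-heap hash_seed root so that string hashing can be randomized per run when FLAG_randomize_hashes is set. A hedged sketch of how such a seed might be folded into a simple string hash; this is illustrative only, not V8's actual hash function:

#include <cstddef>
#include <cstdint>
#include <cstring>

uint32_t SeededStringHash(const char* str, uint32_t seed) {
  uint32_t hash = seed;  // A different per-heap seed yields different hashes per run.
  size_t length = std::strlen(str);
  for (size_t i = 0; i < length; ++i) {
    hash = (hash + static_cast<uint8_t>(str[i])) * 1000003u;
  }
  return hash;
}
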
 
@@ -1261,6 +1325,9 @@
   // scavenge since last new space expansion.
   int survived_since_last_expansion_;
 
+  // For keeping track on when to flush RegExp code.
+  int sweep_generation_;
+
   int always_allocate_scope_depth_;
   int linear_allocation_scope_depth_;
 
@@ -1281,6 +1348,7 @@
   CellSpace* cell_space_;
   LargeObjectSpace* lo_space_;
   HeapState gc_state_;
+  int gc_post_processing_depth_;
 
   // Returns the size of object residing in non new spaces.
   intptr_t PromotedSpaceSize();
@@ -1457,6 +1525,9 @@
   // Allocate empty fixed array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
 
+  // Allocate empty fixed double array.
+  MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+
   void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
 
   // Performs a minor collection in new generation.
@@ -1478,11 +1549,9 @@
   // around a GC).
   inline void CompletelyClearInstanceofCache();
 
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
   void ReportStatisticsAfterGC();
-#endif
 
   // Slow part of scavenge object.
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@@ -1605,6 +1674,7 @@
   friend class Page;
   friend class Isolate;
   friend class MarkCompactCollector;
+  friend class StaticMarkingVisitor;
   friend class MapCompact;
 
   DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -1634,7 +1704,7 @@
   int* weak_global_handle_count;        // 15
   int* pending_global_handle_count;     // 16
   int* near_death_global_handle_count;  // 17
-  int* destroyed_global_handle_count;   // 18
+  int* free_global_handle_count;        // 18
   intptr_t* memory_allocator_size;           // 19
   intptr_t* memory_allocator_capacity;       // 20
   int* objects_per_type;                // 21
@@ -1902,6 +1972,7 @@
   void Clear();
 
   static const int kAbsent = -2;
+
  private:
   DescriptorLookupCache() {
     for (int i = 0; i < kLength; ++i) {
@@ -2120,6 +2191,27 @@
 };
 
 
+class StringSplitCache {
+ public:
+  static Object* Lookup(FixedArray* cache, String* string, String* pattern);
+  static void Enter(Heap* heap,
+                    FixedArray* cache,
+                    String* string,
+                    String* pattern,
+                    FixedArray* array);
+  static void Clear(FixedArray* cache);
+  static const int kStringSplitCacheSize = 0x100;
+
+ private:
+  static const int kArrayEntriesPerCacheEntry = 4;
+  static const int kStringOffset = 0;
+  static const int kPatternOffset = 1;
+  static const int kArrayOffset = 2;
+
+  static MaybeObject* WrapFixedArrayInJSArray(Object* fixed_array);
+};
+
+
 class TranscendentalCache {
  public:
   enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
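
StringSplitCache above is declared as a flat FixedArray cache: each entry spans kArrayEntriesPerCacheEntry consecutive slots holding the subject string, the split pattern, and the cached result array. A hedged, simplified sketch of that layout using standard containers; the indexing scheme here is illustrative and not V8's actual hashing:

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

class DemoSplitCache {
 public:
  static const size_t kSize = 0x100;  // Mirrors kStringSplitCacheSize above.

  // Returns the cached split result, or NULL if (subject, pattern) is absent.
  const std::vector<std::string>* Lookup(const std::string& subject,
                                         const std::string& pattern) const {
    const Entry& e = entries_[Index(subject, pattern)];
    if (e.used && e.subject == subject && e.pattern == pattern) {
      return &e.result;
    }
    return NULL;
  }

  void Enter(const std::string& subject, const std::string& pattern,
             const std::vector<std::string>& result) {
    Entry& e = entries_[Index(subject, pattern)];
    e.subject = subject;
    e.pattern = pattern;
    e.result = result;
    e.used = true;
  }

 private:
  struct Entry {
    std::string subject;
    std::string pattern;
    std::vector<std::string> result;
    bool used;
    Entry() : used(false) {}
  };

  static size_t Index(const std::string& subject, const std::string& pattern) {
    std::hash<std::string> hasher;
    return (hasher(subject) ^ (hasher(pattern) * 31)) % kSize;
  }

  Entry entries_[kSize];
};
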
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 2cf60d7..5630ce3 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -60,12 +60,20 @@
     case kDouble: return "d";
     case kInteger32: return "i";
     case kExternal: return "x";
-    case kNumRepresentations:
+    default:
       UNREACHABLE();
       return NULL;
   }
-  UNREACHABLE();
-  return NULL;
+}
+
+
+void HValue::AssumeRepresentation(Representation r) {
+  if (CheckFlag(kFlexibleRepresentation)) {
+    ChangeRepresentation(r);
+    // The representation of the value is dictated by type feedback and
+    // will not be changed later.
+    ClearFlag(kFlexibleRepresentation);
+  }
 }
 
 
@@ -264,31 +272,60 @@
 }
 
 
-int HValue::LookupOperandIndex(int occurrence_index, HValue* op) {
-  for (int i = 0; i < OperandCount(); ++i) {
-    if (OperandAt(i) == op) {
-      if (occurrence_index == 0) return i;
-      --occurrence_index;
-    }
-  }
-  return -1;
-}
-
-
 bool HValue::IsDefinedAfter(HBasicBlock* other) const {
   return block()->block_id() > other->block_id();
 }
 
 
-bool HValue::UsesMultipleTimes(HValue* op) {
-  bool seen = false;
-  for (int i = 0; i < OperandCount(); ++i) {
-    if (OperandAt(i) == op) {
-      if (seen) return true;
-      seen = true;
-    }
+HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
+  Advance();
+}
+
+
+void HUseIterator::Advance() {
+  current_ = next_;
+  if (current_ != NULL) {
+    next_ = current_->tail();
+    value_ = current_->value();
+    index_ = current_->index();
   }
-  return false;
+}
+
+
+int HValue::UseCount() const {
+  int count = 0;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count;
+  return count;
+}
+
+
+HUseListNode* HValue::RemoveUse(HValue* value, int index) {
+  HUseListNode* previous = NULL;
+  HUseListNode* current = use_list_;
+  while (current != NULL) {
+    if (current->value() == value && current->index() == index) {
+      if (previous == NULL) {
+        use_list_ = current->tail();
+      } else {
+        previous->set_tail(current->tail());
+      }
+      break;
+    }
+
+    previous = current;
+    current = current->tail();
+  }
+
+#ifdef DEBUG
+  // Do not reuse use list nodes in debug mode, zap them.
+  if (current != NULL) {
+    HUseListNode* temp =
+        new HUseListNode(current->value(), current->index(), NULL);
+    current->Zap();
+    current = temp;
+  }
+#endif
+  return current;
 }
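
HValue::RemoveUse above unlinks the first node matching (value, index) from an intrusive, singly-linked use list and hands the node back so it can be reused. A minimal stand-alone sketch of that unlink step; the node type is a simplified stand-in for HUseListNode:

#include <cstddef>

struct UseNode {
  void* value;    // The using instruction (HValue* in the real code).
  int index;      // Which operand slot of the user refers to this value.
  UseNode* tail;  // Next node in the list.
};

// Unlink and return the first node matching (value, index), or NULL if none.
UseNode* RemoveUse(UseNode** head, void* value, int index) {
  UseNode* previous = NULL;
  UseNode* current = *head;
  while (current != NULL) {
    if (current->value == value && current->index == index) {
      if (previous == NULL) {
        *head = current->tail;           // Matched the head node.
      } else {
        previous->tail = current->tail;  // Bypass the matched node.
      }
      return current;                    // Caller may splice this node elsewhere.
    }
    previous = current;
    current = current->tail;
  }
  return NULL;
}
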
 
 
@@ -317,71 +354,48 @@
 }
 
 
+const char* HValue::Mnemonic() const {
+  switch (opcode()) {
+#define MAKE_CASE(type) case k##type: return #type;
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE)
+#undef MAKE_CASE
+    case kPhi: return "Phi";
+    default: return "";
+  }
+}
+
+
 void HValue::SetOperandAt(int index, HValue* value) {
-  ASSERT(value == NULL || !value->representation().IsNone());
   RegisterUse(index, value);
   InternalSetOperandAt(index, value);
 }
 
 
-void HValue::ReplaceAndDelete(HValue* other) {
-  if (other != NULL) ReplaceValue(other);
-  Delete();
-}
-
-
-void HValue::ReplaceValue(HValue* other) {
-  for (int i = 0; i < uses_.length(); ++i) {
-    HValue* use = uses_[i];
-    ASSERT(!use->block()->IsStartBlock());
-    InternalReplaceAtUse(use, other);
-    other->uses_.Add(use);
-  }
-  uses_.Rewind(0);
-}
-
-
-void HValue::ClearOperands() {
-  for (int i = 0; i < OperandCount(); ++i) {
-    SetOperandAt(i, NULL);
-  }
-}
-
-
-void HValue::Delete() {
+void HValue::DeleteAndReplaceWith(HValue* other) {
+  // We replace all uses first, so Delete can assert that there are none.
+  if (other != NULL) ReplaceAllUsesWith(other);
   ASSERT(HasNoUses());
   ClearOperands();
   DeleteFromGraph();
 }
 
 
-void HValue::ReplaceAtUse(HValue* use, HValue* other) {
-  for (int i = 0; i < use->OperandCount(); ++i) {
-    if (use->OperandAt(i) == this) {
-      use->SetOperandAt(i, other);
-    }
+void HValue::ReplaceAllUsesWith(HValue* other) {
+  while (use_list_ != NULL) {
+    HUseListNode* list_node = use_list_;
+    HValue* value = list_node->value();
+    ASSERT(!value->block()->IsStartBlock());
+    value->InternalSetOperandAt(list_node->index(), other);
+    use_list_ = list_node->tail();
+    list_node->set_tail(other->use_list_);
+    other->use_list_ = list_node;
   }
 }
 
 
-void HValue::ReplaceFirstAtUse(HValue* use, HValue* other, Representation r) {
-  for (int i = 0; i < use->OperandCount(); ++i) {
-    if (use->RequiredInputRepresentation(i).Equals(r) &&
-        use->OperandAt(i) == this) {
-      use->SetOperandAt(i, other);
-      return;
-    }
-  }
-}
-
-
-void HValue::InternalReplaceAtUse(HValue* use, HValue* other) {
-  for (int i = 0; i < use->OperandCount(); ++i) {
-    if (use->OperandAt(i) == this) {
-      // Call internal method that does not update use lists. The caller is
-      // responsible for doing so.
-      use->InternalSetOperandAt(i, other);
-    }
+void HValue::ClearOperands() {
+  for (int i = 0; i < OperandCount(); ++i) {
+    SetOperandAt(i, NULL);
   }
 }
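
ReplaceAllUsesWith above retargets every use without allocating: each node is popped off this value's list, its operand slot is rewritten, and the node is pushed onto the front of the other value's list. A hedged sketch of that splice, reusing the UseNode stand-in from the previous sketch:

// Move every node from *from_head onto the front of *to_head.
void SpliceAllUses(UseNode** from_head, UseNode** to_head) {
  while (*from_head != NULL) {
    UseNode* node = *from_head;
    *from_head = node->tail;  // Pop from the source list.
    node->tail = *to_head;    // Push onto the destination list.
    *to_head = node;
  }
}
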
 
@@ -395,8 +409,39 @@
 }
 
 
-void HValue::PrintTypeTo(HType type, StringStream* stream) {
-  stream->Add(type.ToShortString());
+void HValue::PrintTypeTo(StringStream* stream) {
+  if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
+  stream->Add(" type[%s]", type().ToString());
+}
+
+
+void HValue::PrintRangeTo(StringStream* stream) {
+  if (range() == NULL || range()->IsMostGeneric()) return;
+  stream->Add(" range[%d,%d,m0=%d]",
+              range()->lower(),
+              range()->upper(),
+              static_cast<int>(range()->CanBeMinusZero()));
+}
+
+
+void HValue::PrintChangesTo(StringStream* stream) {
+  int changes_flags = ChangesFlags();
+  if (changes_flags == 0) return;
+  stream->Add(" changes[");
+  if (changes_flags == AllSideEffects()) {
+    stream->Add("*");
+  } else {
+    bool add_comma = false;
+#define PRINT_DO(type)                         \
+    if (changes_flags & (1 << kChanges##type)) { \
+      if (add_comma) stream->Add(",");           \
+      add_comma = true;                          \
+      stream->Add(#type);                        \
+    }
+    GVN_FLAG_LIST(PRINT_DO);
+#undef PRINT_DO
+  }
+  stream->Add("]");
 }
 
 
@@ -416,9 +461,20 @@
 void HValue::RegisterUse(int index, HValue* new_value) {
   HValue* old_value = OperandAt(index);
   if (old_value == new_value) return;
-  if (old_value != NULL) old_value->uses_.RemoveElement(this);
+
+  HUseListNode* removed = NULL;
+  if (old_value != NULL) {
+    removed = old_value->RemoveUse(this, index);
+  }
+
   if (new_value != NULL) {
-    new_value->uses_.Add(this);
+    if (removed == NULL) {
+      new_value->use_list_ =
+          new HUseListNode(this, index, new_value->use_list_);
+    } else {
+      removed->set_tail(new_value->use_list_);
+      new_value->use_list_ = removed;
+    }
   }
 }
 
@@ -447,28 +503,16 @@
 
 
 void HInstruction::PrintTo(StringStream* stream) {
-  stream->Add("%s", Mnemonic());
-  if (HasSideEffects()) stream->Add("*");
-  stream->Add(" ");
+  PrintMnemonicTo(stream);
   PrintDataTo(stream);
+  PrintRangeTo(stream);
+  PrintChangesTo(stream);
+  PrintTypeTo(stream);
+}
 
-  if (range() != NULL &&
-      !range()->IsMostGeneric() &&
-      !range()->CanBeMinusZero()) {
-    stream->Add(" range[%d,%d,m0=%d]",
-                range()->lower(),
-                range()->upper(),
-                static_cast<int>(range()->CanBeMinusZero()));
-  }
 
-  int changes_flags = (flags() & HValue::ChangesFlagsMask());
-  if (changes_flags != 0) {
-    stream->Add(" changes[0x%x]", changes_flags);
-  }
-
-  if (representation().IsTagged() && !type().Equals(HType::Tagged())) {
-    stream->Add(" type[%s]", type().ToString());
-  }
+void HInstruction::PrintMnemonicTo(StringStream* stream) {
+  stream->Add("%s ", Mnemonic());
 }
 
 
@@ -553,6 +597,8 @@
         ASSERT(cur == other_operand);
       }
     } else {
+      // If the following assert fires, you may have forgotten an
+      // AddInstruction.
       ASSERT(other_block->Dominates(cur_block));
     }
   }
@@ -587,6 +633,13 @@
 }
 
 
+void HBoundsCheck::PrintDataTo(StringStream* stream) {
+  index()->PrintNameTo(stream);
+  stream->Add(" ");
+  length()->PrintNameTo(stream);
+}
+
+
 void HCallConstantFunction::PrintDataTo(StringStream* stream) {
   if (IsApplyFunction()) {
     stream->Add("optimized apply ");
@@ -621,7 +674,7 @@
 }
 
 
-void HClassOfTest::PrintDataTo(StringStream* stream) {
+void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("class_of_test(");
   value()->PrintNameTo(stream);
   stream->Add(", \"%o\")", *class_name());
@@ -638,15 +691,13 @@
 
 
 void HControlInstruction::PrintDataTo(StringStream* stream) {
-  if (FirstSuccessor() != NULL) {
-    int first_id = FirstSuccessor()->block_id();
-    if (SecondSuccessor() == NULL) {
-      stream->Add(" B%d", first_id);
-    } else {
-      int second_id = SecondSuccessor()->block_id();
-      stream->Add(" goto (B%d, B%d)", first_id, second_id);
-    }
+  stream->Add(" goto (");
+  bool first_block = true;
+  for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
+    stream->Add(first_block ? "B%d" : ", B%d", it.Current()->block_id());
+    first_block = false;
   }
+  stream->Add(")");
 }
 
 
@@ -656,6 +707,11 @@
 }
 
 
+void HReturn::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
+}
+
+
 void HCompareMap::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
   stream->Add(" (%p)", *map());
@@ -696,10 +752,10 @@
 }
 
 
-void HHasInstanceType::PrintDataTo(StringStream* stream) {
+void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
   switch (from_) {
-    case FIRST_JS_OBJECT_TYPE:
+    case FIRST_JS_RECEIVER_TYPE:
       if (to_ == LAST_TYPE) stream->Add(" spec_object");
       break;
     case JS_REGEXP_TYPE:
@@ -717,10 +773,10 @@
 }
 
 
-void HTypeofIs::PrintDataTo(StringStream* stream) {
+void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
   stream->Add(" == ");
-  stream->Add(type_literal_->ToAsciiVector());
+  stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
 }
 
 
@@ -733,10 +789,59 @@
 }
 
 
-HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
-    HValue* value)  {
-  STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
-  return new HCheckInstanceType(value, FIRST_JS_OBJECT_TYPE, JS_FUNCTION_TYPE);
+void HJSArrayLength::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
+  stream->Add(" ");
+  typecheck()->PrintNameTo(stream);
+}
+
+
+HValue* HCheckInstanceType::Canonicalize() {
+  if (check_ == IS_STRING &&
+      !value()->type().IsUninitialized() &&
+      value()->type().IsString()) {
+    return NULL;
+  }
+  if (check_ == IS_SYMBOL &&
+      value()->IsConstant() &&
+      HConstant::cast(value())->handle()->IsSymbol()) {
+    return NULL;
+  }
+  return this;
+}
+
+
+void HCheckInstanceType::GetCheckInterval(InstanceType* first,
+                                          InstanceType* last) {
+  ASSERT(is_interval_check());
+  switch (check_) {
+    case IS_SPEC_OBJECT:
+      *first = FIRST_SPEC_OBJECT_TYPE;
+      *last = LAST_SPEC_OBJECT_TYPE;
+      return;
+    case IS_JS_ARRAY:
+      *first = *last = JS_ARRAY_TYPE;
+      return;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
+  ASSERT(!is_interval_check());
+  switch (check_) {
+    case IS_STRING:
+      *mask = kIsNotStringMask;
+      *tag = kStringTag;
+      return;
+    case IS_SYMBOL:
+      *mask = kIsSymbolMask;
+      *tag = kSymbolTag;
+      return;
+    default:
+      UNREACHABLE();
+  }
 }
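
GetCheckInterval and GetCheckMaskAndTag above describe the two ways an HCheckInstanceType can be lowered: a range comparison on the instance type, or a bit test against a mask and tag. A hedged sketch of how a backend might apply each form; the concrete constants come from the check itself, not from this sketch:

#include <cstdint>

// Interval form: the instance type must lie in [first, last].
bool PassesIntervalCheck(uint8_t type, uint8_t first, uint8_t last) {
  return first <= type && type <= last;
}

// Mask/tag form: selected bits of the instance type must equal the tag,
// e.g. the IS_STRING case above supplies kIsNotStringMask / kStringTag.
bool PassesMaskAndTagCheck(uint8_t type, uint8_t mask, uint8_t tag) {
  return (type & mask) == tag;
}
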
 
 
@@ -769,19 +874,25 @@
 
 
 Range* HValue::InferRange() {
-  if (representation().IsTagged()) {
-    // Tagged values are always in int32 range when converted to integer,
-    // but they can contain -0.
-    Range* result = new Range();
-    result->set_can_be_minus_zero(true);
-    return result;
-  } else if (representation().IsNone()) {
-    return NULL;
-  } else {
-    // Untagged integer32 cannot be -0 and we don't compute ranges for
-    // untagged doubles.
-    return new Range();
+  // Untagged integer32 cannot be -0, all other representations can.
+  Range* result = new Range();
+  result->set_can_be_minus_zero(!representation().IsInteger32());
+  return result;
+}
+
+
+Range* HChange::InferRange() {
+  Range* input_range = value()->range();
+  if (from().IsInteger32() &&
+      to().IsTagged() &&
+      input_range != NULL && input_range->IsInSmiRange()) {
+    set_type(HType::Smi());
   }
+  Range* result = (input_range != NULL)
+      ? input_range->Copy()
+      : HValue::InferRange();
+  if (to().IsInteger32()) result->set_can_be_minus_zero(false);
+  return result;
 }
 
 
@@ -914,11 +1025,14 @@
     value->PrintNameTo(stream);
     stream->Add(" ");
   }
-  stream->Add(" uses%d_%di_%dd_%dt]",
-              uses()->length(),
+  stream->Add(" uses%d_%di_%dd_%dt",
+              UseCount(),
               int32_non_phi_uses() + int32_indirect_uses(),
               double_non_phi_uses() + double_indirect_uses(),
               tagged_non_phi_uses() + tagged_indirect_uses());
+  stream->Add("%s%s]",
+              is_live() ? "_live" : "",
+              IsConvertibleToInteger() ? "" : "_ncti");
 }
 
 
@@ -933,8 +1047,8 @@
 
 
 bool HPhi::HasRealUses() {
-  for (int i = 0; i < uses()->length(); i++) {
-    if (!uses()->at(i)->IsPhi()) return true;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsPhi()) return true;
   }
   return false;
 }
@@ -967,12 +1081,11 @@
 void HPhi::InitRealUses(int phi_id) {
   // Initialize real uses.
   phi_id_ = phi_id;
-  for (int j = 0; j < uses()->length(); j++) {
-    HValue* use = uses()->at(j);
-    if (!use->IsPhi()) {
-      int index = use->LookupOperandIndex(0, this);
-      Representation req_rep = use->RequiredInputRepresentation(index);
-      non_phi_uses_[req_rep.kind()]++;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* value = it.value();
+    if (!value->IsPhi()) {
+      Representation rep = value->RequiredInputRepresentation(it.index());
+      ++non_phi_uses_[rep.kind()];
     }
   }
 }
@@ -1009,18 +1122,27 @@
 }
 
 
+void HDeoptimize::PrintDataTo(StringStream* stream) {
+  if (OperandCount() == 0) return;
+  OperandAt(0)->PrintNameTo(stream);
+  for (int i = 1; i < OperandCount(); ++i) {
+    stream->Add(" ");
+    OperandAt(i)->PrintNameTo(stream);
+  }
+}
+
+
 void HEnterInlined::PrintDataTo(StringStream* stream) {
-  SmartPointer<char> name = function()->debug_name()->ToCString();
+  SmartArrayPointer<char> name = function()->debug_name()->ToCString();
   stream->Add("%s, id=%d", *name, function()->id());
 }
 
 
 HConstant::HConstant(Handle<Object> handle, Representation r)
     : handle_(handle),
-      constant_type_(HType::TypeFromValue(handle)),
       has_int32_value_(false),
-      int32_value_(0),
       has_double_value_(false),
+      int32_value_(0),
       double_value_(0)  {
   set_representation(r);
   SetFlag(kUseGVN);
@@ -1122,6 +1244,7 @@
           ? left()->range()->Copy()
           : new Range();
       result->Sar(c->Integer32Value());
+      result->set_can_be_minus_zero(false);
       return result;
     }
   }
@@ -1129,6 +1252,31 @@
 }
 
 
+Range* HShr::InferRange() {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      int shift_count = c->Integer32Value() & 0x1f;
+      if (left()->range()->CanBeNegative()) {
+        // Only compute bounds if the result always fits into an int32.
+        return (shift_count >= 1)
+            ? new Range(0, static_cast<uint32_t>(0xffffffff) >> shift_count)
+            : new Range();
+      } else {
+        // For positive inputs we can use the >> operator.
+        Range* result = (left()->range() != NULL)
+            ? left()->range()->Copy()
+            : new Range();
+        result->Sar(c->Integer32Value());
+        result->set_can_be_minus_zero(false);
+        return result;
+      }
+    }
+  }
+  return HValue::InferRange();
+}
+
+
 Range* HShl::InferRange() {
   if (right()->IsConstant()) {
     HConstant* c = HConstant::cast(right());
@@ -1137,6 +1285,7 @@
           ? left()->range()->Copy()
           : new Range();
       result->Shl(c->Integer32Value());
+      result->set_can_be_minus_zero(false);
       return result;
     }
   }
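
The new HShr::InferRange above relies on the fact that a logical right shift by at least one bit always lands in [0, 0xffffffff >> shift_count], which fits in a signed 32-bit range. A small hedged check of that bound:

#include <cassert>
#include <cstdint>

// Upper bound of x >>> shift_count over all uint32 x, for shift_count in 1..31.
int32_t ShrUpperBound(int shift_count) {
  assert(shift_count >= 1 && shift_count <= 31);
  return static_cast<int32_t>(UINT32_C(0xffffffff) >> shift_count);
}
// ShrUpperBound(1) == 0x7fffffff and ShrUpperBound(31) == 1, so the result
// range [0, ShrUpperBound(n)] always fits in an int32 once n >= 1.
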
@@ -1145,25 +1294,34 @@
 
 
 
-void HCompare::PrintDataTo(StringStream* stream) {
+void HCompareGeneric::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
   stream->Add(" ");
   HBinaryOperation::PrintDataTo(stream);
 }
 
 
-void HCompare::SetInputRepresentation(Representation r) {
+void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add(Token::Name(token()));
+  stream->Add(" ");
+  left()->PrintNameTo(stream);
+  stream->Add(" ");
+  right()->PrintNameTo(stream);
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
+void HGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", SuccessorAt(0)->block_id());
+}
+
+
+void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
   input_representation_ = r;
-  if (r.IsTagged()) {
-    SetAllSideEffects();
-    ClearFlag(kUseGVN);
-  } else if (r.IsDouble()) {
+  if (r.IsDouble()) {
     SetFlag(kDeoptimizeOnUndefined);
-    ClearAllSideEffects();
-    SetFlag(kUseGVN);
   } else {
-    ClearAllSideEffects();
-    SetFlag(kUseGVN);
+    ASSERT(r.IsInteger32());
   }
 }
 
@@ -1179,13 +1337,15 @@
 }
 
 
-HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* object,
-                                                       ZoneMapList* types,
+HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
+                                                       HValue* object,
+                                                       SmallMapList* types,
                                                        Handle<String> name)
-    : HUnaryOperation(object),
-      types_(Min(types->length(), kMaxLoadPolymorphism)),
+    : types_(Min(types->length(), kMaxLoadPolymorphism)),
       name_(name),
       need_generic_(false) {
+  SetOperandAt(0, context);
+  SetOperandAt(1, object);
   set_representation(Representation::Tagged());
   SetFlag(kDependsOnMaps);
   for (int i = 0;
@@ -1194,13 +1354,23 @@
     Handle<Map> map = types->at(i);
     LookupResult lookup;
     map->LookupInDescriptors(NULL, *name, &lookup);
-    if (lookup.IsProperty() && lookup.type() == FIELD) {
-      types_.Add(types->at(i));
-      int index = lookup.GetLocalFieldIndexFromMap(*map);
-      if (index < 0) {
-        SetFlag(kDependsOnInobjectFields);
-      } else {
-        SetFlag(kDependsOnBackingStoreFields);
+    if (lookup.IsProperty()) {
+      switch (lookup.type()) {
+        case FIELD: {
+          int index = lookup.GetLocalFieldIndexFromMap(*map);
+          if (index < 0) {
+            SetFlag(kDependsOnInobjectFields);
+          } else {
+            SetFlag(kDependsOnBackingStoreFields);
+          }
+          types_.Add(types->at(i));
+          break;
+        }
+        case CONSTANT_FUNCTION:
+          types_.Add(types->at(i));
+          break;
+        default:
+          break;
       }
     }
   }
@@ -1233,6 +1403,20 @@
 }
 
 
+void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
+  object()->PrintNameTo(stream);
+  stream->Add(" .");
+  stream->Add(*String::cast(*name())->ToCString());
+}
+
+
+void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintNameTo(stream);
+  stream->Add(" .");
+  stream->Add(*String::cast(*name())->ToCString());
+}
+
+
 void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1241,6 +1425,28 @@
 }
 
 
+bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (!use->IsChange()) return true;
+  }
+  return false;
+}
+
+
+void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintNameTo(stream);
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("]");
+}
+
+
+bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
+  return true;
+}
+
+
 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1253,31 +1459,40 @@
     StringStream* stream) {
   external_pointer()->PrintNameTo(stream);
   stream->Add(".");
-  switch (array_type()) {
-    case kExternalByteArray:
+  switch (elements_kind()) {
+    case EXTERNAL_BYTE_ELEMENTS:
       stream->Add("byte");
       break;
-    case kExternalUnsignedByteArray:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
       stream->Add("u_byte");
       break;
-    case kExternalShortArray:
+    case EXTERNAL_SHORT_ELEMENTS:
       stream->Add("short");
       break;
-    case kExternalUnsignedShortArray:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
       stream->Add("u_short");
       break;
-    case kExternalIntArray:
+    case EXTERNAL_INT_ELEMENTS:
       stream->Add("int");
       break;
-    case kExternalUnsignedIntArray:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       stream->Add("u_int");
       break;
-    case kExternalFloatArray:
+    case EXTERNAL_FLOAT_ELEMENTS:
       stream->Add("float");
       break;
-    case kExternalPixelArray:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      stream->Add("double");
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
   }
   stream->Add("[");
   key()->PrintNameTo(stream);
@@ -1317,6 +1532,15 @@
 }
 
 
+void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintNameTo(stream);
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("] = ");
+  value()->PrintNameTo(stream);
+}
+
+
 void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1330,31 +1554,40 @@
     StringStream* stream) {
   external_pointer()->PrintNameTo(stream);
   stream->Add(".");
-  switch (array_type()) {
-    case kExternalByteArray:
+  switch (elements_kind()) {
+    case EXTERNAL_BYTE_ELEMENTS:
       stream->Add("byte");
       break;
-    case kExternalUnsignedByteArray:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
       stream->Add("u_byte");
       break;
-    case kExternalShortArray:
+    case EXTERNAL_SHORT_ELEMENTS:
       stream->Add("short");
       break;
-    case kExternalUnsignedShortArray:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
       stream->Add("u_short");
       break;
-    case kExternalIntArray:
+    case EXTERNAL_INT_ELEMENTS:
       stream->Add("int");
       break;
-    case kExternalUnsignedIntArray:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       stream->Add("u_int");
       break;
-    case kExternalFloatArray:
+    case EXTERNAL_FLOAT_ELEMENTS:
       stream->Add("float");
       break;
-    case kExternalPixelArray:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      stream->Add("double");
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
   }
   stream->Add("[");
   key()->PrintNameTo(stream);
@@ -1439,21 +1672,26 @@
 
 
 HType HConstant::CalculateInferredType() {
-  return constant_type_;
+  return HType::TypeFromValue(handle_);
 }
 
 
-HType HCompare::CalculateInferredType() {
+HType HCompareGeneric::CalculateInferredType() {
   return HType::Boolean();
 }
 
 
-HType HCompareJSObjectEq::CalculateInferredType() {
+HType HInstanceOf::CalculateInferredType() {
   return HType::Boolean();
 }
 
 
-HType HUnaryPredicate::CalculateInferredType() {
+HType HDeleteProperty::CalculateInferredType() {
+  return HType::Boolean();
+}
+
+
+HType HInstanceOfKnownGlobal::CalculateInferredType() {
   return HType::Boolean();
 }
 
@@ -1543,6 +1781,13 @@
 }
 
 
+HValue* HForceRepresentation::EnsureAndPropagateNotMinusZero(
+    BitVector* visited) {
+  visited->Add(id());
+  return value();
+}
+
+
 HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
   visited->Add(id());
   if (range() == NULL || range()->CanBeMinusZero()) {
@@ -1593,6 +1838,13 @@
 }
 
 
+void HIn::PrintDataTo(StringStream* stream) {
+  key()->PrintNameTo(stream);
+  stream->Add(" ");
+  object()->PrintNameTo(stream);
+}
+
+
 // Node-specific verification code is only included in debug mode.
 #ifdef DEBUG
 
@@ -1614,11 +1866,6 @@
 }
 
 
-void HBoundsCheck::Verify() {
-  HInstruction::Verify();
-}
-
-
 void HCheckSmi::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
@@ -1631,18 +1878,6 @@
 }
 
 
-void HCheckInstanceType::Verify() {
-  HInstruction::Verify();
-  ASSERT(HasNoUses());
-}
-
-
-void HCheckMap::Verify() {
-  HInstruction::Verify();
-  ASSERT(HasNoUses());
-}
-
-
 void HCheckFunction::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index d5f4ea1..0af5489 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -30,9 +30,13 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "code-stubs.h"
+#include "data-flow.h"
 #include "small-pointer-list.h"
 #include "string-stream.h"
+#include "v8conversions.h"
+#include "v8utils.h"
 #include "zone.h"
 
 namespace v8 {
@@ -48,18 +52,10 @@
 class LChunkBuilder;
 
 
-#define HYDROGEN_ALL_INSTRUCTION_LIST(V)       \
-  V(ArithmeticBinaryOperation)                 \
-  V(BinaryCall)                                \
-  V(BinaryOperation)                           \
+#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V)  \
   V(BitwiseBinaryOperation)                    \
   V(ControlInstruction)                        \
   V(Instruction)                               \
-  V(Phi)                                       \
-  V(UnaryCall)                                 \
-  V(UnaryControlInstruction)                   \
-  V(UnaryOperation)                            \
-  HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
 
 
 #define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)  \
@@ -77,6 +73,7 @@
   V(BitXor)                                    \
   V(BlockEntry)                                \
   V(BoundsCheck)                               \
+  V(Branch)                                    \
   V(CallConstantFunction)                      \
   V(CallFunction)                              \
   V(CallGlobal)                                \
@@ -93,31 +90,38 @@
   V(CheckNonSmi)                               \
   V(CheckPrototypeMaps)                        \
   V(CheckSmi)                                  \
-  V(ClassOfTest)                               \
-  V(Compare)                                   \
-  V(CompareJSObjectEq)                         \
+  V(ClampToUint8)                              \
+  V(ClassOfTestAndBranch)                      \
+  V(CompareIDAndBranch)                        \
+  V(CompareGeneric)                            \
+  V(CompareObjectEqAndBranch)                  \
   V(CompareMap)                                \
+  V(CompareConstantEqAndBranch)                \
   V(Constant)                                  \
   V(Context)                                   \
   V(DeleteProperty)                            \
   V(Deoptimize)                                \
   V(Div)                                       \
+  V(ElementsKind)                              \
   V(EnterInlined)                              \
-  V(ExternalArrayLength)                       \
-  V(FixedArrayLength)                          \
+  V(FixedArrayBaseLength)                      \
+  V(ForceRepresentation)                       \
   V(FunctionLiteral)                           \
   V(GetCachedArrayIndex)                       \
   V(GlobalObject)                              \
   V(GlobalReceiver)                            \
   V(Goto)                                      \
-  V(HasInstanceType)                           \
-  V(HasCachedArrayIndex)                       \
+  V(HasCachedArrayIndexAndBranch)              \
+  V(HasInstanceTypeAndBranch)                  \
+  V(In)                                        \
   V(InstanceOf)                                \
   V(InstanceOfKnownGlobal)                     \
-  V(IsNull)                                    \
-  V(IsObject)                                  \
-  V(IsSmi)                                     \
-  V(IsConstructCall)                           \
+  V(InvokeFunction)                            \
+  V(IsConstructCallAndBranch)                  \
+  V(IsNullAndBranch)                           \
+  V(IsObjectAndBranch)                         \
+  V(IsSmiAndBranch)                            \
+  V(IsUndetectableAndBranch)                   \
   V(JSArrayLength)                             \
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
@@ -126,6 +130,7 @@
   V(LoadFunctionPrototype)                     \
   V(LoadGlobalCell)                            \
   V(LoadGlobalGeneric)                         \
+  V(LoadKeyedFastDoubleElement)                \
   V(LoadKeyedFastElement)                      \
   V(LoadKeyedGeneric)                          \
   V(LoadKeyedSpecializedArrayElement)          \
@@ -146,26 +151,31 @@
   V(Shl)                                       \
   V(Shr)                                       \
   V(Simulate)                                  \
+  V(SoftDeoptimize)                            \
   V(StackCheck)                                \
   V(StoreContextSlot)                          \
   V(StoreGlobalCell)                           \
   V(StoreGlobalGeneric)                        \
+  V(StoreKeyedFastDoubleElement)               \
   V(StoreKeyedFastElement)                     \
-  V(StoreKeyedSpecializedArrayElement)         \
   V(StoreKeyedGeneric)                         \
+  V(StoreKeyedSpecializedArrayElement)         \
   V(StoreNamedField)                           \
   V(StoreNamedGeneric)                         \
+  V(StringAdd)                                 \
   V(StringCharCodeAt)                          \
   V(StringCharFromCode)                        \
   V(StringLength)                              \
   V(Sub)                                       \
-  V(Test)                                      \
+  V(ThisFunction)                              \
   V(Throw)                                     \
   V(ToFastProperties)                          \
+  V(ToInt32)                                   \
   V(Typeof)                                    \
-  V(TypeofIs)                                  \
+  V(TypeofIsAndBranch)                         \
   V(UnaryMathOperation)                        \
   V(UnknownOSRValue)                           \
+  V(UseConst)                                  \
   V(ValueOf)
 
 #define GVN_FLAG_LIST(V)                       \
@@ -173,6 +183,7 @@
   V(InobjectFields)                            \
   V(BackingStoreFields)                        \
   V(ArrayElements)                             \
+  V(DoubleArrayElements)                       \
   V(SpecializedArrayElements)                  \
   V(GlobalVars)                                \
   V(Maps)                                      \
@@ -180,19 +191,21 @@
   V(ContextSlots)                              \
   V(OsrEntries)
 
-#define DECLARE_INSTRUCTION(type)                   \
+#define DECLARE_ABSTRACT_INSTRUCTION(type)          \
   virtual bool Is##type() const { return true; }    \
   static H##type* cast(HValue* value) {             \
     ASSERT(value->Is##type());                      \
     return reinterpret_cast<H##type*>(value);       \
-  }                                                 \
-  Opcode opcode() const { return HValue::k##type; }
+  }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+#define DECLARE_CONCRETE_INSTRUCTION(type)                        \
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
-  virtual const char* Mnemonic() const { return mnemonic; }       \
-  DECLARE_INSTRUCTION(type)
+  static H##type* cast(HValue* value) {                           \
+    ASSERT(value->Is##type());                                    \
+    return reinterpret_cast<H##type*>(value);                     \
+  }                                                               \
+  virtual Opcode opcode() const { return HValue::k##type; }
 
 
 class Range: public ZoneObject {
@@ -214,14 +227,20 @@
   Range* next() const { return next_; }
   Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
   Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
-  Range* Copy() const { return new Range(lower_, upper_); }
+  Range* Copy() const {
+    Range* result = new Range(lower_, upper_);
+    result->set_can_be_minus_zero(CanBeMinusZero());
+    return result;
+  }
   int32_t Mask() const;
   void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
   bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
   bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
   bool CanBeNegative() const { return lower_ < 0; }
   bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
-  bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; }
+  bool IsMostGeneric() const {
+    return lower_ == kMinInt && upper_ == kMaxInt && CanBeMinusZero();
+  }
   bool IsInSmiRange() const {
     return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
   }
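
Range::Copy above now carries the can_be_minus_zero bit along with the bounds; a copy that dropped it would let later passes wrongly conclude that -0 cannot occur. A minimal hedged illustration of the invariant:

struct DemoRange {
  int lower;
  int upper;
  bool can_be_minus_zero;

  DemoRange Copy() const {
    // A faithful copy preserves all three pieces of information, including
    // whether the value may be -0, which the bounds alone cannot express.
    DemoRange result = {lower, upper, can_be_minus_zero};
    return result;
  }
};
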
@@ -394,7 +413,7 @@
     kBoolean = 0x85,         // 0000 0000 1000 0101
     kNonPrimitive = 0x101,   // 0000 0001 0000 0001
     kJSObject = 0x301,       // 0000 0011 0000 0001
-    kJSArray = 0x701,        // 0000 0111 1000 0001
+    kJSArray = 0x701,        // 0000 0111 0000 0001
     kUninitialized = 0x1fff  // 0001 1111 1111 1111
   };
 
@@ -407,6 +426,62 @@
 };
 
 
+class HUseListNode: public ZoneObject {
+ public:
+  HUseListNode(HValue* value, int index, HUseListNode* tail)
+      : tail_(tail), value_(value), index_(index) {
+  }
+
+  HUseListNode* tail() const { return tail_; }
+  HValue* value() const { return value_; }
+  int index() const { return index_; }
+
+  void set_tail(HUseListNode* list) { tail_ = list; }
+
+#ifdef DEBUG
+  void Zap() {
+    tail_ = reinterpret_cast<HUseListNode*>(1);
+    value_ = NULL;
+    index_ = -1;
+  }
+#endif
+
+ private:
+  HUseListNode* tail_;
+  HValue* value_;
+  int index_;
+};
+
+
+// We reuse use list nodes behind the scenes as uses are added and deleted.
+// This class is the safe way to iterate uses while deleting them.
+class HUseIterator BASE_EMBEDDED {
+ public:
+  bool Done() { return current_ == NULL; }
+  void Advance();
+
+  HValue* value() {
+    ASSERT(!Done());
+    return value_;
+  }
+
+  int index() {
+    ASSERT(!Done());
+    return index_;
+  }
+
+ private:
+  explicit HUseIterator(HUseListNode* head);
+
+  HUseListNode* current_;
+  HUseListNode* next_;
+  HValue* value_;
+  int index_;
+
+  friend class HValue;
+};
+
+
 class HValue: public ZoneObject {
  public:
   static const int kNoNumber = -1;
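
HUseIterator above is the safe way to walk a value's uses even while use nodes are being recycled; HValue::UseCount in the .cc diff shows the canonical loop. A hedged usage sketch built on the declared API (the helper name is illustrative, not part of V8):

// Count how many uses of 'value' are not phis.
int CountNonPhiUses(HValue* value) {
  int count = 0;
  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
    if (!it.value()->IsPhi()) ++count;
  }
  return count;
}
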
@@ -420,6 +495,10 @@
     GVN_FLAG_LIST(DECLARE_DO)
   #undef DECLARE_DO
     kFlexibleRepresentation,
+    // Participate in Global Value Numbering, i.e. elimination of
+    // unnecessary recomputations. If an instruction sets this flag, it must
+    // implement DataEquals(), which will be used to determine if other
+    // occurrences of the instruction are indeed the same.
     kUseGVN,
     kCanOverflow,
     kBailoutOnMinusZero,
@@ -434,19 +513,6 @@
 
   static const int kChangesToDependsFlagsLeftShift = 1;
 
-  static int ChangesFlagsMask() {
-    int result = 0;
-    // Create changes mask.
-#define DECLARE_DO(type) result |= (1 << kChanges##type);
-  GVN_FLAG_LIST(DECLARE_DO)
-#undef DECLARE_DO
-    return result;
-  }
-
-  static int DependsFlagsMask() {
-    return ConvertChangesToDependsFlags(ChangesFlagsMask());
-  }
-
   static int ConvertChangesToDependsFlags(int flags) {
     return flags << kChangesToDependsFlagsLeftShift;
   }
@@ -455,15 +521,30 @@
 
   enum Opcode {
     // Declare a unique enum value for each hydrogen instruction.
-  #define DECLARE_DO(type) k##type,
-    HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
-  #undef DECLARE_DO
-    kMaxInstructionClass
+  #define DECLARE_OPCODE(type) k##type,
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kPhi
+  #undef DECLARE_OPCODE
   };
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual predicates for each concrete HInstruction or HValue.
+  #define DECLARE_PREDICATE(type) \
+    bool Is##type() const { return opcode() == k##type; }
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+  #undef DECLARE_PREDICATE
+    bool IsPhi() const { return opcode() == kPhi; }
+
+  // Declare virtual predicates for abstract HInstruction or HValue
+  #define DECLARE_PREDICATE(type) \
+    virtual bool Is##type() const { return false; }
+    HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
+  #undef DECLARE_PREDICATE
 
   HValue() : block_(NULL),
              id_(kNoNumber),
              type_(HType::Tagged()),
+             use_list_(NULL),
              range_(NULL),
              flags_(0) {}
   virtual ~HValue() {}
@@ -474,23 +555,25 @@
   int id() const { return id_; }
   void set_id(int id) { id_ = id; }
 
-  SmallPointerList<HValue>* uses() { return &uses_; }
+  HUseIterator uses() const { return HUseIterator(use_list_); }
 
   virtual bool EmitAtUses() { return false; }
   Representation representation() const { return representation_; }
   void ChangeRepresentation(Representation r) {
     // Representation was already set and is allowed to be changed.
-    ASSERT(!representation_.IsNone());
     ASSERT(!r.IsNone());
     ASSERT(CheckFlag(kFlexibleRepresentation));
     RepresentationChanged(r);
     representation_ = r;
   }
+  void AssumeRepresentation(Representation r);
+
+  virtual bool IsConvertibleToInteger() const { return true; }
 
   HType type() const { return type_; }
-  void set_type(HType type) {
-    ASSERT(uses_.length() == 0);
-    type_ = type;
+  void set_type(HType new_type) {
+    ASSERT(new_type.IsSubtypeOf(type_));
+    type_ = new_type;
   }
 
   // An operation needs to override this function iff:
@@ -500,9 +583,9 @@
   // it would otherwise output what should be a minus zero as an int32 zero.
   // If the operation also exists in a form that takes int32 and outputs int32
   // then the operation should return its input value so that we can propagate
-  // back.  There are two operations that need to propagate back to more than
-  // one input.  They are phi and binary add.  They always return NULL and
-  // expect the caller to take care of things.
+  // back.  There are three operations that need to propagate back to more
+  // than one input: phi, binary div, and binary mul.  They always return
+  // NULL and expect the caller to take care of things.
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
     visited->Add(id());
     return NULL;
@@ -515,16 +598,14 @@
   virtual HValue* OperandAt(int index) = 0;
   void SetOperandAt(int index, HValue* value);
 
-  int LookupOperandIndex(int occurrence_index, HValue* op);
-  bool UsesMultipleTimes(HValue* op);
-
-  void ReplaceAndDelete(HValue* other);
-  void ReplaceValue(HValue* other);
-  void ReplaceAtUse(HValue* use, HValue* other);
-  void ReplaceFirstAtUse(HValue* use, HValue* other, Representation r);
-  bool HasNoUses() const { return uses_.is_empty(); }
+  void DeleteAndReplaceWith(HValue* other);
+  void ReplaceAllUsesWith(HValue* other);
+  bool HasNoUses() const { return use_list_ == NULL; }
+  bool HasMultipleUses() const {
+    return use_list_ != NULL && use_list_->tail() != NULL;
+  }
+  int UseCount() const;
   void ClearOperands();
-  void Delete();
 
   int flags() const { return flags_; }
   void SetFlag(Flag f) { flags_ |= (1 << f); }
@@ -535,6 +616,8 @@
   void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
   bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
 
+  int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
+
   Range* range() const { return range_; }
   bool HasRange() const { return range_ != NULL; }
   void AddNewRange(Range* r);
@@ -554,21 +637,17 @@
   // then return it.  Return NULL to have the instruction deleted.
   virtual HValue* Canonicalize() { return this; }
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
   bool Equals(HValue* other);
   virtual intptr_t Hashcode();
 
   // Printing support.
   virtual void PrintTo(StringStream* stream) = 0;
   void PrintNameTo(StringStream* stream);
-  static void PrintTypeTo(HType type, StringStream* stream);
+  void PrintTypeTo(StringStream* stream);
+  void PrintRangeTo(StringStream* stream);
+  void PrintChangesTo(StringStream* stream);
 
-  virtual const char* Mnemonic() const = 0;
-  virtual Opcode opcode() const = 0;
+  const char* Mnemonic() const;
 
   // Updates the inferred type of this instruction and returns true if
   // it has changed.
@@ -603,12 +682,24 @@
   }
 
  private:
+  static int ChangesFlagsMask() {
+    int result = 0;
+    // Create changes mask.
+#define ADD_FLAG(type) result |= (1 << kChanges##type);
+  GVN_FLAG_LIST(ADD_FLAG)
+#undef ADD_FLAG
+    return result;
+  }
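+
+  // Illustrative expansion, not part of this patch: each GVN_FLAG_LIST entry,
+  // e.g. OsrEntries, contributes a statement of the form
+  //   result |= (1 << kChangesOsrEntries);
+  // so the mask ends up covering every kChanges* flag.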
+
   // A flag mask to mark an instruction as having arbitrary side effects.
   static int AllSideEffects() {
     return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
   }
 
-  void InternalReplaceAtUse(HValue* use, HValue* other);
+  // Removes the matching use from the use list if present.  Returns the
+  // removed list node or NULL.
+  HUseListNode* RemoveUse(HValue* value, int index);
+
   void RegisterUse(int index, HValue* new_value);
 
   HBasicBlock* block_;
@@ -619,7 +710,7 @@
 
   Representation representation_;
   HType type_;
-  SmallPointerList<HValue> uses_;
+  HUseListNode* use_list_;
   Range* range_;
   int flags_;
 
@@ -650,13 +741,9 @@
   virtual void Verify();
 #endif
 
-  // Returns whether this is some kind of deoptimizing check
-  // instruction.
-  virtual bool IsCheckInstruction() const { return false; }
-
   virtual bool IsCall() { return false; }
 
-  DECLARE_INSTRUCTION(Instruction)
+  DECLARE_ABSTRACT_INSTRUCTION(Instruction)
 
  protected:
   HInstruction()
@@ -674,6 +761,8 @@
     SetBlock(block);
   }
 
+  void PrintMnemonicTo(StringStream* stream);
+
   HInstruction* next_;
   HInstruction* previous_;
   int position_;
@@ -682,53 +771,6 @@
 };
 
 
-class HControlInstruction: public HInstruction {
- public:
-  HControlInstruction(HBasicBlock* first, HBasicBlock* second)
-      : first_successor_(first), second_successor_(second) {
-  }
-
-  HBasicBlock* FirstSuccessor() const { return first_successor_; }
-  HBasicBlock* SecondSuccessor() const { return second_successor_; }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  DECLARE_INSTRUCTION(ControlInstruction)
-
- private:
-  HBasicBlock* first_successor_;
-  HBasicBlock* second_successor_;
-};
-
-
-template<int NumElements>
-class HOperandContainer {
- public:
-  HOperandContainer() : elems_() { }
-
-  int length() { return NumElements; }
-  HValue*& operator[](int i) {
-    ASSERT(i < length());
-    return elems_[i];
-  }
-
- private:
-  HValue* elems_[NumElements];
-};
-
-
-template<>
-class HOperandContainer<0> {
- public:
-  int length() { return 0; }
-  HValue*& operator[](int i) {
-    UNREACHABLE();
-    static HValue* t = 0;
-    return t;
-  }
-};
-
-
 template<int V>
 class HTemplateInstruction : public HInstruction {
  public:
@@ -739,23 +781,61 @@
   void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
 
  private:
-  HOperandContainer<V> inputs_;
+  EmbeddedContainer<HValue*, V> inputs_;
 };
 
 
-template<int V>
-class HTemplateControlInstruction : public HControlInstruction {
+class HControlInstruction: public HInstruction {
  public:
-  HTemplateControlInstruction<V>(HBasicBlock* first, HBasicBlock* second)
-    : HControlInstruction(first, second) { }
+  virtual HBasicBlock* SuccessorAt(int i) = 0;
+  virtual int SuccessorCount() = 0;
+  virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  HBasicBlock* FirstSuccessor() {
+    return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
+  }
+  HBasicBlock* SecondSuccessor() {
+    return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
+  }
+
+  DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
+};
+
+
+class HSuccessorIterator BASE_EMBEDDED {
+ public:
+  explicit HSuccessorIterator(HControlInstruction* instr)
+      : instr_(instr), current_(0) { }
+
+  bool Done() { return current_ >= instr_->SuccessorCount(); }
+  HBasicBlock* Current() { return instr_->SuccessorAt(current_); }
+  void Advance() { current_++; }
+
+ private:
+  HControlInstruction* instr_;
+  int current_;
+};
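+
+// Illustrative sketch, not part of this patch, of iterating the successors of
+// a control instruction (|end| is assumed to be an HControlInstruction*):
+//
+//   for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+//     HBasicBlock* successor = it.Current();
+//   }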
+
+
+template<int S, int V>
+class HTemplateControlInstruction: public HControlInstruction {
+ public:
+  int SuccessorCount() { return S; }
+  HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
+  void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
+
   int OperandCount() { return V; }
   HValue* OperandAt(int i) { return inputs_[i]; }
 
+
  protected:
   void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
 
  private:
-  HOperandContainer<V> inputs_;
+  EmbeddedContainer<HBasicBlock*, S> successors_;
+  EmbeddedContainer<HValue*, V> inputs_;
 };
 
 
@@ -765,15 +845,26 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
+  DECLARE_CONCRETE_INSTRUCTION(BlockEntry)
+};
+
+
+// We insert a soft deoptimize when we hit code with unknown type feedback,
+// so that we get a chance to re-optimize with useful type feedback.
+// Unlike HDeoptimize, HSoftDeoptimize does not end a basic block.
+class HSoftDeoptimize: public HTemplateInstruction<0> {
+ public:
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SoftDeoptimize)
 };
 
 
 class HDeoptimize: public HControlInstruction {
  public:
-  explicit HDeoptimize(int environment_length)
-      : HControlInstruction(NULL, NULL),
-        values_(environment_length) { }
+  explicit HDeoptimize(int environment_length) : values_(environment_length) { }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
@@ -781,13 +872,28 @@
 
   virtual int OperandCount() { return values_.length(); }
   virtual HValue* OperandAt(int index) { return values_[index]; }
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual int SuccessorCount() { return 0; }
+  virtual HBasicBlock* SuccessorAt(int i) {
+    UNREACHABLE();
+    return NULL;
+  }
+  virtual void SetSuccessorAt(int i, HBasicBlock* block) {
+    UNREACHABLE();
+  }
 
   void AddEnvironmentValue(HValue* value) {
     values_.Add(NULL);
     SetOperandAt(values_.length() - 1, value);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+  enum UseEnvironment {
+    kNoUses,
+    kUseAll
+  };
 
  protected:
   virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -799,57 +905,64 @@
 };
 
 
-class HGoto: public HTemplateControlInstruction<0> {
+class HGoto: public HTemplateControlInstruction<1, 0> {
  public:
-  explicit HGoto(HBasicBlock* target)
-      : HTemplateControlInstruction<0>(target, NULL),
-        include_stack_check_(false) { }
-
-  void set_include_stack_check(bool include_stack_check) {
-    include_stack_check_ = include_stack_check;
-  }
-  bool include_stack_check() const { return include_stack_check_; }
+  explicit HGoto(HBasicBlock* target) {
+    SetSuccessorAt(0, target);
+  }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream);
 
- private:
-  bool include_stack_check_;
+  DECLARE_CONCRETE_INSTRUCTION(Goto)
 };
 
 
-class HUnaryControlInstruction: public HTemplateControlInstruction<1> {
+class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
  public:
-  explicit HUnaryControlInstruction(HValue* value,
-                                    HBasicBlock* true_target,
-                                    HBasicBlock* false_target)
-      : HTemplateControlInstruction<1>(true_target, false_target) {
+  HUnaryControlInstruction(HValue* value,
+                           HBasicBlock* true_target,
+                           HBasicBlock* false_target) {
     SetOperandAt(0, value);
+    SetSuccessorAt(0, true_target);
+    SetSuccessorAt(1, false_target);
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* value() { return OperandAt(0); }
-
-  DECLARE_INSTRUCTION(UnaryControlInstruction)
 };
 
 
-class HTest: public HUnaryControlInstruction {
+class HBranch: public HUnaryControlInstruction {
  public:
-  HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
-      : HUnaryControlInstruction(value, true_target, false_target) {
+  HBranch(HValue* value,
+          HBasicBlock* true_target,
+          HBasicBlock* false_target,
+          ToBooleanStub::Types expected_input_types = ToBooleanStub::no_types())
+      : HUnaryControlInstruction(value, true_target, false_target),
+        expected_input_types_(expected_input_types) {
     ASSERT(true_target != NULL && false_target != NULL);
   }
+  explicit HBranch(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) { }
+
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Test, "test")
+  ToBooleanStub::Types expected_input_types() const {
+    return expected_input_types_;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch)
+
+ private:
+  ToBooleanStub::Types expected_input_types_;
 };
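+
+// Illustrative use, not part of this patch: the usual construction is
+// new HBranch(condition, if_true, if_false), which keeps the default
+// ToBooleanStub::no_types() hint when no type feedback is available.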
 
 
@@ -874,36 +987,38 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")
+  DECLARE_CONCRETE_INSTRUCTION(CompareMap)
 
  private:
   Handle<Map> map_;
 };
 
 
-class HReturn: public HUnaryControlInstruction {
+class HReturn: public HTemplateControlInstruction<0, 1> {
  public:
-  explicit HReturn(HValue* value)
-      : HUnaryControlInstruction(value, NULL, NULL) {
+  explicit HReturn(HValue* value) {
+    SetOperandAt(0, value);
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+  virtual void PrintDataTo(StringStream* stream);
+
+  HValue* value() { return OperandAt(0); }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return)
 };
 
 
-class HAbnormalExit: public HTemplateControlInstruction<0> {
+class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
  public:
-  HAbnormalExit() : HTemplateControlInstruction<0>(NULL, NULL) { }
-
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit")
+  DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
 };
 
 
@@ -913,16 +1028,24 @@
     SetOperandAt(0, value);
   }
 
+  static HUnaryOperation* cast(HValue* value) {
+    return reinterpret_cast<HUnaryOperation*>(value);
+  }
+
+  virtual bool CanTruncateToInt32() const {
+    return CheckFlag(kTruncatingToInt32);
+  }
+
   HValue* value() { return OperandAt(0); }
   virtual void PrintDataTo(StringStream* stream);
-
-  DECLARE_INSTRUCTION(UnaryOperation)
 };
 
 
-class HThrow: public HUnaryOperation {
+class HThrow: public HTemplateInstruction<2> {
  public:
-  explicit HThrow(HValue* value) : HUnaryOperation(value) {
+  HThrow(HValue* context, HValue* value) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
     SetAllSideEffects();
   }
 
@@ -930,7 +1053,41 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+  HValue* context() { return OperandAt(0); }
+  HValue* value() { return OperandAt(1); }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw)
+};
+
+
+class HUseConst: public HUnaryOperation {
+ public:
+  explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(UseConst)
+};
+
+
+class HForceRepresentation: public HTemplateInstruction<1> {
+ public:
+  HForceRepresentation(HValue* value, Representation required_representation) {
+    SetOperandAt(0, value);
+    set_representation(required_representation);
+  }
+
+  HValue* value() { return OperandAt(0); }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return representation();  // Same as the output representation.
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
 };
 
 
@@ -949,10 +1106,6 @@
     set_representation(to);
     SetFlag(kUseGVN);
     if (is_truncating) SetFlag(kTruncatingToInt32);
-    if (from.IsInteger32() && to.IsTagged() && value->range() != NULL &&
-        value->range()->IsInSmiRange()) {
-      set_type(HType::Smi());
-    }
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@@ -964,19 +1117,17 @@
     return from_;
   }
 
-  bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
+  virtual Range* InferRange();
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(Change,
-                               CanTruncateToInt32() ? "truncate" : "change")
+  DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
   virtual bool DataEquals(HValue* other) {
     if (!other->IsChange()) return false;
     HChange* change = HChange::cast(other);
-    return value() == change->value()
-        && to().Equals(change->to())
+    return to().Equals(change->to())
         && deoptimize_on_undefined() == change->deoptimize_on_undefined();
   }
 
@@ -986,6 +1137,56 @@
 };
 
 
+class HClampToUint8: public HUnaryOperation {
+ public:
+  explicit HClampToUint8(HValue* value)
+      : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HToInt32: public HUnaryOperation {
+ public:
+  explicit HToInt32(HValue* value)
+      : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  virtual bool CanTruncateToInt32() const {
+    return true;
+  }
+
+  virtual HValue* Canonicalize() {
+    if (value()->representation().IsInteger32()) {
+      return value();
+    } else {
+      return this;
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToInt32)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
 class HSimulate: public HInstruction {
  public:
   HSimulate(int ast_id, int pop_count)
@@ -1026,7 +1227,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
+  DECLARE_CONCRETE_INSTRUCTION(Simulate)
 
 #ifdef DEBUG
   virtual void Verify();
@@ -1054,38 +1255,67 @@
 };
 
 
-class HStackCheck: public HTemplateInstruction<0> {
+class HStackCheck: public HTemplateInstruction<1> {
  public:
-  HStackCheck() { }
+  enum Type {
+    kFunctionEntry,
+    kBackwardsBranch
+  };
 
-  virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::None();
+  HStackCheck(HValue* context, Type type) : type_(type) {
+    SetOperandAt(0, context);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
+  HValue* context() { return OperandAt(0); }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  void Eliminate() {
+    // The stack check eliminator might try to eliminate the same stack
+    // check instruction multiple times.
+    if (IsLinked()) {
+      DeleteFromGraph();
+    }
+  }
+
+  bool is_function_entry() { return type_ == kFunctionEntry; }
+  bool is_backwards_branch() { return type_ == kBackwardsBranch; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck)
+
+ private:
+  Type type_;
 };
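+
+// Illustrative construction, not part of this patch, using the Type enum
+// above:
+//   new HStackCheck(context, HStackCheck::kBackwardsBranch)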
 
 
 class HEnterInlined: public HTemplateInstruction<0> {
  public:
-  HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function)
-      : closure_(closure), function_(function) {
+  HEnterInlined(Handle<JSFunction> closure,
+                FunctionLiteral* function,
+                CallKind call_kind)
+      : closure_(closure),
+        function_(function),
+        call_kind_(call_kind) {
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<JSFunction> closure() const { return closure_; }
   FunctionLiteral* function() const { return function_; }
+  CallKind call_kind() const { return call_kind_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined")
+  DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
 
  private:
   Handle<JSFunction> closure_;
   FunctionLiteral* function_;
+  CallKind call_kind_;
 };
 
 
@@ -1097,7 +1327,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined")
+  DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
 };
 
 
@@ -1113,7 +1343,25 @@
 
   HValue* argument() { return OperandAt(0); }
 
-  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument)
+};
+
+
+class HThisFunction: public HTemplateInstruction<0> {
+ public:
+  HThisFunction() {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
@@ -1128,7 +1376,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Context, "context");
+  DECLARE_CONCRETE_INSTRUCTION(Context)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1142,7 +1390,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1160,7 +1408,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1179,7 +1427,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1224,8 +1472,6 @@
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* value() { return OperandAt(0); }
-
-  DECLARE_INSTRUCTION(UnaryCall)
 };
 
 
@@ -1245,8 +1491,23 @@
 
   HValue* first() { return OperandAt(0); }
   HValue* second() { return OperandAt(1); }
+};
 
-  DECLARE_INSTRUCTION(BinaryCall)
+
+class HInvokeFunction: public HBinaryCall {
+ public:
+  HInvokeFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count) {
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* context() { return first(); }
+  HValue* function() { return second(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
 };
 
 
@@ -1268,7 +1529,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function")
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
 
  private:
   Handle<JSFunction> function_;
@@ -1288,7 +1549,7 @@
   HValue* context() { return first(); }
   HValue* key() { return second(); }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
 };
 
 
@@ -1303,7 +1564,7 @@
   HValue* context() { return value(); }
   Handle<String> name() const { return name_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1326,7 +1587,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction)
 };
 
 
@@ -1345,7 +1606,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
 
  private:
   Handle<String> name_;
@@ -1365,7 +1626,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
 
  private:
   Handle<JSFunction> target_;
@@ -1385,26 +1646,31 @@
   HValue* context() { return first(); }
   HValue* constructor() { return second(); }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
+  DECLARE_CONCRETE_INSTRUCTION(CallNew)
 };
 
 
-class HCallRuntime: public HCall<0> {
+class HCallRuntime: public HCall<1> {
  public:
-  HCallRuntime(Handle<String> name,
+  HCallRuntime(HValue* context,
+               Handle<String> name,
                const Runtime::Function* c_function,
                int argument_count)
-      : HCall<0>(argument_count), c_function_(c_function), name_(name) { }
+      : HCall<1>(argument_count), c_function_(c_function), name_(name) {
+    SetOperandAt(0, context);
+  }
+
   virtual void PrintDataTo(StringStream* stream);
 
+  HValue* context() { return OperandAt(0); }
   const Runtime::Function* function() const { return c_function_; }
   Handle<String> name() const { return name_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::None();
+    return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
 
  private:
   const Runtime::Function* c_function_;
@@ -1412,12 +1678,14 @@
 };
 
 
-class HJSArrayLength: public HUnaryOperation {
+class HJSArrayLength: public HTemplateInstruction<2> {
  public:
-  explicit HJSArrayLength(HValue* value) : HUnaryOperation(value) {
+  HJSArrayLength(HValue* value, HValue* typecheck) {
     // The length of an array is stored as a tagged value in the array
     // object. It is guaranteed to be a 32-bit integer, but it can be
     // represented as either a smi or heap number.
+    SetOperandAt(0, value);
+    SetOperandAt(1, typecheck);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnArrayLengths);
@@ -1428,16 +1696,21 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
+  virtual void PrintDataTo(StringStream* stream);
+
+  HValue* value() { return OperandAt(0); }
+  HValue* typecheck() { return OperandAt(1); }
+
+  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HFixedArrayLength: public HUnaryOperation {
+class HFixedArrayBaseLength: public HUnaryOperation {
  public:
-  explicit HFixedArrayLength(HValue* value) : HUnaryOperation(value) {
+  explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnArrayLengths);
@@ -1447,28 +1720,26 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HExternalArrayLength: public HUnaryOperation {
+class HElementsKind: public HUnaryOperation {
  public:
-  explicit HExternalArrayLength(HValue* value) : HUnaryOperation(value) {
+  explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Integer32());
-    // The result of this instruction is idempotent as long as its inputs don't
-    // change.  The length of a pixel array cannot change once set, so it's not
-    // necessary to introduce a kDependsOnArrayLengths or any other dependency.
     SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external_array_length")
+  DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1488,17 +1759,19 @@
   }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+  DECLARE_CONCRETE_INSTRUCTION(BitNot)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HUnaryMathOperation: public HUnaryOperation {
+class HUnaryMathOperation: public HTemplateInstruction<2> {
  public:
-  HUnaryMathOperation(HValue* value, BuiltinFunctionId op)
-      : HUnaryOperation(value), op_(op) {
+  HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
+      : op_(op) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
     switch (op) {
       case kMathFloor:
       case kMathRound:
@@ -1522,6 +1795,9 @@
     SetFlag(kUseGVN);
   }
 
+  HValue* context() { return OperandAt(0); }
+  HValue* value() { return OperandAt(1); }
+
   virtual void PrintDataTo(StringStream* stream);
 
   virtual HType CalculateInferredType();
@@ -1529,21 +1805,25 @@
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    switch (op_) {
-      case kMathFloor:
-      case kMathRound:
-      case kMathCeil:
-      case kMathSqrt:
-      case kMathPowHalf:
-      case kMathLog:
-      case kMathSin:
-      case kMathCos:
-        return Representation::Double();
-      case kMathAbs:
-        return representation();
-      default:
-        UNREACHABLE();
-        return Representation::None();
+    if (index == 0) {
+      return Representation::Tagged();
+    } else {
+      switch (op_) {
+        case kMathFloor:
+        case kMathRound:
+        case kMathCeil:
+        case kMathSqrt:
+        case kMathPowHalf:
+        case kMathLog:
+        case kMathSin:
+        case kMathCos:
+          return Representation::Double();
+        case kMathAbs:
+          return representation();
+        default:
+          UNREACHABLE();
+          return Representation::None();
+      }
     }
   }
 
@@ -1560,7 +1840,7 @@
   BuiltinFunctionId op() const { return op_; }
   const char* OpName() const;
 
-  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -1585,7 +1865,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1608,38 +1888,36 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
-                               "load-external-array-pointer")
+  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HCheckMap: public HUnaryOperation {
+class HCheckMap: public HTemplateInstruction<2> {
  public:
-  HCheckMap(HValue* value, Handle<Map> map)
-      : HUnaryOperation(value), map_(map) {
+  HCheckMap(HValue* value, Handle<Map> map, HValue* typecheck = NULL)
+      : map_(map) {
+    SetOperandAt(0, value);
+    // If callers don't depend on a typecheck, they can pass in NULL. In that
+    // case we use a copy of the |value| argument as a dummy value.
+    SetOperandAt(1, typecheck != NULL ? typecheck : value);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnMaps);
   }
 
-  virtual bool IsCheckInstruction() const { return true; }
-
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
 
-#ifdef DEBUG
-  virtual void Verify();
-#endif
-
+  HValue* value() { return OperandAt(0); }
   Handle<Map> map() const { return map_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map")
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -1660,8 +1938,6 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsCheckInstruction() const { return true; }
-
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
@@ -1674,7 +1950,7 @@
 
   Handle<JSFunction> target() const { return target_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function")
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -1689,37 +1965,30 @@
 
 class HCheckInstanceType: public HUnaryOperation {
  public:
-  // Check that the instance type is in the range [first, last] where
-  // both first and last are included.
-  HCheckInstanceType(HValue* value, InstanceType first, InstanceType last)
-      : HUnaryOperation(value), first_(first), last_(last) {
-    ASSERT(first <= last);
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-    if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) ||
-        (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) {
-      // A particular string instance type can change because of GC or
-      // externalization, but the value still remains a string.
-      SetFlag(kDependsOnMaps);
-    }
+  static HCheckInstanceType* NewIsSpecObject(HValue* value) {
+    return new HCheckInstanceType(value, IS_SPEC_OBJECT);
   }
-
-  virtual bool IsCheckInstruction() const { return true; }
+  static HCheckInstanceType* NewIsJSArray(HValue* value) {
+    return new HCheckInstanceType(value, IS_JS_ARRAY);
+  }
+  static HCheckInstanceType* NewIsString(HValue* value) {
+    return new HCheckInstanceType(value, IS_STRING);
+  }
+  static HCheckInstanceType* NewIsSymbol(HValue* value) {
+    return new HCheckInstanceType(value, IS_SYMBOL);
+  }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
-#ifdef DEBUG
-  virtual void Verify();
-#endif
+  virtual HValue* Canonicalize();
 
-  static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
+  bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
+  void GetCheckInterval(InstanceType* first, InstanceType* last);
+  void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
 
-  InstanceType first() const { return first_; }
-  InstanceType last() const { return last_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check_instance_type")
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
 
  protected:
   // TODO(ager): It could be nice to allow the omission of instance
@@ -1727,12 +1996,25 @@
   // with a larger range.
   virtual bool DataEquals(HValue* other) {
     HCheckInstanceType* b = HCheckInstanceType::cast(other);
-    return (first_ == b->first()) && (last_ == b->last());
+    return check_ == b->check_;
   }
 
  private:
-  InstanceType first_;
-  InstanceType last_;
+  enum Check {
+    IS_SPEC_OBJECT,
+    IS_JS_ARRAY,
+    IS_STRING,
+    IS_SYMBOL,
+    LAST_INTERVAL_CHECK = IS_JS_ARRAY
+  };
+
+  HCheckInstanceType(HValue* value, Check check)
+      : HUnaryOperation(value), check_(check) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  const Check check_;
 };
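+
+// Illustrative use, not part of this patch: checks are created through the
+// factory methods above, for example
+//   HCheckInstanceType* check = HCheckInstanceType::NewIsString(receiver);
+// where |receiver| stands for whatever HValue* is being checked.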
 
 
@@ -1743,8 +2025,6 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsCheckInstruction() const { return true; }
-
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
@@ -1755,7 +2035,19 @@
   virtual void Verify();
 #endif
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+  virtual HValue* Canonicalize() {
+    HType value_type = value()->type();
+    if (!value_type.IsUninitialized() &&
+        (value_type.IsHeapNumber() ||
+         value_type.IsString() ||
+         value_type.IsBoolean() ||
+         value_type.IsNonPrimitive())) {
+      return NULL;
+    }
+    return this;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1770,8 +2062,6 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual bool IsCheckInstruction() const { return true; }
-
 #ifdef DEBUG
   virtual void Verify();
 #endif
@@ -1779,7 +2069,7 @@
   Handle<JSObject> prototype() const { return prototype_; }
   Handle<JSObject> holder() const { return holder_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
@@ -1812,8 +2102,6 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsCheckInstruction() const { return true; }
-
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
@@ -1823,7 +2111,7 @@
   virtual void Verify();
 #endif
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1836,7 +2124,8 @@
       : inputs_(2),
         merged_index_(merged_index),
         phi_id_(-1),
-        is_live_(false) {
+        is_live_(false),
+        is_convertible_to_integer_(true) {
     for (int i = 0; i < Representation::kNumRepresentations; i++) {
       non_phi_uses_[i] = 0;
       indirect_uses_[i] = 0;
@@ -1876,16 +2165,12 @@
 
   int merged_index() const { return merged_index_; }
 
-  virtual const char* Mnemonic() const { return "phi"; }
-
   virtual void PrintTo(StringStream* stream);
 
 #ifdef DEBUG
   virtual void Verify();
 #endif
 
-  DECLARE_INSTRUCTION(Phi)
-
   void InitRealUses(int id);
   void AddNonPhiUsesFrom(HPhi* other);
   void AddIndirectUsesTo(int* use_count);
@@ -1912,6 +2197,27 @@
   bool is_live() { return is_live_; }
   void set_is_live(bool b) { is_live_ = b; }
 
+  static HPhi* cast(HValue* value) {
+    ASSERT(value->IsPhi());
+    return reinterpret_cast<HPhi*>(value);
+  }
+  virtual Opcode opcode() const { return HValue::kPhi; }
+
+  virtual bool IsConvertibleToInteger() const {
+    return is_convertible_to_integer_;
+  }
+
+  void set_is_convertible_to_integer(bool b) {
+    is_convertible_to_integer_ = b;
+  }
+
+  bool AllOperandsConvertibleToInteger() {
+    for (int i = 0; i < OperandCount(); ++i) {
+      if (!OperandAt(i)->IsConvertibleToInteger()) return false;
+    }
+    return true;
+  }
+
  protected:
   virtual void DeleteFromGraph();
   virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -1926,6 +2232,7 @@
   int indirect_uses_[Representation::kNumRepresentations];
   int phi_id_;
   bool is_live_;
+  bool is_convertible_to_integer_;
 };
 
 
@@ -1940,7 +2247,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object")
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
 };
 
 
@@ -1956,6 +2263,14 @@
     return Representation::None();
   }
 
+  virtual bool IsConvertibleToInteger() const {
+    if (handle_->IsSmi()) return true;
+    if (handle_->IsHeapNumber() &&
+        (HeapNumber::cast(*handle_)->value() ==
+         static_cast<double>(NumberToInt32(*handle_)))) return true;
+    return false;
+  }
+
   virtual bool EmitAtUses() { return !representation().IsDouble(); }
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
@@ -1985,7 +2300,7 @@
   virtual void Verify() { }
 #endif
 
-  DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
+  DECLARE_CONCRETE_INSTRUCTION(Constant)
 
  protected:
   virtual Range* InferRange();
@@ -1997,28 +2312,29 @@
 
  private:
   Handle<Object> handle_;
-  HType constant_type_;
 
   // The following two values represent the int32 and the double value of the
   // given constant if there is a lossless conversion between the constant
   // and the specific representation.
-  bool has_int32_value_;
+  bool has_int32_value_ : 1;
+  bool has_double_value_ : 1;
   int32_t int32_value_;
-  bool has_double_value_;
   double double_value_;
 };
 
 
-class HBinaryOperation: public HTemplateInstruction<2> {
+class HBinaryOperation: public HTemplateInstruction<3> {
  public:
-  HBinaryOperation(HValue* left, HValue* right) {
+  HBinaryOperation(HValue* context, HValue* left, HValue* right) {
     ASSERT(left != NULL && right != NULL);
-    SetOperandAt(0, left);
-    SetOperandAt(1, right);
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    SetOperandAt(2, right);
   }
 
-  HValue* left() { return OperandAt(0); }
-  HValue* right() { return OperandAt(1); }
+  HValue* context() { return OperandAt(0); }
+  HValue* left() { return OperandAt(1); }
+  HValue* right() { return OperandAt(2); }
 
   // TODO(kasperl): Move these helpers to the IA-32 Lithium
   // instruction sequence builder.
@@ -2034,8 +2350,6 @@
   virtual bool IsCommutative() const { return false; }
 
   virtual void PrintDataTo(StringStream* stream);
-
-  DECLARE_INSTRUCTION(BinaryOperation)
 };
 
 
@@ -2065,7 +2379,7 @@
   HValue* length() { return OperandAt(2); }
   HValue* elements() { return OperandAt(3); }
 
-  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
 };
 
 
@@ -2078,7 +2392,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
@@ -2100,7 +2414,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2130,34 +2444,31 @@
   HValue* length() { return OperandAt(1); }
   HValue* index() { return OperandAt(2); }
 
-  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at")
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
 
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HBoundsCheck: public HBinaryOperation {
+class HBoundsCheck: public HTemplateInstruction<2> {
  public:
-  HBoundsCheck(HValue* index, HValue* length)
-      : HBinaryOperation(index, length) {
+  HBoundsCheck(HValue* index, HValue* length) {
+    SetOperandAt(0, index);
+    SetOperandAt(1, length);
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsCheckInstruction() const { return true; }
-
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Integer32();
   }
 
-#ifdef DEBUG
-  virtual void Verify();
-#endif
+  virtual void PrintDataTo(StringStream* stream);
 
-  HValue* index() { return left(); }
-  HValue* length() { return right(); }
+  HValue* index() { return OperandAt(0); }
+  HValue* length() { return OperandAt(1); }
 
-  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2166,15 +2477,17 @@
 
 class HBitwiseBinaryOperation: public HBinaryOperation {
  public:
-  HBitwiseBinaryOperation(HValue* left, HValue* right)
-      : HBinaryOperation(left, right) {
+  HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kFlexibleRepresentation);
     SetAllSideEffects();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    return representation();
+    return index == 0
+        ? Representation::Tagged()
+        : representation();
   }
 
   virtual void RepresentationChanged(Representation to) {
@@ -2188,14 +2501,14 @@
 
   virtual HType CalculateInferredType();
 
-  DECLARE_INSTRUCTION(BitwiseBinaryOperation)
+  DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
 };
 
 
 class HArithmeticBinaryOperation: public HBinaryOperation {
  public:
-  HArithmeticBinaryOperation(HValue* left, HValue* right)
-      : HBinaryOperation(left, right) {
+  HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kFlexibleRepresentation);
     SetAllSideEffects();
@@ -2210,176 +2523,200 @@
 
   virtual HType CalculateInferredType();
   virtual Representation RequiredInputRepresentation(int index) const {
-    return representation();
+    return index == 0
+        ? Representation::Tagged()
+        : representation();
   }
+
   virtual Representation InferredRepresentation() {
     if (left()->representation().Equals(right()->representation())) {
       return left()->representation();
     }
     return HValue::InferredRepresentation();
   }
-
-  DECLARE_INSTRUCTION(ArithmeticBinaryOperation)
 };
 
 
-class HCompare: public HBinaryOperation {
+class HCompareGeneric: public HBinaryOperation {
  public:
-  HCompare(HValue* left, HValue* right, Token::Value token)
-      : HBinaryOperation(left, right), token_(token) {
+  HCompareGeneric(HValue* context,
+                  HValue* left,
+                  HValue* right,
+                  Token::Value token)
+      : HBinaryOperation(context, left, right), token_(token) {
     ASSERT(Token::IsCompareOp(token));
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
 
-  void SetInputRepresentation(Representation r);
-
-  virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
-  }
-
   virtual Representation RequiredInputRepresentation(int index) const {
-    return input_representation_;
+    return Representation::Tagged();
   }
+
   Representation GetInputRepresentation() const {
-    return input_representation_;
+    return Representation::Tagged();
   }
+
   Token::Value token() const { return token_; }
   virtual void PrintDataTo(StringStream* stream);
 
   virtual HType CalculateInferredType();
 
-  virtual intptr_t Hashcode() {
-    return HValue::Hashcode() * 7 + token_;
+  DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
+
+ private:
+  Token::Value token_;
+};
+
+
+class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
+ public:
+  HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
+      : token_(token) {
+    ASSERT(Token::IsCompareOp(token));
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Compare, "compare")
+  HValue* left() { return OperandAt(0); }
+  HValue* right() { return OperandAt(1); }
+  Token::Value token() const { return token_; }
 
- protected:
-  virtual bool DataEquals(HValue* other) {
-    HCompare* comp = HCompare::cast(other);
-    return token_ == comp->token();
+  void SetInputRepresentation(Representation r);
+  Representation GetInputRepresentation() const {
+    return input_representation_;
   }
 
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return input_representation_;
+  }
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
+
  private:
   Representation input_representation_;
   Token::Value token_;
 };
 
 
-class HCompareJSObjectEq: public HBinaryOperation {
+class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
  public:
-  HCompareJSObjectEq(HValue* left, HValue* right)
-      : HBinaryOperation(left, right) {
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+  HCompareObjectEqAndBranch(HValue* left, HValue* right) {
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
   }
 
-  virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
-  }
+  HValue* left() { return OperandAt(0); }
+  HValue* right() { return OperandAt(1); }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
-  virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
 };
 
 
-class HUnaryPredicate: public HUnaryOperation {
+class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
  public:
-  explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
+  HCompareConstantEqAndBranch(HValue* left, int right, Token::Value op)
+      : HUnaryControlInstruction(left, NULL, NULL), op_(op), right_(right) {
+    ASSERT(op == Token::EQ_STRICT);
   }
 
-  virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
-  }
+  Token::Value op() const { return op_; }
+  HValue* left() { return value(); }
+  int right() const { return right_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::Tagged();
+    return Representation::Integer32();
   }
-  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareConstantEqAndBranch)
+
+ private:
+  const Token::Value op_;
+  const int right_;
 };
 
 
-class HIsNull: public HUnaryPredicate {
+class HIsNullAndBranch: public HUnaryControlInstruction {
  public:
-  HIsNull(HValue* value, bool is_strict)
-      : HUnaryPredicate(value), is_strict_(is_strict) { }
+  HIsNullAndBranch(HValue* value, bool is_strict)
+      : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
 
   bool is_strict() const { return is_strict_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
-
- protected:
-  virtual bool DataEquals(HValue* other) {
-    HIsNull* b = HIsNull::cast(other);
-    return is_strict_ == b->is_strict();
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
   }
 
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
+
  private:
   bool is_strict_;
 };
 
 
-class HIsObject: public HUnaryPredicate {
+class HIsObjectAndBranch: public HUnaryControlInstruction {
  public:
-  explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
+  explicit HIsObjectAndBranch(HValue* value)
+    : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
+};
+
+
+class HIsSmiAndBranch: public HUnaryControlInstruction {
+ public:
+  explicit HIsSmiAndBranch(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HIsSmi: public HUnaryPredicate {
+class HIsUndetectableAndBranch: public HUnaryControlInstruction {
  public:
-  explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
+  explicit HIsUndetectableAndBranch(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
 
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
 };
 
 
-class HIsConstructCall: public HTemplateInstruction<0> {
+class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
  public:
-  HIsConstructCall() {
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-  }
-
-  virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
-  }
-
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
 };
 
 
-class HHasInstanceType: public HUnaryPredicate {
+class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
  public:
-  HHasInstanceType(HValue* value, InstanceType type)
-      : HUnaryPredicate(value), from_(type), to_(type) { }
-  HHasInstanceType(HValue* value, InstanceType from, InstanceType to)
-      : HUnaryPredicate(value), from_(from), to_(to) {
+  HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
+      : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
+  HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
+      : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
     ASSERT(to == LAST_TYPE);  // Others not implemented yet in backend.
   }
 
@@ -2388,78 +2725,83 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
-
- protected:
-  virtual bool DataEquals(HValue* other) {
-    HHasInstanceType* b = HHasInstanceType::cast(other);
-    return (from_ == b->from()) && (to_ == b->to());
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
   }
 
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
+
  private:
   InstanceType from_;
   InstanceType to_;  // Inclusive range, not all combinations work.
 };
 
 
-class HHasCachedArrayIndex: public HUnaryPredicate {
+class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
  public:
-  explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+  explicit HHasCachedArrayIndexAndBranch(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
+};
+
+
+class HGetCachedArrayIndex: public HUnaryOperation {
+ public:
+  explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HGetCachedArrayIndex: public HUnaryPredicate {
+class HClassOfTestAndBranch: public HUnaryControlInstruction {
  public:
-  explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+  HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
+      : HUnaryControlInstruction(value, NULL, NULL),
+        class_name_(class_name) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
 
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HClassOfTest: public HUnaryPredicate {
- public:
-  HClassOfTest(HValue* value, Handle<String> class_name)
-      : HUnaryPredicate(value), class_name_(class_name) { }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test")
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
 
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<String> class_name() const { return class_name_; }
 
- protected:
-  virtual bool DataEquals(HValue* other) {
-    HClassOfTest* b = HClassOfTest::cast(other);
-    return class_name_.is_identical_to(b->class_name_);
-  }
-
  private:
   Handle<String> class_name_;
 };
 
 
-class HTypeofIs: public HUnaryPredicate {
+class HTypeofIsAndBranch: public HUnaryControlInstruction {
  public:
-  HTypeofIs(HValue* value, Handle<String> type_literal)
-      : HUnaryPredicate(value), type_literal_(type_literal) { }
+  HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
+      : HUnaryControlInstruction(value, NULL, NULL),
+        type_literal_(type_literal) { }
 
   Handle<String> type_literal() { return type_literal_; }
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is")
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
- protected:
-  virtual bool DataEquals(HValue* other) {
-    HTypeofIs* b = HTypeofIs::cast(other);
-    return type_literal_.is_identical_to(b->type_literal_);
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
   }
 
  private:
@@ -2467,69 +2809,74 @@
 };
 
 
-class HInstanceOf: public HTemplateInstruction<3> {
+class HInstanceOf: public HBinaryOperation {
  public:
-  HInstanceOf(HValue* context, HValue* left, HValue* right) {
+  HInstanceOf(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType();
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
+};
+
+
+class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
+ public:
+  HInstanceOfKnownGlobal(HValue* context,
+                         HValue* left,
+                         Handle<JSFunction> right)
+      : function_(right) {
     SetOperandAt(0, context);
     SetOperandAt(1, left);
-    SetOperandAt(2, right);
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
 
   HValue* context() { return OperandAt(0); }
   HValue* left() { return OperandAt(1); }
-  HValue* right() { return OperandAt(2); }
-
-  virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::Tagged();
-  }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
-};
-
-
-class HInstanceOfKnownGlobal: public HUnaryOperation {
- public:
-  HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
-      : HUnaryOperation(left), function_(right) {
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
-
   Handle<JSFunction> function() { return function_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
-                               "instance_of_known_global")
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
 
  private:
   Handle<JSFunction> function_;
 };
 
 
-class HPower: public HBinaryOperation {
+class HPower: public HTemplateInstruction<2> {
  public:
-  HPower(HValue* left, HValue* right)
-      : HBinaryOperation(left, right) {
+  HPower(HValue* left, HValue* right) {
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
     set_representation(Representation::Double());
     SetFlag(kUseGVN);
   }
 
+  HValue* left() { return OperandAt(0); }
+  HValue* right() { return OperandAt(1); }
+
   virtual Representation RequiredInputRepresentation(int index) const {
-    return (index == 1) ? Representation::None() : Representation::Double();
+    return index == 0
+      ? Representation::Double()
+      : Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_CONCRETE_INSTRUCTION(Power)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2538,7 +2885,8 @@
 
 class HAdd: public HArithmeticBinaryOperation {
  public:
-  HAdd(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+  HAdd(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanOverflow);
   }
 
@@ -2552,7 +2900,7 @@
 
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Add, "add")
+  DECLARE_CONCRETE_INSTRUCTION(Add)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2563,13 +2911,14 @@
 
 class HSub: public HArithmeticBinaryOperation {
  public:
-  HSub(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+  HSub(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanOverflow);
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
+  DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2580,7 +2929,8 @@
 
 class HMul: public HArithmeticBinaryOperation {
  public:
-  HMul(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+  HMul(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanOverflow);
   }
 
@@ -2591,7 +2941,7 @@
     return !representation().IsTagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
+  DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2602,7 +2952,8 @@
 
 class HMod: public HArithmeticBinaryOperation {
  public:
-  HMod(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+  HMod(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanBeDivByZero);
   }
 
@@ -2618,7 +2969,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
+  DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2629,14 +2980,15 @@
 
 class HDiv: public HArithmeticBinaryOperation {
  public:
-  HDiv(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+  HDiv(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanBeDivByZero);
     SetFlag(kCanOverflow);
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  DECLARE_CONCRETE_INSTRUCTION(Div, "div")
+  DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2647,13 +2999,13 @@
 
 class HBitAnd: public HBitwiseBinaryOperation {
  public:
-  HBitAnd(HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(left, right) { }
+  HBitAnd(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual bool IsCommutative() const { return true; }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
+  DECLARE_CONCRETE_INSTRUCTION(BitAnd)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2664,13 +3016,13 @@
 
 class HBitXor: public HBitwiseBinaryOperation {
  public:
-  HBitXor(HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(left, right) { }
+  HBitXor(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual bool IsCommutative() const { return true; }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+  DECLARE_CONCRETE_INSTRUCTION(BitXor)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2679,13 +3031,13 @@
 
 class HBitOr: public HBitwiseBinaryOperation {
  public:
-  HBitOr(HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(left, right) { }
+  HBitOr(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual bool IsCommutative() const { return true; }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
+  DECLARE_CONCRETE_INSTRUCTION(BitOr)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2696,13 +3048,13 @@
 
 class HShl: public HBitwiseBinaryOperation {
  public:
-  HShl(HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(left, right) { }
+  HShl(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+  DECLARE_CONCRETE_INSTRUCTION(Shl)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2711,12 +3063,13 @@
 
 class HShr: public HBitwiseBinaryOperation {
  public:
-  HShr(HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(left, right) { }
+  HShr(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
 
+  virtual Range* InferRange();
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+  DECLARE_CONCRETE_INSTRUCTION(Shr)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2725,13 +3078,13 @@
 
 class HSar: public HBitwiseBinaryOperation {
  public:
-  HSar(HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(left, right) { }
+  HSar(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+  DECLARE_CONCRETE_INSTRUCTION(Sar)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2750,7 +3103,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry")
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
 
  private:
   int ast_id_;
@@ -2771,7 +3124,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+  DECLARE_CONCRETE_INSTRUCTION(Parameter)
 
  private:
   unsigned index_;
@@ -2803,7 +3156,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
+  DECLARE_CONCRETE_INSTRUCTION(CallStub)
 
  private:
   CodeStub::Major major_key_;
@@ -2819,7 +3172,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value")
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
 };
 
 
@@ -2846,7 +3199,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load_global_cell")
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -2860,15 +3213,16 @@
 };
 
 
-class HLoadGlobalGeneric: public HBinaryOperation {
+class HLoadGlobalGeneric: public HTemplateInstruction<2> {
  public:
   HLoadGlobalGeneric(HValue* context,
                      HValue* global_object,
                      Handle<Object> name,
                      bool for_typeof)
-      : HBinaryOperation(context, global_object),
-        name_(name),
+      : name_(name),
         for_typeof_(for_typeof) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, global_object);
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
@@ -2884,7 +3238,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load_global_generic")
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
 
  private:
   Handle<Object> name_;
@@ -2911,7 +3265,7 @@
   }
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store_global_cell")
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
@@ -2947,7 +3301,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store_global_generic")
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric)
 
  private:
   Handle<Object> name_;
@@ -2972,7 +3326,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -2986,15 +3340,18 @@
 
 
 static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
-  return !value->type().IsSmi() &&
-      !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+  return !value->type().IsBoolean()
+      && !value->type().IsSmi()
+      && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
 }
 
 
-class HStoreContextSlot: public HBinaryOperation {
+class HStoreContextSlot: public HTemplateInstruction<2> {
  public:
   HStoreContextSlot(HValue* context, int slot_index, HValue* value)
-      : HBinaryOperation(context, value), slot_index_(slot_index) {
+      : slot_index_(slot_index) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
     SetFlag(kChangesContextSlots);
   }
 
@@ -3012,7 +3369,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
 
  private:
   int slot_index_;
@@ -3044,7 +3401,7 @@
   }
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field")
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -3058,14 +3415,16 @@
 };
 
 
-class HLoadNamedFieldPolymorphic: public HUnaryOperation {
+class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
  public:
-  HLoadNamedFieldPolymorphic(HValue* object,
-                             ZoneMapList* types,
+  HLoadNamedFieldPolymorphic(HValue* context,
+                             HValue* object,
+                             SmallMapList* types,
                              Handle<String> name);
 
-  HValue* object() { return OperandAt(0); }
-  ZoneMapList* types() { return &types_; }
+  HValue* context() { return OperandAt(0); }
+  HValue* object() { return OperandAt(1); }
+  SmallMapList* types() { return &types_; }
   Handle<String> name() { return name_; }
   bool need_generic() { return need_generic_; }
 
@@ -3073,8 +3432,9 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
-                               "load_named_field_polymorphic")
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic)
 
   static const int kMaxLoadPolymorphism = 4;
 
@@ -3082,17 +3442,19 @@
   virtual bool DataEquals(HValue* value);
 
  private:
-  ZoneMapList types_;
+  SmallMapList types_;
   Handle<String> name_;
   bool need_generic_;
 };
 
 
 
-class HLoadNamedGeneric: public HBinaryOperation {
+class HLoadNamedGeneric: public HTemplateInstruction<2> {
  public:
   HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
-      : HBinaryOperation(context, object), name_(name) {
+      : name_(name) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, object);
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
@@ -3105,7 +3467,9 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
 
  private:
   Handle<Object> name_;
@@ -3127,16 +3491,18 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype")
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HLoadKeyedFastElement: public HBinaryOperation {
+class HLoadKeyedFastElement: public HTemplateInstruction<2> {
  public:
-  HLoadKeyedFastElement(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
+  HLoadKeyedFastElement(HValue* obj, HValue* key) {
+    SetOperandAt(0, obj);
+    SetOperandAt(1, key);
     set_representation(Representation::Tagged());
     SetFlag(kDependsOnArrayElements);
     SetFlag(kUseGVN);
@@ -3147,28 +3513,63 @@
 
   virtual Representation RequiredInputRepresentation(int index) const {
     // The key is supposed to be Integer32.
-    return (index == 1) ? Representation::Integer32()
-        : Representation::Tagged();
+    return index == 0
+      ? Representation::Tagged()
+      : Representation::Integer32();
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
-                               "load_keyed_fast_element")
+  bool RequiresHoleCheck() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
-class HLoadKeyedSpecializedArrayElement: public HBinaryOperation {
+class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
+ public:
+  HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
+    SetOperandAt(0, elements);
+    SetOperandAt(1, key);
+    set_representation(Representation::Double());
+    SetFlag(kDependsOnDoubleArrayElements);
+    SetFlag(kUseGVN);
+  }
+
+  HValue* elements() { return OperandAt(0); }
+  HValue* key() { return OperandAt(1); }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The key is supposed to be Integer32.
+    return index == 0
+      ? Representation::Tagged()
+      : Representation::Integer32();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  bool RequiresHoleCheck() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
  public:
   HLoadKeyedSpecializedArrayElement(HValue* external_elements,
                                     HValue* key,
-                                    ExternalArrayType array_type)
-      : HBinaryOperation(external_elements, key),
-        array_type_(array_type) {
-    if (array_type == kExternalFloatArray) {
+                                    ElementsKind elements_kind)
+      : elements_kind_(elements_kind) {
+    SetOperandAt(0, external_elements);
+    SetOperandAt(1, key);
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+        elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
       set_representation(Representation::Double());
     } else {
       set_representation(Representation::Integer32());
@@ -3184,33 +3585,33 @@
   virtual Representation RequiredInputRepresentation(int index) const {
     // The key is supposed to be Integer32, but the base pointer
     // for the element load is a naked pointer.
-    return (index == 1) ? Representation::Integer32()
-        : Representation::External();
+    return index == 0
+      ? Representation::External()
+      : Representation::Integer32();
   }
 
   HValue* external_pointer() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
-  ExternalArrayType array_type() const { return array_type_; }
+  ElementsKind elements_kind() const { return elements_kind_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
-                               "load_keyed_specialized_array_element")
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
 
  protected:
   virtual bool DataEquals(HValue* other) {
     if (!other->IsLoadKeyedSpecializedArrayElement()) return false;
     HLoadKeyedSpecializedArrayElement* cast_other =
         HLoadKeyedSpecializedArrayElement::cast(other);
-    return array_type_ == cast_other->array_type();
+    return elements_kind_ == cast_other->elements_kind();
   }
 
  private:
-  ExternalArrayType array_type_;
+  ElementsKind elements_kind_;
 };
 
 
 class HLoadKeyedGeneric: public HTemplateInstruction<3> {
  public:
-  HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) {
+  HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
     set_representation(Representation::Tagged());
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
@@ -3228,21 +3629,22 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 };
 
 
-class HStoreNamedField: public HBinaryOperation {
+class HStoreNamedField: public HTemplateInstruction<2> {
  public:
   HStoreNamedField(HValue* obj,
                    Handle<String> name,
                    HValue* val,
                    bool in_object,
                    int offset)
-      : HBinaryOperation(obj, val),
-        name_(name),
+      : name_(name),
         is_in_object_(in_object),
         offset_(offset) {
+    SetOperandAt(0, obj);
+    SetOperandAt(1, val);
     if (is_in_object_) {
       SetFlag(kChangesInobjectFields);
     } else {
@@ -3250,7 +3652,7 @@
     }
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -3305,7 +3707,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
 
  private:
   Handle<String> name_;
@@ -3324,7 +3726,8 @@
 
   virtual Representation RequiredInputRepresentation(int index) const {
     // The key is supposed to be Integer32.
-    return (index == 1) ? Representation::Integer32()
+    return index == 1
+        ? Representation::Integer32()
         : Representation::Tagged();
   }
 
@@ -3338,8 +3741,42 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
-                               "store_keyed_fast_element")
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
+};
+
+
+class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
+ public:
+  HStoreKeyedFastDoubleElement(HValue* elements,
+                               HValue* key,
+                               HValue* val) {
+    SetOperandAt(0, elements);
+    SetOperandAt(1, key);
+    SetOperandAt(2, val);
+    SetFlag(kChangesDoubleArrayElements);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    if (index == 1) {
+      return Representation::Integer32();
+    } else if (index == 2) {
+      return Representation::Double();
+    } else {
+      return Representation::Tagged();
+    }
+  }
+
+  HValue* elements() { return OperandAt(0); }
+  HValue* key() { return OperandAt(1); }
+  HValue* value() { return OperandAt(2); }
+
+  bool NeedsWriteBarrier() {
+    return StoringValueNeedsWriteBarrier(value());
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
 };
 
 
@@ -3348,8 +3785,8 @@
   HStoreKeyedSpecializedArrayElement(HValue* external_elements,
                                      HValue* key,
                                      HValue* val,
-                                     ExternalArrayType array_type)
-      : array_type_(array_type) {
+                                     ElementsKind elements_kind)
+      : elements_kind_(elements_kind) {
     SetFlag(kChangesSpecializedArrayElements);
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
@@ -3362,7 +3799,10 @@
     if (index == 0) {
       return Representation::External();
     } else {
-      if (index == 2 && array_type() == kExternalFloatArray) {
+      bool float_or_double_elements =
+          elements_kind() == EXTERNAL_FLOAT_ELEMENTS ||
+          elements_kind() == EXTERNAL_DOUBLE_ELEMENTS;
+      if (index == 2 && float_or_double_elements) {
         return Representation::Double();
       } else {
         return Representation::Integer32();
@@ -3373,12 +3813,12 @@
   HValue* external_pointer() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
-  ExternalArrayType array_type() const { return array_type_; }
+  ElementsKind elements_kind() const { return elements_kind_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
-                               "store_keyed_specialized_array_element")
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
+
  private:
-  ExternalArrayType array_type_;
+  ElementsKind elements_kind_;
 };
 
 
@@ -3409,17 +3849,43 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
 
  private:
   bool strict_mode_;
 };
 
 
-class HStringCharCodeAt: public HBinaryOperation {
+class HStringAdd: public HBinaryOperation {
  public:
-  HStringCharCodeAt(HValue* string, HValue* index)
-      : HBinaryOperation(string, index) {
+  HStringAdd(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() {
+    return HType::String();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HStringCharCodeAt: public HTemplateInstruction<3> {
+ public:
+  HStringCharCodeAt(HValue* context, HValue* string, HValue* index) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, string);
+    SetOperandAt(2, index);
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnMaps);
@@ -3427,14 +3893,16 @@
 
   virtual Representation RequiredInputRepresentation(int index) const {
     // The index is supposed to be Integer32.
-    return (index == 1) ? Representation::Integer32()
+    return index == 2
+        ? Representation::Integer32()
         : Representation::Tagged();
   }
 
-  HValue* string() { return OperandAt(0); }
-  HValue* index() { return OperandAt(1); }
+  HValue* context() { return OperandAt(0); }
+  HValue* string() { return OperandAt(1); }
+  HValue* index() { return OperandAt(2); }
 
-  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -3445,20 +3913,27 @@
 };
 
 
-class HStringCharFromCode: public HUnaryOperation {
+class HStringCharFromCode: public HTemplateInstruction<2> {
  public:
-  explicit HStringCharFromCode(HValue* char_code) : HUnaryOperation(char_code) {
-    set_representation(Representation::Tagged());
+  HStringCharFromCode(HValue* context, HValue* char_code) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, char_code);
+    set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::Integer32();
+    return index == 0
+        ? Representation::Tagged()
+        : Representation::Integer32();
   }
 
+  HValue* context() { return OperandAt(0); }
+  HValue* value() { return OperandAt(1); }
+
   virtual bool DataEquals(HValue* other) { return true; }
 
-  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string_char_from_code")
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
 };
 
 
@@ -3479,7 +3954,7 @@
     return HType::Smi();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
+  DECLARE_CONCRETE_INSTRUCTION(StringLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -3507,26 +3982,30 @@
 };
 
 
-class HArrayLiteral: public HMaterializedLiteral<0> {
+class HArrayLiteral: public HMaterializedLiteral<1> {
  public:
-  HArrayLiteral(Handle<FixedArray> constant_elements,
+  HArrayLiteral(HValue* context,
+                Handle<FixedArray> constant_elements,
                 int length,
                 int literal_index,
                 int depth)
-      : HMaterializedLiteral<0>(literal_index, depth),
+      : HMaterializedLiteral<1>(literal_index, depth),
         length_(length),
-        constant_elements_(constant_elements) {}
+        constant_elements_(constant_elements) {
+    SetOperandAt(0, context);
+  }
 
+  HValue* context() { return OperandAt(0); }
   Handle<FixedArray> constant_elements() const { return constant_elements_; }
   int length() const { return length_; }
 
   bool IsCopyOnWrite() const;
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::None();
+    return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal")
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
 
  private:
   int length_;
@@ -3560,7 +4039,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
 
  private:
   Handle<FixedArray> constant_properties_;
@@ -3569,23 +4048,28 @@
 };
 
 
-class HRegExpLiteral: public HMaterializedLiteral<0> {
+class HRegExpLiteral: public HMaterializedLiteral<1> {
  public:
-  HRegExpLiteral(Handle<String> pattern,
+  HRegExpLiteral(HValue* context,
+                 Handle<String> pattern,
                  Handle<String> flags,
                  int literal_index)
-      : HMaterializedLiteral<0>(literal_index, 0),
+      : HMaterializedLiteral<1>(literal_index, 0),
         pattern_(pattern),
-        flags_(flags) { }
+        flags_(flags) {
+    SetOperandAt(0, context);
+    SetAllSideEffects();
+  }
 
+  HValue* context() { return OperandAt(0); }
   Handle<String> pattern() { return pattern_; }
   Handle<String> flags() { return flags_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::None();
+    return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal")
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
 
  private:
   Handle<String> pattern_;
@@ -3593,18 +4077,23 @@
 };
 
 
-class HFunctionLiteral: public HTemplateInstruction<0> {
+class HFunctionLiteral: public HTemplateInstruction<1> {
  public:
-  HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure)
+  HFunctionLiteral(HValue* context,
+                   Handle<SharedFunctionInfo> shared,
+                   bool pretenure)
       : shared_info_(shared), pretenure_(pretenure) {
+    SetOperandAt(0, context);
     set_representation(Representation::Tagged());
   }
 
+  HValue* context() { return OperandAt(0); }
+
   virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::None();
+    return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal")
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
 
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   bool pretenure() const { return pretenure_; }
@@ -3615,17 +4104,22 @@
 };
 
 
-class HTypeof: public HUnaryOperation {
+class HTypeof: public HTemplateInstruction<2> {
  public:
-  explicit HTypeof(HValue* value) : HUnaryOperation(value) {
+  explicit HTypeof(HValue* context, HValue* value) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
     set_representation(Representation::Tagged());
   }
 
+  HValue* context() { return OperandAt(0); }
+  HValue* value() { return OperandAt(1); }
+
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+  DECLARE_CONCRETE_INSTRUCTION(Typeof)
 };
 
 
@@ -3643,7 +4137,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to_fast_properties")
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
 };
 
 
@@ -3657,14 +4151,14 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of")
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf)
 };
 
 
 class HDeleteProperty: public HBinaryOperation {
  public:
-  HDeleteProperty(HValue* obj, HValue* key)
-      : HBinaryOperation(obj, key) {
+  HDeleteProperty(HValue* context, HValue* obj, HValue* key)
+      : HBinaryOperation(context, obj, key) {
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
@@ -3673,12 +4167,42 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property")
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty)
 
   HValue* object() { return left(); }
   HValue* key() { return right(); }
 };
 
+
+class HIn: public HTemplateInstruction<3> {
+ public:
+  HIn(HValue* context, HValue* key, HValue* object) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, key);
+    SetOperandAt(2, object);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* key() { return OperandAt(1); }
+  HValue* object() { return OperandAt(2); }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() {
+    return HType::Boolean();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(In)
+};
+
 #undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index d07e6c7..c625fba 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -29,11 +29,11 @@
 #include "hydrogen.h"
 
 #include "codegen.h"
-#include "data-flow.h"
 #include "full-codegen.h"
 #include "hashmap.h"
 #include "lithium-allocator.h"
 #include "parser.h"
+#include "scopeinfo.h"
 #include "scopes.h"
 #include "stub-cache.h"
 
@@ -69,8 +69,8 @@
       last_instruction_index_(-1),
       deleted_phis_(4),
       parent_loop_header_(NULL),
-      is_inline_return_target_(false) {
-}
+      is_inline_return_target_(false),
+      is_deoptimizing_(false) { }
 
 
 void HBasicBlock::AttachLoopInformation() {
@@ -116,12 +116,13 @@
 }
 
 
-HDeoptimize* HBasicBlock::CreateDeoptimize() {
+HDeoptimize* HBasicBlock::CreateDeoptimize(
+    HDeoptimize::UseEnvironment has_uses) {
   ASSERT(HasEnvironment());
+  if (has_uses == HDeoptimize::kNoUses) return new(zone()) HDeoptimize(0);
+
   HEnvironment* environment = last_environment();
-
   HDeoptimize* instr = new(zone()) HDeoptimize(environment->length());
-
   for (int i = 0; i < environment->length(); i++) {
     HValue* val = environment->values()->at(i);
     instr->AddEnvironmentValue(val);
@@ -131,16 +132,16 @@
 }
 
 
-HSimulate* HBasicBlock::CreateSimulate(int id) {
+HSimulate* HBasicBlock::CreateSimulate(int ast_id) {
   ASSERT(HasEnvironment());
   HEnvironment* environment = last_environment();
-  ASSERT(id == AstNode::kNoNumber ||
-         environment->closure()->shared()->VerifyBailoutId(id));
+  ASSERT(ast_id == AstNode::kNoNumber ||
+         environment->closure()->shared()->VerifyBailoutId(ast_id));
 
   int push_count = environment->push_count();
   int pop_count = environment->pop_count();
 
-  HSimulate* instr = new(zone()) HSimulate(id, pop_count);
+  HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count);
   for (int i = push_count - 1; i >= 0; --i) {
     instr->AddPushedValue(environment->ExpressionStackAt(i));
   }
@@ -157,23 +158,19 @@
   ASSERT(!IsFinished());
   AddInstruction(end);
   end_ = end;
-  if (end->FirstSuccessor() != NULL) {
-    end->FirstSuccessor()->RegisterPredecessor(this);
-    if (end->SecondSuccessor() != NULL) {
-      end->SecondSuccessor()->RegisterPredecessor(this);
-    }
+  for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+    it.Current()->RegisterPredecessor(this);
   }
 }
 
 
-void HBasicBlock::Goto(HBasicBlock* block, bool include_stack_check) {
+void HBasicBlock::Goto(HBasicBlock* block) {
   if (block->IsInlineReturnTarget()) {
     AddInstruction(new(zone()) HLeaveInlined);
     last_environment_ = last_environment()->outer();
   }
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(block);
-  instr->set_include_stack_check(include_stack_check);
   Finish(instr);
 }
 
@@ -197,7 +194,7 @@
 }
 
 
-void HBasicBlock::SetJoinId(int id) {
+void HBasicBlock::SetJoinId(int ast_id) {
   int length = predecessors_.length();
   ASSERT(length > 0);
   for (int i = 0; i < length; i++) {
@@ -207,8 +204,8 @@
     // We only need to verify the ID once.
     ASSERT(i != 0 ||
            predecessor->last_environment()->closure()->shared()
-               ->VerifyBailoutId(id));
-    simulate->set_ast_id(id);
+               ->VerifyBailoutId(ast_id));
+    simulate->set_ast_id(ast_id);
   }
 }
 
@@ -223,6 +220,17 @@
 }
 
 
+int HBasicBlock::LoopNestingDepth() const {
+  const HBasicBlock* current = this;
+  int result = (current->IsLoopHeader()) ? 1 : 0;
+  while (current->parent_loop_header() != NULL) {
+    current = current->parent_loop_header();
+    result++;
+  }
+  return result;
+}
+
+
 void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
   ASSERT(IsLoopHeader());
 
@@ -242,7 +250,7 @@
 
 
 void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
-  if (!predecessors_.is_empty()) {
+  if (HasPredecessor()) {
     // Only loop header blocks can have a predecessor added after
     // instructions have been added to the block (they have phis for all
     // values in the environment, these phis may be eliminated later).
@@ -401,8 +409,9 @@
   void Analyze() {
     while (!stack_.is_empty()) {
       HControlInstruction* end = stack_.RemoveLast()->end();
-      PushBlock(end->FirstSuccessor());
-      PushBlock(end->SecondSuccessor());
+      for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+        PushBlock(it.Current());
+      }
     }
   }
 
@@ -522,6 +531,28 @@
 }
 
 
+HConstant* HGraph::GetConstantHole() {
+  return GetConstant(&constant_hole_, isolate()->heap()->the_hole_value());
+}
+
+
+HGraphBuilder::HGraphBuilder(CompilationInfo* info,
+                             TypeFeedbackOracle* oracle)
+    : function_state_(NULL),
+      initial_function_state_(this, info, oracle),
+      ast_context_(NULL),
+      break_scope_(NULL),
+      graph_(NULL),
+      current_block_(NULL),
+      inlined_count_(0),
+      zone_(info->isolate()->zone()),
+      inline_bailout_(false) {
+  // This is not initialized in the initializer list because the
+  // constructor for the initial state relies on function_state_ == NULL
+  // to know it's the initial state.
+  function_state_ = &initial_function_state_;
+}
+
 HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
                                        HBasicBlock* second,
                                        int join_id) {
@@ -556,7 +587,7 @@
                                        HBasicBlock* body_exit,
                                        HBasicBlock* loop_successor,
                                        HBasicBlock* break_block) {
-  if (body_exit != NULL) body_exit->Goto(loop_entry, true);
+  if (body_exit != NULL) body_exit->Goto(loop_entry);
   loop_entry->PostProcessLoopHeader(statement);
   if (break_block != NULL) {
     if (loop_successor != NULL) loop_successor->Goto(break_block);
@@ -618,8 +649,7 @@
       PrintF("Crankshaft Compiler - ");
     }
     CodeGenerator::MakeCodePrologue(info);
-    Code::Flags flags =
-        Code::ComputeFlags(Code::OPTIMIZED_FUNCTION, NOT_IN_LOOP);
+    Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
     Handle<Code> code =
         CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
     generator.FinishCode(code);
@@ -644,7 +674,7 @@
     HInstruction* instr = blocks()->at(i)->first();
     while (instr != NULL) {
       HValue* value = instr->Canonicalize();
-      if (value != instr) instr->ReplaceAndDelete(value);
+      if (value != instr) instr->DeleteAndReplaceWith(value);
       instr = instr->next();
     }
   }
@@ -675,8 +705,9 @@
                                  HBasicBlock* loop_header) {
   for (int i = 0; i < loop->blocks()->length(); ++i) {
     HBasicBlock* b = loop->blocks()->at(i);
-    Postorder(b->end()->SecondSuccessor(), visited, order, loop_header);
-    Postorder(b->end()->FirstSuccessor(), visited, order, loop_header);
+    for (HSuccessorIterator it(b->end()); !it.Done(); it.Advance()) {
+      Postorder(it.Current(), visited, order, loop_header);
+    }
     if (b->IsLoopHeader() && b != loop->loop_header()) {
       PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
     }
@@ -693,11 +724,13 @@
   visited->Add(block->block_id());
   if (block->IsLoopHeader()) {
     PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
-    Postorder(block->end()->SecondSuccessor(), visited, order, block);
-    Postorder(block->end()->FirstSuccessor(), visited, order, block);
+    for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+      Postorder(it.Current(), visited, order, block);
+    }
   } else {
-    Postorder(block->end()->SecondSuccessor(), visited, order, loop_header);
-    Postorder(block->end()->FirstSuccessor(), visited, order, loop_header);
+    for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+      Postorder(it.Current(), visited, order, loop_header);
+    }
   }
   ASSERT(block->end()->FirstSuccessor() == NULL ||
          order->Contains(block->end()->FirstSuccessor()) ||
@@ -713,6 +746,8 @@
   HPhase phase("Assign dominators", this);
   for (int i = 0; i < blocks_.length(); ++i) {
     if (blocks_[i]->IsLoopHeader()) {
+      // Only the first predecessor of a loop header is from outside the loop.
+      // All others are back edges, and thus cannot dominate the loop header.
       blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
     } else {
       for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
@@ -722,13 +757,27 @@
   }
 }
 
+// Mark all blocks that are dominated by an unconditional soft deoptimize to
+// prevent code motion across those blocks.
+void HGraph::PropagateDeoptimizingMark() {
+  HPhase phase("Propagate deoptimizing mark", this);
+  MarkAsDeoptimizingRecursively(entry_block());
+}
+
+void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
+  for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
+    HBasicBlock* dominated = block->dominated_blocks()->at(i);
+    if (block->IsDeoptimizing()) dominated->MarkAsDeoptimizing();
+    MarkAsDeoptimizingRecursively(dominated);
+  }
+}
 
 void HGraph::EliminateRedundantPhis() {
   HPhase phase("Redundant phi elimination", this);
 
-  // Worklist of phis that can potentially be eliminated. Initialized
-  // with all phi nodes. When elimination of a phi node modifies
-  // another phi node the modified phi node is added to the worklist.
+  // Worklist of phis that can potentially be eliminated. Initialized with
+  // all phi nodes. When elimination of a phi node modifies another phi node
+  // the modified phi node is added to the worklist.
   ZoneList<HPhi*> worklist(blocks_.length());
   for (int i = 0; i < blocks_.length(); ++i) {
     worklist.AddAll(*blocks_[i]->phis());
@@ -742,18 +791,14 @@
     if (block == NULL) continue;
 
     // Get replacement value if phi is redundant.
-    HValue* value = phi->GetRedundantReplacement();
+    HValue* replacement = phi->GetRedundantReplacement();
 
-    if (value != NULL) {
-      // Iterate through uses finding the ones that should be
-      // replaced.
-      SmallPointerList<HValue>* uses = phi->uses();
-      while (!uses->is_empty()) {
-        HValue* use = uses->RemoveLast();
-        if (use != NULL) {
-          phi->ReplaceAtUse(use, value);
-          if (use->IsPhi()) worklist.Add(HPhi::cast(use));
-        }
+    if (replacement != NULL) {
+      // Iterate through the uses and replace them all.
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* value = it.value();
+        value->SetOperandAt(it.index(), replacement);
+        if (value->IsPhi()) worklist.Add(HPhi::cast(value));
       }
       block->RemovePhi(phi);
     }
@@ -805,6 +850,19 @@
 }
 
 
+bool HGraph::CheckPhis() {
+  int block_count = blocks_.length();
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      // We don't support phi uses of arguments for now.
+      if (phi->CheckFlag(HValue::kIsArguments)) return false;
+    }
+  }
+  return true;
+}
+
+
 bool HGraph::CollectPhis() {
   int block_count = blocks_.length();
   phi_list_ = new ZoneList<HPhi*>(block_count);
@@ -812,8 +870,10 @@
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
       HPhi* phi = blocks_[i]->phis()->at(j);
       phi_list_->Add(phi);
-      // We don't support phi uses of arguments for now.
-      if (phi->CheckFlag(HValue::kIsArguments)) return false;
+      // Check for the hole value (from an uninitialized const).
+      for (int k = 0; k < phi->OperandCount(); k++) {
+        if (phi->OperandAt(k) == GetConstantHole()) return false;
+      }
     }
   }
   return true;
@@ -831,8 +891,8 @@
     HValue* current = worklist->RemoveLast();
     in_worklist.Remove(current->id());
     if (current->UpdateInferredType()) {
-      for (int j = 0; j < current->uses()->length(); j++) {
-        HValue* use = current->uses()->at(j);
+      for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
         if (!in_worklist.Contains(use->id())) {
           in_worklist.Add(use->id());
           worklist->Add(use);
@@ -852,9 +912,8 @@
  private:
   void TraceRange(const char* msg, ...);
   void Analyze(HBasicBlock* block);
-  void InferControlFlowRange(HTest* test, HBasicBlock* dest);
-  void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
-  void InferPhiRange(HPhi* phi);
+  void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
+  void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
   void InferRange(HValue* value);
   void RollBackTo(int index);
   void AddRange(HValue* value, Range* range);
@@ -876,7 +935,7 @@
 
 void HRangeAnalysis::Analyze() {
   HPhase phase("Range analysis", graph_);
-  Analyze(graph_->blocks()->at(0));
+  Analyze(graph_->entry_block());
 }
 
 
@@ -888,15 +947,15 @@
   // Infer range based on control flow.
   if (block->predecessors()->length() == 1) {
     HBasicBlock* pred = block->predecessors()->first();
-    if (pred->end()->IsTest()) {
-      InferControlFlowRange(HTest::cast(pred->end()), block);
+    if (pred->end()->IsCompareIDAndBranch()) {
+      InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
     }
   }
 
   // Process phi instructions.
   for (int i = 0; i < block->phis()->length(); ++i) {
     HPhi* phi = block->phis()->at(i);
-    InferPhiRange(phi);
+    InferRange(phi);
   }
 
   // Go through all instructions of the current block.
@@ -915,28 +974,26 @@
 }
 
 
-void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
+void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
+                                           HBasicBlock* dest) {
   ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
-  if (test->value()->IsCompare()) {
-    HCompare* compare = HCompare::cast(test->value());
-    if (compare->GetInputRepresentation().IsInteger32()) {
-      Token::Value op = compare->token();
-      if (test->SecondSuccessor() == dest) {
-        op = Token::NegateCompareOp(op);
-      }
-      Token::Value inverted_op = Token::InvertCompareOp(op);
-      InferControlFlowRange(op, compare->left(), compare->right());
-      InferControlFlowRange(inverted_op, compare->right(), compare->left());
+  if (test->GetInputRepresentation().IsInteger32()) {
+    Token::Value op = test->token();
+    if (test->SecondSuccessor() == dest) {
+      op = Token::NegateCompareOp(op);
     }
+    Token::Value inverted_op = Token::InvertCompareOp(op);
+    UpdateControlFlowRange(op, test->left(), test->right());
+    UpdateControlFlowRange(inverted_op, test->right(), test->left());
   }
 }
 
 
 // We know that value [op] other. Use this information to update the range on
 // value.
-void HRangeAnalysis::InferControlFlowRange(Token::Value op,
-                                           HValue* value,
-                                           HValue* other) {
+void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
+                                            HValue* value,
+                                            HValue* other) {
   Range temp_range;
   Range* range = other->range() != NULL ? other->range() : &temp_range;
   Range* new_range = NULL;
@@ -967,12 +1024,6 @@
 }
 
 
-void HRangeAnalysis::InferPhiRange(HPhi* phi) {
-  // TODO(twuerthinger): Infer loop phi ranges.
-  InferRange(phi);
-}
-
-
 void HRangeAnalysis::InferRange(HValue* value) {
   ASSERT(!value->HasRange());
   if (!value->representation().IsNone()) {
@@ -1025,13 +1076,13 @@
 }
 
 
-HValueMap::HValueMap(const HValueMap* other)
+HValueMap::HValueMap(Zone* zone, const HValueMap* other)
     : array_size_(other->array_size_),
       lists_size_(other->lists_size_),
       count_(other->count_),
       present_flags_(other->present_flags_),
-      array_(ZONE->NewArray<HValueMapListElement>(other->array_size_)),
-      lists_(ZONE->NewArray<HValueMapListElement>(other->lists_size_)),
+      array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
+      lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
       free_list_head_(other->free_list_head_) {
   memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
   memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
@@ -1199,8 +1250,6 @@
   void Process();
 
  private:
-  void RemoveStackCheck(HBasicBlock* block);
-
   HGraph* graph_;
 };
 
@@ -1215,16 +1264,20 @@
     if (block->IsLoopHeader()) {
       HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
       HBasicBlock* dominator = back_edge;
-      bool back_edge_dominated_by_call = false;
-      while (dominator != block && !back_edge_dominated_by_call) {
+      while (true) {
         HInstruction* instr = dominator->first();
-        while (instr != NULL && !back_edge_dominated_by_call) {
+        while (instr != NULL) {
           if (instr->IsCall()) {
-            RemoveStackCheck(back_edge);
-            back_edge_dominated_by_call = true;
+            block->loop_information()->stack_check()->Eliminate();
+            break;
           }
           instr = instr->next();
         }
+
+        // Done when the loop header is processed.
+        if (dominator == block) break;
+
+        // Move up the dominator tree.
         dominator = dominator->dominator();
       }
     }
@@ -1232,16 +1285,44 @@
 }
 
 
-void HStackCheckEliminator::RemoveStackCheck(HBasicBlock* block) {
-  HInstruction* instr = block->first();
-  while (instr != NULL) {
-    if (instr->IsGoto()) {
-      HGoto::cast(instr)->set_include_stack_check(false);
-      return;
-    }
-    instr = instr->next();
+// Simple sparse set with O(1) add, contains, and clear.
+class SparseSet {
+ public:
+  SparseSet(Zone* zone, int capacity)
+      : capacity_(capacity),
+        length_(0),
+        dense_(zone->NewArray<int>(capacity)),
+        sparse_(zone->NewArray<int>(capacity)) {
+#ifndef NVALGRIND
+    // Initialize the sparse array to make valgrind happy.
+    memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
+#endif
   }
-}
+
+  bool Contains(int n) const {
+    ASSERT(0 <= n && n < capacity_);
+    int d = sparse_[n];
+    return 0 <= d && d < length_ && dense_[d] == n;
+  }
+
+  bool Add(int n) {
+    if (Contains(n)) return false;
+    dense_[length_] = n;
+    sparse_[n] = length_;
+    ++length_;
+    return true;
+  }
+
+  void Clear() { length_ = 0; }
+
+ private:
+  int capacity_;
+  int length_;
+  int* dense_;
+  int* sparse_;
+
+  DISALLOW_COPY_AND_ASSIGN(SparseSet);
+};
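// A minimal usage sketch of the SparseSet above, assuming a Zone and a block
// count are in scope (as in HGlobalValueNumberer below); Add() reports whether
// the element was newly inserted, and Clear() is O(1) because it only resets
// the length while dense_/sparse_ cross-check stale entries:
//
//   SparseSet visited(zone, graph->blocks()->length());
//   if (visited.Add(block->block_id())) {
//     // First time this block id is seen on the current traversal.
//   }
//   ASSERT(visited.Contains(block->block_id()));
//   visited.Clear();  // Constant time; no per-element reset needed.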
 
 
 class HGlobalValueNumberer BASE_EMBEDDED {
@@ -1249,8 +1330,9 @@
   explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
       : graph_(graph),
         info_(info),
-        block_side_effects_(graph_->blocks()->length()),
-        loop_side_effects_(graph_->blocks()->length()) {
+        block_side_effects_(graph->blocks()->length()),
+        loop_side_effects_(graph->blocks()->length()),
+        visited_on_paths_(graph->zone(), graph->blocks()->length()) {
     ASSERT(info->isolate()->heap()->allow_allocation(false));
     block_side_effects_.AddBlock(0, graph_->blocks()->length());
     loop_side_effects_.AddBlock(0, graph_->blocks()->length());
@@ -1262,6 +1344,8 @@
   void Analyze();
 
  private:
+  int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
+                                                HBasicBlock* dominated);
   void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
   void ComputeBlockSideEffects();
   void LoopInvariantCodeMotion();
@@ -1283,6 +1367,10 @@
 
   // A map of loop header block IDs to their loop's side effects.
   ZoneList<int> loop_side_effects_;
+
+  // Used when collecting side effects on paths from dominator to
+  // dominated.
+  SparseSet visited_on_paths_;
 };
 
 
@@ -1292,7 +1380,7 @@
     LoopInvariantCodeMotion();
   }
   HValueMap* map = new(zone()) HValueMap();
-  AnalyzeBlock(graph_->blocks()->at(0), map);
+  AnalyzeBlock(graph_->entry_block(), map);
 }
 
 
@@ -1304,7 +1392,7 @@
     int id = block->block_id();
     int side_effects = 0;
     while (instr != NULL) {
-      side_effects |= (instr->flags() & HValue::ChangesFlagsMask());
+      side_effects |= instr->ChangesFlags();
       instr = instr->next();
     }
     block_side_effects_[id] |= side_effects;
@@ -1384,42 +1472,36 @@
 
 bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
                                       HBasicBlock* loop_header) {
-  // If we've disabled code motion, don't move any instructions.
-  if (!AllowCodeMotion()) return false;
+  // If we've disabled code motion or we're in a block that unconditionally
+  // deoptimizes, don't move any instructions.
+  return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
+}
 
-  // If --aggressive-loop-invariant-motion, move everything except change
-  // instructions.
-  if (FLAG_aggressive_loop_invariant_motion && !instr->IsChange()) {
-    return true;
-  }
 
-  // Otherwise only move instructions that postdominate the loop header
-  // (i.e. are always executed inside the loop). This is to avoid
-  // unnecessary deoptimizations assuming the loop is executed at least
-  // once.  TODO(fschneider): Better type feedback should give us
-  // information about code that was never executed.
-  HBasicBlock* block = instr->block();
-  bool result = true;
-  if (block != loop_header) {
-    for (int i = 1; i < loop_header->predecessors()->length(); ++i) {
-      bool found = false;
-      HBasicBlock* pred = loop_header->predecessors()->at(i);
-      while (pred != loop_header) {
-        if (pred == block) found = true;
-        pred = pred->dominator();
+int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+    HBasicBlock* dominator, HBasicBlock* dominated) {
+  int side_effects = 0;
+  for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+    HBasicBlock* block = dominated->predecessors()->at(i);
+    if (dominator->block_id() < block->block_id() &&
+        block->block_id() < dominated->block_id() &&
+        visited_on_paths_.Add(block->block_id())) {
+      side_effects |= block_side_effects_[block->block_id()];
+      if (block->IsLoopHeader()) {
+        side_effects |= loop_side_effects_[block->block_id()];
       }
-      if (!found) {
-        result = false;
-        break;
-      }
+      side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
+          dominator, block);
     }
   }
-  return result;
+  return side_effects;
 }
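// Worked example for the traversal above (illustrative): if dominator D
// reaches dominated block X through two intermediate predecessors A and B
// (with D < A, B < X in block id order), the recursion ORs in
// block_side_effects_[A] and block_side_effects_[B], plus loop_side_effects_
// for any intermediate loop header, and visited_on_paths_ ensures each
// intermediate block contributes only once even when the paths share blocks.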
 
 
 void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
-  TraceGVN("Analyzing block B%d\n", block->block_id());
+  TraceGVN("Analyzing block B%d%s\n",
+           block->block_id(),
+           block->IsLoopHeader() ? " (loop header)" : "");
 
   // If this is a loop header kill everything killed by the loop.
   if (block->IsLoopHeader()) {
@@ -1430,7 +1512,7 @@
   HInstruction* instr = block->first();
   while (instr != NULL) {
     HInstruction* next = instr->next();
-    int flags = (instr->flags() & HValue::ChangesFlagsMask());
+    int flags = instr->ChangesFlags();
     if (flags != 0) {
       ASSERT(!instr->CheckFlag(HValue::kUseGVN));
       // Clear all instructions in the map that are affected by side effects.
@@ -1445,7 +1527,7 @@
                  instr->Mnemonic(),
                  other->id(),
                  other->Mnemonic());
-        instr->ReplaceAndDelete(other);
+        instr->DeleteAndReplaceWith(other);
       } else {
         map->Add(instr);
       }
@@ -1460,23 +1542,18 @@
     // No need to copy the map for the last child in the dominator tree.
     HValueMap* successor_map = (i == length - 1) ? map : map->Copy(zone());
 
-    // If the dominated block is not a successor to this block we have to
-    // kill everything killed on any path between this block and the
-    // dominated block.  Note we rely on the block ordering.
-    bool is_successor = false;
-    int predecessor_count = dominated->predecessors()->length();
-    for (int j = 0; !is_successor && j < predecessor_count; ++j) {
-      is_successor = (dominated->predecessors()->at(j) == block);
+    // Kill everything killed on any path between this block and the
+    // dominated block.
+    // We don't have to traverse these paths if the value map is
+    // already empty.
+    // If the range of block ids (block_id, dominated_id) is empty
+    // there are no such paths.
+    if (!successor_map->IsEmpty() &&
+        block->block_id() + 1 < dominated->block_id()) {
+      visited_on_paths_.Clear();
+      successor_map->Kill(CollectSideEffectsOnPathsToDominatedBlock(block,
+                                                                    dominated));
     }
-
-    if (!is_successor) {
-      int side_effects = 0;
-      for (int j = block->block_id() + 1; j < dominated->block_id(); ++j) {
-        side_effects |= block_side_effects_[j];
-      }
-      successor_map->Kill(side_effects);
-    }
-
     AnalyzeBlock(dominated, successor_map);
   }
 }
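
AnalyzeBlock is in essence local value numbering driven by a value map that is copied down the dominator tree: GVN-eligible instructions are looked up by value equality and replaced by an earlier equivalent, while any instruction with side effects first kills the map entries those effects can invalidate. A toy version of the per-block part, over an invented instruction record (the map key, the flag encoding and the kill rule here are illustrative only, not the HValueMap API):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    // Toy three-address instruction: opcode plus operand ids.
    struct Instr {
      std::string op;
      std::vector<int> operands;
      int id;                  // Result id.
      bool use_gvn;            // Pure enough to be value-numbered.
      uint32_t changes_flags;  // Side effects this instruction produces.
      uint32_t depends_flags;  // Side effects that invalidate its result.
    };

    // Local value numbering over one block: equal (op, operands) pairs map to
    // the same earlier result unless an intervening side effect killed the
    // cached entry.
    void NumberBlock(std::vector<Instr>* block,
                     std::map<int, int>* replacements) {
      std::map<std::pair<std::string, std::vector<int>>, const Instr*> map;
      for (const Instr& instr : *block) {
        if (instr.changes_flags != 0) {
          // Kill every cached value whose result may be invalidated by the
          // side effects this instruction produces.
          for (auto it = map.begin(); it != map.end();) {
            if ((it->second->depends_flags & instr.changes_flags) != 0) {
              it = map.erase(it);
            } else {
              ++it;
            }
          }
        }
        if (!instr.use_gvn) continue;
        auto key = std::make_pair(instr.op, instr.operands);
        auto found = map.find(key);
        if (found != map.end()) {
          // An equivalent earlier instruction exists: record the replacement.
          (*replacements)[instr.id] = found->second->id;
        } else {
          map.emplace(key, &instr);
        }
      }
    }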
@@ -1529,12 +1606,12 @@
 }
 
 
-void HInferRepresentation::AddDependantsToWorklist(HValue* current) {
-  for (int i = 0; i < current->uses()->length(); ++i) {
-    AddToWorklist(current->uses()->at(i));
+void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    AddToWorklist(it.value());
   }
-  for (int i = 0; i < current->OperandCount(); ++i) {
-    AddToWorklist(current->OperandAt(i));
+  for (int i = 0; i < value->OperandCount(); ++i) {
+    AddToWorklist(value->OperandAt(i));
   }
 }
 
@@ -1543,37 +1620,30 @@
 // given as the parameter has a benefit in terms of less necessary type
 // conversions. If there is a benefit, then the representation of the value is
 // specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* current) {
-  Representation r = current->representation();
-  if (r.IsSpecialization() || current->HasNoUses()) return;
-  ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
-  Representation new_rep = TryChange(current);
+void HInferRepresentation::InferBasedOnUses(HValue* value) {
+  Representation r = value->representation();
+  if (r.IsSpecialization() || value->HasNoUses()) return;
+  ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
+  Representation new_rep = TryChange(value);
   if (!new_rep.IsNone()) {
-    if (!current->representation().Equals(new_rep)) {
-      current->ChangeRepresentation(new_rep);
-      AddDependantsToWorklist(current);
+    if (!value->representation().Equals(new_rep)) {
+      value->ChangeRepresentation(new_rep);
+      AddDependantsToWorklist(value);
     }
   }
 }
 
 
-Representation HInferRepresentation::TryChange(HValue* current) {
+Representation HInferRepresentation::TryChange(HValue* value) {
   // Array of use counts for each representation.
-  int use_count[Representation::kNumRepresentations];
-  for (int i = 0; i < Representation::kNumRepresentations; i++) {
-    use_count[i] = 0;
-  }
+  int use_count[Representation::kNumRepresentations] = { 0 };
 
-  for (int i = 0; i < current->uses()->length(); ++i) {
-    HValue* use = current->uses()->at(i);
-    int index = use->LookupOperandIndex(0, current);
-    Representation req_rep = use->RequiredInputRepresentation(index);
-    if (req_rep.IsNone()) continue;
-    if (use->IsPhi()) {
-      HPhi* phi = HPhi::cast(use);
-      phi->AddIndirectUsesTo(&use_count[0]);
-    }
-    use_count[req_rep.kind()]++;
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    Representation rep = use->RequiredInputRepresentation(it.index());
+    if (rep.IsNone()) continue;
+    if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
+    ++use_count[rep.kind()];
   }
   int tagged_count = use_count[Representation::kTagged];
   int double_count = use_count[Representation::kDouble];
@@ -1581,20 +1651,20 @@
   int non_tagged_count = double_count + int32_count;
 
   // If a non-loop phi has tagged uses, don't convert it to untagged.
-  if (current->IsPhi() && !current->block()->IsLoopHeader()) {
-    if (tagged_count > 0) return Representation::None();
+  if (value->IsPhi() && !value->block()->IsLoopHeader() && tagged_count > 0) {
+    return Representation::None();
   }
 
-  if (non_tagged_count >= tagged_count) {
-    // More untagged than tagged.
-    if (double_count > 0) {
-      // There is at least one usage that is a double => guess that the
-      // correct representation is double.
-      return Representation::Double();
-    } else if (int32_count > 0) {
-      return Representation::Integer32();
-    }
+  // Prefer unboxing over boxing, the latter is more expensive.
+  if (tagged_count > non_tagged_count) return Representation::None();
+
+  // Prefer Integer32 over Double, if possible.
+  if (int32_count > 0 && value->IsConvertibleToInteger()) {
+    return Representation::Integer32();
   }
+
+  if (double_count > 0) return Representation::Double();
+
   return Representation::None();
 }
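
The rewritten TryChange boils down to counting how each use wants its input represented and then specializing only when an untagged representation wins, with Integer32 favoured over Double when the value is known to be convertible without deoptimization. The decision rule in isolation, over plain counters (the enum and the convertibility flag are stand-ins for the hydrogen types, not the real API):

    enum class Rep { kNone, kTagged, kDouble, kInteger32 };

    // Pick a specialized representation from use counts, mirroring the
    // heuristic above: boxing (tagged) wins ties, and Integer32 beats Double
    // when the conversion is known to be safe.
    Rep ChooseRepresentation(int tagged_count, int int32_count,
                             int double_count,
                             bool is_non_loop_phi_with_tagged_use,
                             bool convertible_to_integer) {
      int non_tagged_count = int32_count + double_count;
      if (is_non_loop_phi_with_tagged_use) return Rep::kNone;
      // Prefer unboxing over boxing; the latter is more expensive.
      if (tagged_count > non_tagged_count) return Rep::kNone;
      // Prefer Integer32 over Double when no deoptimization is needed.
      if (int32_count > 0 && convertible_to_integer) return Rep::kInteger32;
      if (double_count > 0) return Rep::kDouble;
      return Rep::kNone;
    }

For example, two Integer32 uses and one Double use on a convertible value yield Integer32, while three tagged uses against two untagged ones keep the value tagged.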
 
@@ -1602,53 +1672,58 @@
 void HInferRepresentation::Analyze() {
   HPhase phase("Infer representations", graph_);
 
-  // (1) Initialize bit vectors and count real uses. Each phi
-  // gets a bit-vector of length <number of phis>.
+  // (1) Initialize bit vectors and count real uses. Each phi gets a
+  // bit-vector of length <number of phis>.
   const ZoneList<HPhi*>* phi_list = graph_->phi_list();
-  int num_phis = phi_list->length();
-  ScopedVector<BitVector*> connected_phis(num_phis);
-  for (int i = 0; i < num_phis; i++) {
+  int phi_count = phi_list->length();
+  ZoneList<BitVector*> connected_phis(phi_count);
+  for (int i = 0; i < phi_count; ++i) {
     phi_list->at(i)->InitRealUses(i);
-    connected_phis[i] = new(zone()) BitVector(num_phis);
-    connected_phis[i]->Add(i);
+    BitVector* connected_set = new(zone()) BitVector(phi_count);
+    connected_set->Add(i);
+    connected_phis.Add(connected_set);
   }
 
-  // (2) Do a fixed point iteration to find the set of connected phis.
-  // A phi is connected to another phi if its value is used either
-  // directly or indirectly through a transitive closure of the def-use
-  // relation.
+  // (2) Do a fixed point iteration to find the set of connected phis.  A
+  // phi is connected to another phi if its value is used either directly or
+  // indirectly through a transitive closure of the def-use relation.
   bool change = true;
   while (change) {
     change = false;
-    for (int i = 0; i < num_phis; i++) {
+    // We normally have far more "forward edges" than "backward edges",
+    // so we terminate faster when we walk backwards.
+    for (int i = phi_count - 1; i >= 0; --i) {
       HPhi* phi = phi_list->at(i);
-      for (int j = 0; j < phi->uses()->length(); j++) {
-        HValue* use = phi->uses()->at(j);
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
         if (use->IsPhi()) {
-          int phi_use = HPhi::cast(use)->phi_id();
-          if (connected_phis[i]->UnionIsChanged(*connected_phis[phi_use])) {
+          int id = HPhi::cast(use)->phi_id();
+          if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
             change = true;
-          }
         }
       }
     }
   }
 
-  // (3) Sum up the non-phi use counts of all connected phis.
-  // Don't include the non-phi uses of the phi itself.
-  for (int i = 0; i < num_phis; i++) {
+  // (3) Use the phi reachability information from step 2 to
+  //     (a) sum up the non-phi use counts of all connected phis.
+  //     (b) push information about values which can't be converted to integer
+  //         without deoptimization through the phi use-def chains, avoiding
+  //         unnecessary deoptimizations later.
+  for (int i = 0; i < phi_count; ++i) {
     HPhi* phi = phi_list->at(i);
+    bool cti = phi->AllOperandsConvertibleToInteger();
     for (BitVector::Iterator it(connected_phis.at(i));
          !it.Done();
          it.Advance()) {
       int index = it.Current();
-      if (index != i) {
-        HPhi* it_use = phi_list->at(it.Current());
-        phi->AddNonPhiUsesFrom(it_use);
-      }
+      HPhi* it_use = phi_list->at(it.Current());
+      if (index != i) phi->AddNonPhiUsesFrom(it_use);  // Don't count twice!
+      if (!cti) it_use->set_is_convertible_to_integer(false);
     }
   }
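
Steps (2) and (3) compute, for every phi, the set of phis reachable from it through phi-to-phi uses, and then exploit that reachability both to aggregate non-phi use counts and to spread the "not convertible to integer" property. A compact standalone version of the same fixed point, with the phi-to-phi edges given as adjacency lists and the connected sets kept as plain boolean vectors (names are illustrative, not the hydrogen API):

    #include <vector>

    // For each phi i, compute the set of phis reachable from i along
    // phi-to-phi use edges (including i itself), by iterating set unions to a
    // fixed point.  phi_uses[i] lists the phis that use phi i.
    std::vector<std::vector<bool>> ConnectedPhis(
        const std::vector<std::vector<int>>& phi_uses) {
      int n = static_cast<int>(phi_uses.size());
      std::vector<std::vector<bool>> connected(n, std::vector<bool>(n, false));
      for (int i = 0; i < n; ++i) connected[i][i] = true;
      bool change = true;
      while (change) {
        change = false;
        // Walking backwards tends to converge faster when most edges go
        // forward, matching the comment in the pass above.
        for (int i = n - 1; i >= 0; --i) {
          for (int use : phi_uses[i]) {
            for (int k = 0; k < n; ++k) {
              if (connected[use][k] && !connected[i][k]) {
                connected[i][k] = true;
                change = true;
              }
            }
          }
        }
      }
      return connected;
    }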
 
+  // Initialize the work list.
   for (int i = 0; i < graph_->blocks()->length(); ++i) {
     HBasicBlock* block = graph_->blocks()->at(i);
     const ZoneList<HPhi*>* phis = block->phis();
@@ -1663,6 +1738,7 @@
     }
   }
 
+  // Do a fixed point iteration, trying to improve representations.
   while (!worklist_.is_empty()) {
     HValue* current = worklist_.RemoveLast();
     in_worklist_.Remove(current->id());
@@ -1746,17 +1822,16 @@
 
 
 void HGraph::InsertRepresentationChangeForUse(HValue* value,
-                                              HValue* use,
+                                              HValue* use_value,
+                                              int use_index,
                                               Representation to) {
   // Insert the representation change right before its use. For phi-uses we
   // insert at the end of the corresponding predecessor.
   HInstruction* next = NULL;
-  if (use->IsPhi()) {
-    int index = 0;
-    while (use->OperandAt(index) != value) ++index;
-    next = use->block()->predecessors()->at(index)->end();
+  if (use_value->IsPhi()) {
+    next = use_value->block()->predecessors()->at(use_index)->end();
   } else {
-    next = HInstruction::cast(use);
+    next = HInstruction::cast(use_value);
   }
 
   // For constants we try to make the representation change at compile
@@ -1764,8 +1839,9 @@
   // information we treat constants like normal instructions and insert the
   // change instructions for them.
   HInstruction* new_value = NULL;
-  bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32);
-  bool deoptimize_on_undefined = use->CheckFlag(HValue::kDeoptimizeOnUndefined);
+  bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
+  bool deoptimize_on_undefined =
+      use_value->CheckFlag(HValue::kDeoptimizeOnUndefined);
   if (value->IsConstant()) {
     HConstant* constant = HConstant::cast(value);
     // Try to create a new copy of the constant with the new representation.
@@ -1780,89 +1856,32 @@
   }
 
   new_value->InsertBefore(next);
-  value->ReplaceFirstAtUse(use, new_value, to);
+  use_value->SetOperandAt(use_index, new_value);
 }
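
The conversion is inserted right before its use, except that a phi use has no single insertion point in its own block: the value for operand i of a phi flows in along the i-th incoming edge, so the change must go at the end of the i-th predecessor. A hypothetical sketch of just that placement decision, with placeholder types rather than the hydrogen classes:

    #include <cassert>
    #include <vector>

    struct Node;

    // Placeholder block: a predecessor list and an instruction sequence.
    struct BlockStub {
      std::vector<BlockStub*> predecessors;
      std::vector<Node*> instructions;
    };

    // Placeholder value: phis live at block entry, ordinary instructions have
    // a definite position inside their block.
    struct Node {
      bool is_phi;
      BlockStub* block;
      std::vector<Node*> operands;
    };

    struct InsertionPoint {
      BlockStub* block;
      Node* before;  // nullptr means "append at the end of the block".
    };

    // Decide where a conversion feeding operand `use_index` of `use` must go:
    // for a phi, at the end of the matching predecessor; otherwise directly
    // before the using instruction in its own block.
    InsertionPoint ConversionInsertionPoint(Node* use, int use_index) {
      if (use->is_phi) {
        assert(use_index <
               static_cast<int>(use->block->predecessors.size()));
        return InsertionPoint{use->block->predecessors[use_index], nullptr};
      }
      return InsertionPoint{use->block, use};
    }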
 
 
-int CompareConversionUses(HValue* a,
-                          HValue* b,
-                          Representation a_rep,
-                          Representation b_rep) {
-  if (a_rep.kind() > b_rep.kind()) {
-    // Make sure specializations are separated in the result array.
-    return 1;
-  }
-  // Put truncating conversions before non-truncating conversions.
-  bool a_truncate = a->CheckFlag(HValue::kTruncatingToInt32);
-  bool b_truncate = b->CheckFlag(HValue::kTruncatingToInt32);
-  if (a_truncate != b_truncate) {
-    return a_truncate ? -1 : 1;
-  }
-  // Sort by increasing block ID.
-  return a->block()->block_id() - b->block()->block_id();
-}
-
-
-void HGraph::InsertRepresentationChangesForValue(
-    HValue* current,
-    ZoneList<HValue*>* to_convert,
-    ZoneList<Representation>* to_convert_reps) {
-  Representation r = current->representation();
+void HGraph::InsertRepresentationChangesForValue(HValue* value) {
+  Representation r = value->representation();
   if (r.IsNone()) return;
-  if (current->uses()->length() == 0) return;
+  if (value->HasNoUses()) return;
 
-  // Collect the representation changes in a sorted list.  This allows
-  // us to avoid duplicate changes without searching the list.
-  ASSERT(to_convert->is_empty());
-  ASSERT(to_convert_reps->is_empty());
-  for (int i = 0; i < current->uses()->length(); ++i) {
-    HValue* use = current->uses()->at(i);
-    // The occurrences index means the index within the operand array of "use"
-    // at which "current" is used. While iterating through the use array we
-    // also have to iterate over the different occurrence indices.
-    int occurrence_index = 0;
-    if (use->UsesMultipleTimes(current)) {
-      occurrence_index = current->uses()->CountOccurrences(use, 0, i - 1);
-      if (FLAG_trace_representation) {
-        PrintF("Instruction %d is used multiple times at %d; occurrence=%d\n",
-               current->id(),
-               use->id(),
-               occurrence_index);
-      }
-    }
-    int operand_index = use->LookupOperandIndex(occurrence_index, current);
-    Representation req = use->RequiredInputRepresentation(operand_index);
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    HValue* use_value = it.value();
+    int use_index = it.index();
+    Representation req = use_value->RequiredInputRepresentation(use_index);
     if (req.IsNone() || req.Equals(r)) continue;
-    int index = 0;
-    while (index < to_convert->length() &&
-           CompareConversionUses(to_convert->at(index),
-                                 use,
-                                 to_convert_reps->at(index),
-                                 req) < 0) {
-      ++index;
-    }
-    if (FLAG_trace_representation) {
-      PrintF("Inserting a representation change to %s of %d for use at %d\n",
-             req.Mnemonic(),
-             current->id(),
-             use->id());
-    }
-    to_convert->InsertAt(index, use);
-    to_convert_reps->InsertAt(index, req);
+    InsertRepresentationChangeForUse(value, use_value, use_index, req);
+  }
+  if (value->HasNoUses()) {
+    ASSERT(value->IsConstant());
+    value->DeleteAndReplaceWith(NULL);
   }
 
-  for (int i = 0; i < to_convert->length(); ++i) {
-    HValue* use = to_convert->at(i);
-    Representation r_to = to_convert_reps->at(i);
-    InsertRepresentationChangeForUse(current, use, r_to);
+  // The only purpose of an HForceRepresentation is to represent the value
+  // after the (possible) HChange instruction.  We make it disappear.
+  if (value->IsForceRepresentation()) {
+    value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
   }
-
-  if (current->uses()->is_empty()) {
-    ASSERT(current->IsConstant());
-    current->Delete();
-  }
-  to_convert->Rewind(0);
-  to_convert_reps->Rewind(0);
 }
 
 
@@ -1887,8 +1906,8 @@
     for (int i = 0; i < phi_list()->length(); i++) {
       HPhi* phi = phi_list()->at(i);
       if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
-      for (int j = 0; j < phi->uses()->length(); j++) {
-        HValue* use = phi->uses()->at(j);
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
         if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
           phi->ClearFlag(HValue::kTruncatingToInt32);
           change = true;
@@ -1898,19 +1917,17 @@
     }
   }
 
-  ZoneList<HValue*> value_list(4);
-  ZoneList<Representation> rep_list(4);
   for (int i = 0; i < blocks_.length(); ++i) {
     // Process phi instructions first.
-    for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
-      HPhi* phi = blocks_[i]->phis()->at(j);
-      InsertRepresentationChangesForValue(phi, &value_list, &rep_list);
+    const ZoneList<HPhi*>* phis = blocks_[i]->phis();
+    for (int j = 0; j < phis->length(); j++) {
+      InsertRepresentationChangesForValue(phis->at(j));
     }
 
     // Process normal instructions.
     HInstruction* current = blocks_[i]->first();
     while (current != NULL) {
-      InsertRepresentationChangesForValue(current, &value_list, &rep_list);
+      InsertRepresentationChangesForValue(current);
       current = current->next();
     }
   }
@@ -1933,16 +1950,15 @@
   HPhase phase("MarkDeoptimizeOnUndefined", this);
   // Compute DeoptimizeOnUndefined flag for phis.
   // Any phi that can reach a use with DeoptimizeOnUndefined set must
-  // have DeoptimizeOnUndefined set.  Currently only HCompare, with
+  // have DeoptimizeOnUndefined set.  Currently only HCompareIDAndBranch, with
   // double input representation, has this flag set.
   // The flag is used by HChange tagged->double, which must deoptimize
   // if one of its uses has this flag set.
   for (int i = 0; i < phi_list()->length(); i++) {
     HPhi* phi = phi_list()->at(i);
     if (phi->representation().IsDouble()) {
-      for (int j = 0; j < phi->uses()->length(); j++) {
-        HValue* use = phi->uses()->at(j);
-        if (use->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        if (it.value()->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
           RecursivelyMarkPhiDeoptimizeOnUndefined(phi);
           break;
         }
@@ -1995,9 +2011,10 @@
       HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
       if_true->MarkAsInlineReturnTarget();
       if_false->MarkAsInlineReturnTarget();
+      Expression* cond = TestContext::cast(owner->ast_context())->condition();
       // The AstContext constructor pushed on the context stack.  This newed
       // instance is the reason that AstContext can't be BASE_EMBEDDED.
-      test_context_ = new TestContext(owner, if_true, if_false);
+      test_context_ = new TestContext(owner, cond, if_true, if_false);
     } else {
       function_return_ = owner->graph()->CreateBasicBlock();
       function_return()->MarkAsInlineReturnTarget();
@@ -2058,6 +2075,9 @@
 void ValueContext::ReturnValue(HValue* value) {
   // The value is tracked in the bailout environment, and communicated
   // through the environment as the result of the expression.
+  if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
+    owner()->Bailout("bad value context for arguments value");
+  }
   owner()->Push(value);
 }
 
@@ -2068,19 +2088,57 @@
 
 
 void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+  ASSERT(!instr->IsControlInstruction());
   owner()->AddInstruction(instr);
   if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
+void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+  ASSERT(!instr->HasSideEffects());
+  HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+  HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+  instr->SetSuccessorAt(0, empty_true);
+  instr->SetSuccessorAt(1, empty_false);
+  owner()->current_block()->Finish(instr);
+  HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
+  owner()->set_current_block(join);
+}
+
+
 void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+  ASSERT(!instr->IsControlInstruction());
+  if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+    return owner()->Bailout("bad value context for arguments object value");
+  }
   owner()->AddInstruction(instr);
   owner()->Push(instr);
   if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
+void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+  ASSERT(!instr->HasSideEffects());
+  if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+    return owner()->Bailout("bad value context for arguments object value");
+  }
+  HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
+  HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
+  instr->SetSuccessorAt(0, materialize_true);
+  instr->SetSuccessorAt(1, materialize_false);
+  owner()->current_block()->Finish(instr);
+  owner()->set_current_block(materialize_true);
+  owner()->Push(owner()->graph()->GetConstantTrue());
+  owner()->set_current_block(materialize_false);
+  owner()->Push(owner()->graph()->GetConstantFalse());
+  HBasicBlock* join =
+    owner()->CreateJoin(materialize_true, materialize_false, ast_id);
+  owner()->set_current_block(join);
+}
+
+
 void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+  ASSERT(!instr->IsControlInstruction());
   HGraphBuilder* builder = owner();
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
@@ -2094,61 +2152,60 @@
 }
 
 
+void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+  ASSERT(!instr->HasSideEffects());
+  HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+  HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+  instr->SetSuccessorAt(0, empty_true);
+  instr->SetSuccessorAt(1, empty_false);
+  owner()->current_block()->Finish(instr);
+  empty_true->Goto(if_true());
+  empty_false->Goto(if_false());
+  owner()->set_current_block(NULL);
+}
+
+
 void TestContext::BuildBranch(HValue* value) {
   // We expect the graph to be in edge-split form: there is no edge that
   // connects a branch node to a join node.  We conservatively ensure that
   // property by always adding an empty block on the outgoing edges of this
   // branch.
   HGraphBuilder* builder = owner();
+  if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
+    builder->Bailout("arguments object value in a test context");
+  }
   HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
-  HTest* test = new(zone()) HTest(value, empty_true, empty_false);
+  unsigned test_id = condition()->test_id();
+  ToBooleanStub::Types expected(builder->oracle()->ToBooleanTypes(test_id));
+  HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
   builder->current_block()->Finish(test);
 
-  empty_true->Goto(if_true(), false);
-  empty_false->Goto(if_false(), false);
+  empty_true->Goto(if_true());
+  empty_false->Goto(if_false());
   builder->set_current_block(NULL);
 }
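
The empty blocks on both outgoing edges keep the graph in edge-split form: a branch never feeds a join directly, so a later pass that needs to insert code "on an edge" has a dedicated block to put it in. A minimal illustration of splitting such an edge in a generic CFG, again with invented block types rather than the hydrogen ones (the caller is assumed to own the new block):

    #include <vector>

    struct CfgBlock {
      std::vector<CfgBlock*> successors;
      std::vector<CfgBlock*> predecessors;
    };

    // Split the edge from `pred` to `succ` by routing it through a fresh
    // empty block.  Code that must run only on that edge now has a home.
    CfgBlock* SplitEdge(CfgBlock* pred, CfgBlock* succ) {
      CfgBlock* middle = new CfgBlock();
      for (CfgBlock*& s : pred->successors) {
        if (s == succ) s = middle;
      }
      for (CfgBlock*& p : succ->predecessors) {
        if (p == pred) p = middle;
      }
      middle->predecessors.push_back(pred);
      middle->successors.push_back(succ);
      return middle;
    }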
 
 
 // HGraphBuilder infrastructure for bailing out and checking bailouts.
-#define BAILOUT(reason)                         \
+#define CHECK_BAILOUT(call)                     \
   do {                                          \
-    Bailout(reason);                            \
-    return;                                     \
-  } while (false)
-
-
-#define CHECK_BAILOUT                           \
-  do {                                          \
+    call;                                       \
     if (HasStackOverflow()) return;             \
   } while (false)
 
 
-#define VISIT_FOR_EFFECT(expr)                  \
-  do {                                          \
-    VisitForEffect(expr);                       \
-    if (HasStackOverflow()) return;             \
-  } while (false)
-
-
-#define VISIT_FOR_VALUE(expr)                   \
-  do {                                          \
-    VisitForValue(expr);                        \
-    if (HasStackOverflow()) return;             \
-  } while (false)
-
-
-#define VISIT_FOR_CONTROL(expr, true_block, false_block)        \
+#define CHECK_ALIVE(call)                                       \
   do {                                                          \
-    VisitForControl(expr, true_block, false_block);             \
-    if (HasStackOverflow()) return;                             \
+    call;                                                       \
+    if (HasStackOverflow() || current_block() == NULL) return;  \
   } while (false)
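
The two macros centralize the check that used to be spread over the BAILOUT/VISIT_FOR_* family: CHECK_BAILOUT returns early only on stack overflow, while CHECK_ALIVE also returns when the visit left no current block (for example after an unconditional deoptimize). A stripped-down imitation of the pattern outside V8, just to show the control flow the macros buy; the builder type and macro names here are invented:

    #include <cstdio>

    // Invented stand-in for the graph builder's failure state.
    struct MiniBuilder {
      bool stack_overflow = false;
      bool has_current_block = true;

      void Bailout(const char* reason) {
        std::printf("bailout: %s\n", reason);
        stack_overflow = true;  // Abort by flagging, like SetStackOverflow.
      }
    };

    #define MB_CHECK_BAILOUT(builder, call)      \
      do {                                       \
        call;                                    \
        if ((builder).stack_overflow) return;    \
      } while (false)

    #define MB_CHECK_ALIVE(builder, call)                              \
      do {                                                             \
        call;                                                          \
        if ((builder).stack_overflow ||                                \
            !(builder).has_current_block) return;                      \
      } while (false)

    // Usage: every recursive visit is wrapped, so a single bailout unwinds
    // the whole traversal without extra bookkeeping at each call site.
    void VisitSomething(MiniBuilder& b) {
      MB_CHECK_ALIVE(b, b.Bailout("unsupported construct"));
      // Not reached: the macro returned right after the bailout above.
    }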
 
 
 void HGraphBuilder::Bailout(const char* reason) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
     PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *name, reason);
   }
   SetStackOverflow();
@@ -2161,14 +2218,14 @@
 }
 
 
-void HGraphBuilder::VisitForValue(Expression* expr) {
-  ValueContext for_value(this);
+void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+  ValueContext for_value(this, flag);
   Visit(expr);
 }
 
 
 void HGraphBuilder::VisitForTypeOf(Expression* expr) {
-  ValueContext for_value(this);
+  ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
   for_value.set_for_typeof(true);
   Visit(expr);
 }
@@ -2178,28 +2235,30 @@
 void HGraphBuilder::VisitForControl(Expression* expr,
                                     HBasicBlock* true_block,
                                     HBasicBlock* false_block) {
-  TestContext for_test(this, true_block, false_block);
+  TestContext for_test(this, expr, true_block, false_block);
   Visit(expr);
 }
 
 
-void HGraphBuilder::VisitArgument(Expression* expr) {
-  VISIT_FOR_VALUE(expr);
-  Push(AddInstruction(new(zone()) HPushArgument(Pop())));
+HValue* HGraphBuilder::VisitArgument(Expression* expr) {
+  VisitForValue(expr);
+  if (HasStackOverflow() || current_block() == NULL) return NULL;
+  HValue* value = Pop();
+  Push(AddInstruction(new(zone()) HPushArgument(value)));
+  return value;
 }
 
 
 void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
   for (int i = 0; i < arguments->length(); i++) {
-    VisitArgument(arguments->at(i));
-    if (HasStackOverflow() || current_block() == NULL) return;
+    CHECK_ALIVE(VisitArgument(arguments->at(i)));
   }
 }
 
 
 void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
   for (int i = 0; i < exprs->length(); ++i) {
-    VISIT_FOR_VALUE(exprs->at(i));
+    CHECK_ALIVE(VisitForValue(exprs->at(i)));
   }
 }
 
@@ -2218,8 +2277,6 @@
       return NULL;
     }
     SetupScope(scope);
-    VisitDeclarations(scope->declarations());
-    AddInstruction(new(zone()) HStackCheck());
 
     // Add an edge to the body entry.  This is warty: the graph's start
     // environment will be used by the Lithium translation as the initial
@@ -2241,6 +2298,19 @@
     current_block()->Goto(body_entry);
     body_entry->SetJoinId(AstNode::kFunctionEntryId);
     set_current_block(body_entry);
+
+    // Handle implicit declaration of the function name in named function
+    // expressions before other declarations.
+    if (scope->is_function_scope() && scope->function() != NULL) {
+      HandleDeclaration(scope->function(), Variable::CONST, NULL);
+    }
+    VisitDeclarations(scope->declarations());
+    AddSimulate(AstNode::kDeclarationsId);
+
+    HValue* context = environment()->LookupContext();
+    AddInstruction(
+        new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
+
     VisitStatements(info()->function()->body());
     if (HasStackOverflow()) return NULL;
 
@@ -2253,30 +2323,26 @@
 
   graph()->OrderBlocks();
   graph()->AssignDominators();
+  graph()->PropagateDeoptimizingMark();
   graph()->EliminateRedundantPhis();
+  if (!graph()->CheckPhis()) {
+    Bailout("Unsupported phi use of arguments object");
+    return NULL;
+  }
   if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
   if (!graph()->CollectPhis()) {
-    Bailout("Phi-use of arguments object");
+    Bailout("Unsupported phi use of uninitialized constant");
     return NULL;
   }
 
   HInferRepresentation rep(graph());
   rep.Analyze();
 
-  if (FLAG_use_range) {
-    HRangeAnalysis rangeAnalysis(graph());
-    rangeAnalysis.Analyze();
-  }
+  graph()->MarkDeoptimizeOnUndefined();
+  graph()->InsertRepresentationChanges();
 
   graph()->InitializeInferredTypes();
   graph()->Canonicalize();
-  graph()->MarkDeoptimizeOnUndefined();
-  graph()->InsertRepresentationChanges();
-  graph()->ComputeMinusZeroChecks();
-
-  // Eliminate redundant stack checks on backwards branches.
-  HStackCheckEliminator sce(graph());
-  sce.Process();
 
   // Perform common subexpression elimination and loop-invariant code motion.
   if (FLAG_use_gvn) {
@@ -2285,6 +2351,16 @@
     gvn.Analyze();
   }
 
+  if (FLAG_use_range) {
+    HRangeAnalysis rangeAnalysis(graph());
+    rangeAnalysis.Analyze();
+  }
+  graph()->ComputeMinusZeroChecks();
+
+  // Eliminate redundant stack checks on backwards branches.
+  HStackCheckEliminator sce(graph());
+  sce.Process();
+
   // Replace the results of check instructions with the original value, if the
   // result is used. This is safe now, since we don't do code motion after this
   // point. It enables better register allocation since the value produced by
@@ -2302,8 +2378,8 @@
     while (instr != NULL) {
       if (instr->IsBoundsCheck()) {
         // Replace all uses of the checked value with the original input.
-        ASSERT(instr->uses()->length() > 0);
-        instr->ReplaceValue(HBoundsCheck::cast(instr)->index());
+        ASSERT(instr->UseCount() > 0);
+        instr->ReplaceAllUsesWith(HBoundsCheck::cast(instr)->index());
       }
       instr = instr->next();
     }
@@ -2318,9 +2394,9 @@
 }
 
 
-void HGraphBuilder::AddSimulate(int id) {
+void HGraphBuilder::AddSimulate(int ast_id) {
   ASSERT(current_block() != NULL);
-  current_block()->AddSimulate(id);
+  current_block()->AddSimulate(ast_id);
 }
 
 
@@ -2352,9 +2428,6 @@
 
 
 void HGraphBuilder::SetupScope(Scope* scope) {
-  // We don't yet handle the function name for named function expressions.
-  if (scope->function() != NULL) BAILOUT("named function expression");
-
   HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
   AddInstruction(undefined_constant);
@@ -2362,40 +2435,41 @@
 
   // Set the initial values of parameters including "this".  "This" has
   // parameter index 0.
-  int count = scope->num_parameters() + 1;
-  for (int i = 0; i < count; ++i) {
+  ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
+
+  for (int i = 0; i < environment()->parameter_count(); ++i) {
     HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
     environment()->Bind(i, parameter);
   }
 
-  // Set the initial values of stack-allocated locals.
-  for (int i = count; i < environment()->length(); ++i) {
+  // First special is HContext.
+  HInstruction* context = AddInstruction(new(zone()) HContext);
+  environment()->BindContext(context);
+
+  // Initialize specials and locals to undefined.
+  for (int i = environment()->parameter_count() + 1;
+       i < environment()->length();
+       ++i) {
     environment()->Bind(i, undefined_constant);
   }
 
   // Handle the arguments and arguments shadow variables specially (they do
   // not have declarations).
   if (scope->arguments() != NULL) {
-    if (!scope->arguments()->IsStackAllocated() ||
-        (scope->arguments_shadow() != NULL &&
-        !scope->arguments_shadow()->IsStackAllocated())) {
-      BAILOUT("context-allocated arguments");
+    if (!scope->arguments()->IsStackAllocated()) {
+      return Bailout("context-allocated arguments");
     }
     HArgumentsObject* object = new(zone()) HArgumentsObject;
     AddInstruction(object);
     graph()->SetArgumentsObject(object);
     environment()->Bind(scope->arguments(), object);
-    if (scope->arguments_shadow() != NULL) {
-      environment()->Bind(scope->arguments_shadow(), object);
-    }
   }
 }
 
 
 void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
-    Visit(statements->at(i));
-    if (HasStackOverflow() || current_block() == NULL) break;
+    CHECK_ALIVE(Visit(statements->at(i)));
   }
 }
 
@@ -2417,10 +2491,15 @@
 
 
 void HGraphBuilder::VisitBlock(Block* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  if (stmt->block_scope() != NULL) {
+    return Bailout("ScopedBlock");
+  }
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    VisitStatements(stmt->statements());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(VisitStatements(stmt->statements()));
   }
   HBasicBlock* break_block = break_info.break_block();
   if (break_block != NULL) {
@@ -2432,15 +2511,24 @@
 
 
 void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   VisitForEffect(stmt->expression());
 }
 
 
 void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
 }
 
 
 void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (stmt->condition()->ToBooleanIsTrue()) {
     AddSimulate(stmt->ThenId());
     Visit(stmt->then_statement());
@@ -2450,20 +2538,27 @@
   } else {
     HBasicBlock* cond_true = graph()->CreateBasicBlock();
     HBasicBlock* cond_false = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->condition(), cond_true, cond_false);
-    cond_true->SetJoinId(stmt->ThenId());
-    cond_false->SetJoinId(stmt->ElseId());
+    CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
 
-    set_current_block(cond_true);
-    Visit(stmt->then_statement());
-    CHECK_BAILOUT;
-    HBasicBlock* other = current_block();
+    if (cond_true->HasPredecessor()) {
+      cond_true->SetJoinId(stmt->ThenId());
+      set_current_block(cond_true);
+      CHECK_BAILOUT(Visit(stmt->then_statement()));
+      cond_true = current_block();
+    } else {
+      cond_true = NULL;
+    }
 
-    set_current_block(cond_false);
-    Visit(stmt->else_statement());
-    CHECK_BAILOUT;
+    if (cond_false->HasPredecessor()) {
+      cond_false->SetJoinId(stmt->ElseId());
+      set_current_block(cond_false);
+      CHECK_BAILOUT(Visit(stmt->else_statement()));
+      cond_false = current_block();
+    } else {
+      cond_false = NULL;
+    }
 
-    HBasicBlock* join = CreateJoin(other, current_block(), stmt->id());
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
     set_current_block(join);
   }
 }
@@ -2501,6 +2596,9 @@
 
 
 void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
   current_block()->Goto(continue_block);
   set_current_block(NULL);
@@ -2508,6 +2606,9 @@
 
 
 void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
   current_block()->Goto(break_block);
   set_current_block(NULL);
@@ -2515,10 +2616,13 @@
 
 
 void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   AstContext* context = call_context();
   if (context == NULL) {
     // Not an inlined return, so an actual one.
-    VISIT_FOR_VALUE(stmt->expression());
+    CHECK_ALIVE(VisitForValue(stmt->expression()));
     HValue* result = environment()->Pop();
     current_block()->FinishExit(new(zone()) HReturn(result));
     set_current_block(NULL);
@@ -2531,11 +2635,11 @@
                       test->if_true(),
                       test->if_false());
     } else if (context->IsEffect()) {
-      VISIT_FOR_EFFECT(stmt->expression());
-      current_block()->Goto(function_return(), false);
+      CHECK_ALIVE(VisitForEffect(stmt->expression()));
+      current_block()->Goto(function_return());
     } else {
       ASSERT(context->IsValue());
-      VISIT_FOR_VALUE(stmt->expression());
+      CHECK_ALIVE(VisitForValue(stmt->expression()));
       HValue* return_value = environment()->Pop();
       current_block()->AddLeaveInlined(return_value, function_return());
     }
@@ -2544,27 +2648,28 @@
 }
 
 
-void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  BAILOUT("WithEnterStatement");
-}
-
-
-void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
-  BAILOUT("WithExitStatement");
+void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("WithStatement");
 }
 
 
 void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // We only optimize switch statements with smi-literal smi comparisons,
   // with a bounded number of clauses.
   const int kCaseClauseLimit = 128;
   ZoneList<CaseClause*>* clauses = stmt->cases();
   int clause_count = clauses->length();
   if (clause_count > kCaseClauseLimit) {
-    BAILOUT("SwitchStatement: too many clauses");
+    return Bailout("SwitchStatement: too many clauses");
   }
 
-  VISIT_FOR_VALUE(stmt->tag());
+  CHECK_ALIVE(VisitForValue(stmt->tag()));
   AddSimulate(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
@@ -2575,29 +2680,32 @@
     CaseClause* clause = clauses->at(i);
     if (clause->is_default()) continue;
     if (!clause->label()->IsSmiLiteral()) {
-      BAILOUT("SwitchStatement: non-literal switch label");
+      return Bailout("SwitchStatement: non-literal switch label");
     }
 
     // Unconditionally deoptimize on the first non-smi compare.
     clause->RecordTypeFeedback(oracle());
     if (!clause->IsSmiCompare()) {
-      current_block()->FinishExitWithDeoptimization();
+      // Finish with deoptimize and add uses of environment values to
+      // account for invisible uses.
+      current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
       set_current_block(NULL);
       break;
     }
 
     // Otherwise generate a compare and branch.
-    VISIT_FOR_VALUE(clause->label());
+    CHECK_ALIVE(VisitForValue(clause->label()));
     HValue* label_value = Pop();
-    HCompare* compare =
-        new(zone()) HCompare(tag_value, label_value, Token::EQ_STRICT);
+    HCompareIDAndBranch* compare =
+        new(zone()) HCompareIDAndBranch(tag_value,
+                                        label_value,
+                                        Token::EQ_STRICT);
     compare->SetInputRepresentation(Representation::Integer32());
-    ASSERT(!compare->HasSideEffects());
-    AddInstruction(compare);
     HBasicBlock* body_block = graph()->CreateBasicBlock();
     HBasicBlock* next_test_block = graph()->CreateBasicBlock();
-    HTest* branch = new(zone()) HTest(compare, body_block, next_test_block);
-    current_block()->Finish(branch);
+    compare->SetSuccessorAt(0, body_block);
+    compare->SetSuccessorAt(1, next_test_block);
+    current_block()->Finish(compare);
     set_current_block(next_test_block);
   }
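
The clause loop above lowers a switch over smi literals into a chain of test blocks: each block compares the tag against one label and either enters that clause's body or falls through to the next test, with the default clause reached only when every compare fails. The shape of that chain, sketched over plain integers instead of hydrogen values:

    #include <vector>

    // Lower `switch (tag)` with integer labels into a test chain: returns the
    // index of the matching clause, or -1 for "fall through to the default".
    // Each loop iteration models one compare-and-branch test block.
    int RunSwitchChain(int tag, const std::vector<int>& labels) {
      int clause_count = static_cast<int>(labels.size());
      for (int i = 0; i < clause_count; ++i) {
        if (tag == labels[i]) return i;  // Branch to the body of clause i.
        // Otherwise control continues in the next test block.
      }
      return -1;  // No label matched; control reaches the default clause.
    }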
 
@@ -2651,8 +2759,7 @@
         set_current_block(join);
       }
 
-      VisitStatements(clause->statements());
-      CHECK_BAILOUT;
+      CHECK_BAILOUT(VisitStatements(clause->statements()));
       fall_through_block = current_block();
     }
   }
@@ -2684,7 +2791,7 @@
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
   HBasicBlock* osr_entry = graph()->CreateBasicBlock();
   HValue* true_value = graph()->GetConstantTrue();
-  HTest* test = new(zone()) HTest(true_value, non_osr_entry, osr_entry);
+  HBranch* test = new(zone()) HBranch(true_value, non_osr_entry, osr_entry);
   current_block()->Finish(test);
 
   HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
@@ -2694,35 +2801,51 @@
   int osr_entry_id = statement->OsrEntryId();
   // We want the correct environment at the OsrEntry instruction.  Build
   // it explicitly.  The expression stack should be empty.
-  int count = environment()->length();
-  ASSERT(count ==
-         (environment()->parameter_count() + environment()->local_count()));
-  for (int i = 0; i < count; ++i) {
-    HUnknownOSRValue* unknown = new(zone()) HUnknownOSRValue;
-    AddInstruction(unknown);
-    environment()->Bind(i, unknown);
+  ASSERT(environment()->ExpressionStackIsEmpty());
+  for (int i = 0; i < environment()->length(); ++i) {
+    HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
+    AddInstruction(osr_value);
+    environment()->Bind(i, osr_value);
   }
 
   AddSimulate(osr_entry_id);
   AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
+  HContext* context = new(zone()) HContext;
+  AddInstruction(context);
+  environment()->BindContext(context);
   current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
   set_current_block(loop_predecessor);
 }
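
For on-stack replacement the builder cannot know the concrete values that live in the unoptimized frame, so every environment slot is rebound to an HUnknownOSRValue before the OsrEntry; the optimized code picks the real values up from the frame at runtime. A schematic of that "forget everything, re-materialize at entry" step, with the environment reduced to a vector of slots and all names invented:

    #include <vector>

    enum class SlotKind { kKnownValue, kUnknownOsrValue };

    struct Slot {
      SlotKind kind;
      int value;  // Only meaningful for kKnownValue.
    };

    // At the OSR entry point, replace every parameter and local slot with an
    // "unknown" placeholder; the actual contents are supplied from the
    // unoptimized frame when execution enters the optimized code.
    void BindOsrEntryEnvironment(std::vector<Slot>* environment) {
      for (Slot& slot : *environment) {
        slot = Slot{SlotKind::kUnknownOsrValue, 0};
      }
    }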
 
 
+void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+                                  HBasicBlock* loop_entry,
+                                  BreakAndContinueInfo* break_info) {
+  BreakAndContinueScope push(break_info, this);
+  AddSimulate(stmt->StackCheckId());
+  HValue* context = environment()->LookupContext();
+  HStackCheck* stack_check =
+    new(zone()) HStackCheck(context, HStackCheck::kBackwardsBranch);
+  AddInstruction(stack_check);
+  ASSERT(loop_entry->IsLoopHeader());
+  loop_entry->loop_information()->set_stack_check(stack_check);
+  CHECK_BAILOUT(Visit(stmt->body()));
+}
+
+
 void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
   PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
-  current_block()->Goto(loop_entry, false);
+  current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
 
   BreakAndContinueInfo break_info(stmt);
-  { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
-  }
+  CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
   HBasicBlock* loop_successor = NULL;
@@ -2732,9 +2855,17 @@
     // back edge.
     body_exit = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_exit, loop_successor);
-    body_exit->SetJoinId(stmt->BackEdgeId());
-    loop_successor->SetJoinId(stmt->ExitId());
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
+    if (body_exit->HasPredecessor()) {
+      body_exit->SetJoinId(stmt->BackEdgeId());
+    } else {
+      body_exit = NULL;
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
   HBasicBlock* loop_exit = CreateLoop(stmt,
                                       loop_entry,
@@ -2746,10 +2877,13 @@
 
 
 void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
   PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
-  current_block()->Goto(loop_entry, false);
+  current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
 
   // If the condition is constant true, do not generate a branch.
@@ -2757,16 +2891,22 @@
   if (!stmt->cond()->ToBooleanIsTrue()) {
     HBasicBlock* body_entry = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
-    body_entry->SetJoinId(stmt->BodyId());
-    loop_successor->SetJoinId(stmt->ExitId());
-    set_current_block(body_entry);
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
 
   BreakAndContinueInfo break_info(stmt);
-  { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -2780,38 +2920,45 @@
 
 
 void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (stmt->init() != NULL) {
-    Visit(stmt->init());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(Visit(stmt->init()));
   }
   ASSERT(current_block() != NULL);
   PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
-  current_block()->Goto(loop_entry, false);
+  current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
 
   HBasicBlock* loop_successor = NULL;
   if (stmt->cond() != NULL) {
     HBasicBlock* body_entry = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
-    body_entry->SetJoinId(stmt->BodyId());
-    loop_successor->SetJoinId(stmt->ExitId());
-    set_current_block(body_entry);
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
 
   BreakAndContinueInfo break_info(stmt);
-  { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
 
   if (stmt->next() != NULL && body_exit != NULL) {
     set_current_block(body_exit);
-    Visit(stmt->next());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(Visit(stmt->next()));
     body_exit = current_block();
   }
 
@@ -2825,22 +2972,34 @@
 
 
 void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
-  BAILOUT("ForInStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("ForInStatement");
 }
 
 
 void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  BAILOUT("TryCatchStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("TryCatchStatement");
 }
 
 
 void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  BAILOUT("TryFinallyStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("TryFinallyStatement");
 }
 
 
 void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  BAILOUT("DebuggerStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("DebuggerStatement");
 }
 
 
@@ -2865,47 +3024,67 @@
 
 
 void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Handle<SharedFunctionInfo> shared_info =
       SearchSharedFunctionInfo(info()->shared_info()->code(),
                                expr);
   if (shared_info.is_null()) {
     shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
   }
-  CHECK_BAILOUT;
+  // We also have a stack overflow if the recursive compilation did.
+  if (HasStackOverflow()) return;
+  HValue* context = environment()->LookupContext();
   HFunctionLiteral* instr =
-      new(zone()) HFunctionLiteral(shared_info, expr->pretenure());
-  ast_context()->ReturnInstruction(instr, expr->id());
+      new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure());
+  return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitSharedFunctionInfoLiteral(
     SharedFunctionInfoLiteral* expr) {
-  BAILOUT("SharedFunctionInfoLiteral");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("SharedFunctionInfoLiteral");
 }
 
 
 void HGraphBuilder::VisitConditional(Conditional* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* cond_true = graph()->CreateBasicBlock();
   HBasicBlock* cond_false = graph()->CreateBasicBlock();
-  VISIT_FOR_CONTROL(expr->condition(), cond_true, cond_false);
-  cond_true->SetJoinId(expr->ThenId());
-  cond_false->SetJoinId(expr->ElseId());
+  CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
 
   // Visit the true and false subexpressions in the same AST context as the
   // whole expression.
-  set_current_block(cond_true);
-  Visit(expr->then_expression());
-  CHECK_BAILOUT;
-  HBasicBlock* other = current_block();
+  if (cond_true->HasPredecessor()) {
+    cond_true->SetJoinId(expr->ThenId());
+    set_current_block(cond_true);
+    CHECK_BAILOUT(Visit(expr->then_expression()));
+    cond_true = current_block();
+  } else {
+    cond_true = NULL;
+  }
 
-  set_current_block(cond_false);
-  Visit(expr->else_expression());
-  CHECK_BAILOUT;
+  if (cond_false->HasPredecessor()) {
+    cond_false->SetJoinId(expr->ElseId());
+    set_current_block(cond_false);
+    CHECK_BAILOUT(Visit(expr->else_expression()));
+    cond_false = current_block();
+  } else {
+    cond_false = NULL;
+  }
 
   if (!ast_context()->IsTest()) {
-    HBasicBlock* join = CreateJoin(other, current_block(), expr->id());
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
     set_current_block(join);
-    if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+    if (join != NULL && !ast_context()->IsEffect()) {
+      return ast_context()->ReturnValue(Pop());
+    }
   }
 }
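
With unreachable arms now represented as NULL, the join at the end of a conditional has three shapes: both arms live (a real join block), exactly one arm live (control simply continues there), or both dead (no current block at all). A small sketch of that three-way join logic, with a placeholder block type rather than HBasicBlock:

    struct JoinBlock {};

    // Assumed allocator for a fresh block in the sketch.
    JoinBlock* NewJoinBlock() { return new JoinBlock(); }

    // Join two possibly-dead control-flow arms: nullptr means "this arm never
    // reaches the join".  Mirrors the way CreateJoin is used above.
    JoinBlock* JoinArms(JoinBlock* first, JoinBlock* second) {
      if (first == nullptr) return second;  // Only the second arm is live.
      if (second == nullptr) return first;  // Only the first arm is live.
      return NewJoinBlock();                // Both live: merge in a new block.
    }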
 
@@ -2930,87 +3109,111 @@
 
 HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
   ASSERT(var->IsContextSlot());
-  HInstruction* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   int length = info()->scope()->ContextChainLength(var->scope());
   while (length-- > 0) {
-    context = new(zone()) HOuterContext(context);
-    AddInstruction(context);
+    HInstruction* context_instruction = new(zone()) HOuterContext(context);
+    AddInstruction(context_instruction);
+    context = context_instruction;
   }
   return context;
 }
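
BuildContextChainWalk emits one HOuterContext per scope level between the current function and the scope that owns the slot, with the hop count coming from ContextChainLength. The same walk over a plain linked chain of contexts, using a hypothetical struct purely for illustration:

    #include <cassert>

    // Hypothetical runtime context: each scope links to the scope that
    // lexically encloses it.
    struct Context {
      Context* outer;
    };

    // Hop `length` levels outward, like the chain of HOuterContext
    // instructions emitted above.
    Context* WalkContextChain(Context* context, int length) {
      while (length-- > 0) {
        assert(context->outer != nullptr);
        context = context->outer;
      }
      return context;
    }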
 
 
 void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
-  Variable* variable = expr->AsVariable();
-  if (variable == NULL) {
-    BAILOUT("reference to rewritten variable");
-  } else if (variable->IsStackAllocated()) {
-    if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
-      BAILOUT("unsupported context for arguments object");
-    }
-    ast_context()->ReturnValue(environment()->Lookup(variable));
-  } else if (variable->IsContextSlot()) {
-    if (variable->mode() == Variable::CONST) {
-      BAILOUT("reference to const context slot");
-    }
-    HValue* context = BuildContextChainWalk(variable);
-    int index = variable->AsSlot()->index();
-    HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, index);
-    ast_context()->ReturnInstruction(instr, expr->id());
-  } else if (variable->is_global()) {
-    LookupResult lookup;
-    GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  Variable* variable = expr->var();
+  if (variable->mode() == Variable::LET) {
+    return Bailout("reference to let variable");
+  }
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      LookupResult lookup;
+      GlobalPropertyAccess type =
+          LookupGlobalProperty(variable, &lookup, false);
 
-    if (type == kUseCell &&
-        info()->global_object()->IsAccessCheckNeeded()) {
-      type = kUseGeneric;
+      if (type == kUseCell &&
+          info()->global_object()->IsAccessCheckNeeded()) {
+        type = kUseGeneric;
+      }
+
+      if (type == kUseCell) {
+        Handle<GlobalObject> global(info()->global_object());
+        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+        bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+        HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
+        return ast_context()->ReturnInstruction(instr, expr->id());
+      } else {
+        HValue* context = environment()->LookupContext();
+        HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+        AddInstruction(global_object);
+        HLoadGlobalGeneric* instr =
+            new(zone()) HLoadGlobalGeneric(context,
+                                           global_object,
+                                           variable->name(),
+                                           ast_context()->is_for_typeof());
+        instr->set_position(expr->position());
+        return ast_context()->ReturnInstruction(instr, expr->id());
+      }
     }
 
-    if (type == kUseCell) {
-      Handle<GlobalObject> global(info()->global_object());
-      Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-      bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-      HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
-      ast_context()->ReturnInstruction(instr, expr->id());
-    } else {
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
-      HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-      AddInstruction(global_object);
-      HLoadGlobalGeneric* instr =
-          new(zone()) HLoadGlobalGeneric(context,
-                                         global_object,
-                                         variable->name(),
-                                         ast_context()->is_for_typeof());
-      instr->set_position(expr->position());
-      ASSERT(instr->HasSideEffects());
-      ast_context()->ReturnInstruction(instr, expr->id());
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      HValue* value = environment()->Lookup(variable);
+      if (variable->mode() == Variable::CONST &&
+          value == graph()->GetConstantHole()) {
+        return Bailout("reference to uninitialized const variable");
+      }
+      return ast_context()->ReturnValue(value);
     }
-  } else {
-    BAILOUT("reference to a variable which requires dynamic lookup");
+
+    case Variable::CONTEXT: {
+      if (variable->mode() == Variable::CONST) {
+        return Bailout("reference to const context slot");
+      }
+      HValue* context = BuildContextChainWalk(variable);
+      HLoadContextSlot* instr =
+          new(zone()) HLoadContextSlot(context, variable->index());
+      return ast_context()->ReturnInstruction(instr, expr->id());
+    }
+
+    case Variable::LOOKUP:
+      return Bailout("reference to a variable which requires dynamic lookup");
   }
 }
 
 
 void HGraphBuilder::VisitLiteral(Literal* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HConstant* instr =
       new(zone()) HConstant(expr->handle(), Representation::Tagged());
-  ast_context()->ReturnInstruction(instr, expr->id());
+  return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
-  HRegExpLiteral* instr = new(zone()) HRegExpLiteral(expr->pattern(),
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  HValue* context = environment()->LookupContext();
+
+  HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
+                                                     expr->pattern(),
                                                      expr->flags(),
                                                      expr->literal_index());
-  ast_context()->ReturnInstruction(instr, expr->id());
+  return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  HValue* context = environment()->LookupContext();
   HObjectLiteral* literal =
       new(zone()) HObjectLiteral(context,
                                  expr->constant_properties(),
@@ -3038,7 +3241,7 @@
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
           if (property->emit_store()) {
-            VISIT_FOR_VALUE(value);
+            CHECK_ALIVE(VisitForValue(value));
             HValue* value = Pop();
             Handle<String> name = Handle<String>::cast(key->handle());
             HStoreNamedGeneric* store =
@@ -3051,7 +3254,7 @@
             AddInstruction(store);
             AddSimulate(key->id());
           } else {
-            VISIT_FOR_EFFECT(value);
+            CHECK_ALIVE(VisitForEffect(value));
           }
           break;
         }
@@ -3059,7 +3262,7 @@
       case ObjectLiteral::Property::PROTOTYPE:
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
-        BAILOUT("Object literal with complex property");
+        return Bailout("Object literal with complex property");
       default: UNREACHABLE();
     }
   }
@@ -3072,18 +3275,23 @@
     // (e.g. because of code motion).
     HToFastProperties* result = new(zone()) HToFastProperties(Pop());
     AddInstruction(result);
-    ast_context()->ReturnValue(result);
+    return ast_context()->ReturnValue(result);
   } else {
-    ast_context()->ReturnValue(Pop());
+    return ast_context()->ReturnValue(Pop());
   }
 }
 
 
 void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+  HValue* context = environment()->LookupContext();
 
-  HArrayLiteral* literal = new(zone()) HArrayLiteral(expr->constant_elements(),
+  HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
+                                                     expr->constant_elements(),
                                                      length,
                                                      expr->literal_index(),
                                                      expr->depth());
@@ -3099,14 +3307,14 @@
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
-    VISIT_FOR_VALUE(subexpr);
+    CHECK_ALIVE(VisitForValue(subexpr));
     HValue* value = Pop();
-    if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
+    if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
 
     // Load the elements array before the first store.
     if (elements == NULL)  {
-     elements = new(zone()) HLoadElements(literal);
-     AddInstruction(elements);
+      elements = new(zone()) HLoadElements(literal);
+      AddInstruction(elements);
     }
 
     HValue* key = AddInstruction(
@@ -3115,12 +3323,7 @@
     AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
     AddSimulate(expr->GetIdForElement(i));
   }
-  ast_context()->ReturnValue(Pop());
-}
-
-
-void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
-  BAILOUT("CatchExtensionObject");
+  return ast_context()->ReturnValue(Pop());
 }
 
 
@@ -3186,8 +3389,7 @@
 HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
                                                     Handle<String> name,
                                                     HValue* value) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HStoreNamedGeneric(
                          context,
                          object,
@@ -3208,7 +3410,7 @@
   ASSERT(!name.is_null());
 
   LookupResult lookup;
-  ZoneMapList* types = expr->GetReceiverTypes();
+  SmallMapList* types = expr->GetReceiverTypes();
   bool is_monomorphic = expr->IsMonomorphic() &&
       ComputeStoredField(types->first(), name, &lookup);
 
@@ -3222,7 +3424,7 @@
 void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
                                                      HValue* object,
                                                      HValue* value,
-                                                     ZoneMapList* types,
+                                                     SmallMapList* types,
                                                      Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
@@ -3261,7 +3463,7 @@
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
-    current_block()->FinishExitWithDeoptimization();
+    current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
   } else {
     HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
     instr->set_position(expr->position());
@@ -3283,15 +3485,14 @@
           Drop(1);
         }
       }
-      ast_context()->ReturnValue(value);
-      return;
+      return ast_context()->ReturnValue(value);
     }
   }
 
   ASSERT(join != NULL);
   join->SetJoinId(expr->id());
   set_current_block(join);
-  if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+  if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
 }
 
 
@@ -3299,14 +3500,14 @@
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   expr->RecordTypeFeedback(oracle());
-  VISIT_FOR_VALUE(prop->obj());
+  CHECK_ALIVE(VisitForValue(prop->obj()));
 
   HValue* value = NULL;
   HInstruction* instr = NULL;
 
   if (prop->key()->IsPropertyName()) {
     // Named store.
-    VISIT_FOR_VALUE(expr->value());
+    CHECK_ALIVE(VisitForValue(expr->value()));
     value = Pop();
     HValue* object = Pop();
 
@@ -3314,7 +3515,7 @@
     Handle<String> name = Handle<String>::cast(key->handle());
     ASSERT(!name.is_null());
 
-    ZoneMapList* types = expr->GetReceiverTypes();
+    SmallMapList* types = expr->GetReceiverTypes();
     LookupResult lookup;
 
     if (expr->IsMonomorphic()) {
@@ -3330,18 +3531,26 @@
 
   } else {
     // Keyed store.
-    VISIT_FOR_VALUE(prop->key());
-    VISIT_FOR_VALUE(expr->value());
+    CHECK_ALIVE(VisitForValue(prop->key()));
+    CHECK_ALIVE(VisitForValue(expr->value()));
     value = Pop();
     HValue* key = Pop();
     HValue* object = Pop();
-    instr = BuildStoreKeyed(object, key, value, expr);
+    bool has_side_effects = false;
+    HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
+                             expr->position(),
+                             true,  // is_store
+                             &has_side_effects);
+    Push(value);
+    ASSERT(has_side_effects);  // Stores always have side effects.
+    AddSimulate(expr->AssignmentId());
+    return ast_context()->ReturnValue(Pop());
   }
   Push(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
   if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
-  ast_context()->ReturnValue(Pop());
+  return ast_context()->ReturnValue(Pop());
 }
 
 
@@ -3363,8 +3572,7 @@
     AddInstruction(instr);
     if (instr->HasSideEffects()) AddSimulate(ast_id);
   } else {
-    HContext* context = new(zone()) HContext;
-    AddInstruction(context);
+    HValue* context = environment()->LookupContext();
     HGlobalObject* global_object = new(zone()) HGlobalObject(context);
     AddInstruction(global_object);
     HStoreGlobalGeneric* instr =
@@ -3384,42 +3592,70 @@
 void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
   Expression* target = expr->target();
   VariableProxy* proxy = target->AsVariableProxy();
-  Variable* var = proxy->AsVariable();
   Property* prop = target->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
+  ASSERT(proxy == NULL || prop == NULL);
 
   // We have a second position recorded in the FullCodeGenerator to have
   // type feedback for the binary operation.
   BinaryOperation* operation = expr->binary_operation();
 
-  if (var != NULL) {
-    VISIT_FOR_VALUE(operation);
-
-    if (var->is_global()) {
-      HandleGlobalVariableAssignment(var,
-                                     Top(),
-                                     expr->position(),
-                                     expr->AssignmentId());
-    } else if (var->IsStackAllocated()) {
-      Bind(var, Top());
-    } else if (var->IsContextSlot()) {
-      HValue* context = BuildContextChainWalk(var);
-      int index = var->AsSlot()->index();
-      HStoreContextSlot* instr =
-          new(zone()) HStoreContextSlot(context, index, Top());
-      AddInstruction(instr);
-      if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
-    } else {
-      BAILOUT("compound assignment to lookup slot");
+  if (proxy != NULL) {
+    Variable* var = proxy->var();
+    if (var->mode() == Variable::CONST || var->mode() == Variable::LET) {
+      return Bailout("unsupported let or const compound assignment");
     }
-    ast_context()->ReturnValue(Pop());
+
+    CHECK_ALIVE(VisitForValue(operation));
+
+    switch (var->location()) {
+      case Variable::UNALLOCATED:
+        HandleGlobalVariableAssignment(var,
+                                       Top(),
+                                       expr->position(),
+                                       expr->AssignmentId());
+        break;
+
+      case Variable::PARAMETER:
+      case Variable::LOCAL:
+        Bind(var, Top());
+        break;
+
+      case Variable::CONTEXT: {
+        // Bail out if we try to mutate a parameter value in a function
+        // using the arguments object.  We do not (yet) correctly handle the
+        // arguments property of the function.
+        if (info()->scope()->arguments() != NULL) {
+          // Parameters will be allocated to context slots.  We have no
+          // direct way to detect that the variable is a parameter so we do
+          // a linear search of the parameter variables.
+          int count = info()->scope()->num_parameters();
+          for (int i = 0; i < count; ++i) {
+            if (var == info()->scope()->parameter(i)) {
+              return Bailout(
+                  "assignment to parameter, function uses arguments object");
+            }
+          }
+        }
+
+        HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot* instr =
+            new(zone()) HStoreContextSlot(context, var->index(), Top());
+        AddInstruction(instr);
+        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        break;
+      }
+
+      case Variable::LOOKUP:
+        return Bailout("compound assignment to lookup slot");
+    }
+    return ast_context()->ReturnValue(Pop());
 
   } else if (prop != NULL) {
     prop->RecordTypeFeedback(oracle());
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* obj = Top();
 
       HInstruction* load = NULL;
@@ -3433,7 +3669,7 @@
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
 
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
       HValue* left = Pop();
 
@@ -3447,20 +3683,25 @@
       Drop(2);
       Push(instr);
       if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
-      ast_context()->ReturnValue(Pop());
+      return ast_context()->ReturnValue(Pop());
 
     } else {
       // Keyed property.
-      VISIT_FOR_VALUE(prop->obj());
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
+      CHECK_ALIVE(VisitForValue(prop->key()));
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
-      HInstruction* load = BuildLoadKeyed(obj, key, prop);
-      PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+      bool has_side_effects = false;
+      HValue* load = HandleKeyedElementAccess(
+          obj, key, NULL, prop, expr->CompoundLoadId(), RelocInfo::kNoPosition,
+          false,  // is_store
+          &has_side_effects);
+      Push(load);
+      if (has_side_effects) AddSimulate(expr->CompoundLoadId());
 
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
       HValue* left = Pop();
 
@@ -3469,92 +3710,127 @@
       if (instr->HasSideEffects()) AddSimulate(operation->id());
 
       expr->RecordTypeFeedback(oracle());
-      HInstruction* store = BuildStoreKeyed(obj, key, instr, expr);
-      AddInstruction(store);
+      HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
+                               RelocInfo::kNoPosition,
+                               true,  // is_store
+                               &has_side_effects);
+
       // Drop the simulated receiver, key, and value.  Return the value.
       Drop(3);
       Push(instr);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
-      ast_context()->ReturnValue(Pop());
+      ASSERT(has_side_effects);  // Stores always have side effects.
+      AddSimulate(expr->AssignmentId());
+      return ast_context()->ReturnValue(Pop());
     }
 
   } else {
-    BAILOUT("invalid lhs in compound assignment");
+    return Bailout("invalid lhs in compound assignment");
   }
 }
 
 
 void HGraphBuilder::VisitAssignment(Assignment* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   VariableProxy* proxy = expr->target()->AsVariableProxy();
-  Variable* var = proxy->AsVariable();
   Property* prop = expr->target()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
+  ASSERT(proxy == NULL || prop == NULL);
 
   if (expr->is_compound()) {
     HandleCompoundAssignment(expr);
     return;
   }
 
-  if (var != NULL) {
-    if (proxy->IsArguments()) BAILOUT("assignment to arguments");
-
-    // Handle the assignment.
-    if (var->IsStackAllocated()) {
-      HValue* value = NULL;
-      // Handle stack-allocated variables on the right-hand side directly.
-      // We do not allow the arguments object to occur in a context where it
-      // may escape, but assignments to stack-allocated locals are
-      // permitted.  Handling such assignments here bypasses the check for
-      // the arguments object in VisitVariableProxy.
-      Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
-      if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
-        value = environment()->Lookup(rhs_var);
-      } else {
-        VISIT_FOR_VALUE(expr->value());
-        value = Pop();
+  if (prop != NULL) {
+    HandlePropertyAssignment(expr);
+  } else if (proxy != NULL) {
+    Variable* var = proxy->var();
+    if (var->mode() == Variable::CONST) {
+      if (expr->op() != Token::INIT_CONST) {
+        return Bailout("non-initializer assignment to const");
       }
-      Bind(var, value);
-      ast_context()->ReturnValue(value);
-
-    } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
-      VISIT_FOR_VALUE(expr->value());
-      HValue* context = BuildContextChainWalk(var);
-      int index = var->AsSlot()->index();
-      HStoreContextSlot* instr =
-          new(zone()) HStoreContextSlot(context, index, Top());
-      AddInstruction(instr);
-      if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
-      ast_context()->ReturnValue(Pop());
-
-    } else if (var->is_global()) {
-      VISIT_FOR_VALUE(expr->value());
-      HandleGlobalVariableAssignment(var,
-                                     Top(),
-                                     expr->position(),
-                                     expr->AssignmentId());
-      ast_context()->ReturnValue(Pop());
-
-    } else {
-      BAILOUT("assignment to LOOKUP or const CONTEXT variable");
+      if (!var->IsStackAllocated()) {
+        return Bailout("assignment to const context slot");
+      }
+      // We insert a use of the old value to detect unsupported uses of const
+      // variables (e.g. initialization inside a loop).
+      HValue* old_value = environment()->Lookup(var);
+      AddInstruction(new HUseConst(old_value));
+    } else if (var->mode() == Variable::LET) {
+      return Bailout("unsupported assignment to let");
     }
 
-  } else if (prop != NULL) {
-    HandlePropertyAssignment(expr);
+    if (proxy->IsArguments()) return Bailout("assignment to arguments");
+
+    // Handle the assignment.
+    switch (var->location()) {
+      case Variable::UNALLOCATED:
+        CHECK_ALIVE(VisitForValue(expr->value()));
+        HandleGlobalVariableAssignment(var,
+                                       Top(),
+                                       expr->position(),
+                                       expr->AssignmentId());
+        return ast_context()->ReturnValue(Pop());
+
+      case Variable::PARAMETER:
+      case Variable::LOCAL: {
+        // We do not allow the arguments object to occur in a context where it
+        // may escape, but assignments to stack-allocated locals are
+        // permitted.
+        CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
+        HValue* value = Pop();
+        Bind(var, value);
+        return ast_context()->ReturnValue(value);
+      }
+
+      case Variable::CONTEXT: {
+        ASSERT(var->mode() != Variable::CONST);
+        // Bail out if we try to mutate a parameter value in a function using
+        // the arguments object.  We do not (yet) correctly handle the
+        // arguments property of the function.
+        if (info()->scope()->arguments() != NULL) {
+          // Parameters will be allocated to context slots.  We have no
+          // direct way to detect that the variable is a parameter.
+          int count = info()->scope()->num_parameters();
+          for (int i = 0; i < count; ++i) {
+            if (var == info()->scope()->parameter(i)) {
+              return Bailout("assignment to parameter in arguments object");
+            }
+          }
+        }
+
+        CHECK_ALIVE(VisitForValue(expr->value()));
+        HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot* instr =
+            new(zone()) HStoreContextSlot(context, var->index(), Top());
+        AddInstruction(instr);
+        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        return ast_context()->ReturnValue(Pop());
+      }
+
+      case Variable::LOOKUP:
+        return Bailout("assignment to LOOKUP variable");
+    }
   } else {
-    BAILOUT("invalid left-hand side in assignment");
+    return Bailout("invalid left-hand side in assignment");
   }
 }
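The non-compound assignment path likewise switches on the target variable's location, bailing out for the cases named in the strings above. A rough JavaScript sketch of handled versus rejected assignments (illustrative; whether a site actually bails out depends on scope analysis):

  var g;
  function handled() {
    var x = 1;
    x = 2;                          // PARAMETER/LOCAL: value bound directly in the environment
    g = 3;                          // UNALLOCATED: HandleGlobalVariableAssignment
    var y = 4;
    return function () { y = 5; };  // in the closure, y = 5 is a CONTEXT store via HStoreContextSlot
  }
  function rejected(p) {
    var capture = function () { return p; };  // p is context-allocated because it is captured
    arguments.length;               // the scope also uses the arguments object
    p = 1;                          // bails out: "assignment to parameter in arguments object"
    return capture;
  }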
 
 
 void HGraphBuilder::VisitThrow(Throw* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // We don't optimize functions with invalid left-hand sides in
   // assignments, count operations, or for-in.  Consequently throw can
   // currently only occur in an effect context.
   ASSERT(ast_context()->IsEffect());
-  VISIT_FOR_VALUE(expr->exception());
+  CHECK_ALIVE(VisitForValue(expr->exception()));
 
+  HValue* context = environment()->LookupContext();
   HValue* value = environment()->Pop();
-  HThrow* instr = new(zone()) HThrow(value);
+  HThrow* instr = new(zone()) HThrow(context, value);
   instr->set_position(expr->position());
   AddInstruction(instr);
   AddSimulate(expr->id());
@@ -3591,8 +3867,7 @@
                                                    Property* expr) {
   ASSERT(expr->key()->IsPropertyName());
   Handle<Object> name = expr->key()->AsLiteral()->handle();
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HLoadNamedGeneric(context, obj, name);
 }
 
@@ -3622,86 +3897,311 @@
 
 HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
                                                    HValue* key) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HLoadKeyedGeneric(context, object, key);
 }
 
 
-HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
-                                                       HValue* key,
-                                                       Property* expr) {
-  ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
-  AddInstruction(new(zone()) HCheckNonSmi(object));
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+    HValue* external_elements,
+    HValue* checked_key,
+    HValue* val,
+    ElementsKind elements_kind,
+    bool is_store) {
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS: {
+        HClampToUint8* clamp = new(zone()) HClampToUint8(val);
+        AddInstruction(clamp);
+        val = clamp;
+        break;
+      }
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+        HToInt32* floor_val = new(zone()) HToInt32(val);
+        AddInstruction(floor_val);
+        val = floor_val;
+        break;
+      }
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+        break;
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+    return new(zone()) HStoreKeyedSpecializedArrayElement(
+        external_elements, checked_key, val, elements_kind);
+  } else {
+    return new(zone()) HLoadKeyedSpecializedArrayElement(
+        external_elements, checked_key, elements_kind);
+  }
+}
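BuildExternalArrayElementAccess centralizes the per-element-kind store coercions: pixel stores clamp to uint8 (HClampToUint8), the integer kinds truncate through HToInt32, and the float/double kinds store the number unchanged. A rough JavaScript illustration, written with present-day typed-array constructor names (an assumption; the era's canvas pixel arrays behave like Uint8ClampedArray):

  var pixels = new Uint8ClampedArray(1);   // EXTERNAL_PIXEL_ELEMENTS
  pixels[0] = 300;                         // clamped: stored as 255
  var ints = new Int32Array(1);            // EXTERNAL_INT_ELEMENTS
  ints[0] = 3.7;                           // truncated toward zero: stored as 3
  var doubles = new Float64Array(1);       // EXTERNAL_DOUBLE_ELEMENTS
  doubles[0] = 3.7;                        // stored as-is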
+
+
+HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
+                                                           HValue* key,
+                                                           HValue* val,
+                                                           Expression* expr,
+                                                           bool is_store) {
+  ASSERT(expr->IsMonomorphic());
   Handle<Map> map = expr->GetMonomorphicReceiverType();
-  ASSERT(map->has_fast_elements());
-  AddInstruction(new(zone()) HCheckMap(object, map));
-  bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
-  HLoadElements* elements = new(zone()) HLoadElements(object);
+  if (!map->has_fast_elements() &&
+      !map->has_fast_double_elements() &&
+      !map->has_external_array_elements()) {
+    return is_store ? BuildStoreKeyedGeneric(object, key, val)
+                    : BuildLoadKeyedGeneric(object, key);
+  }
+  AddInstruction(new(zone()) HCheckNonSmi(object));
+  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
+  HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+  bool fast_double_elements = map->has_fast_double_elements();
+  if (is_store && map->has_fast_elements()) {
+    AddInstruction(new(zone()) HCheckMap(
+        elements, isolate()->factory()->fixed_array_map()));
+  }
   HInstruction* length = NULL;
   HInstruction* checked_key = NULL;
-  if (is_array) {
-    length = AddInstruction(new(zone()) HJSArrayLength(object));
+  if (map->has_external_array_elements()) {
+    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
     checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-    AddInstruction(elements);
-  } else {
-    AddInstruction(elements);
-    length = AddInstruction(new(zone()) HFixedArrayLength(elements));
-    checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+    HLoadExternalArrayPointer* external_elements =
+        new(zone()) HLoadExternalArrayPointer(elements);
+    AddInstruction(external_elements);
+    return BuildExternalArrayElementAccess(external_elements, checked_key,
+                                           val, map->elements_kind(), is_store);
   }
-  return new(zone()) HLoadKeyedFastElement(elements, checked_key);
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadKeyedSpecializedArrayElement(
-    HValue* object,
-    HValue* key,
-    Property* expr) {
-  ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
-  AddInstruction(new(zone()) HCheckNonSmi(object));
-  Handle<Map> map = expr->GetMonomorphicReceiverType();
-  ASSERT(!map->has_fast_elements());
-  ASSERT(map->has_external_array_elements());
-  AddInstruction(new(zone()) HCheckMap(object, map));
-  HLoadElements* elements = new(zone()) HLoadElements(object);
-  AddInstruction(elements);
-  HInstruction* length = new(zone()) HExternalArrayLength(elements);
-  AddInstruction(length);
-  HInstruction* checked_key =
-      AddInstruction(new(zone()) HBoundsCheck(key, length));
-  HLoadExternalArrayPointer* external_elements =
-      new(zone()) HLoadExternalArrayPointer(elements);
-  AddInstruction(external_elements);
-  HLoadKeyedSpecializedArrayElement* pixel_array_value =
-      new(zone()) HLoadKeyedSpecializedArrayElement(
-          external_elements, checked_key, expr->external_array_type());
-  return pixel_array_value;
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadKeyed(HValue* obj,
-                                            HValue* key,
-                                            Property* prop) {
-  if (prop->IsMonomorphic()) {
-    Handle<Map> receiver_type(prop->GetMonomorphicReceiverType());
-    // An object has either fast elements or pixel array elements, but never
-    // both. Pixel array maps that are assigned to pixel array elements are
-    // always created with the fast elements flag cleared.
-    if (receiver_type->has_external_array_elements()) {
-      return BuildLoadKeyedSpecializedArrayElement(obj, key, prop);
-    } else if (receiver_type->has_fast_elements()) {
-      return BuildLoadKeyedFastElement(obj, key, prop);
+  ASSERT(map->has_fast_elements() || fast_double_elements);
+  if (map->instance_type() == JS_ARRAY_TYPE) {
+    length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
+  } else {
+    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
+  }
+  checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+  if (is_store) {
+    if (fast_double_elements) {
+      return new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                      checked_key,
+                                                      val);
+    } else {
+      return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
+    }
+  } else {
+    if (fast_double_elements) {
+      return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+    } else {
+      return new(zone()) HLoadKeyedFastElement(elements, checked_key);
     }
   }
-  return BuildLoadKeyedGeneric(obj, key);
+}
+
+
+HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
+                                                      HValue* key,
+                                                      HValue* val,
+                                                      Expression* prop,
+                                                      int ast_id,
+                                                      int position,
+                                                      bool is_store,
+                                                      bool* has_side_effects) {
+  *has_side_effects = false;
+  AddInstruction(new(zone()) HCheckNonSmi(object));
+  AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
+  SmallMapList* maps = prop->GetReceiverTypes();
+  bool todo_external_array = false;
+
+  static const int kNumElementTypes = kElementsKindCount;
+  bool type_todo[kNumElementTypes];
+  for (int i = 0; i < kNumElementTypes; ++i) {
+    type_todo[i] = false;
+  }
+
+  for (int i = 0; i < maps->length(); ++i) {
+    ASSERT(maps->at(i)->IsMap());
+    type_todo[maps->at(i)->elements_kind()] = true;
+    if (maps->at(i)->elements_kind()
+        >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+      todo_external_array = true;
+    }
+  }
+
+  HBasicBlock* join = graph()->CreateBasicBlock();
+
+  HInstruction* elements_kind_instr =
+      AddInstruction(new(zone()) HElementsKind(object));
+  HCompareConstantEqAndBranch* elements_kind_branch = NULL;
+  HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+  HLoadExternalArrayPointer* external_elements = NULL;
+  HInstruction* checked_key = NULL;
+
+  // FAST_ELEMENTS is assumed to be the first case.
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
+
+  for (ElementsKind elements_kind = FAST_ELEMENTS;
+       elements_kind <= LAST_ELEMENTS_KIND;
+       elements_kind = ElementsKind(elements_kind + 1)) {
+    // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
+    // need to add some code that's executed for all external array cases.
+    STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
+                  LAST_ELEMENTS_KIND);
+    if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
+        && todo_external_array) {
+      HInstruction* length =
+          AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
+      checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+      external_elements = new(zone()) HLoadExternalArrayPointer(elements);
+      AddInstruction(external_elements);
+    }
+    if (type_todo[elements_kind]) {
+      HBasicBlock* if_true = graph()->CreateBasicBlock();
+      HBasicBlock* if_false = graph()->CreateBasicBlock();
+      elements_kind_branch = new(zone()) HCompareConstantEqAndBranch(
+          elements_kind_instr, elements_kind, Token::EQ_STRICT);
+      elements_kind_branch->SetSuccessorAt(0, if_true);
+      elements_kind_branch->SetSuccessorAt(1, if_false);
+      current_block()->Finish(elements_kind_branch);
+
+      set_current_block(if_true);
+      HInstruction* access;
+      if (elements_kind == FAST_ELEMENTS ||
+          elements_kind == FAST_DOUBLE_ELEMENTS) {
+        bool fast_double_elements =
+            elements_kind == FAST_DOUBLE_ELEMENTS;
+        if (is_store && elements_kind == FAST_ELEMENTS) {
+          AddInstruction(new(zone()) HCheckMap(
+              elements, isolate()->factory()->fixed_array_map(),
+              elements_kind_branch));
+        }
+        HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
+        HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
+        HHasInstanceTypeAndBranch* typecheck =
+            new(zone()) HHasInstanceTypeAndBranch(object, JS_ARRAY_TYPE);
+        typecheck->SetSuccessorAt(0, if_jsarray);
+        typecheck->SetSuccessorAt(1, if_fastobject);
+        current_block()->Finish(typecheck);
+
+        set_current_block(if_jsarray);
+        HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
+        AddInstruction(length);
+        checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+        if (is_store) {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                         checked_key,
+                                                         val));
+          } else {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+          }
+        } else {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+          } else {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastElement(elements, checked_key));
+          }
+          Push(access);
+        }
+        *has_side_effects |= access->HasSideEffects();
+        if (position != -1) {
+          access->set_position(position);
+        }
+        if_jsarray->Goto(join);
+
+        set_current_block(if_fastobject);
+        length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
+        checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+        if (is_store) {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                         checked_key,
+                                                         val));
+          } else {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+          }
+        } else {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+          } else {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastElement(elements, checked_key));
+          }
+        }
+      } else if (elements_kind == DICTIONARY_ELEMENTS) {
+        if (is_store) {
+          access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
+        } else {
+          access = AddInstruction(BuildLoadKeyedGeneric(object, key));
+        }
+      } else {  // External array elements.
+        access = AddInstruction(BuildExternalArrayElementAccess(
+            external_elements, checked_key, val, elements_kind, is_store));
+      }
+      *has_side_effects |= access->HasSideEffects();
+      access->set_position(position);
+      if (!is_store) {
+        Push(access);
+      }
+      current_block()->Goto(join);
+      set_current_block(if_false);
+    }
+  }
+
+  // Deopt if none of the cases matched.
+  current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+  join->SetJoinId(ast_id);
+  set_current_block(join);
+  return is_store ? NULL : Pop();
+}
+
+
+HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
+                                                HValue* key,
+                                                HValue* val,
+                                                Expression* expr,
+                                                int ast_id,
+                                                int position,
+                                                bool is_store,
+                                                bool* has_side_effects) {
+  ASSERT(!expr->IsPropertyName());
+  HInstruction* instr = NULL;
+  if (expr->IsMonomorphic()) {
+    instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
+  } else if (expr->GetReceiverTypes() != NULL &&
+             !expr->GetReceiverTypes()->is_empty()) {
+    return HandlePolymorphicElementAccess(
+        obj, key, val, expr, ast_id, position, is_store, has_side_effects);
+  } else {
+    if (is_store) {
+      instr = BuildStoreKeyedGeneric(obj, key, val);
+    } else {
+      instr = BuildLoadKeyedGeneric(obj, key);
+    }
+  }
+  instr->set_position(position);
+  AddInstruction(instr);
+  *has_side_effects = instr->HasSideEffects();
+  return instr;
 }
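HandleKeyedElementAccess is now the single entry point for keyed loads and stores: a monomorphic site gets the specialized access built above, a site with several recorded receiver maps gets the polymorphic elements-kind dispatch, and anything else falls back to the generic keyed IC. A JavaScript-level sketch of the three shapes (illustrative; the classification comes from the type-feedback oracle, not from the source text):

  function mono(a, i) { return a[i]; }   // only ever sees fast JS arrays
  function poly(a, i) { return a[i]; }   // sees fast JS arrays and Int32Arrays
  function mega(a, i) { return a[i]; }   // sees receivers with no usable map feedback
  mono([1, 2, 3], 0);                    // monomorphic: checked fast-element load
  poly([1, 2, 3], 0);
  poly(new Int32Array(4), 0);            // polymorphic: branch on HElementsKind
  mega({0: 1}, 0); mega("str", 0);       // generic: HLoadKeyedGeneric / HStoreKeyedGeneric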
 
 
 HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
                                                     HValue* key,
                                                     HValue* value) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HStoreKeyedGeneric(
                          context,
                          object,
@@ -3710,82 +4210,6 @@
                          function_strict_mode());
 }
 
-
-HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
-                                                        HValue* key,
-                                                        HValue* val,
-                                                        Expression* expr) {
-  ASSERT(expr->IsMonomorphic());
-  AddInstruction(new(zone()) HCheckNonSmi(object));
-  Handle<Map> map = expr->GetMonomorphicReceiverType();
-  ASSERT(map->has_fast_elements());
-  AddInstruction(new(zone()) HCheckMap(object, map));
-  HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
-  AddInstruction(new(zone()) HCheckMap(
-      elements, isolate()->factory()->fixed_array_map()));
-  bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
-  HInstruction* length = NULL;
-  if (is_array) {
-    length = AddInstruction(new(zone()) HJSArrayLength(object));
-  } else {
-    length = AddInstruction(new(zone()) HFixedArrayLength(elements));
-  }
-  HInstruction* checked_key =
-      AddInstruction(new(zone()) HBoundsCheck(key, length));
-  return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreKeyedSpecializedArrayElement(
-    HValue* object,
-    HValue* key,
-    HValue* val,
-    Expression* expr) {
-  ASSERT(expr->IsMonomorphic());
-  AddInstruction(new(zone()) HCheckNonSmi(object));
-  Handle<Map> map = expr->GetMonomorphicReceiverType();
-  ASSERT(!map->has_fast_elements());
-  ASSERT(map->has_external_array_elements());
-  AddInstruction(new(zone()) HCheckMap(object, map));
-  HLoadElements* elements = new(zone()) HLoadElements(object);
-  AddInstruction(elements);
-  HInstruction* length = AddInstruction(
-      new(zone()) HExternalArrayLength(elements));
-  HInstruction* checked_key =
-      AddInstruction(new(zone()) HBoundsCheck(key, length));
-  HLoadExternalArrayPointer* external_elements =
-      new(zone()) HLoadExternalArrayPointer(elements);
-  AddInstruction(external_elements);
-  return new(zone()) HStoreKeyedSpecializedArrayElement(
-      external_elements,
-      checked_key,
-      val,
-      expr->external_array_type());
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreKeyed(HValue* object,
-                                             HValue* key,
-                                             HValue* value,
-                                             Expression* expr) {
-  if (expr->IsMonomorphic()) {
-    Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
-    // An object has either fast elements or external array elements, but
-    // never both. Pixel array maps that are assigned to pixel array elements
-    // are always created with the fast elements flag cleared.
-    if (receiver_type->has_external_array_elements()) {
-      return BuildStoreKeyedSpecializedArrayElement(object,
-                                                    key,
-                                                    value,
-                                                    expr);
-    } else if (receiver_type->has_fast_elements()) {
-      return BuildStoreKeyedFastElement(object, key, value, expr);
-    }
-  }
-  return BuildStoreKeyedGeneric(object, key, value);
-}
-
-
 bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
   if (proxy == NULL) return false;
@@ -3810,7 +4234,7 @@
   } else {
     Push(graph()->GetArgumentsObject());
     VisitForValue(expr->key());
-    if (HasStackOverflow()) return false;
+    if (HasStackOverflow() || current_block() == NULL) return true;
     HValue* key = Pop();
     Drop(1);  // Arguments object.
     HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
@@ -3826,36 +4250,37 @@
 
 
 void HGraphBuilder::VisitProperty(Property* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   expr->RecordTypeFeedback(oracle());
 
   if (TryArgumentsAccess(expr)) return;
-  CHECK_BAILOUT;
 
-  VISIT_FOR_VALUE(expr->obj());
+  CHECK_ALIVE(VisitForValue(expr->obj()));
 
   HInstruction* instr = NULL;
   if (expr->IsArrayLength()) {
     HValue* array = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(array));
-    AddInstruction(new(zone()) HCheckInstanceType(array,
-                                                  JS_ARRAY_TYPE,
-                                                  JS_ARRAY_TYPE));
-    instr = new(zone()) HJSArrayLength(array);
+    HInstruction* mapcheck =
+        AddInstruction(HCheckInstanceType::NewIsJSArray(array));
+    instr = new(zone()) HJSArrayLength(array, mapcheck);
 
   } else if (expr->IsStringLength()) {
     HValue* string = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(string));
-    AddInstruction(new(zone()) HCheckInstanceType(string,
-                                                  FIRST_STRING_TYPE,
-                                                  LAST_STRING_TYPE));
+    AddInstruction(HCheckInstanceType::NewIsString(string));
     instr = new(zone()) HStringLength(string);
   } else if (expr->IsStringAccess()) {
-    VISIT_FOR_VALUE(expr->key());
+    CHECK_ALIVE(VisitForValue(expr->key()));
     HValue* index = Pop();
     HValue* string = Pop();
-    HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+    HValue* context = environment()->LookupContext();
+    HStringCharCodeAt* char_code =
+        BuildStringCharCodeAt(context, string, index);
     AddInstruction(char_code);
-    instr = new(zone()) HStringCharFromCode(char_code);
+    instr = new(zone()) HStringCharFromCode(context, char_code);
 
   } else if (expr->IsFunctionPrototype()) {
     HValue* function = Pop();
@@ -3864,27 +4289,43 @@
 
   } else if (expr->key()->IsPropertyName()) {
     Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
-    ZoneMapList* types = expr->GetReceiverTypes();
+    SmallMapList* types = expr->GetReceiverTypes();
 
     HValue* obj = Pop();
     if (expr->IsMonomorphic()) {
       instr = BuildLoadNamed(obj, expr, types->first(), name);
     } else if (types != NULL && types->length() > 1) {
       AddInstruction(new(zone()) HCheckNonSmi(obj));
-      instr = new(zone()) HLoadNamedFieldPolymorphic(obj, types, name);
+      HValue* context = environment()->LookupContext();
+      instr = new(zone()) HLoadNamedFieldPolymorphic(context, obj, types, name);
     } else {
       instr = BuildLoadNamedGeneric(obj, expr);
     }
 
   } else {
-    VISIT_FOR_VALUE(expr->key());
+    CHECK_ALIVE(VisitForValue(expr->key()));
 
     HValue* key = Pop();
     HValue* obj = Pop();
-    instr = BuildLoadKeyed(obj, key, expr);
+
+    bool has_side_effects = false;
+    HValue* load = HandleKeyedElementAccess(
+        obj, key, NULL, expr, expr->id(), expr->position(),
+        false,  // is_store
+        &has_side_effects);
+    if (has_side_effects) {
+      if (ast_context()->IsEffect()) {
+        AddSimulate(expr->id());
+      } else {
+        Push(load);
+        AddSimulate(expr->id());
+        Drop(1);
+      }
+    }
+    return ast_context()->ReturnValue(load);
   }
   instr->set_position(expr->position());
-  ast_context()->ReturnInstruction(instr, expr->id());
+  return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
@@ -3909,7 +4350,7 @@
 
 void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
                                                HValue* receiver,
-                                               ZoneMapList* types,
+                                               SmallMapList* types,
                                                Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
@@ -3938,10 +4379,11 @@
         PrintF("Trying to inline the polymorphic call to %s\n",
                *name->ToCString());
       }
-      if (!FLAG_polymorphic_inlining || !TryInline(expr)) {
-        // Check for bailout, as trying to inline might fail due to bailout
-        // during hydrogen processing.
-        CHECK_BAILOUT;
+      if (FLAG_polymorphic_inlining && TryInline(expr)) {
+        // Trying to inline will signal that we should bail out of the
+        // entire compilation by setting stack overflow on the visitor.
+        if (HasStackOverflow()) return;
+      } else {
         HCallConstantFunction* call =
             new(zone()) HCallConstantFunction(expr->target(), argument_count);
         call->set_position(expr->position());
@@ -3959,10 +4401,9 @@
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
-    current_block()->FinishExitWithDeoptimization();
+    current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
   } else {
-    HContext* context = new(zone()) HContext;
-    AddInstruction(context);
+    HValue* context = environment()->LookupContext();
     HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
     call->set_position(expr->position());
     PreProcessCall(call);
@@ -3972,8 +4413,7 @@
       if (!ast_context()->IsEffect()) Push(call);
       current_block()->Goto(join);
     } else {
-      ast_context()->ReturnInstruction(call, expr->id());
-      return;
+      return ast_context()->ReturnInstruction(call, expr->id());
     }
   }
 
@@ -3981,30 +4421,29 @@
   // even without predecessors to the join block, we set it as the exit
   // block and continue by adding instructions there.
   ASSERT(join != NULL);
-  set_current_block(join);
   if (join->HasPredecessor()) {
+    set_current_block(join);
     join->SetJoinId(expr->id());
-    if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+    if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
+  } else {
+    set_current_block(NULL);
   }
 }
 
 
-void HGraphBuilder::TraceInline(Handle<JSFunction> target, const char* reason) {
+void HGraphBuilder::TraceInline(Handle<JSFunction> target,
+                                Handle<JSFunction> caller,
+                                const char* reason) {
   if (FLAG_trace_inlining) {
+    SmartArrayPointer<char> target_name =
+        target->shared()->DebugName()->ToCString();
+    SmartArrayPointer<char> caller_name =
+        caller->shared()->DebugName()->ToCString();
     if (reason == NULL) {
-      // We are currently in the context of inlined function thus we have
-      // to go to an outer FunctionState to get caller.
-      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
-      SmartPointer<char> caller =
-          function_state()->outer()->compilation_info()->function()->
-              debug_name()->ToCString();
-      PrintF("Inlined %s called from %s.\n", *callee, *caller);
+      PrintF("Inlined %s called from %s.\n", *target_name, *caller_name);
     } else {
-      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
-      SmartPointer<char> caller =
-          info()->function()->debug_name()->ToCString();
       PrintF("Did not inline %s called from %s (%s).\n",
-             *callee, *caller, reason);
+             *target_name, *caller_name, reason);
     }
   }
 }
@@ -4013,20 +4452,28 @@
 bool HGraphBuilder::TryInline(Call* expr) {
   if (!FLAG_use_inlining) return false;
 
+  // The function call we are inlining is a method call if the call
+  // is a property call.
+  CallKind call_kind = (expr->expression()->AsProperty() == NULL)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
+  Handle<JSFunction> caller = info()->closure();
   Handle<JSFunction> target = expr->target();
+  Handle<SharedFunctionInfo> target_shared(target->shared());
 
   // Do a quick check on source code length to avoid parsing large
   // inlining candidates.
   if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
-    TraceInline(target, "target text too big");
+    TraceInline(target, caller, "target text too big");
     return false;
   }
 
   // Target must be inlineable.
   if (!target->IsInlineable()) {
-    TraceInline(target, "target not inlineable");
+    TraceInline(target, caller, "target not inlineable");
     return false;
   }
 
@@ -4035,7 +4482,7 @@
   if (target->context() != outer_info->closure()->context() ||
       outer_info->scope()->contains_with() ||
       outer_info->scope()->num_heap_slots() > 0) {
-    TraceInline(target, "target requires context change");
+    TraceInline(target, caller, "target requires context change");
     return false;
   }
 
@@ -4044,7 +4491,7 @@
   int current_level = 1;
   while (env->outer() != NULL) {
     if (current_level == Compiler::kMaxInliningLevels) {
-      TraceInline(target, "inline depth limit reached");
+      TraceInline(target, caller, "inline depth limit reached");
       return false;
     }
     current_level++;
@@ -4052,14 +4499,14 @@
   }
 
   // Don't inline recursive functions.
-  if (target->shared() == outer_info->closure()->shared()) {
-    TraceInline(target, "target is recursive");
+  if (*target_shared == outer_info->closure()->shared()) {
+    TraceInline(target, caller, "target is recursive");
     return false;
   }
 
   // We don't want to add more than a certain number of nodes from inlining.
   if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
-    TraceInline(target, "cumulative AST node limit reached");
+    TraceInline(target, caller, "cumulative AST node limit reached");
     return false;
   }
 
@@ -4072,14 +4519,14 @@
     if (target_info.isolate()->has_pending_exception()) {
       // Parse or scope error, never optimize this function.
       SetStackOverflow();
-      target->shared()->set_optimization_disabled(true);
+      target_shared->DisableOptimization(*target);
     }
-    TraceInline(target, "parse failure");
+    TraceInline(target, caller, "parse failure");
     return false;
   }
 
   if (target_info.scope()->num_heap_slots() > 0) {
-    TraceInline(target, "target has context-allocated variables");
+    TraceInline(target, caller, "target has context-allocated variables");
     return false;
   }
   FunctionLiteral* function = target_info.function();
@@ -4087,32 +4534,32 @@
   // Count the number of AST nodes added by inlining this call.
   int nodes_added = AstNode::Count() - count_before;
   if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
-    TraceInline(target, "target AST is too large");
-    return false;
-  }
-
-  // Check if we can handle all declarations in the inlined functions.
-  VisitDeclarations(target_info.scope()->declarations());
-  if (HasStackOverflow()) {
-    TraceInline(target, "target has non-trivial declaration");
-    ClearStackOverflow();
+    TraceInline(target, caller, "target AST is too large");
     return false;
   }
 
   // Don't inline functions that uses the arguments object or that
   // have a mismatching number of parameters.
-  Handle<SharedFunctionInfo> target_shared(target->shared());
   int arity = expr->arguments()->length();
   if (function->scope()->arguments() != NULL ||
       arity != target_shared->formal_parameter_count()) {
-    TraceInline(target, "target requires special argument handling");
+    TraceInline(target, caller, "target requires special argument handling");
     return false;
   }
 
+  // All declarations must be inlineable.
+  ZoneList<Declaration*>* decls = target_info.scope()->declarations();
+  int decl_count = decls->length();
+  for (int i = 0; i < decl_count; ++i) {
+    if (!decls->at(i)->IsInlineable()) {
+      TraceInline(target, caller, "target has non-trivial declaration");
+      return false;
+    }
+  }
   // All statements in the body must be inlineable.
   for (int i = 0, count = function->body()->length(); i < count; ++i) {
     if (!function->body()->at(i)->IsInlineable()) {
-      TraceInline(target, "target contains unsupported syntax");
+      TraceInline(target, caller, "target contains unsupported syntax");
       return false;
     }
   }
@@ -4124,9 +4571,16 @@
     // generating the optimized inline code.
     target_info.EnableDeoptimizationSupport();
     if (!FullCodeGenerator::MakeCode(&target_info)) {
-      TraceInline(target, "could not generate deoptimization info");
+      TraceInline(target, caller, "could not generate deoptimization info");
       return false;
     }
+    if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
+      // The scope info might not have been set if a lazily compiled
+      // function is inlined before being called for the first time.
+      Handle<SerializedScopeInfo> target_scope_info =
+          SerializedScopeInfo::Create(target_info.scope());
+      target_shared->set_scope_info(*target_scope_info);
+    }
     target_shared->EnableDeoptimizationSupport(*target_info.code());
     Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
                                         &target_info,
@@ -4134,6 +4588,9 @@
   }
 
   // ----------------------------------------------------------------
+  // After this point, we've made a decision to inline this function (so
+  // TryInline should always return true).
+
   // Save the pending call context and type feedback oracle. Set up new ones
   // for the inlined function.
   ASSERT(target_shared->has_deoptimization_support());
@@ -4144,25 +4601,32 @@
 
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner_env =
-      environment()->CopyForInlining(target, function, true, undefined);
+      environment()->CopyForInlining(target,
+                                     function,
+                                     undefined,
+                                     call_kind);
   HBasicBlock* body_entry = CreateBasicBlock(inner_env);
   current_block()->Goto(body_entry);
-
   body_entry->SetJoinId(expr->ReturnId());
   set_current_block(body_entry);
-  AddInstruction(new(zone()) HEnterInlined(target, function));
+  AddInstruction(new(zone()) HEnterInlined(target,
+                                           function,
+                                           call_kind));
+  VisitDeclarations(target_info.scope()->declarations());
   VisitStatements(function->body());
   if (HasStackOverflow()) {
     // Bail out if the inline function did, as we cannot residualize a call
     // instead.
-    TraceInline(target, "inline graph construction failed");
-    return false;
+    TraceInline(target, caller, "inline graph construction failed");
+    target_shared->DisableOptimization(*target);
+    inline_bailout_ = true;
+    return true;
   }
 
   // Update inlined nodes count.
   inlined_count_ += nodes_added;
 
-  TraceInline(target, NULL);
+  TraceInline(target, caller, NULL);
 
   if (current_block() != NULL) {
     // Add a return of undefined if control can fall off the body.  In a
@@ -4171,7 +4635,7 @@
       ASSERT(function_return() != NULL);
       ASSERT(call_context()->IsEffect() || call_context()->IsValue());
       if (call_context()->IsEffect()) {
-        current_block()->Goto(function_return(), false);
+        current_block()->Goto(function_return());
       } else {
         current_block()->AddLeaveInlined(undefined, function_return());
       }
@@ -4183,11 +4647,11 @@
       // TODO(3168478): refactor to avoid this.
       HBasicBlock* empty_true = graph()->CreateBasicBlock();
       HBasicBlock* empty_false = graph()->CreateBasicBlock();
-      HTest* test = new(zone()) HTest(undefined, empty_true, empty_false);
+      HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
       current_block()->Finish(test);
 
-      empty_true->Goto(inlined_test_context()->if_true(), false);
-      empty_false->Goto(inlined_test_context()->if_false(), false);
+      empty_true->Goto(inlined_test_context()->if_true());
+      empty_false->Goto(inlined_test_context()->if_false());
     }
   }
 
@@ -4195,26 +4659,29 @@
   if (inlined_test_context() != NULL) {
     HBasicBlock* if_true = inlined_test_context()->if_true();
     HBasicBlock* if_false = inlined_test_context()->if_false();
-    if_true->SetJoinId(expr->id());
-    if_false->SetJoinId(expr->id());
-    ASSERT(ast_context() == inlined_test_context());
+
     // Pop the return test context from the expression context stack.
+    ASSERT(ast_context() == inlined_test_context());
     ClearInlinedTestContext();
 
     // Forward to the real test context.
-    HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
-    HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
-    if_true->Goto(true_target, false);
-    if_false->Goto(false_target, false);
-
-    // TODO(kmillikin): Come up with a better way to handle this. It is too
-    // subtle. NULL here indicates that the enclosing context has no control
-    // flow to handle.
+    if (if_true->HasPredecessor()) {
+      if_true->SetJoinId(expr->id());
+      HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
+      if_true->Goto(true_target);
+    }
+    if (if_false->HasPredecessor()) {
+      if_false->SetJoinId(expr->id());
+      HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
+      if_false->Goto(false_target);
+    }
     set_current_block(NULL);
 
-  } else {
+  } else if (function_return()->HasPredecessor()) {
     function_return()->SetJoinId(expr->id());
     set_current_block(function_return());
+  } else {
+    set_current_block(NULL);
   }
 
   return true;
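
The hunk above changes how inline-graph-construction failure is reported: instead of returning false, the builder disables optimization for the target, records the failure in inline_bailout_, and still returns true so the caller simply emits a regular call. A minimal standalone sketch of that flag-based protocol, using hypothetical names rather than V8's real builder API:

    // Sketch only: the real HGraphBuilder carries far more state.
    class InlinerSketch {
     public:
      // Returns true when the call site has been handled, either by inlining
      // it or by recording that the caller must emit an ordinary call.
      bool TryInline(bool graph_construction_failed) {
        if (graph_construction_failed) {
          inline_bailout_ = true;  // the caller checks this afterwards
          return true;
        }
        return true;  // inlined successfully
      }
      bool inline_bailout() const { return inline_bailout_; }
     private:
      bool inline_bailout_ = false;
    };
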
@@ -4236,18 +4703,20 @@
       if (argument_count == 2 && check_type == STRING_CHECK) {
         HValue* index = Pop();
         HValue* string = Pop();
+        HValue* context = environment()->LookupContext();
         ASSERT(!expr->holder().is_null());
         AddInstruction(new(zone()) HCheckPrototypeMaps(
             oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
             expr->holder()));
-        HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+        HStringCharCodeAt* char_code =
+            BuildStringCharCodeAt(context, string, index);
         if (id == kStringCharCodeAt) {
           ast_context()->ReturnInstruction(char_code, expr->id());
           return true;
         }
         AddInstruction(char_code);
         HStringCharFromCode* result =
-            new(zone()) HStringCharFromCode(char_code);
+            new(zone()) HStringCharFromCode(context, char_code);
         ast_context()->ReturnInstruction(result, expr->id());
         return true;
       }
@@ -4262,8 +4731,10 @@
       if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
         HValue* argument = Pop();
+        HValue* context = environment()->LookupContext();
         Drop(1);  // Receiver.
-        HUnaryMathOperation* op = new(zone()) HUnaryMathOperation(argument, id);
+        HUnaryMathOperation* op =
+            new(zone()) HUnaryMathOperation(context, argument, id);
         op->set_position(expr->position());
         ast_context()->ReturnInstruction(op, expr->id());
         return true;
@@ -4275,31 +4746,33 @@
         HValue* right = Pop();
         HValue* left = Pop();
         Pop();  // Pop receiver.
+        HValue* context = environment()->LookupContext();
         HInstruction* result = NULL;
         // Use sqrt() if exponent is 0.5 or -0.5.
         if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
           double exponent = HConstant::cast(right)->DoubleValue();
           if (exponent == 0.5) {
-            result = new(zone()) HUnaryMathOperation(left, kMathPowHalf);
+            result =
+                new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
           } else if (exponent == -0.5) {
             HConstant* double_one =
                 new(zone()) HConstant(Handle<Object>(Smi::FromInt(1)),
                                       Representation::Double());
             AddInstruction(double_one);
             HUnaryMathOperation* square_root =
-                new(zone()) HUnaryMathOperation(left, kMathPowHalf);
+                new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
             AddInstruction(square_root);
             // MathPowHalf doesn't have side effects so there's no need for
             // an environment simulation here.
             ASSERT(!square_root->HasSideEffects());
-            result = new(zone()) HDiv(double_one, square_root);
+            result = new(zone()) HDiv(context, double_one, square_root);
           } else if (exponent == 2.0) {
-            result = new(zone()) HMul(left, left);
+            result = new(zone()) HMul(context, left, left);
           }
         } else if (right->IsConstant() &&
                    HConstant::cast(right)->HasInteger32Value() &&
                    HConstant::cast(right)->Integer32Value() == 2) {
-          result = new(zone()) HMul(left, left);
+          result = new(zone()) HMul(context, left, left);
         }
 
         if (result == NULL) {
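
The Math.pow inlining in the hunk above strength-reduces constant exponents: 0.5 becomes a square root, -0.5 becomes one over a square root, and 2 becomes a self-multiplication. A standalone sketch of the identities relied on, assuming positive finite inputs and ignoring IEEE corner cases such as negative zero and infinities:

    #include <cassert>
    #include <cmath>

    // Reductions corresponding to the constant-exponent cases above.
    double ReducedPow(double x, double exponent) {
      if (exponent == 0.5) return std::sqrt(x);         // pow(x, 0.5)
      if (exponent == -0.5) return 1.0 / std::sqrt(x);  // pow(x, -0.5)
      if (exponent == 2.0) return x * x;                // pow(x, 2)
      return std::pow(x, exponent);                     // no reduction applies
    }

    int main() {
      assert(ReducedPow(9.0, 0.5) == 3.0);
      assert(ReducedPow(4.0, -0.5) == 0.5);
      assert(ReducedPow(3.0, 2.0) == 9.0);
      return 0;
    }
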
@@ -4351,14 +4824,16 @@
 
   // Found pattern f.apply(receiver, arguments).
   VisitForValue(prop->obj());
-  if (HasStackOverflow()) return false;
-  HValue* function = Pop();
+  if (HasStackOverflow() || current_block() == NULL) return true;
+  HValue* function = Top();
+  AddCheckConstantFunction(expr, function, function_map, true);
+  Drop(1);
+
   VisitForValue(args->at(0));
-  if (HasStackOverflow()) return false;
+  if (HasStackOverflow() || current_block() == NULL) return true;
   HValue* receiver = Pop();
   HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
   HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
-  AddCheckConstantFunction(expr, function, function_map, true);
   HInstruction* result =
       new(zone()) HApplyArguments(function, receiver, length, elements);
   result->set_position(expr->position());
@@ -4368,6 +4843,9 @@
 
 
 void HGraphBuilder::VisitCall(Call* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Expression* callee = expr->expression();
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
   HInstruction* call = NULL;
@@ -4376,48 +4854,42 @@
   if (prop != NULL) {
     if (!prop->key()->IsPropertyName()) {
       // Keyed function call.
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitArgument(prop->obj()));
 
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->key()));
       // Push receiver and key like the non-optimized code generator expects it.
       HValue* key = Pop();
       HValue* receiver = Pop();
       Push(key);
       Push(receiver);
 
-      VisitExpressions(expr->arguments());
-      CHECK_BAILOUT;
+      CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
-      call = PreProcessCall(
-          new(zone()) HCallKeyed(context, key, argument_count));
+      HValue* context = environment()->LookupContext();
+      call = new(zone()) HCallKeyed(context, key, argument_count);
       call->set_position(expr->position());
-      Drop(1);  // Key.
-      ast_context()->ReturnInstruction(call, expr->id());
-      return;
+      Drop(argument_count + 1);  // 1 is the key.
+      return ast_context()->ReturnInstruction(call, expr->id());
     }
 
     // Named function call.
-    expr->RecordTypeFeedback(oracle());
+    expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
 
     if (TryCallApply(expr)) return;
-    CHECK_BAILOUT;
 
-    VISIT_FOR_VALUE(prop->obj());
-    VisitExpressions(expr->arguments());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
     Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
 
-    expr->RecordTypeFeedback(oracle());
-    ZoneMapList* types = expr->GetReceiverTypes();
+    SmallMapList* types = expr->GetReceiverTypes();
 
     HValue* receiver =
         environment()->ExpressionStackAt(expr->arguments()->length());
     if (expr->IsMonomorphic()) {
-      Handle<Map> receiver_map =
-          (types == NULL) ? Handle<Map>::null() : types->first();
+      Handle<Map> receiver_map = (types == NULL || types->is_empty())
+          ? Handle<Map>::null()
+          : types->first();
       if (TryInlineBuiltinFunction(expr,
                                    receiver,
                                    receiver_map,
@@ -4430,23 +4902,16 @@
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
         // when a primitive receiver check is required.
-        HContext* context = new(zone()) HContext;
-        AddInstruction(context);
+        HValue* context = environment()->LookupContext();
         call = PreProcessCall(
             new(zone()) HCallNamed(context, name, argument_count));
       } else {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
 
-        if (TryInline(expr)) {
-          return;
-        } else {
-          // Check for bailout, as the TryInline call in the if condition above
-          // might return false due to bailout during hydrogen processing.
-          CHECK_BAILOUT;
-          call = PreProcessCall(
-              new(zone()) HCallConstantFunction(expr->target(),
-                                                argument_count));
-        }
+        if (TryInline(expr)) return;
+        call = PreProcessCall(
+            new(zone()) HCallConstantFunction(expr->target(),
+                                              argument_count));
       }
     } else if (types != NULL && types->length() > 1) {
       ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -4454,22 +4919,18 @@
       return;
 
     } else {
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
+      HValue* context = environment()->LookupContext();
       call = PreProcessCall(
           new(zone()) HCallNamed(context, name, argument_count));
     }
 
   } else {
-    Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-    bool global_call = (var != NULL) && var->is_global() && !var->is_this();
-
-    if (!global_call) {
-      ++argument_count;
-      VISIT_FOR_VALUE(expr->expression());
-    }
+    VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    // FIXME.
+    bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
 
     if (global_call) {
+      Variable* var = proxy->var();
       bool known_global_function = false;
       // If there is a global property cell for the name at compile time and
       // access check is not enabled we assume that the function will not change
@@ -4484,14 +4945,12 @@
       if (known_global_function) {
         // Push the global object instead of the global receiver because
         // code generated by the full code generator expects it.
-        HContext* context = new(zone()) HContext;
+        HValue* context = environment()->LookupContext();
         HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-        AddInstruction(context);
         PushAndAdd(global_object);
-        VisitExpressions(expr->arguments());
-        CHECK_BAILOUT;
+        CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-        VISIT_FOR_VALUE(expr->expression());
+        CHECK_ALIVE(VisitForValue(expr->expression()));
         HValue* function = Pop();
         AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
 
@@ -4505,63 +4964,61 @@
                IsGlobalObject());
         environment()->SetExpressionStackAt(receiver_index, global_receiver);
 
-        if (TryInline(expr)) {
-          return;
-        }
-        // Check for bailout, as trying to inline might fail due to bailout
-        // during hydrogen processing.
-        CHECK_BAILOUT;
-
+        if (TryInline(expr)) return;
         call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
-                                                   argument_count));
+                                                           argument_count));
       } else {
-        HContext* context = new(zone()) HContext;
-        AddInstruction(context);
-        PushAndAdd(new(zone()) HGlobalObject(context));
-        VisitExpressions(expr->arguments());
-        CHECK_BAILOUT;
+        HValue* context = environment()->LookupContext();
+        HGlobalObject* receiver = new(zone()) HGlobalObject(context);
+        AddInstruction(receiver);
+        PushAndAdd(new(zone()) HPushArgument(receiver));
+        CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-        call = PreProcessCall(new(zone()) HCallGlobal(context,
-                                              var->name(),
-                                              argument_count));
+        call = new(zone()) HCallGlobal(context, var->name(), argument_count);
+        Drop(argument_count);
       }
 
     } else {
-      HContext* context = new(zone()) HContext;
+      CHECK_ALIVE(VisitArgument(expr->expression()));
+      HValue* context = environment()->LookupContext();
       HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-      AddInstruction(context);
+      HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
       AddInstruction(global_object);
-      PushAndAdd(new(zone()) HGlobalReceiver(global_object));
-      VisitExpressions(expr->arguments());
-      CHECK_BAILOUT;
+      AddInstruction(receiver);
+      PushAndAdd(new(zone()) HPushArgument(receiver));
+      CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-      call = PreProcessCall(new(zone()) HCallFunction(context, argument_count));
+      // The function to call is treated as an argument to the call function
+      // stub.
+      call = new(zone()) HCallFunction(context, argument_count + 1);
+      Drop(argument_count + 1);
     }
   }
 
   call->set_position(expr->position());
-  ast_context()->ReturnInstruction(call, expr->id());
+  return ast_context()->ReturnInstruction(call, expr->id());
 }
 
 
 void HGraphBuilder::VisitCallNew(CallNew* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // The constructor function is also used as the receiver argument to the
   // JS construct call builtin.
-  VISIT_FOR_VALUE(expr->expression());
-  VisitExpressions(expr->arguments());
-  CHECK_BAILOUT;
+  HValue* constructor = NULL;
+  CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
+  CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
 
   // The constructor is both an operand to the instruction and an argument
   // to the construct call.
   int arg_count = expr->arguments()->length() + 1;  // Plus constructor.
-  HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
   HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
   call->set_position(expr->position());
-  PreProcessCall(call);
-  ast_context()->ReturnInstruction(call, expr->id());
+  Drop(arg_count);
+  return ast_context()->ReturnInstruction(call, expr->id());
 }
 
 
@@ -4581,8 +5038,11 @@
 
 
 void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (expr->is_jsruntime()) {
-    BAILOUT("call to a JavaScript runtime function");
+    return Bailout("call to a JavaScript runtime function");
   }
 
   const Runtime::Function* function = expr->function();
@@ -4602,177 +5062,290 @@
     (this->*generator)(expr);
   } else {
     ASSERT(function->intrinsic_type == Runtime::RUNTIME);
-    VisitArgumentList(expr->arguments());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
+    HValue* context = environment()->LookupContext();
     Handle<String> name = expr->name();
     int argument_count = expr->arguments()->length();
     HCallRuntime* call =
-        new(zone()) HCallRuntime(name, function, argument_count);
+        new(zone()) HCallRuntime(context, name, function, argument_count);
     call->set_position(RelocInfo::kNoPosition);
     Drop(argument_count);
-    ast_context()->ReturnInstruction(call, expr->id());
+    return ast_context()->ReturnInstruction(call, expr->id());
   }
 }
 
 
 void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
-  Token::Value op = expr->op();
-  if (op == Token::VOID) {
-    VISIT_FOR_EFFECT(expr->expression());
-    ast_context()->ReturnValue(graph()->GetConstantUndefined());
-  } else if (op == Token::DELETE) {
-    Property* prop = expr->expression()->AsProperty();
-    Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-    if (prop == NULL && var == NULL) {
-      // Result of deleting non-property, non-variable reference is true.
-      // Evaluate the subexpression for side effects.
-      VISIT_FOR_EFFECT(expr->expression());
-      ast_context()->ReturnValue(graph()->GetConstantTrue());
-    } else if (var != NULL &&
-               !var->is_global() &&
-               var->AsSlot() != NULL &&
-               var->AsSlot()->type() != Slot::LOOKUP) {
-      // Result of deleting non-global, non-dynamic variables is false.
-      // The subexpression does not have side effects.
-      ast_context()->ReturnValue(graph()->GetConstantFalse());
-    } else if (prop != NULL) {
-      if (prop->is_synthetic()) {
-        // Result of deleting parameters is false, even when they rewrite
-        // to accesses on the arguments object.
-        ast_context()->ReturnValue(graph()->GetConstantFalse());
-      } else {
-        VISIT_FOR_VALUE(prop->obj());
-        VISIT_FOR_VALUE(prop->key());
-        HValue* key = Pop();
-        HValue* obj = Pop();
-        HDeleteProperty* instr = new(zone()) HDeleteProperty(obj, key);
-        ast_context()->ReturnInstruction(instr, expr->id());
-      }
-    } else if (var->is_global()) {
-      BAILOUT("delete with global variable");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  switch (expr->op()) {
+    case Token::DELETE: return VisitDelete(expr);
+    case Token::VOID: return VisitVoid(expr);
+    case Token::TYPEOF: return VisitTypeof(expr);
+    case Token::ADD: return VisitAdd(expr);
+    case Token::SUB: return VisitSub(expr);
+    case Token::BIT_NOT: return VisitBitNot(expr);
+    case Token::NOT: return VisitNot(expr);
+    default: UNREACHABLE();
+  }
+}
+
+
+void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+  Property* prop = expr->expression()->AsProperty();
+  VariableProxy* proxy = expr->expression()->AsVariableProxy();
+  if (prop != NULL) {
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    CHECK_ALIVE(VisitForValue(prop->key()));
+    HValue* key = Pop();
+    HValue* obj = Pop();
+    HValue* context = environment()->LookupContext();
+    HDeleteProperty* instr = new(zone()) HDeleteProperty(context, obj, key);
+    return ast_context()->ReturnInstruction(instr, expr->id());
+  } else if (proxy != NULL) {
+    Variable* var = proxy->var();
+    if (var->IsUnallocated()) {
+      Bailout("delete with global variable");
+    } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+      // Result of deleting non-global variables is false.  'this' is not
+      // really a variable, though we implement it as one.  The
+      // subexpression does not have side effects.
+      HValue* value = var->is_this()
+          ? graph()->GetConstantTrue()
+          : graph()->GetConstantFalse();
+      return ast_context()->ReturnValue(value);
     } else {
-      BAILOUT("delete with non-global variable");
+      Bailout("delete with non-global variable");
     }
-  } else if (op == Token::NOT) {
-    if (ast_context()->IsTest()) {
-      TestContext* context = TestContext::cast(ast_context());
-      VisitForControl(expr->expression(),
-                      context->if_false(),
-                      context->if_true());
-    } else if (ast_context()->IsValue()) {
-      HBasicBlock* materialize_false = graph()->CreateBasicBlock();
-      HBasicBlock* materialize_true = graph()->CreateBasicBlock();
-      VISIT_FOR_CONTROL(expr->expression(),
-                        materialize_false,
-                        materialize_true);
-      materialize_false->SetJoinId(expr->expression()->id());
-      materialize_true->SetJoinId(expr->expression()->id());
-
-      set_current_block(materialize_false);
-      Push(graph()->GetConstantFalse());
-      set_current_block(materialize_true);
-      Push(graph()->GetConstantTrue());
-
-      HBasicBlock* join =
-          CreateJoin(materialize_false, materialize_true, expr->id());
-      set_current_block(join);
-      ast_context()->ReturnValue(Pop());
-    } else {
-      ASSERT(ast_context()->IsEffect());
-      VisitForEffect(expr->expression());
-    }
-
-  } else if (op == Token::TYPEOF) {
-    VisitForTypeOf(expr->expression());
-    if (HasStackOverflow()) return;
-    HValue* value = Pop();
-    ast_context()->ReturnInstruction(new(zone()) HTypeof(value), expr->id());
-
   } else {
-    VISIT_FOR_VALUE(expr->expression());
-    HValue* value = Pop();
-    HInstruction* instr = NULL;
-    switch (op) {
-      case Token::BIT_NOT:
-        instr = new(zone()) HBitNot(value);
-        break;
-      case Token::SUB:
-        instr = new(zone()) HMul(value, graph_->GetConstantMinus1());
-        break;
-      case Token::ADD:
-        instr = new(zone()) HMul(value, graph_->GetConstant1());
-        break;
-      default:
-        BAILOUT("Value: unsupported unary operation");
-        break;
-    }
-    ast_context()->ReturnInstruction(instr, expr->id());
+    // Result of deleting non-property, non-variable reference is true.
+    // Evaluate the subexpression for side effects.
+    CHECK_ALIVE(VisitForEffect(expr->expression()));
+    return ast_context()->ReturnValue(graph()->GetConstantTrue());
   }
 }
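
VisitDelete above resolves 'delete' statically where it can: deleting a stack- or context-allocated variable yields false (true for 'this'), deleting a non-reference yields true after evaluating it for side effects, deleting a property is left to HDeleteProperty at runtime, and deleting a global still bails out. A compact sketch of that decision; the enum and function names are hypothetical:

    // Classification mirroring the cases handled above.
    enum class DeleteTarget {
      kProperty, kGlobalVariable, kLocalOrContextVariable, kThis, kNonReference
    };

    // Returns true when the result is a compile-time constant (stored in
    // *result); false when runtime support or a bailout is needed instead.
    bool StaticDeleteResult(DeleteTarget target, bool* result) {
      switch (target) {
        case DeleteTarget::kLocalOrContextVariable: *result = false; return true;
        case DeleteTarget::kThis:                   *result = true;  return true;
        case DeleteTarget::kNonReference:           *result = true;  return true;
        case DeleteTarget::kProperty:       return false;  // HDeleteProperty
        case DeleteTarget::kGlobalVariable: return false;  // Bailout above
      }
      return false;
    }
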
 
 
-HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
-  HConstant* delta = increment
+void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForEffect(expr->expression()));
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForTypeOf(expr->expression()));
+  HValue* value = Pop();
+  HValue* context = environment()->LookupContext();
+  HInstruction* instr = new(zone()) HTypeof(context, value);
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* value = Pop();
+  HValue* context = environment()->LookupContext();
+  HInstruction* instr =
+      new(zone()) HMul(context, value, graph_->GetConstant1());
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* value = Pop();
+  HValue* context = environment()->LookupContext();
+  HInstruction* instr =
+      new(zone()) HMul(context, value, graph_->GetConstantMinus1());
+  TypeInfo info = oracle()->UnaryType(expr);
+  if (info.IsUninitialized()) {
+    AddInstruction(new(zone()) HSoftDeoptimize);
+    current_block()->MarkAsDeoptimizing();
+    info = TypeInfo::Unknown();
+  }
+  Representation rep = ToRepresentation(info);
+  TraceRepresentation(expr->op(), info, instr, rep);
+  instr->AssumeRepresentation(rep);
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* value = Pop();
+  TypeInfo info = oracle()->UnaryType(expr);
+  if (info.IsUninitialized()) {
+    AddInstruction(new(zone()) HSoftDeoptimize);
+    current_block()->MarkAsDeoptimizing();
+  }
+  HInstruction* instr = new(zone()) HBitNot(value);
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
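
VisitSub and VisitBitNot above both guard on uninitialized type feedback: if the operation has never run, they add an HSoftDeoptimize, mark the block as deoptimizing, and continue with unknown type info so the unoptimized code can collect feedback before the next optimization attempt. A standalone sketch of that recurring pattern; the builder interface here is hypothetical:

    // Only the shape of the pattern matters; the methods are stand-ins.
    struct TypeFeedback { bool uninitialized; };

    template <typename Builder>
    TypeFeedback GuardUninitializedFeedback(Builder* builder, TypeFeedback info) {
      if (info.uninitialized) {
        builder->AddSoftDeoptimize();        // return to unoptimized code
        builder->MarkBlockAsDeoptimizing();  // and remember it in the graph
        info.uninitialized = false;          // proceed with "unknown" info
      }
      return info;
    }
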
+
+
+void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+  // TODO(svenpanne) Perhaps a switch or virtual function would be nicer here.
+  if (ast_context()->IsTest()) {
+    TestContext* context = TestContext::cast(ast_context());
+    VisitForControl(expr->expression(),
+                    context->if_false(),
+                    context->if_true());
+    return;
+  }
+
+  if (ast_context()->IsEffect()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
+  ASSERT(ast_context()->IsValue());
+  HBasicBlock* materialize_false = graph()->CreateBasicBlock();
+  HBasicBlock* materialize_true = graph()->CreateBasicBlock();
+  CHECK_BAILOUT(VisitForControl(expr->expression(),
+                                materialize_false,
+                                materialize_true));
+
+  if (materialize_false->HasPredecessor()) {
+    materialize_false->SetJoinId(expr->expression()->id());
+    set_current_block(materialize_false);
+    Push(graph()->GetConstantFalse());
+  } else {
+    materialize_false = NULL;
+  }
+
+  if (materialize_true->HasPredecessor()) {
+    materialize_true->SetJoinId(expr->expression()->id());
+    set_current_block(materialize_true);
+    Push(graph()->GetConstantTrue());
+  } else {
+    materialize_true = NULL;
+  }
+
+  HBasicBlock* join =
+      CreateJoin(materialize_false, materialize_true, expr->id());
+  set_current_block(join);
+  if (join != NULL) return ast_context()->ReturnValue(Pop());
+}
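
In a value context, VisitNot above evaluates its operand for control and materializes the boolean constants in swapped branches, dropping any branch block that ends up without a predecessor before the join. The value produced at the join is just the logical negation:

    // What the materialize_false/materialize_true blocks above compute.
    bool NotViaBranches(bool operand_is_true) {
      bool result;
      if (operand_is_true) {
        result = false;  // materialize_false block
      } else {
        result = true;   // materialize_true block
      }
      return result;     // value seen at the join block
    }
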
+
+
+HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
+                                            CountOperation* expr) {
+  // The input to the count operation is on top of the expression stack.
+  TypeInfo info = oracle()->IncrementType(expr);
+  Representation rep = ToRepresentation(info);
+  if (rep.IsTagged()) {
+    rep = Representation::Integer32();
+  }
+
+  if (returns_original_input) {
+    // We need an explicit HValue representing ToNumber(input).  The
+    // actual HChange instruction we need is (sometimes) added in a later
+    // phase, so it is not available now to be used as an input to HAdd and
+    // as the return value.
+    HInstruction* number_input = new(zone()) HForceRepresentation(Pop(), rep);
+    AddInstruction(number_input);
+    Push(number_input);
+  }
+
+  // The addition has no side effects, so we do not need
+  // to simulate the expression stack after this instruction.
+  // Any later failures deopt to the load of the input or earlier.
+  HConstant* delta = (expr->op() == Token::INC)
       ? graph_->GetConstant1()
       : graph_->GetConstantMinus1();
-  HInstruction* instr = new(zone()) HAdd(value, delta);
-  AssumeRepresentation(instr,  Representation::Integer32());
+  HValue* context = environment()->LookupContext();
+  HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
+  TraceRepresentation(expr->op(), info, instr, rep);
+  instr->AssumeRepresentation(rep);
+  AddInstruction(instr);
   return instr;
 }
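
BuildIncrement above pins the operand's representation with HForceRepresentation so that, for postfix operations, ToNumber(input) exists as a real value to return while HAdd produces the value to store. The observable semantics being preserved, sketched for plain numbers only:

    #include <cassert>

    struct CountResult {
      double returned;  // what the postfix expression evaluates to
      double stored;    // what is written back to the variable or property
    };

    // Postfix increment returns ToNumber(input), not the incremented value.
    CountResult PostfixIncrement(double to_number_of_input) {
      return { to_number_of_input, to_number_of_input + 1.0 };
    }

    int main() {
      CountResult r = PostfixIncrement(41.0);
      assert(r.returned == 41.0);
      assert(r.stored == 42.0);
      return 0;
    }
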
 
 
 void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Expression* target = expr->expression();
   VariableProxy* proxy = target->AsVariableProxy();
-  Variable* var = proxy->AsVariable();
   Property* prop = target->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-  bool inc = expr->op() == Token::INC;
+  if (proxy == NULL && prop == NULL) {
+    return Bailout("invalid lhs in count operation");
+  }
 
-  if (var != NULL) {
-    VISIT_FOR_VALUE(target);
+  // Match the full code generator stack by simulating an extra stack
+  // element for postfix operations in a non-effect context.  The return
+  // value is ToNumber(input).
+  bool returns_original_input =
+      expr->is_postfix() && !ast_context()->IsEffect();
+  HValue* input = NULL;  // ToNumber(original_input).
+  HValue* after = NULL;  // The result after incrementing or decrementing.
 
-    // Match the full code generator stack by simulating an extra stack
-    // element for postfix operations in a non-effect context.
-    bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
-    HValue* before = has_extra ? Top() : Pop();
-    HInstruction* after = BuildIncrement(before, inc);
-    AddInstruction(after);
+  if (proxy != NULL) {
+    Variable* var = proxy->var();
+    if (var->mode() == Variable::CONST) {
+      return Bailout("unsupported count operation with const");
+    }
+    // Argument of the count operation is a variable, not a property.
+    ASSERT(prop == NULL);
+    CHECK_ALIVE(VisitForValue(target));
+
+    after = BuildIncrement(returns_original_input, expr);
+    input = returns_original_input ? Top() : Pop();
     Push(after);
 
-    if (var->is_global()) {
-      HandleGlobalVariableAssignment(var,
-                                     after,
-                                     expr->position(),
-                                     expr->AssignmentId());
-    } else if (var->IsStackAllocated()) {
-      Bind(var, after);
-    } else if (var->IsContextSlot()) {
-      HValue* context = BuildContextChainWalk(var);
-      int index = var->AsSlot()->index();
-      HStoreContextSlot* instr =
-          new(zone()) HStoreContextSlot(context, index, after);
-      AddInstruction(instr);
-      if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
-    } else {
-      BAILOUT("lookup variable in count operation");
-    }
-    Drop(has_extra ? 2 : 1);
-    ast_context()->ReturnValue(expr->is_postfix() ? before : after);
+    switch (var->location()) {
+      case Variable::UNALLOCATED:
+        HandleGlobalVariableAssignment(var,
+                                       after,
+                                       expr->position(),
+                                       expr->AssignmentId());
+        break;
 
-  } else if (prop != NULL) {
+      case Variable::PARAMETER:
+      case Variable::LOCAL:
+        Bind(var, after);
+        break;
+
+      case Variable::CONTEXT: {
+        // Bail out if we try to mutate a parameter value in a function
+        // using the arguments object.  We do not (yet) correctly handle the
+        // arguments property of the function.
+        if (info()->scope()->arguments() != NULL) {
+          // Parameters will rewrite to context slots.  We have no direct
+          // way to detect that the variable is a parameter so we use a
+          // linear search of the parameter list.
+          int count = info()->scope()->num_parameters();
+          for (int i = 0; i < count; ++i) {
+            if (var == info()->scope()->parameter(i)) {
+              return Bailout("assignment to parameter in arguments object");
+            }
+          }
+        }
+
+        HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot* instr =
+            new(zone()) HStoreContextSlot(context, var->index(), after);
+        AddInstruction(instr);
+        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        break;
+      }
+
+      case Variable::LOOKUP:
+        return Bailout("lookup variable in count operation");
+    }
+
+  } else {
+    // Argument of the count operation is a property.
+    ASSERT(prop != NULL);
     prop->RecordTypeFeedback(oracle());
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
+      if (returns_original_input) Push(graph_->GetConstantUndefined());
 
-      // Match the full code generator stack by simulating an extra stack
-      // element for postfix operations in a non-effect context.
-      bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
-      if (has_extra) Push(graph_->GetConstantUndefined());
-
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* obj = Top();
 
       HInstruction* load = NULL;
@@ -4786,11 +5359,8 @@
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
-      HValue* before = Pop();
-      // There is no deoptimization to after the increment, so we don't need
-      // to simulate the expression stack after this instruction.
-      HInstruction* after = BuildIncrement(before, inc);
-      AddInstruction(after);
+      after = BuildIncrement(returns_original_input, expr);
+      input = Pop();
 
       HInstruction* store = BuildStoreNamed(obj, after, prop);
       AddInstruction(store);
@@ -4799,112 +5369,121 @@
       // of the operation, and the placeholder with the original value if
       // necessary.
       environment()->SetExpressionStackAt(0, after);
-      if (has_extra) environment()->SetExpressionStackAt(1, before);
+      if (returns_original_input) environment()->SetExpressionStackAt(1, input);
       if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
-      Drop(has_extra ? 2 : 1);
-
-      ast_context()->ReturnValue(expr->is_postfix() ? before : after);
 
     } else {
       // Keyed property.
+      if (returns_original_input) Push(graph_->GetConstantUndefined());
 
-      // Match the full code generator stack by simulate an extra stack element
-      // for postfix operations in a non-effect context.
-      bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
-      if (has_extra) Push(graph_->GetConstantUndefined());
-
-      VISIT_FOR_VALUE(prop->obj());
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
+      CHECK_ALIVE(VisitForValue(prop->key()));
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
-      HInstruction* load = BuildLoadKeyed(obj, key, prop);
-      PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CountId());
+      bool has_side_effects = false;
+      HValue* load = HandleKeyedElementAccess(
+          obj, key, NULL, prop, expr->CountId(), RelocInfo::kNoPosition,
+          false,  // is_store
+          &has_side_effects);
+      Push(load);
+      if (has_side_effects) AddSimulate(expr->CountId());
 
-      HValue* before = Pop();
-      // There is no deoptimization to after the increment, so we don't need
-      // to simulate the expression stack after this instruction.
-      HInstruction* after = BuildIncrement(before, inc);
-      AddInstruction(after);
+      after = BuildIncrement(returns_original_input, expr);
+      input = Pop();
 
       expr->RecordTypeFeedback(oracle());
-      HInstruction* store = BuildStoreKeyed(obj, key, after, expr);
-      AddInstruction(store);
+      HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
+                               RelocInfo::kNoPosition,
+                               true,  // is_store
+                               &has_side_effects);
 
       // Drop the key from the bailout environment.  Overwrite the receiver
       // with the result of the operation, and the placeholder with the
       // original value if necessary.
       Drop(1);
       environment()->SetExpressionStackAt(0, after);
-      if (has_extra) environment()->SetExpressionStackAt(1, before);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
-      Drop(has_extra ? 2 : 1);
-
-      ast_context()->ReturnValue(expr->is_postfix() ? before : after);
+      if (returns_original_input) environment()->SetExpressionStackAt(1, input);
+      ASSERT(has_side_effects);  // Stores always have side effects.
+      AddSimulate(expr->AssignmentId());
     }
-
-  } else {
-    BAILOUT("invalid lhs in count operation");
   }
+
+  Drop(returns_original_input ? 2 : 1);
+  return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
 }
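
The Variable::CONTEXT case above bails out when the mutated variable might be a parameter of a function that uses the arguments object, because a store from optimized code would not be reflected in 'arguments'; parameters are found by a linear scan since there is no direct flag for them. A sketch of that check, with a hypothetical scope description in place of V8's Scope:

    #include <vector>

    struct ScopeSketch {
      bool uses_arguments_object;
      std::vector<const void*> parameters;  // identities of parameter variables
    };

    bool MustBailOutOnParameterMutation(const ScopeSketch& scope,
                                        const void* variable) {
      if (!scope.uses_arguments_object) return false;
      for (const void* parameter : scope.parameters) {
        if (parameter == variable) return true;  // aliased by 'arguments'
      }
      return false;
    }
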
 
 
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
+HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
+                                                        HValue* string,
                                                         HValue* index) {
   AddInstruction(new(zone()) HCheckNonSmi(string));
-  AddInstruction(new(zone()) HCheckInstanceType(
-      string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
+  AddInstruction(HCheckInstanceType::NewIsString(string));
   HStringLength* length = new(zone()) HStringLength(string);
   AddInstruction(length);
   HInstruction* checked_index =
       AddInstruction(new(zone()) HBoundsCheck(index, length));
-  return new(zone()) HStringCharCodeAt(string, checked_index);
+  return new(zone()) HStringCharCodeAt(context, string, checked_index);
 }
 
 
 HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
                                                   HValue* left,
                                                   HValue* right) {
+  HValue* context = environment()->LookupContext();
+  TypeInfo info = oracle()->BinaryType(expr);
+  if (info.IsUninitialized()) {
+    AddInstruction(new(zone()) HSoftDeoptimize);
+    current_block()->MarkAsDeoptimizing();
+    info = TypeInfo::Unknown();
+  }
   HInstruction* instr = NULL;
   switch (expr->op()) {
     case Token::ADD:
-      instr = new(zone()) HAdd(left, right);
+      if (info.IsString()) {
+        AddInstruction(new(zone()) HCheckNonSmi(left));
+        AddInstruction(HCheckInstanceType::NewIsString(left));
+        AddInstruction(new(zone()) HCheckNonSmi(right));
+        AddInstruction(HCheckInstanceType::NewIsString(right));
+        instr = new(zone()) HStringAdd(context, left, right);
+      } else {
+        instr = new(zone()) HAdd(context, left, right);
+      }
       break;
     case Token::SUB:
-      instr = new(zone()) HSub(left, right);
+      instr = new(zone()) HSub(context, left, right);
       break;
     case Token::MUL:
-      instr = new(zone()) HMul(left, right);
+      instr = new(zone()) HMul(context, left, right);
       break;
     case Token::MOD:
-      instr = new(zone()) HMod(left, right);
+      instr = new(zone()) HMod(context, left, right);
       break;
     case Token::DIV:
-      instr = new(zone()) HDiv(left, right);
+      instr = new(zone()) HDiv(context, left, right);
       break;
     case Token::BIT_XOR:
-      instr = new(zone()) HBitXor(left, right);
+      instr = new(zone()) HBitXor(context, left, right);
       break;
     case Token::BIT_AND:
-      instr = new(zone()) HBitAnd(left, right);
+      instr = new(zone()) HBitAnd(context, left, right);
       break;
     case Token::BIT_OR:
-      instr = new(zone()) HBitOr(left, right);
+      instr = new(zone()) HBitOr(context, left, right);
       break;
     case Token::SAR:
-      instr = new(zone()) HSar(left, right);
+      instr = new(zone()) HSar(context, left, right);
       break;
     case Token::SHR:
-      instr = new(zone()) HShr(left, right);
+      instr = new(zone()) HShr(context, left, right);
       break;
     case Token::SHL:
-      instr = new(zone()) HShl(left, right);
+      instr = new(zone()) HShl(context, left, right);
       break;
     default:
       UNREACHABLE();
   }
-  TypeInfo info = oracle()->BinaryType(expr);
+
   // If we hit an uninitialized binary op stub we will get type info
   // for a smi operation. If one of the operands is a constant string
   // do not generate code assuming it is a smi operation.
@@ -4913,15 +5492,13 @@
        (right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
     return instr;
   }
-  if (FLAG_trace_representation) {
-    PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
-  }
   Representation rep = ToRepresentation(info);
   // We only generate either int32 or generic tagged bitwise operations.
   if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
     rep = Representation::Integer32();
   }
-  AssumeRepresentation(instr, rep);
+  TraceRepresentation(expr->op(), info, instr, rep);
+  instr->AssumeRepresentation(rep);
   return instr;
 }
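
BuildBinaryOperation above gives Token::ADD a separate path when type feedback says both operands are strings: it guards with non-smi and string instance-type checks and emits HStringAdd instead of HAdd, since JavaScript '+' means concatenation for strings. The two behaviours in plain C++ terms (a sketch, not the generated code):

    #include <cassert>
    #include <string>

    double NumberAdd(double a, double b) { return a + b; }  // the HAdd case
    std::string StringAdd(const std::string& a, const std::string& b) {
      return a + b;  // concatenation, the HStringAdd case
    }

    int main() {
      assert(NumberAdd(1.0, 2.0) == 3.0);
      assert(StringAdd("foo", "bar") == "foobar");
      return 0;
    }
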
 
@@ -4941,113 +5518,150 @@
 
 
 void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
-  if (expr->op() == Token::COMMA) {
-    VISIT_FOR_EFFECT(expr->left());
-    // Visit the right subexpression in the same AST context as the entire
-    // expression.
-    Visit(expr->right());
-
-  } else if (expr->op() == Token::AND || expr->op() == Token::OR) {
-    bool is_logical_and = (expr->op() == Token::AND);
-    if (ast_context()->IsTest()) {
-      TestContext* context = TestContext::cast(ast_context());
-      // Translate left subexpression.
-      HBasicBlock* eval_right = graph()->CreateBasicBlock();
-      if (is_logical_and) {
-        VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
-      } else {
-        VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
-      }
-      eval_right->SetJoinId(expr->RightId());
-
-      // Translate right subexpression by visiting it in the same AST
-      // context as the entire expression.
-      set_current_block(eval_right);
-      Visit(expr->right());
-
-    } else if (ast_context()->IsValue()) {
-      VISIT_FOR_VALUE(expr->left());
-      ASSERT(current_block() != NULL);
-
-      // We need an extra block to maintain edge-split form.
-      HBasicBlock* empty_block = graph()->CreateBasicBlock();
-      HBasicBlock* eval_right = graph()->CreateBasicBlock();
-      HTest* test = is_logical_and
-          ? new(zone()) HTest(Top(), eval_right, empty_block)
-          : new(zone()) HTest(Top(), empty_block, eval_right);
-      current_block()->Finish(test);
-
-      set_current_block(eval_right);
-      Drop(1);  // Value of the left subexpression.
-      VISIT_FOR_VALUE(expr->right());
-
-      HBasicBlock* join_block =
-          CreateJoin(empty_block, current_block(), expr->id());
-      set_current_block(join_block);
-      ast_context()->ReturnValue(Pop());
-
-    } else {
-      ASSERT(ast_context()->IsEffect());
-      // In an effect context, we don't need the value of the left
-      // subexpression, only its control flow and side effects.  We need an
-      // extra block to maintain edge-split form.
-      HBasicBlock* empty_block = graph()->CreateBasicBlock();
-      HBasicBlock* right_block = graph()->CreateBasicBlock();
-      HBasicBlock* join_block = graph()->CreateBasicBlock();
-      if (is_logical_and) {
-        VISIT_FOR_CONTROL(expr->left(), right_block, empty_block);
-      } else {
-        VISIT_FOR_CONTROL(expr->left(), empty_block, right_block);
-      }
-      // TODO(kmillikin): Find a way to fix this.  It's ugly that there are
-      // actually two empty blocks (one here and one inserted by
-      // TestContext::BuildBranch, and that they both have an HSimulate
-      // though the second one is not a merge node, and that we really have
-      // no good AST ID to put on that first HSimulate.
-      empty_block->SetJoinId(expr->id());
-      right_block->SetJoinId(expr->RightId());
-      set_current_block(right_block);
-      VISIT_FOR_EFFECT(expr->right());
-
-      empty_block->Goto(join_block);
-      current_block()->Goto(join_block);
-      join_block->SetJoinId(expr->id());
-      set_current_block(join_block);
-      // We did not materialize any value in the predecessor environments,
-      // so there is no need to handle it here.
-    }
-
-  } else {
-    VISIT_FOR_VALUE(expr->left());
-    VISIT_FOR_VALUE(expr->right());
-
-    HValue* right = Pop();
-    HValue* left = Pop();
-    HInstruction* instr = BuildBinaryOperation(expr, left, right);
-    instr->set_position(expr->position());
-    ast_context()->ReturnInstruction(instr, expr->id());
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  switch (expr->op()) {
+    case Token::COMMA:
+      return VisitComma(expr);
+    case Token::OR:
+    case Token::AND:
+      return VisitLogicalExpression(expr);
+    default:
+      return VisitArithmeticExpression(expr);
   }
 }
 
 
-void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
-  if (value->CheckFlag(HValue::kFlexibleRepresentation)) {
-    if (FLAG_trace_representation) {
-      PrintF("Assume representation for %s to be %s (%d)\n",
-             value->Mnemonic(),
-             r.Mnemonic(),
-             graph_->GetMaximumValueID());
+void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+  CHECK_ALIVE(VisitForEffect(expr->left()));
+  // Visit the right subexpression in the same AST context as the entire
+  // expression.
+  Visit(expr->right());
+}
+
+
+void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+  bool is_logical_and = expr->op() == Token::AND;
+  if (ast_context()->IsTest()) {
+    TestContext* context = TestContext::cast(ast_context());
+    // Translate left subexpression.
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    if (is_logical_and) {
+      CHECK_BAILOUT(VisitForControl(expr->left(),
+                                    eval_right,
+                                    context->if_false()));
+    } else {
+      CHECK_BAILOUT(VisitForControl(expr->left(),
+                                    context->if_true(),
+                                    eval_right));
     }
-    value->ChangeRepresentation(r);
-    // The representation of the value is dictated by type feedback and
-    // will not be changed later.
-    value->ClearFlag(HValue::kFlexibleRepresentation);
-  } else if (FLAG_trace_representation) {
-    PrintF("No representation assumed\n");
+
+    // Translate right subexpression by visiting it in the same AST
+    // context as the entire expression.
+    if (eval_right->HasPredecessor()) {
+      eval_right->SetJoinId(expr->RightId());
+      set_current_block(eval_right);
+      Visit(expr->right());
+    }
+
+  } else if (ast_context()->IsValue()) {
+    CHECK_ALIVE(VisitForValue(expr->left()));
+    ASSERT(current_block() != NULL);
+
+    // We need an extra block to maintain edge-split form.
+    HBasicBlock* empty_block = graph()->CreateBasicBlock();
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    unsigned test_id = expr->left()->test_id();
+    ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
+    HBranch* test = is_logical_and
+        ? new(zone()) HBranch(Top(), eval_right, empty_block, expected)
+        : new(zone()) HBranch(Top(), empty_block, eval_right, expected);
+    current_block()->Finish(test);
+
+    set_current_block(eval_right);
+    Drop(1);  // Value of the left subexpression.
+    CHECK_BAILOUT(VisitForValue(expr->right()));
+
+    HBasicBlock* join_block =
+        CreateJoin(empty_block, current_block(), expr->id());
+    set_current_block(join_block);
+    return ast_context()->ReturnValue(Pop());
+
+  } else {
+    ASSERT(ast_context()->IsEffect());
+    // In an effect context, we don't need the value of the left subexpression,
+    // only its control flow and side effects.  We need an extra block to
+    // maintain edge-split form.
+    HBasicBlock* empty_block = graph()->CreateBasicBlock();
+    HBasicBlock* right_block = graph()->CreateBasicBlock();
+    if (is_logical_and) {
+      CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
+    } else {
+      CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
+    }
+
+    // TODO(kmillikin): Find a way to fix this.  It's ugly that there are
+    // actually two empty blocks (one here and one inserted by
+    // TestContext::BuildBranch), that they both have an HSimulate though the
+    // second one is not a merge node, and that we really have no good AST ID
+    // to put on that first HSimulate.
+
+    if (empty_block->HasPredecessor()) {
+      empty_block->SetJoinId(expr->id());
+    } else {
+      empty_block = NULL;
+    }
+
+    if (right_block->HasPredecessor()) {
+      right_block->SetJoinId(expr->RightId());
+      set_current_block(right_block);
+      CHECK_BAILOUT(VisitForEffect(expr->right()));
+      right_block = current_block();
+    } else {
+      right_block = NULL;
+    }
+
+    HBasicBlock* join_block =
+        CreateJoin(empty_block, right_block, expr->id());
+    set_current_block(join_block);
+    // We did not materialize any value in the predecessor environments,
+    // so there is no need to handle it here.
   }
 }
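
VisitLogicalExpression above keeps the graph in edge-split form by routing the short-circuit edge through an extra empty block, and in a value context the expression yields the left operand when it alone decides the outcome and the right operand otherwise. That value-producing behaviour, sketched with a simplified stand-in for ToBoolean:

    #include <cassert>

    bool ToBooleanStandIn(double value) { return value != 0.0; }  // simplified

    double LogicalAnd(double left, double right) {
      return ToBooleanStandIn(left) ? right : left;
    }
    double LogicalOr(double left, double right) {
      return ToBooleanStandIn(left) ? left : right;
    }

    int main() {
      assert(LogicalAnd(0.0, 5.0) == 0.0);  // left decides, left returned
      assert(LogicalAnd(2.0, 5.0) == 5.0);  // right evaluated and returned
      assert(LogicalOr(0.0, 5.0) == 5.0);
      assert(LogicalOr(2.0, 5.0) == 2.0);
      return 0;
    }
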
 
 
+void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->left()));
+  CHECK_ALIVE(VisitForValue(expr->right()));
+  HValue* right = Pop();
+  HValue* left = Pop();
+  HInstruction* instr = BuildBinaryOperation(expr, left, right);
+  instr->set_position(expr->position());
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::TraceRepresentation(Token::Value op,
+                                        TypeInfo info,
+                                        HValue* value,
+                                        Representation rep) {
+  if (!FLAG_trace_representation) return;
+  // TODO(svenpanne) Under which circumstances are we actually not flexible?
+  // At first glance, this looks a bit weird...
+  bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
+  PrintF("Operation %s has type info %s, %schange representation assumption "
+         "for %s (ID %d) from %s to %s\n",
+         Token::Name(op),
+         info.ToString(),
+         flexible ? "" : "DO NOT ",
+         value->Mnemonic(),
+         graph_->GetMaximumValueID(),
+         value->representation().Mnemonic(),
+         rep.Mnemonic());
+}
+
+
 Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
   if (info.IsSmi()) return Representation::Integer32();
   if (info.IsInteger32()) return Representation::Integer32();
@@ -5057,55 +5671,86 @@
 }
 
 
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+                                               Expression* expr,
+                                               Handle<String> check) {
+  CHECK_ALIVE(VisitForTypeOf(expr));
+  HValue* expr_value = Pop();
+  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
+  instr->set_position(compare_expr->position());
+  return ast_context()->ReturnControl(instr, compare_expr->id());
+}
+
+
+void HGraphBuilder::HandleLiteralCompareUndefined(
+    CompareOperation* compare_expr, Expression* expr) {
+  CHECK_ALIVE(VisitForValue(expr));
+  HValue* lhs = Pop();
+  HValue* rhs = graph()->GetConstantUndefined();
+  HCompareObjectEqAndBranch* instr =
+      new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
+  instr->set_position(compare_expr->position());
+  return ast_context()->ReturnControl(instr, compare_expr->id());
+}
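
HandleLiteralCompareTypeof and HandleLiteralCompareUndefined above lower two common source patterns, a typeof test against a string literal and a comparison against undefined, straight to branch instructions rather than a generic compare. A rough classification sketch; the struct fields are hypothetical stand-ins for the AST pattern queries:

    struct CompareFacts {
      bool one_side_is_typeof;
      bool other_side_is_string_literal;
      bool compares_against_undefined;
    };

    enum class Lowering {
      kTypeofIsAndBranch,         // e.g. typeof x == "number"
      kCompareObjectEqAndBranch,  // e.g. x === undefined
      kGeneric
    };

    Lowering Classify(const CompareFacts& facts) {
      if (facts.one_side_is_typeof && facts.other_side_is_string_literal) {
        return Lowering::kTypeofIsAndBranch;
      }
      if (facts.compares_against_undefined) {
        return Lowering::kCompareObjectEqAndBranch;
      }
      return Lowering::kGeneric;
    }
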
+
+
 void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (IsClassOfTest(expr)) {
     CallRuntime* call = expr->left()->AsCallRuntime();
-    VISIT_FOR_VALUE(call->arguments()->at(0));
+    ASSERT(call->arguments()->length() == 1);
+    CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
     HValue* value = Pop();
     Literal* literal = expr->right()->AsLiteral();
     Handle<String> rhs = Handle<String>::cast(literal->handle());
-    HInstruction* instr = new(zone()) HClassOfTest(value, rhs);
+    HClassOfTestAndBranch* instr =
+        new(zone()) HClassOfTestAndBranch(value, rhs);
     instr->set_position(expr->position());
-    ast_context()->ReturnInstruction(instr, expr->id());
+    return ast_context()->ReturnControl(instr, expr->id());
+  }
+
+  // Check for special cases that compare against literals.
+  Expression* sub_expr;
+  Handle<String> check;
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    HandleLiteralCompareTypeof(expr, sub_expr, check);
     return;
   }
 
-  // Check for the pattern: typeof <expression> == <string literal>.
-  UnaryOperation* left_unary = expr->left()->AsUnaryOperation();
-  Literal* right_literal = expr->right()->AsLiteral();
-  if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
-      left_unary != NULL && left_unary->op() == Token::TYPEOF &&
-      right_literal != NULL && right_literal->handle()->IsString()) {
-    VisitForTypeOf(left_unary->expression());
-    if (HasStackOverflow()) return;
-    HValue* left = Pop();
-    HInstruction* instr = new(zone()) HTypeofIs(left,
-        Handle<String>::cast(right_literal->handle()));
-    instr->set_position(expr->position());
-    ast_context()->ReturnInstruction(instr, expr->id());
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    HandleLiteralCompareUndefined(expr, sub_expr);
     return;
   }
 
-  VISIT_FOR_VALUE(expr->left());
-  VISIT_FOR_VALUE(expr->right());
+  TypeInfo type_info = oracle()->CompareType(expr);
+  // Check if this expression was ever executed according to type feedback.
+  if (type_info.IsUninitialized()) {
+    AddInstruction(new(zone()) HSoftDeoptimize);
+    current_block()->MarkAsDeoptimizing();
+    type_info = TypeInfo::Unknown();
+  }
 
+  CHECK_ALIVE(VisitForValue(expr->left()));
+  CHECK_ALIVE(VisitForValue(expr->right()));
+
+  HValue* context = environment()->LookupContext();
   HValue* right = Pop();
   HValue* left = Pop();
   Token::Value op = expr->op();
 
-  TypeInfo type_info = oracle()->CompareType(expr);
-  HInstruction* instr = NULL;
   if (op == Token::INSTANCEOF) {
     // Check to see if the rhs of the instanceof is a global function not
     // residing in new space. If it is we assume that the function will stay the
     // same.
     Handle<JSFunction> target = Handle<JSFunction>::null();
-    Variable* var = expr->right()->AsVariableProxy()->AsVariable();
-    bool global_function = (var != NULL) && var->is_global() && !var->is_this();
+    VariableProxy* proxy = expr->right()->AsVariableProxy();
+    bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
     if (global_function &&
         info()->has_global_object() &&
         !info()->global_object()->IsAccessCheckNeeded()) {
-      Handle<String> name = var->name();
+      Handle<String> name = proxy->name();
       Handle<GlobalObject> global(info()->global_object());
       LookupResult lookup;
       global->Lookup(*name, &lookup);
@@ -5124,67 +5769,122 @@
     // If the target is not null we have found a known global function that is
     // assumed to stay the same for this instanceof.
     if (target.is_null()) {
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
-      instr = new(zone()) HInstanceOf(context, left, right);
+      HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
+      result->set_position(expr->position());
+      return ast_context()->ReturnInstruction(result, expr->id());
     } else {
       AddInstruction(new(zone()) HCheckFunction(right, target));
-      instr = new(zone()) HInstanceOfKnownGlobal(left, target);
+      HInstanceOfKnownGlobal* result =
+          new(zone()) HInstanceOfKnownGlobal(context, left, target);
+      result->set_position(expr->position());
+      return ast_context()->ReturnInstruction(result, expr->id());
     }
   } else if (op == Token::IN) {
-    BAILOUT("Unsupported comparison: in");
+    HIn* result = new(zone()) HIn(context, left, right);
+    result->set_position(expr->position());
+    return ast_context()->ReturnInstruction(result, expr->id());
   } else if (type_info.IsNonPrimitive()) {
     switch (op) {
       case Token::EQ:
       case Token::EQ_STRICT: {
         AddInstruction(new(zone()) HCheckNonSmi(left));
-        AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
+        AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
         AddInstruction(new(zone()) HCheckNonSmi(right));
-        AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
-        instr = new(zone()) HCompareJSObjectEq(left, right);
-        break;
+        AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+        HCompareObjectEqAndBranch* result =
+            new(zone()) HCompareObjectEqAndBranch(left, right);
+        result->set_position(expr->position());
+        return ast_context()->ReturnControl(result, expr->id());
       }
       default:
-        BAILOUT("Unsupported non-primitive compare");
-        break;
+        return Bailout("Unsupported non-primitive compare");
     }
+  } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
+             (op == Token::EQ || op == Token::EQ_STRICT)) {
+    AddInstruction(new(zone()) HCheckNonSmi(left));
+    AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+    AddInstruction(new(zone()) HCheckNonSmi(right));
+    AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+    HCompareObjectEqAndBranch* result =
+        new(zone()) HCompareObjectEqAndBranch(left, right);
+    result->set_position(expr->position());
+    return ast_context()->ReturnControl(result, expr->id());
   } else {
-    HCompare* compare = new(zone()) HCompare(left, right, op);
     Representation r = ToRepresentation(type_info);
-    compare->SetInputRepresentation(r);
-    instr = compare;
+    if (r.IsTagged()) {
+      HCompareGeneric* result =
+          new(zone()) HCompareGeneric(context, left, right, op);
+      result->set_position(expr->position());
+      return ast_context()->ReturnInstruction(result, expr->id());
+    } else {
+      HCompareIDAndBranch* result =
+          new(zone()) HCompareIDAndBranch(left, right, op);
+      result->set_position(expr->position());
+      result->SetInputRepresentation(r);
+      return ast_context()->ReturnControl(result, expr->id());
+    }
   }
-  instr->set_position(expr->position());
-  ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
-  VISIT_FOR_VALUE(expr->expression());
-
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
-  HIsNull* compare = new(zone()) HIsNull(value, expr->is_strict());
-  ast_context()->ReturnInstruction(compare, expr->id());
+  HIsNullAndBranch* instr =
+      new(zone()) HIsNullAndBranch(value, expr->is_strict());
+  return ast_context()->ReturnControl(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
-  BAILOUT("ThisFunction");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  HThisFunction* self = new(zone()) HThisFunction;
+  return ast_context()->ReturnInstruction(self, expr->id());
 }
 
 
 void HGraphBuilder::VisitDeclaration(Declaration* decl) {
-  // We allow only declarations that do not require code generation.
-  // The following all require code generation: global variables and
-  // functions, variables with slot type LOOKUP, declarations with
-  // mode CONST, and functions.
-  Variable* var = decl->proxy()->var();
-  Slot* slot = var->AsSlot();
-  if (var->is_global() ||
-      (slot != NULL && slot->type() == Slot::LOOKUP) ||
-      decl->mode() == Variable::CONST ||
-      decl->fun() != NULL) {
-    BAILOUT("unsupported declaration");
+  HandleDeclaration(decl->proxy(), decl->mode(), decl->fun());
+}
+
+
+void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
+                                      Variable::Mode mode,
+                                      FunctionLiteral* function) {
+  if (mode == Variable::LET) return Bailout("unsupported let declaration");
+  Variable* var = proxy->var();
+  switch (var->location()) {
+    case Variable::UNALLOCATED:
+      return Bailout("unsupported global declaration");
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT:
+      if (mode == Variable::CONST || function != NULL) {
+        HValue* value = NULL;
+        if (mode == Variable::CONST) {
+          value = graph()->GetConstantHole();
+        } else {
+          VisitForValue(function);
+          value = Pop();
+        }
+        if (var->IsContextSlot()) {
+          HValue* context = environment()->LookupContext();
+          HStoreContextSlot* store =
+              new HStoreContextSlot(context, var->index(), value);
+          AddInstruction(store);
+          if (store->HasSideEffects()) AddSimulate(proxy->id());
+        } else {
+          environment()->Bind(var, value);
+        }
+      }
+      break;
+    case Variable::LOOKUP:
+      return Bailout("unsupported lookup slot in declaration");
   }
 }
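
Editorial aside, not part of the patch: the new HandleDeclaration accepts only a narrow set of declarations. The minimal standalone sketch below restates its bailout matrix under that reading; the enum and function names are illustrative only.

// Editorial sketch (illustrative names, not part of the patch): the bailout
// matrix implemented by HandleDeclaration above.
#include <cstddef>

enum LocationSketch { UNALLOCATED, PARAMETER, LOCAL, CONTEXT, LOOKUP };
enum ModeSketch { VAR, CONST, LET };

// Returns NULL when the graph builder can compile the declaration, otherwise
// the bailout reason: let declarations, global (unallocated) variables and
// lookup slots are still unsupported.
static const char* DeclarationBailoutReason(ModeSketch mode, LocationSketch loc) {
  if (mode == LET) return "unsupported let declaration";
  switch (loc) {
    case UNALLOCATED: return "unsupported global declaration";
    case LOOKUP:      return "unsupported lookup slot in declaration";
    default:          return NULL;  // PARAMETER, LOCAL or CONTEXT slots are handled.
  }
}
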
 
@@ -5193,83 +5893,95 @@
 // Support for types.
 void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HIsSmi* result = new(zone()) HIsSmi(value);
-  ast_context()->ReturnInstruction(result, call->id());
+  HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasInstanceType* result =
-      new(zone()) HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
-  ast_context()->ReturnInstruction(result, call->id());
+  HHasInstanceTypeAndBranch* result =
+      new(zone()) HHasInstanceTypeAndBranch(value,
+                                            FIRST_SPEC_OBJECT_TYPE,
+                                            LAST_SPEC_OBJECT_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasInstanceType* result =
-      new(zone()) HHasInstanceType(value, JS_FUNCTION_TYPE);
-  ast_context()->ReturnInstruction(result, call->id());
+  HHasInstanceTypeAndBranch* result =
+      new(zone()) HHasInstanceTypeAndBranch(value,
+                                            JS_FUNCTION_TYPE,
+                                            JS_FUNCTION_PROXY_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasCachedArrayIndex* result = new(zone()) HHasCachedArrayIndex(value);
-  ast_context()->ReturnInstruction(result, call->id());
+  HHasCachedArrayIndexAndBranch* result =
+      new(zone()) HHasCachedArrayIndexAndBranch(value);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasInstanceType* result = new(zone()) HHasInstanceType(value, JS_ARRAY_TYPE);
-  ast_context()->ReturnInstruction(result, call->id());
+  HHasInstanceTypeAndBranch* result =
+      new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasInstanceType* result =
-      new(zone()) HHasInstanceType(value, JS_REGEXP_TYPE);
-  ast_context()->ReturnInstruction(result, call->id());
+  HHasInstanceTypeAndBranch* result =
+      new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HIsObject* test = new(zone()) HIsObject(value);
-  ast_context()->ReturnInstruction(test, call->id());
+  HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsNonNegativeSmi");
+  return Bailout("inlined runtime function: IsNonNegativeSmi");
 }
 
 
 void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsUndetectableObject");
+  ASSERT(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HIsUndetectableAndBranch* result =
+      new(zone()) HIsUndetectableAndBranch(value);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+  return Bailout(
+      "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
 }
 
 
@@ -5280,9 +5992,10 @@
     // We are generating the graph for an inlined function. Currently
     // constructor inlining is not supported and we can just return
     // false from %_IsConstructCall().
-    ast_context()->ReturnValue(graph()->GetConstantFalse());
+    return ast_context()->ReturnValue(graph()->GetConstantFalse());
   } else {
-    ast_context()->ReturnInstruction(new(zone()) HIsConstructCall, call->id());
+    return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
+                                        call->id());
   }
 }
 
@@ -5296,7 +6009,7 @@
   ASSERT(call->arguments()->length() == 0);
   HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
   HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
@@ -5306,13 +6019,13 @@
   // function is blacklisted by AstNode::IsInlineable.
   ASSERT(function_state()->outer() == NULL);
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* index = Pop();
   HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
   HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
   HAccessArgumentsAt* result =
       new(zone()) HAccessArgumentsAt(elements, length, index);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
@@ -5320,168 +6033,162 @@
 void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
   // The special form detected by IsClassOfTest is handled before we get here
   // and does not cause a bailout.
-  BAILOUT("inlined runtime function: ClassOf");
+  return Bailout("inlined runtime function: ClassOf");
 }
 
 
 void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HValueOf* result = new(zone()) HValueOf(value);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  BAILOUT("inlined runtime function: SetValueOf");
+  return Bailout("inlined runtime function: SetValueOf");
 }
 
 
 // Fast support for charCodeAt(n).
 void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
   HValue* string = Pop();
-  HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
-  ast_context()->ReturnInstruction(result, call->id());
+  HValue* context = environment()->LookupContext();
+  HStringCharCodeAt* result = BuildStringCharCodeAt(context, string, index);
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast support for string.charAt(n) and string[n].
 void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* char_code = Pop();
-  HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
-  ast_context()->ReturnInstruction(result, call->id());
+  HValue* context = environment()->LookupContext();
+  HStringCharFromCode* result =
+      new(zone()) HStringCharFromCode(context, char_code);
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast support for string.charAt(n) and string[n].
 void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
   HValue* string = Pop();
-  HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+  HValue* context = environment()->LookupContext();
+  HStringCharCodeAt* char_code = BuildStringCharCodeAt(context, string, index);
   AddInstruction(char_code);
-  HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
-  ast_context()->ReturnInstruction(result, call->id());
+  HStringCharFromCode* result =
+      new(zone()) HStringCharFromCode(context, char_code);
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast support for object equality testing.
 void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
   HValue* left = Pop();
-  HCompareJSObjectEq* result = new(zone()) HCompareJSObjectEq(left, right);
-  ast_context()->ReturnInstruction(result, call->id());
+  HCompareObjectEqAndBranch* result =
+      new(zone()) HCompareObjectEqAndBranch(left, right);
+  return ast_context()->ReturnControl(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateLog(CallRuntime* call) {
   // %_Log is ignored in optimized code.
-  ast_context()->ReturnValue(graph()->GetConstantUndefined());
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
 
 // Fast support for Math.random().
 void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
-  BAILOUT("inlined runtime function: RandomHeapNumber");
+  return Bailout("inlined runtime function: RandomHeapNumber");
 }
 
 
 // Fast support for StringAdd.
 void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
   Drop(2);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast support for SubString.
 void HGraphBuilder::GenerateSubString(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
   Drop(3);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast support for StringCompare.
 void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
   Drop(2);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Support for direct calls from JavaScript to native RegExp code.
 void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   ASSERT_EQ(4, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
   Drop(4);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Construct a RegExp exec result with two in-object properties.
 void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
   Drop(3);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Support for fast native caches.
 void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
-  BAILOUT("inlined runtime function: GetFromCache");
+  return Bailout("inlined runtime function: GetFromCache");
 }
 
 
 // Fast support for number to string.
 void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
   Drop(1);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
@@ -5489,100 +6196,104 @@
 // indices. This should only be used if the indices are known to be
 // non-negative and within bounds of the elements array at the call site.
 void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
-  BAILOUT("inlined runtime function: SwapElements");
+  return Bailout("inlined runtime function: SwapElements");
 }
 
 
 // Fast call for custom callbacks.
 void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
-  BAILOUT("inlined runtime function: CallFunction");
+  // 1 ~ The function to call is not itself an argument to the call.
+  int arg_count = call->arguments()->length() - 1;
+  ASSERT(arg_count >= 1);  // There's always at least a receiver.
+
+  for (int i = 0; i < arg_count; ++i) {
+    CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
+  }
+  CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+  HValue* function = Pop();
+  HValue* context = environment()->LookupContext();
+  HInvokeFunction* result =
+      new(zone()) HInvokeFunction(context, function, arg_count);
+  Drop(arg_count);
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast call to math functions.
 void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
   HValue* left = Pop();
   HPower* result = new(zone()) HPower(left, right);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::SIN);
   Drop(1);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::COS);
   Drop(1);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::LOG);
   Drop(1);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
-  BAILOUT("inlined runtime function: MathSqrt");
+  return Bailout("inlined runtime function: MathSqrt");
 }
 
 
 // Check whether two RegExps are equivalent
 void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsRegExpEquivalent");
+  return Bailout("inlined runtime function: IsRegExpEquivalent");
 }
 
 
 void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
-  ast_context()->ReturnInstruction(result, call->id());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
-  BAILOUT("inlined runtime function: FastAsciiArrayJoin");
+  return Bailout("inlined runtime function: FastAsciiArrayJoin");
 }
 
 
-#undef BAILOUT
 #undef CHECK_BAILOUT
-#undef VISIT_FOR_EFFECT
-#undef VISIT_FOR_VALUE
-#undef ADD_TO_SUBGRAPH
+#undef CHECK_ALIVE
 
 
 HEnvironment::HEnvironment(HEnvironment* outer,
@@ -5592,6 +6303,7 @@
       values_(0),
       assigned_variables_(4),
       parameter_count_(0),
+      specials_count_(1),
       local_count_(0),
       outer_(outer),
       pop_count_(0),
@@ -5605,6 +6317,7 @@
     : values_(0),
       assigned_variables_(0),
       parameter_count_(0),
+      specials_count_(1),
       local_count_(0),
       outer_(NULL),
       pop_count_(0),
@@ -5621,7 +6334,7 @@
   local_count_ = local_count;
 
   // Avoid reallocating the temporaries' backing store on the first Push.
-  int total = parameter_count + local_count + stack_height;
+  int total = parameter_count + specials_count_ + local_count + stack_height;
   values_.Initialize(total + 4);
   for (int i = 0; i < total; ++i) values_.Add(NULL);
 }
@@ -5680,12 +6393,12 @@
 
 
 bool HEnvironment::HasExpressionAt(int index) const {
-  return index >= parameter_count_ + local_count_;
+  return index >= parameter_count_ + specials_count_ + local_count_;
 }
 
 
 bool HEnvironment::ExpressionStackIsEmpty() const {
-  int first_expression = parameter_count() + local_count();
+  int first_expression = parameter_count() + specials_count() + local_count();
   ASSERT(length() >= first_expression);
   return length() == first_expression;
 }
@@ -5738,10 +6451,11 @@
 }
 
 
-HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
-                                            FunctionLiteral* function,
-                                            bool is_speculative,
-                                            HConstant* undefined) const {
+HEnvironment* HEnvironment::CopyForInlining(
+    Handle<JSFunction> target,
+    FunctionLiteral* function,
+    HConstant* undefined,
+    CallKind call_kind) const {
   // Outer environment is a copy of this one without the arguments.
   int arity = function->scope()->num_parameters();
   HEnvironment* outer = Copy();
@@ -5751,22 +6465,20 @@
   HEnvironment* inner =
       new(zone) HEnvironment(outer, function->scope(), target);
   // Get the argument values from the original environment.
-  if (is_speculative) {
-    for (int i = 0; i <= arity; ++i) {  // Include receiver.
-      HValue* push = ExpressionStackAt(arity - i);
-      inner->SetValueAt(i, push);
-    }
-  } else {
-    for (int i = 0; i <= arity; ++i) {  // Include receiver.
-      inner->SetValueAt(i, ExpressionStackAt(arity - i));
-    }
+  for (int i = 0; i <= arity; ++i) {  // Include receiver.
+    HValue* push = ExpressionStackAt(arity - i);
+    inner->SetValueAt(i, push);
   }
-
-  // Initialize the stack-allocated locals to undefined.
-  int local_base = arity + 1;
-  int local_count = function->scope()->num_stack_slots();
-  for (int i = 0; i < local_count; ++i) {
-    inner->SetValueAt(local_base + i, undefined);
+  // If the function we are inlining is a strict mode function or a
+  // builtin function, pass undefined as the receiver for function
+  // calls (instead of the global receiver).
+  if ((target->shared()->native() || function->strict_mode()) &&
+      call_kind == CALL_AS_FUNCTION) {
+    inner->SetValueAt(0, undefined);
+  }
+  inner->SetValueAt(arity + 1, outer->LookupContext());
+  for (int i = arity + 2; i < inner->length(); ++i) {
+    inner->SetValueAt(i, undefined);
   }
 
   inner->set_ast_id(AstNode::kFunctionEntryId);
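
Editorial aside, not part of the patch: the hunk above changes CopyForInlining so the inlined callee's receiver slot may be overwritten with undefined. A minimal standalone sketch of that rule, with illustrative names:

// Editorial sketch (illustrative names, not part of the patch): the receiver
// rule applied by CopyForInlining above.
enum CallKindSketch { CALL_AS_FUNCTION_SKETCH, CALL_AS_METHOD_SKETCH };

// An inlined callee sees 'undefined' instead of the global receiver when it
// is a builtin ("native") or strict mode function invoked as a plain
// function call.
static bool InlinedReceiverIsUndefined(bool callee_is_native,
                                       bool callee_is_strict_mode,
                                       CallKindSketch call_kind) {
  return (callee_is_native || callee_is_strict_mode) &&
         call_kind == CALL_AS_FUNCTION_SKETCH;
}
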
@@ -5777,8 +6489,11 @@
 void HEnvironment::PrintTo(StringStream* stream) {
   for (int i = 0; i < length(); i++) {
     if (i == 0) stream->Add("parameters\n");
-    if (i == parameter_count()) stream->Add("locals\n");
-    if (i == parameter_count() + local_count()) stream->Add("expressions");
+    if (i == parameter_count()) stream->Add("specials\n");
+    if (i == parameter_count() + specials_count()) stream->Add("locals\n");
+    if (i == parameter_count() + specials_count() + local_count()) {
+      stream->Add("expressions");
+    }
     HValue* val = values_.at(i);
     stream->Add("%d: ", i);
     if (val != NULL) {
@@ -5840,15 +6555,15 @@
       PrintEmptyProperty("predecessors");
     }
 
-    if (current->end() == NULL || current->end()->FirstSuccessor() == NULL) {
+    if (current->end()->SuccessorCount() == 0) {
       PrintEmptyProperty("successors");
-    } else if (current->end()->SecondSuccessor() == NULL) {
-      PrintBlockProperty("successors",
-                             current->end()->FirstSuccessor()->block_id());
-    } else {
-      PrintBlockProperty("successors",
-                             current->end()->FirstSuccessor()->block_id(),
-                             current->end()->SecondSuccessor()->block_id());
+    } else {
+      PrintIndent();
+      trace_.Add("successors");
+      for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) {
+        trace_.Add(" \"B%d\"", it.Current()->block_id());
+      }
+      trace_.Add("\n");
     }
 
     PrintEmptyProperty("xhandlers");
@@ -5858,6 +6573,8 @@
       PrintBlockProperty("dominator", current->dominator()->block_id());
     }
 
+    PrintIntProperty("loop_depth", current->LoopNestingDepth());
+
     if (chunk != NULL) {
       int first_index = current->first_instruction_index();
       int last_index = current->last_instruction_index();
@@ -5873,10 +6590,11 @@
       Tag states_tag(this, "states");
       Tag locals_tag(this, "locals");
       int total = current->phis()->length();
-      trace_.Add("size %d\n", total);
-      trace_.Add("method \"None\"");
+      PrintIntProperty("size", current->phis()->length());
+      PrintStringProperty("method", "None");
       for (int j = 0; j < total; ++j) {
         HPhi* phi = current->phis()->at(j);
+        PrintIndent();
         trace_.Add("%d ", phi->merged_index());
         phi->PrintNameTo(&trace_);
         trace_.Add(" ");
@@ -5890,7 +6608,8 @@
       HInstruction* instruction = current->first();
       while (instruction != NULL) {
         int bci = 0;
-        int uses = instruction->uses()->length();
+        int uses = instruction->UseCount();
+        PrintIndent();
         trace_.Add("%d %d ", bci, uses);
         instruction->PrintNameTo(&trace_);
         trace_.Add(" ");
@@ -5910,6 +6629,7 @@
         for (int i = first_index; i <= last_index; ++i) {
           LInstruction* linstr = instructions->at(i);
           if (linstr != NULL) {
+            PrintIndent();
             trace_.Add("%d ",
                        LifetimePosition::FromInstructionIndex(i).Value());
             linstr->PrintTo(&trace_);
@@ -5945,6 +6665,7 @@
 
 void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
   if (range != NULL && !range->IsEmpty()) {
+    PrintIndent();
     trace_.Add("%d %s", range->id(), type);
     if (range->HasRegisterAssigned()) {
       LOperand* op = range->CreateAssignedOperand();
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 37671f4..03fbc73 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -30,16 +30,18 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "ast.h"
 #include "compiler.h"
-#include "data-flow.h"
 #include "hydrogen-instructions.h"
+#include "type-info.h"
 #include "zone.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
+class BitVector;
 class HEnvironment;
 class HGraph;
 class HLoopInformation;
@@ -100,6 +102,7 @@
   void RemovePhi(HPhi* phi);
   void AddInstruction(HInstruction* instr);
   bool Dominates(HBasicBlock* other) const;
+  int LoopNestingDepth() const;
 
   void SetInitialEnvironment(HEnvironment* env);
   void ClearEnvironment() { last_environment_ = NULL; }
@@ -114,18 +117,18 @@
 
   bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
 
-  void SetJoinId(int id);
+  void SetJoinId(int ast_id);
 
   void Finish(HControlInstruction* last);
   void FinishExit(HControlInstruction* instruction);
-  void Goto(HBasicBlock* block, bool include_stack_check = false);
+  void Goto(HBasicBlock* block);
 
   int PredecessorIndexOf(HBasicBlock* predecessor) const;
-  void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
+  void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
   void AssignCommonDominator(HBasicBlock* other);
 
-  void FinishExitWithDeoptimization() {
-    FinishExit(CreateDeoptimize());
+  void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
+    FinishExit(CreateDeoptimize(has_uses));
   }
 
   // Add the inlined function exit sequence, adding an HLeaveInlined
@@ -141,6 +144,9 @@
   bool IsInlineReturnTarget() const { return is_inline_return_target_; }
   void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
 
+  bool IsDeoptimizing() const { return is_deoptimizing_; }
+  void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
+
   inline Zone* zone();
 
 #ifdef DEBUG
@@ -151,8 +157,8 @@
   void RegisterPredecessor(HBasicBlock* pred);
   void AddDominatedBlock(HBasicBlock* block);
 
-  HSimulate* CreateSimulate(int id);
-  HDeoptimize* CreateDeoptimize();
+  HSimulate* CreateSimulate(int ast_id);
+  HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
 
   int block_id_;
   HGraph* graph_;
@@ -173,13 +179,17 @@
   ZoneList<int> deleted_phis_;
   HBasicBlock* parent_loop_header_;
   bool is_inline_return_target_;
+  bool is_deoptimizing_;
 };
 
 
 class HLoopInformation: public ZoneObject {
  public:
   explicit HLoopInformation(HBasicBlock* loop_header)
-      : back_edges_(4), loop_header_(loop_header), blocks_(8) {
+      : back_edges_(4),
+        loop_header_(loop_header),
+        blocks_(8),
+        stack_check_(NULL) {
     blocks_.Add(loop_header);
   }
   virtual ~HLoopInformation() {}
@@ -190,12 +200,18 @@
   HBasicBlock* GetLastBackEdge() const;
   void RegisterBackEdge(HBasicBlock* block);
 
+  HStackCheck* stack_check() const { return stack_check_; }
+  void set_stack_check(HStackCheck* stack_check) {
+    stack_check_ = stack_check;
+  }
+
  private:
   void AddBlock(HBasicBlock* block);
 
   ZoneList<HBasicBlock*> back_edges_;
   HBasicBlock* loop_header_;
   ZoneList<HBasicBlock*> blocks_;
+  HStackCheck* stack_check_;
 };
 
 
@@ -223,9 +239,14 @@
   void OrderBlocks();
   void AssignDominators();
   void ReplaceCheckedValues();
+  void PropagateDeoptimizingMark();
 
   // Returns false if there are phi-uses of the arguments-object
   // which are not supported by the optimizing compiler.
+  bool CheckPhis();
+
+  // Returns false if there are phi-uses of hole values coming
+  // from uninitialized consts.
   bool CollectPhis();
 
   Handle<Code> Compile(CompilationInfo* info);
@@ -238,6 +259,7 @@
   HConstant* GetConstantMinus1();
   HConstant* GetConstantTrue();
   HConstant* GetConstantFalse();
+  HConstant* GetConstantHole();
 
   HBasicBlock* CreateBasicBlock();
   HArgumentsObject* GetArgumentsObject() const {
@@ -276,15 +298,15 @@
   HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
                          Object* value);
 
+  void MarkAsDeoptimizingRecursively(HBasicBlock* block);
   void InsertTypeConversions(HInstruction* instr);
   void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
   void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
   void InsertRepresentationChangeForUse(HValue* value,
-                                        HValue* use,
+                                        HValue* use_value,
+                                        int use_index,
                                         Representation to);
-  void InsertRepresentationChangesForValue(HValue* current,
-                                           ZoneList<HValue*>* value_list,
-                                           ZoneList<Representation>* rep_list);
+  void InsertRepresentationChangesForValue(HValue* value);
   void InferTypes(ZoneList<HValue*>* worklist);
   void InitializeInferredTypes(int from_inclusive, int to_inclusive);
   void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
@@ -301,6 +323,7 @@
   SetOncePointer<HConstant> constant_minus1_;
   SetOncePointer<HConstant> constant_true_;
   SetOncePointer<HConstant> constant_false_;
+  SetOncePointer<HConstant> constant_hole_;
   SetOncePointer<HArgumentsObject> arguments_object_;
 
   DISALLOW_COPY_AND_ASSIGN(HGraph);
@@ -323,6 +346,7 @@
     return &assigned_variables_;
   }
   int parameter_count() const { return parameter_count_; }
+  int specials_count() const { return specials_count_; }
   int local_count() const { return local_count_; }
   HEnvironment* outer() const { return outer_; }
   int pop_count() const { return pop_count_; }
@@ -332,6 +356,9 @@
   void set_ast_id(int id) { ast_id_ = id; }
 
   int length() const { return values_.length(); }
+  bool is_special_index(int i) const {
+    return i >= parameter_count() && i < parameter_count() + specials_count();
+  }
 
   void Bind(Variable* variable, HValue* value) {
     Bind(IndexFor(variable), value);
@@ -339,6 +366,10 @@
 
   void Bind(int index, HValue* value);
 
+  void BindContext(HValue* value) {
+    Bind(parameter_count(), value);
+  }
+
   HValue* Lookup(Variable* variable) const {
     return Lookup(IndexFor(variable));
   }
@@ -349,6 +380,11 @@
     return result;
   }
 
+  HValue* LookupContext() const {
+    // Return first special.
+    return Lookup(parameter_count());
+  }
+
   void Push(HValue* value) {
     ASSERT(value != NULL);
     ++push_count_;
@@ -369,6 +405,8 @@
 
   HValue* Top() const { return ExpressionStackAt(0); }
 
+  bool ExpressionStackIsEmpty() const;
+
   HValue* ExpressionStackAt(int index_from_top) const {
     int index = length() - index_from_top - 1;
     ASSERT(HasExpressionAt(index));
@@ -383,13 +421,11 @@
 
   // Create an "inlined version" of this environment, where the original
   // environment is the outer environment but the top expression stack
-  // elements are moved to an inner environment as parameters. If
-  // is_speculative, the argument values are expected to be PushArgument
-  // instructions, otherwise they are the actual values.
+  // elements are moved to an inner environment as parameters.
   HEnvironment* CopyForInlining(Handle<JSFunction> target,
                                 FunctionLiteral* function,
-                                bool is_speculative,
-                                HConstant* undefined) const;
+                                HConstant* undefined,
+                                CallKind call_kind) const;
 
   void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
 
@@ -413,8 +449,6 @@
   // True if index is included in the expression stack part of the environment.
   bool HasExpressionAt(int index) const;
 
-  bool ExpressionStackIsEmpty() const;
-
   void Initialize(int parameter_count, int local_count, int stack_height);
   void Initialize(const HEnvironment* other);
 
@@ -422,17 +456,19 @@
   // by 1 (receiver is parameter index -1 but environment index 0).
   // Stack-allocated local indices are shifted by the number of parameters
   // plus the number of specials.
   int IndexFor(Variable* variable) const {
-    Slot* slot = variable->AsSlot();
-    ASSERT(slot != NULL && slot->IsStackAllocated());
-    int shift = (slot->type() == Slot::PARAMETER) ? 1 : parameter_count_;
-    return slot->index() + shift;
+    ASSERT(variable->IsStackAllocated());
+    int shift = variable->IsParameter()
+        ? 1
+        : parameter_count_ + specials_count_;
+    return variable->index() + shift;
   }
 
   Handle<JSFunction> closure_;
-  // Value array [parameters] [locals] [temporaries].
+  // Value array [parameters] [specials] [locals] [temporaries].
   ZoneList<HValue*> values_;
   ZoneList<int> assigned_variables_;
   int parameter_count_;
+  int specials_count_;
   int local_count_;
   HEnvironment* outer_;
   int pop_count_;
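
Editorial aside, not part of the patch: with the new specials slot the environment value array is laid out as [parameters] [specials] [locals] [expressions], and the context lives in the single special slot. The standalone sketch below mirrors the index arithmetic of IndexFor(), LookupContext() and HasExpressionAt(); names are illustrative.

// Editorial sketch (illustrative names, not part of the patch): index
// arithmetic for the [parameters][specials][locals][expressions] layout.
struct EnvironmentLayoutSketch {
  int parameter_count;
  int specials_count;   // Currently always 1: the context.
  int local_count;

  // Receiver is parameter index -1 but environment index 0.
  int IndexOfParameter(int index) const { return index + 1; }
  // The context is the first (and only) special, right after the parameters.
  int IndexOfContext() const { return parameter_count; }
  // Locals are shifted by the parameters and the specials.
  int IndexOfLocal(int index) const {
    return parameter_count + specials_count + index;
  }
  // Everything past the locals belongs to the expression stack.
  bool IsExpressionIndex(int index) const {
    return index >= parameter_count + specials_count + local_count;
  }
};

For example, with two parameters and one special, the context sits at index 2 and the first stack-allocated local at index 3.
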
@@ -443,6 +479,11 @@
 
 class HGraphBuilder;
 
+enum ArgumentsAllowedFlag {
+  ARGUMENTS_NOT_ALLOWED,
+  ARGUMENTS_ALLOWED
+};
+
 // This class is not BASE_EMBEDDED because our inlining implementation uses
 // new and delete.
 class AstContext {
@@ -462,6 +503,12 @@
   // the instruction as value.
   virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
 
+  // Finishes the current basic block and materializes a boolean for
+  // value contexts, does nothing for effect contexts, and generates a
+  // branch for test contexts.
+  // Call this function in tail position in the Visit functions for
+  // expressions.
+  virtual void ReturnControl(HControlInstruction* instr, int ast_id) = 0;
+
   void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
   bool is_for_typeof() { return for_typeof_; }
 
@@ -496,39 +543,50 @@
 
   virtual void ReturnValue(HValue* value);
   virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+  virtual void ReturnControl(HControlInstruction* instr, int ast_id);
 };
 
 
 class ValueContext: public AstContext {
  public:
-  explicit ValueContext(HGraphBuilder* owner)
-      : AstContext(owner, Expression::kValue) {
+  explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+      : AstContext(owner, Expression::kValue), flag_(flag) {
   }
   virtual ~ValueContext();
 
   virtual void ReturnValue(HValue* value);
   virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+  virtual void ReturnControl(HControlInstruction* instr, int ast_id);
+
+  bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
+
+ private:
+  ArgumentsAllowedFlag flag_;
 };
 
 
 class TestContext: public AstContext {
  public:
   TestContext(HGraphBuilder* owner,
+              Expression* condition,
               HBasicBlock* if_true,
               HBasicBlock* if_false)
       : AstContext(owner, Expression::kTest),
+        condition_(condition),
         if_true_(if_true),
         if_false_(if_false) {
   }
 
   virtual void ReturnValue(HValue* value);
   virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+  virtual void ReturnControl(HControlInstruction* instr, int ast_id);
 
   static TestContext* cast(AstContext* context) {
     ASSERT(context->IsTest());
     return reinterpret_cast<TestContext*>(context);
   }
 
+  Expression* condition() const { return condition_; }
   HBasicBlock* if_true() const { return if_true_; }
   HBasicBlock* if_false() const { return if_false_; }
 
@@ -537,6 +595,7 @@
   // control flow.
   void BuildBranch(HValue* value);
 
+  Expression* condition_;
   HBasicBlock* if_true_;
   HBasicBlock* if_false_;
 };
@@ -634,20 +693,7 @@
     BreakAndContinueScope* next_;
   };
 
-  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle)
-      : function_state_(NULL),
-        initial_function_state_(this, info, oracle),
-        ast_context_(NULL),
-        break_scope_(NULL),
-        graph_(NULL),
-        current_block_(NULL),
-        inlined_count_(0),
-        zone_(info->isolate()->zone()) {
-    // This is not initialized in the initializer list because the
-    // constructor for the initial state relies on function_state_ == NULL
-    // to know it's the initial state.
-    function_state_= &initial_function_state_;
-  }
+  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
 
   HGraph* CreateGraph();
 
@@ -662,14 +708,24 @@
     return current_block()->last_environment();
   }
 
+  bool inline_bailout() { return inline_bailout_; }
+
   // Adding instructions.
   HInstruction* AddInstruction(HInstruction* instr);
-  void AddSimulate(int id);
+  void AddSimulate(int ast_id);
 
   // Bailout environment manipulation.
   void Push(HValue* value) { environment()->Push(value); }
   HValue* Pop() { return environment()->Pop(); }
 
+  void Bailout(const char* reason);
+
+  HBasicBlock* CreateJoin(HBasicBlock* first,
+                          HBasicBlock* second,
+                          int join_id);
+
+  TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
+
  private:
   // Type of a member function that generates inline code for a native function.
   typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -698,7 +754,6 @@
   CompilationInfo* info() const {
     return function_state()->compilation_info();
   }
-  TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
 
   AstContext* call_context() const {
     return function_state()->call_context();
@@ -724,15 +779,28 @@
   INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
 #undef INLINE_FUNCTION_GENERATOR_DECLARATION
 
-  void Bailout(const char* reason);
+  void HandleDeclaration(VariableProxy* proxy,
+                         Variable::Mode mode,
+                         FunctionLiteral* function);
+
+  void VisitDelete(UnaryOperation* expr);
+  void VisitVoid(UnaryOperation* expr);
+  void VisitTypeof(UnaryOperation* expr);
+  void VisitAdd(UnaryOperation* expr);
+  void VisitSub(UnaryOperation* expr);
+  void VisitBitNot(UnaryOperation* expr);
+  void VisitNot(UnaryOperation* expr);
+
+  void VisitComma(BinaryOperation* expr);
+  void VisitLogicalExpression(BinaryOperation* expr);
+  void VisitArithmeticExpression(BinaryOperation* expr);
 
   void PreProcessOsrEntry(IterationStatement* statement);
   // True iff. we are compiling for OSR and the statement is the entry.
   bool HasOsrEntryAt(IterationStatement* statement);
-
-  HBasicBlock* CreateJoin(HBasicBlock* first,
-                          HBasicBlock* second,
-                          int join_id);
+  void VisitLoopBody(IterationStatement* stmt,
+                     HBasicBlock* loop_entry,
+                     BreakAndContinueInfo* break_info);
 
   // Create a back edge in the flow graph.  body_exit is the predecessor
   // block and loop_entry is the successor block.  loop_successor is the
@@ -755,7 +823,11 @@
   void Drop(int n) { environment()->Drop(n); }
   void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
 
-  void VisitForValue(Expression* expr);
+  // The value of the arguments object is allowed in some but not most value
+  // contexts.  (It's allowed in all effect contexts and disallowed in all
+  // test contexts.)
+  void VisitForValue(Expression* expr,
+                     ArgumentsAllowedFlag flag = ARGUMENTS_NOT_ALLOWED);
   void VisitForTypeOf(Expression* expr);
   void VisitForEffect(Expression* expr);
   void VisitForControl(Expression* expr,
@@ -763,8 +835,9 @@
                        HBasicBlock* false_block);
 
   // Visit an argument subexpression and emit a push to the outgoing
-  // arguments.
-  void VisitArgument(Expression* expr);
+  // arguments.  Returns the hydrogen value that was pushed.
+  HValue* VisitArgument(Expression* expr);
+
   void VisitArgumentList(ZoneList<Expression*>* arguments);
 
   // Visit a list of expressions from left to right, each in a value context.
@@ -778,7 +851,10 @@
   // to push them as outgoing parameters.
   template <int V> HInstruction* PreProcessCall(HCall<V>* call);
 
-  void AssumeRepresentation(HValue* value, Representation r);
+  void TraceRepresentation(Token::Value op,
+                           TypeInfo info,
+                           HValue* value,
+                           Representation rep);
   static Representation ToRepresentation(TypeInfo info);
 
   void SetupScope(Scope* scope);
@@ -814,7 +890,9 @@
   // If --trace-inlining, print a line of the inlining trace.  Inlining
   // succeeded if the reason string is NULL and failed if there is a
   // non-NULL reason string.
-  void TraceInline(Handle<JSFunction> target, const char* failure_reason);
+  void TraceInline(Handle<JSFunction> target,
+                   Handle<JSFunction> caller,
+                   const char* failure_reason);
 
   void HandleGlobalVariableAssignment(Variable* var,
                                       HValue* value,
@@ -826,37 +904,63 @@
   void HandlePolymorphicStoreNamedField(Assignment* expr,
                                         HValue* object,
                                         HValue* value,
-                                        ZoneMapList* types,
+                                        SmallMapList* types,
                                         Handle<String> name);
   void HandlePolymorphicCallNamed(Call* expr,
                                   HValue* receiver,
-                                  ZoneMapList* types,
+                                  SmallMapList* types,
                                   Handle<String> name);
+  void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+                                  Expression* expr,
+                                  Handle<String> check);
+  void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
+                                     Expression* expr);
 
-  HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
+  HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
+                                           HValue* string,
                                            HValue* index);
   HInstruction* BuildBinaryOperation(BinaryOperation* expr,
                                      HValue* left,
                                      HValue* right);
-  HInstruction* BuildIncrement(HValue* value, bool increment);
+  HInstruction* BuildIncrement(bool returns_original_input,
+                               CountOperation* expr);
   HLoadNamedField* BuildLoadNamedField(HValue* object,
                                        Property* expr,
                                        Handle<Map> type,
                                        LookupResult* result,
                                        bool smi_and_map_check);
   HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
-  HInstruction* BuildLoadKeyedFastElement(HValue* object,
-                                          HValue* key,
-                                          Property* expr);
-  HInstruction* BuildLoadKeyedSpecializedArrayElement(HValue* object,
-                                                      HValue* key,
-                                                      Property* expr);
   HInstruction* BuildLoadKeyedGeneric(HValue* object,
                                       HValue* key);
+  HInstruction* BuildExternalArrayElementAccess(
+      HValue* external_elements,
+      HValue* checked_key,
+      HValue* val,
+      ElementsKind elements_kind,
+      bool is_store);
 
-  HInstruction* BuildLoadKeyed(HValue* obj,
-                               HValue* key,
-                               Property* prop);
+  HInstruction* BuildMonomorphicElementAccess(HValue* object,
+                                              HValue* key,
+                                              HValue* val,
+                                              Expression* expr,
+                                              bool is_store);
+  HValue* HandlePolymorphicElementAccess(HValue* object,
+                                         HValue* key,
+                                         HValue* val,
+                                         Expression* prop,
+                                         int ast_id,
+                                         int position,
+                                         bool is_store,
+                                         bool* has_side_effects);
+
+  HValue* HandleKeyedElementAccess(HValue* obj,
+                                   HValue* key,
+                                   HValue* val,
+                                   Expression* expr,
+                                   int ast_id,
+                                   int position,
+                                   bool is_store,
+                                   bool* has_side_effects);
 
   HInstruction* BuildLoadNamed(HValue* object,
                                Property* prop,
@@ -878,22 +982,6 @@
                                        HValue* key,
                                        HValue* value);
 
-  HInstruction* BuildStoreKeyedFastElement(HValue* object,
-                                           HValue* key,
-                                           HValue* val,
-                                           Expression* expr);
-
-  HInstruction* BuildStoreKeyedSpecializedArrayElement(
-      HValue* object,
-      HValue* key,
-      HValue* val,
-      Expression* expr);
-
-  HInstruction* BuildStoreKeyed(HValue* object,
-                                HValue* key,
-                                HValue* value,
-                                Expression* assignment);
-
   HValue* BuildContextChainWalk(Variable* var);
 
   void AddCheckConstantFunction(Call* expr,
@@ -923,6 +1011,8 @@
 
   Zone* zone_;
 
+  bool inline_bailout_;
+
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
 
@@ -957,9 +1047,11 @@
   HValue* Lookup(HValue* value) const;
 
   HValueMap* Copy(Zone* zone) const {
-    return new(zone) HValueMap(this);
+    return new(zone) HValueMap(zone, this);
   }
 
+  bool IsEmpty() const { return count_ == 0; }
+
  private:
   // A linked list of HValue* values.  Stored in arrays.
   struct HValueMapListElement {
@@ -971,7 +1063,7 @@
   // Must be a power of 2.
   static const int kInitialSize = 16;
 
-  explicit HValueMap(const HValueMap* other);
+  HValueMap(Zone* zone, const HValueMap* other);
 
   void Resize(int new_size);
   void ResizeLists(int new_size);
@@ -1003,7 +1095,6 @@
   }
 
  private:
-
   HStatistics()
       : timing_(5),
         names_(5),
@@ -1127,11 +1218,6 @@
     trace_.Add("%s \"B%d\"\n", name, block_id);
   }
 
-  void PrintBlockProperty(const char* name, int block_id1, int block_id2) {
-    PrintIndent();
-    trace_.Add("%s \"B%d\" \"B%d\"\n", name, block_id1, block_id2);
-  }
-
   void PrintIntProperty(const char* name, int value) {
     PrintIndent();
     trace_.Add("%s %d\n", name, value);
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index a9247f4..0ca2d6b 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // A light-weight IA32 Assembler.
 
@@ -311,8 +311,12 @@
 }
 
 
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
-  if (rmode != RelocInfo::NONE) RecordRelocInfo(rmode);
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, unsigned id) {
+  if (rmode == RelocInfo::CODE_TARGET && id != kNoASTId) {
+    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, static_cast<intptr_t>(id));
+  } else if (rmode != RelocInfo::NONE) {
+    RecordRelocInfo(rmode);
+  }
   emit(x);
 }
 
@@ -376,6 +380,18 @@
 }
 
 
+void Assembler::emit_near_disp(Label* L) {
+  byte disp = 0x00;
+  if (L->is_near_linked()) {
+    int offset = L->near_link_pos() - pc_offset();
+    ASSERT(is_int8(offset));
+    disp = static_cast<byte>(offset & 0xFF);
+  }
+  L->link_to(pc_offset(), Label::kNear);
+  *pc_++ = disp;
+}
+
+
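Editorial aside, not part of the patch: emit_near_disp() above threads the unresolved near uses of a label into a chain of one-byte deltas that a later bind can walk and patch with the real displacements. A simplified standalone sketch of that encoding, with illustrative names (the real Label/Assembler bookkeeping is richer):

// Editorial sketch (illustrative names, not part of the patch): the chain
// encoding written by emit_near_disp() above.
#include <cassert>
#include <cstdint>
#include <vector>

struct NearLabelSketch {
  int link_pos;                       // pc offset of the most recent use.
  NearLabelSketch() : link_pos(-1) {}
  bool is_linked() const { return link_pos != -1; }
};

static void EmitNearDispSketch(std::vector<uint8_t>* code, NearLabelSketch* label) {
  int pc_offset = static_cast<int>(code->size());
  uint8_t disp = 0x00;                // 0x00 marks the end of the chain.
  if (label->is_linked()) {
    int offset = label->link_pos - pc_offset;
    assert(-128 <= offset && offset < 128);   // Must fit in a signed byte.
    disp = static_cast<uint8_t>(offset & 0xFF);
  }
  label->link_pos = pc_offset;        // This use becomes the new chain head.
  code->push_back(disp);
}

Since each link is a signed byte, a near label is only usable when all of its uses are within roughly -128..+127 bytes of each other, which is what the is_int8 assertion in the real code enforces.
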
 void Operand::set_modrm(int mod, Register rm) {
   ASSERT((mod & -4) == 0);
   buf_[0] = mod << 6 | rm.code();
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 9273037..9996474 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license has been modified
 // significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -341,7 +341,6 @@
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
 
-  last_pc_ = NULL;
 #ifdef GENERATED_CODE_COVERAGE
   InitCoverageLog();
 #endif
@@ -389,7 +388,6 @@
 void Assembler::cpuid() {
   ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xA2);
 }
@@ -397,35 +395,30 @@
 
 void Assembler::pushad() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x60);
 }
 
 
 void Assembler::popad() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x61);
 }
 
 
 void Assembler::pushfd() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9C);
 }
 
 
 void Assembler::popfd() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9D);
 }
 
 
 void Assembler::push(const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (x.is_int8()) {
     EMIT(0x6a);
     EMIT(x.x_);
@@ -445,140 +438,33 @@
 
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x50 | src.code());
 }
 
 
 void Assembler::push(const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(esi, src);
 }
 
 
+void Assembler::push(Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x68);
+  emit(handle);
+}
+
+
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
-  if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
-    // (last_pc_ != NULL) is rolled into the above check.
-    // If a last_pc_ is set, we need to make sure that there has not been any
-    // relocation information generated between the last instruction and this
-    // pop instruction.
-    byte instr = last_pc_[0];
-    if ((instr & ~0x7) == 0x50) {
-      int push_reg_code = instr & 0x7;
-      if (push_reg_code == dst.code()) {
-        pc_ = last_pc_;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
-        }
-      } else {
-        // Convert 'push src; pop dst' to 'mov dst, src'.
-        last_pc_[0] = 0x8b;
-        Register src = { push_reg_code };
-        EnsureSpace ensure_space(this);
-        emit_operand(dst, Operand(src));
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
-        }
-      }
-      last_pc_ = NULL;
-      return;
-    } else if (instr == 0xff) {  // push of an operand, convert to a move
-      byte op1 = last_pc_[1];
-      // Check if the operation is really a push.
-      if ((op1 & 0x38) == (6 << 3)) {
-        op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
-        last_pc_[0] = 0x8b;
-        last_pc_[1] = op1;
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    } else if ((instr == 0x89) &&
-               (last_pc_[1] == 0x04) &&
-               (last_pc_[2] == 0x24)) {
-      // 0x71283c   396  890424         mov [esp],eax
-      // 0x71283f   399  58             pop eax
-      if (dst.is(eax)) {
-        // change to
-        // 0x710fac   216  83c404         add esp,0x4
-        last_pc_[0] = 0x83;
-        last_pc_[1] = 0xc4;
-        last_pc_[2] = 0x04;
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    } else if (instr == 0x6a && dst.is(eax)) {  // push of immediate 8 bit
-      byte imm8 = last_pc_[1];
-      if (imm8 == 0) {
-        // 6a00         push 0x0
-        // 58           pop eax
-        last_pc_[0] = 0x31;
-        last_pc_[1] = 0xc0;
-        // change to
-        // 31c0         xor eax,eax
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
-        }
-        return;
-      } else {
-        // 6a00         push 0xXX
-        // 58           pop eax
-        last_pc_[0] = 0xb8;
-        EnsureSpace ensure_space(this);
-        if ((imm8 & 0x80) != 0) {
-          EMIT(0xff);
-          EMIT(0xff);
-          EMIT(0xff);
-          // change to
-          // b8XXffffff   mov eax,0xffffffXX
-        } else {
-          EMIT(0x00);
-          EMIT(0x00);
-          EMIT(0x00);
-          // change to
-          // b8XX000000   mov eax,0x000000XX
-        }
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    } else if (instr == 0x68 && dst.is(eax)) {  // push of immediate 32 bit
-      // 68XXXXXXXX   push 0xXXXXXXXX
-      // 58           pop eax
-      last_pc_[0] = 0xb8;
-      last_pc_ = NULL;
-      // change to
-      // b8XXXXXXXX   mov eax,0xXXXXXXXX
-      if (FLAG_print_peephole_optimization) {
-        PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
-      }
-      return;
-    }
-
-    // Other potential patterns for peephole:
-    // 0x712716   102  890424         mov [esp], eax
-    // 0x712719   105  8b1424         mov edx, [esp]
-  }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x58 | dst.code());
 }
 
 
 void Assembler::pop(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8F);
   emit_operand(eax, dst);
 }
@@ -586,7 +472,6 @@
 
 void Assembler::enter(const Immediate& size) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC8);
   emit_w(size);
   EMIT(0);
@@ -595,7 +480,6 @@
 
 void Assembler::leave() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC9);
 }
 
@@ -603,7 +487,6 @@
 void Assembler::mov_b(Register dst, const Operand& src) {
   ASSERT(dst.code() < 4);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8A);
   emit_operand(dst, src);
 }
@@ -611,7 +494,6 @@
 
 void Assembler::mov_b(const Operand& dst, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC6);
   emit_operand(eax, dst);
   EMIT(imm8);
@@ -621,7 +503,6 @@
 void Assembler::mov_b(const Operand& dst, Register src) {
   ASSERT(src.code() < 4);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x88);
   emit_operand(src, dst);
 }
@@ -629,7 +510,6 @@
 
 void Assembler::mov_w(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x8B);
   emit_operand(dst, src);
@@ -638,7 +518,6 @@
 
 void Assembler::mov_w(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x89);
   emit_operand(src, dst);
@@ -647,7 +526,6 @@
 
 void Assembler::mov(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xB8 | dst.code());
   emit(imm32);
 }
@@ -655,7 +533,6 @@
 
 void Assembler::mov(Register dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xB8 | dst.code());
   emit(x);
 }
@@ -663,7 +540,6 @@
 
 void Assembler::mov(Register dst, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xB8 | dst.code());
   emit(handle);
 }
@@ -671,7 +547,6 @@
 
 void Assembler::mov(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8B);
   emit_operand(dst, src);
 }
@@ -679,7 +554,6 @@
 
 void Assembler::mov(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x89);
   EMIT(0xC0 | src.code() << 3 | dst.code());
 }
@@ -687,7 +561,6 @@
 
 void Assembler::mov(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC7);
   emit_operand(eax, dst);
   emit(x);
@@ -696,7 +569,6 @@
 
 void Assembler::mov(const Operand& dst, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC7);
   emit_operand(eax, dst);
   emit(handle);
@@ -705,7 +577,6 @@
 
 void Assembler::mov(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x89);
   emit_operand(src, dst);
 }
@@ -713,7 +584,6 @@
 
 void Assembler::movsx_b(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xBE);
   emit_operand(dst, src);
@@ -722,7 +592,6 @@
 
 void Assembler::movsx_w(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xBF);
   emit_operand(dst, src);
@@ -731,7 +600,6 @@
 
 void Assembler::movzx_b(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xB6);
   emit_operand(dst, src);
@@ -740,7 +608,6 @@
 
 void Assembler::movzx_w(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xB7);
   emit_operand(dst, src);
@@ -750,7 +617,6 @@
 void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   UNIMPLEMENTED();
   USE(cc);
   USE(dst);
@@ -761,7 +627,6 @@
 void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   UNIMPLEMENTED();
   USE(cc);
   USE(dst);
@@ -772,7 +637,6 @@
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
   EMIT(0x0F);
   EMIT(0x40 + cc);
@@ -782,14 +646,12 @@
 
 void Assembler::cld() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFC);
 }
 
 
 void Assembler::rep_movs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0xA5);
 }
@@ -797,7 +659,6 @@
 
 void Assembler::rep_stos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0xAB);
 }
@@ -805,14 +666,12 @@
 
 void Assembler::stos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xAB);
 }
 
 
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding.
     EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
   } else {
@@ -824,14 +683,12 @@
 
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(2, Operand(dst), Immediate(imm32));
 }
 
 
 void Assembler::adc(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x13);
   emit_operand(dst, src);
 }
@@ -839,7 +696,6 @@
 
 void Assembler::add(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x03);
   emit_operand(dst, src);
 }
@@ -847,24 +703,7 @@
 
 void Assembler::add(const Operand& dst, const Immediate& x) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
-  if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
-    byte instr = last_pc_[0];
-    if ((instr & 0xf8) == 0x50) {
-      // Last instruction was a push. Check whether this is a pop without a
-      // result.
-      if ((dst.is_reg(esp)) &&
-          (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
-        pc_ = last_pc_;
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    }
-  }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(0, dst, x);
 }
 
@@ -876,14 +715,12 @@
 
 void Assembler::and_(Register dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(4, Operand(dst), x);
 }
 
 
 void Assembler::and_(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x23);
   emit_operand(dst, src);
 }
@@ -891,14 +728,12 @@
 
 void Assembler::and_(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(4, dst, x);
 }
 
 
 void Assembler::and_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x21);
   emit_operand(src, dst);
 }
@@ -906,7 +741,6 @@
 
 void Assembler::cmpb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x80);
   emit_operand(edi, op);  // edi == 7
   EMIT(imm8);
@@ -916,7 +750,6 @@
 void Assembler::cmpb(const Operand& dst, Register src) {
   ASSERT(src.is_byte_register());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x38);
   emit_operand(src, dst);
 }
@@ -925,7 +758,6 @@
 void Assembler::cmpb(Register dst, const Operand& src) {
   ASSERT(dst.is_byte_register());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x3A);
   emit_operand(dst, src);
 }
@@ -934,7 +766,6 @@
 void Assembler::cmpw(const Operand& op, Immediate imm16) {
   ASSERT(imm16.is_int16());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x81);
   emit_operand(edi, op);
@@ -944,21 +775,18 @@
 
 void Assembler::cmp(Register reg, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, Operand(reg), Immediate(imm32));
 }
 
 
 void Assembler::cmp(Register reg, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, Operand(reg), Immediate(handle));
 }
 
 
 void Assembler::cmp(Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x3B);
   emit_operand(reg, op);
 }
@@ -966,21 +794,18 @@
 
 void Assembler::cmp(const Operand& op, const Immediate& imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, op, imm);
 }
 
 
 void Assembler::cmp(const Operand& op, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, op, Immediate(handle));
 }
 
 
 void Assembler::cmpb_al(const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x38);  // CMP r/m8, r8
   emit_operand(eax, op);  // eax has same code as register al.
 }
@@ -988,7 +813,6 @@
 
 void Assembler::cmpw_ax(const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x39);  // CMP r/m16, r16
   emit_operand(eax, op);  // eax has same code as register ax.
@@ -997,7 +821,6 @@
 
 void Assembler::dec_b(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFE);
   EMIT(0xC8 | dst.code());
 }
@@ -1005,7 +828,6 @@
 
 void Assembler::dec_b(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFE);
   emit_operand(ecx, dst);
 }
@@ -1013,14 +835,12 @@
 
 void Assembler::dec(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x48 | dst.code());
 }
 
 
 void Assembler::dec(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(ecx, dst);
 }
@@ -1028,14 +848,12 @@
 
 void Assembler::cdq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x99);
 }
 
 
 void Assembler::idiv(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xF8 | src.code());
 }
@@ -1043,7 +861,6 @@
 
 void Assembler::imul(Register reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xE8 | reg.code());
 }
@@ -1051,7 +868,6 @@
 
 void Assembler::imul(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xAF);
   emit_operand(dst, src);
@@ -1060,7 +876,6 @@
 
 void Assembler::imul(Register dst, Register src, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (is_int8(imm32)) {
     EMIT(0x6B);
     EMIT(0xC0 | dst.code() << 3 | src.code());
@@ -1075,14 +890,12 @@
 
 void Assembler::inc(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x40 | dst.code());
 }
 
 
 void Assembler::inc(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(eax, dst);
 }
@@ -1090,7 +903,6 @@
 
 void Assembler::lea(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8D);
   emit_operand(dst, src);
 }
@@ -1098,7 +910,6 @@
 
 void Assembler::mul(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xE0 | src.code());
 }
@@ -1106,7 +917,6 @@
 
 void Assembler::neg(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xD8 | dst.code());
 }
@@ -1114,7 +924,6 @@
 
 void Assembler::not_(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xD0 | dst.code());
 }
@@ -1122,14 +931,12 @@
 
 void Assembler::or_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(1, Operand(dst), Immediate(imm32));
 }
 
 
 void Assembler::or_(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0B);
   emit_operand(dst, src);
 }
@@ -1137,14 +944,12 @@
 
 void Assembler::or_(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(1, dst, x);
 }
 
 
 void Assembler::or_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x09);
   emit_operand(src, dst);
 }
@@ -1152,7 +957,6 @@
 
 void Assembler::rcl(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1167,7 +971,6 @@
 
 void Assembler::rcr(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1182,7 +985,6 @@
 
 void Assembler::sar(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1197,7 +999,6 @@
 
 void Assembler::sar_cl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD3);
   EMIT(0xF8 | dst.code());
 }
@@ -1205,7 +1006,6 @@
 
 void Assembler::sbb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x1B);
   emit_operand(dst, src);
 }
@@ -1213,7 +1013,6 @@
 
 void Assembler::shld(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xA5);
   emit_operand(dst, src);
@@ -1222,7 +1021,6 @@
 
 void Assembler::shl(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1237,7 +1035,6 @@
 
 void Assembler::shl_cl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD3);
   EMIT(0xE0 | dst.code());
 }
@@ -1245,7 +1042,6 @@
 
 void Assembler::shrd(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xAD);
   emit_operand(dst, src);
@@ -1254,7 +1050,6 @@
 
 void Assembler::shr(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1269,7 +1064,6 @@
 
 void Assembler::shr_cl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD3);
   EMIT(0xE8 | dst.code());
 }
@@ -1277,7 +1071,6 @@
 
 void Assembler::subb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (op.is_reg(eax)) {
     EMIT(0x2c);
   } else {
@@ -1290,14 +1083,12 @@
 
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(5, dst, x);
 }
 
 
 void Assembler::sub(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x2B);
   emit_operand(dst, src);
 }
@@ -1306,7 +1097,6 @@
 void Assembler::subb(Register dst, const Operand& src) {
   ASSERT(dst.code() < 4);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x2A);
   emit_operand(dst, src);
 }
@@ -1314,7 +1104,6 @@
 
 void Assembler::sub(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x29);
   emit_operand(src, dst);
 }
@@ -1322,7 +1111,6 @@
 
 void Assembler::test(Register reg, const Immediate& imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
   if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
@@ -1349,7 +1137,6 @@
 
 void Assembler::test(Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x85);
   emit_operand(reg, op);
 }
@@ -1357,7 +1144,6 @@
 
 void Assembler::test_b(Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x84);
   emit_operand(reg, op);
 }
@@ -1365,7 +1151,6 @@
 
 void Assembler::test(const Operand& op, const Immediate& imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   emit_operand(eax, op);
   emit(imm);
@@ -1374,7 +1159,6 @@
 
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF6);
   emit_operand(eax, op);
   EMIT(imm8);
@@ -1383,14 +1167,12 @@
 
 void Assembler::xor_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(6, Operand(dst), Immediate(imm32));
 }
 
 
 void Assembler::xor_(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x33);
   emit_operand(dst, src);
 }
@@ -1398,7 +1180,6 @@
 
 void Assembler::xor_(const Operand& src, Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x31);
   emit_operand(dst, src);
 }
@@ -1406,14 +1187,12 @@
 
 void Assembler::xor_(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(6, dst, x);
 }
 
 
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xA3);
   emit_operand(src, dst);
@@ -1422,7 +1201,6 @@
 
 void Assembler::bts(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xAB);
   emit_operand(src, dst);
@@ -1431,21 +1209,18 @@
 
 void Assembler::hlt() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF4);
 }
 
 
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xCC);
 }
 
 
 void Assembler::nop() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x90);
 }
 
@@ -1453,7 +1228,6 @@
 void Assembler::rdtsc() {
   ASSERT(CpuFeatures::IsEnabled(RDTSC));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x31);
 }
@@ -1461,7 +1235,6 @@
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint16(imm16));
   if (imm16 == 0) {
     EMIT(0xC3);
@@ -1507,7 +1280,6 @@
 
 void Assembler::bind_to(Label* L, int pos) {
   EnsureSpace ensure_space(this);
-  last_pc_ = NULL;
   ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
   while (L->is_linked()) {
     Displacement disp = disp_at(L);
@@ -1525,36 +1297,35 @@
     }
     disp.next(L);
   }
+  while (L->is_near_linked()) {
+    int fixup_pos = L->near_link_pos();
+    int offset_to_next =
+        static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+    ASSERT(offset_to_next <= 0);
+    // Relative address, relative to point after address.
+    int disp = pos - fixup_pos - sizeof(int8_t);
+    ASSERT(0 <= disp && disp <= 127);
+    set_byte_at(fixup_pos, disp);
+    if (offset_to_next < 0) {
+      L->link_to(fixup_pos + offset_to_next, Label::kNear);
+    } else {
+      L->UnuseNear();
+    }
+  }
   L->bind_to(pos);
 }
 
 
 void Assembler::bind(Label* L) {
   EnsureSpace ensure_space(this);
-  last_pc_ = NULL;
   ASSERT(!L->is_bound());  // label can only be bound once
   bind_to(L, pc_offset());
 }
 
 
-void Assembler::bind(NearLabel* L) {
-  ASSERT(!L->is_bound());
-  last_pc_ = NULL;
-  while (L->unresolved_branches_ > 0) {
-    int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
-    int disp = pc_offset() - branch_pos;
-    ASSERT(is_int8(disp));
-    set_byte_at(branch_pos - sizeof(int8_t), disp);
-    L->unresolved_branches_--;
-  }
-  L->bind_to(pc_offset());
-}
-
-
 void Assembler::call(Label* L) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (L->is_bound()) {
     const int long_size = 5;
     int offs = L->pos() - pc_offset();
@@ -1573,35 +1344,44 @@
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE8);
   emit(entry - (pc_ + sizeof(int32_t)), rmode);
 }
 
 
+int Assembler::CallSize(const Operand& adr) {
+  // Call size is 1 (opcode) + adr.len_ (operand).
+  return 1 + adr.len_;
+}
+
+
 void Assembler::call(const Operand& adr) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(edx, adr);
 }
 
 
-void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
-  positions_recorder()->WriteRecordedPositions();
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
-  EMIT(0xE8);
-  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+  return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
 }
 
 
-void Assembler::jmp(Label* L) {
+void Assembler::call(Handle<Code> code,
+                     RelocInfo::Mode rmode,
+                     unsigned ast_id) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE8);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id);
+}
+
+
+void Assembler::jmp(Label* L, Label::Distance distance) {
+  EnsureSpace ensure_space(this);
   if (L->is_bound()) {
     const int short_size = 2;
     const int long_size  = 5;
@@ -1616,6 +1396,9 @@
       EMIT(0xE9);
       emit(offs - long_size);
     }
+  } else if (distance == Label::kNear) {
+    EMIT(0xEB);
+    emit_near_disp(L);
   } else {
     // 1110 1001 #32-bit disp.
     EMIT(0xE9);
@@ -1626,7 +1409,6 @@
 
 void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   emit(entry - (pc_ + sizeof(int32_t)), rmode);
@@ -1635,7 +1417,6 @@
 
 void Assembler::jmp(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(esp, adr);
 }
@@ -1643,37 +1424,15 @@
 
 void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   emit(reinterpret_cast<intptr_t>(code.location()), rmode);
 }
 
 
-void Assembler::jmp(NearLabel* L) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 1110 1011 #8-bit disp.
-    EMIT(0xEB);
-    EMIT((offs - short_size) & 0xFF);
-  } else {
-    EMIT(0xEB);
-    EMIT(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
-void Assembler::j(Condition cc, Label* L, Hint hint) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(0 <= cc && cc < 16);
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
   if (L->is_bound()) {
     const int short_size = 2;
     const int long_size  = 6;
@@ -1689,6 +1448,9 @@
       EMIT(0x80 | cc);
       emit(offs - long_size);
     }
+  } else if (distance == Label::kNear) {
+    EMIT(0x70 | cc);
+    emit_near_disp(L);
   } else {
     // 0000 1111 1000 tttn #32-bit disp
     // Note: could eliminate cond. jumps to this jump if condition
@@ -1700,11 +1462,9 @@
 }
 
 
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((0 <= cc) && (cc < 16));
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
   // 0000 1111 1000 tttn #32-bit disp.
   EMIT(0x0F);
   EMIT(0x80 | cc);
@@ -1712,10 +1472,8 @@
 }
 
 
-void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+void Assembler::j(Condition cc, Handle<Code> code) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
   // 0000 1111 1000 tttn #32-bit disp
   EMIT(0x0F);
   EMIT(0x80 | cc);
@@ -1723,46 +1481,22 @@
 }
 
 
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  ASSERT(0 <= cc && cc < 16);
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 0111 tttn #8-bit disp
-    EMIT(0x70 | cc);
-    EMIT((offs - short_size) & 0xFF);
-  } else {
-    EMIT(0x70 | cc);
-    EMIT(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
 // FPU instructions.
 
 void Assembler::fld(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC0, i);
 }
 
 
 void Assembler::fstp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xD8, i);
 }
 
 
 void Assembler::fld1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE8);
 }
@@ -1770,7 +1504,6 @@
 
 void Assembler::fldpi() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xEB);
 }
@@ -1778,7 +1511,6 @@
 
 void Assembler::fldz() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xEE);
 }
@@ -1786,7 +1518,6 @@
 
 void Assembler::fldln2() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xED);
 }
@@ -1794,7 +1525,6 @@
 
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   emit_operand(eax, adr);
 }
@@ -1802,7 +1532,6 @@
 
 void Assembler::fld_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(eax, adr);
 }
@@ -1810,7 +1539,6 @@
 
 void Assembler::fstp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   emit_operand(ebx, adr);
 }
@@ -1818,7 +1546,6 @@
 
 void Assembler::fstp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(ebx, adr);
 }
@@ -1826,7 +1553,6 @@
 
 void Assembler::fst_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(edx, adr);
 }
@@ -1834,7 +1560,6 @@
 
 void Assembler::fild_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(eax, adr);
 }
@@ -1842,7 +1567,6 @@
 
 void Assembler::fild_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   emit_operand(ebp, adr);
 }
@@ -1850,7 +1574,6 @@
 
 void Assembler::fistp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(ebx, adr);
 }
@@ -1859,7 +1582,6 @@
 void Assembler::fisttp_s(const Operand& adr) {
   ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(ecx, adr);
 }
@@ -1868,7 +1590,6 @@
 void Assembler::fisttp_d(const Operand& adr) {
   ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(ecx, adr);
 }
@@ -1876,7 +1597,6 @@
 
 void Assembler::fist_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(edx, adr);
 }
@@ -1884,7 +1604,6 @@
 
 void Assembler::fistp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   emit_operand(edi, adr);
 }
@@ -1892,7 +1611,6 @@
 
 void Assembler::fabs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE1);
 }
@@ -1900,7 +1618,6 @@
 
 void Assembler::fchs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE0);
 }
@@ -1908,7 +1625,6 @@
 
 void Assembler::fcos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xFF);
 }
@@ -1916,7 +1632,6 @@
 
 void Assembler::fsin() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xFE);
 }
@@ -1924,7 +1639,6 @@
 
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF1);
 }
@@ -1932,21 +1646,18 @@
 
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC0, i);
 }
 
 
 void Assembler::fsub(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xE8, i);
 }
 
 
 void Assembler::fisub_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDA);
   emit_operand(esp, adr);
 }
@@ -1954,56 +1665,48 @@
 
 void Assembler::fmul(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC8, i);
 }
 
 
 void Assembler::fdiv(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xF8, i);
 }
 
 
 void Assembler::faddp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC0, i);
 }
 
 
 void Assembler::fsubp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE8, i);
 }
 
 
 void Assembler::fsubrp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE0, i);
 }
 
 
 void Assembler::fmulp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC8, i);
 }
 
 
 void Assembler::fdivp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xF8, i);
 }
 
 
 void Assembler::fprem() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF8);
 }
@@ -2011,7 +1714,6 @@
 
 void Assembler::fprem1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF5);
 }
@@ -2019,14 +1721,12 @@
 
 void Assembler::fxch(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC8, i);
 }
 
 
 void Assembler::fincstp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF7);
 }
@@ -2034,14 +1734,12 @@
 
 void Assembler::ffree(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xC0, i);
 }
 
 
 void Assembler::ftst() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE4);
 }
@@ -2049,14 +1747,12 @@
 
 void Assembler::fucomp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xE8, i);
 }
 
 
 void Assembler::fucompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDA);
   EMIT(0xE9);
 }
@@ -2064,7 +1760,6 @@
 
 void Assembler::fucomi(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   EMIT(0xE8 + i);
 }
@@ -2072,7 +1767,6 @@
 
 void Assembler::fucomip() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   EMIT(0xE9);
 }
@@ -2080,7 +1774,6 @@
 
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDE);
   EMIT(0xD9);
 }
@@ -2088,7 +1781,6 @@
 
 void Assembler::fnstsw_ax() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   EMIT(0xE0);
 }
@@ -2096,14 +1788,12 @@
 
 void Assembler::fwait() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9B);
 }
 
 
 void Assembler::frndint() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xFC);
 }
@@ -2111,7 +1801,6 @@
 
 void Assembler::fnclex() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   EMIT(0xE2);
 }
@@ -2119,7 +1808,6 @@
 
 void Assembler::sahf() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9E);
 }
 
@@ -2127,7 +1815,6 @@
 void Assembler::setcc(Condition cc, Register reg) {
   ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x90 | cc);
   EMIT(0xC0 | reg.code());
@@ -2137,7 +1824,6 @@
 void Assembler::cvttss2si(Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x2C);
@@ -2148,7 +1834,6 @@
 void Assembler::cvttsd2si(Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x2C);
@@ -2159,7 +1844,6 @@
 void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x2A);
@@ -2170,7 +1854,6 @@
 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x5A);
@@ -2181,7 +1864,6 @@
 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x5A);
@@ -2192,7 +1874,6 @@
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x58);
@@ -2203,7 +1884,6 @@
 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x59);
@@ -2214,7 +1894,6 @@
 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x5C);
@@ -2225,7 +1904,6 @@
 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x5E);
@@ -2236,7 +1914,6 @@
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x57);
@@ -2244,9 +1921,16 @@
 }
 
 
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x51);
@@ -2256,7 +1940,6 @@
 
 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x54);
@@ -2267,7 +1950,6 @@
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x2E);
@@ -2275,10 +1957,21 @@
 }
 
 
+void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x0B);
+  emit_sse_operand(dst, src);
+  // Mask precision exception.
+  EMIT(static_cast<byte>(mode) | 0x8);
+}
+
 void Assembler::movmskpd(Register dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x50);
@@ -2289,7 +1982,6 @@
 void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0xC2);
@@ -2301,7 +1993,6 @@
 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x28);
   emit_sse_operand(dst, src);
@@ -2311,7 +2002,6 @@
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x7F);
@@ -2322,7 +2012,6 @@
 void Assembler::movdqa(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x6F);
@@ -2333,7 +2022,6 @@
 void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x7F);
@@ -2344,7 +2032,6 @@
 void Assembler::movdqu(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x6F);
@@ -2355,7 +2042,6 @@
 void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x38);
@@ -2367,7 +2053,6 @@
 void Assembler::movntdq(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xE7);
@@ -2378,7 +2063,6 @@
 void Assembler::prefetch(const Operand& src, int level) {
   ASSERT(is_uint2(level));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x18);
   XMMRegister code = { level };  // Emit hint number in Reg position of RegR/M.
@@ -2388,14 +2072,12 @@
 
 void Assembler::movdbl(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   movsd(dst, src);
 }
 
 
 void Assembler::movdbl(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   movsd(dst, src);
 }
 
@@ -2403,7 +2085,6 @@
 void Assembler::movsd(const Operand& dst, XMMRegister src ) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);  // double
   EMIT(0x0F);
   EMIT(0x11);  // store
@@ -2414,7 +2095,6 @@
 void Assembler::movsd(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);  // double
   EMIT(0x0F);
   EMIT(0x10);  // load
@@ -2425,7 +2105,6 @@
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x10);
@@ -2436,7 +2115,6 @@
 void Assembler::movss(const Operand& dst, XMMRegister src ) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);  // float
   EMIT(0x0F);
   EMIT(0x11);  // store
@@ -2447,7 +2125,6 @@
 void Assembler::movss(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);  // float
   EMIT(0x0F);
   EMIT(0x10);  // load
@@ -2458,7 +2135,6 @@
 void Assembler::movss(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x10);
@@ -2469,7 +2145,6 @@
 void Assembler::movd(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x6E);
@@ -2480,7 +2155,6 @@
 void Assembler::movd(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x7E);
@@ -2491,7 +2165,6 @@
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xDB);
@@ -2502,7 +2175,6 @@
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xEF);
@@ -2513,7 +2185,6 @@
 void Assembler::por(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xEB);
@@ -2524,7 +2195,6 @@
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x38);
@@ -2536,7 +2206,6 @@
 void Assembler::psllq(XMMRegister reg, int8_t shift) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x73);
@@ -2548,7 +2217,6 @@
 void Assembler::psllq(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xF3);
@@ -2559,7 +2227,6 @@
 void Assembler::psrlq(XMMRegister reg, int8_t shift) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x73);
@@ -2571,7 +2238,6 @@
 void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xD3);
@@ -2582,7 +2248,6 @@
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x70);
@@ -2594,7 +2259,6 @@
 void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x3A);
@@ -2607,7 +2271,6 @@
 void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x3A);
@@ -2706,9 +2369,6 @@
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
-  if (last_pc_ != NULL) {
-    last_pc_ += pc_delta;
-  }
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 46fda3b..4698e3e 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -249,23 +249,6 @@
 }
 
 
-enum Hint {
-  no_hint = 0,
-  not_taken = 0x2e,
-  taken = 0x3e
-};
-
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition.  That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
-  return (hint == no_hint)
-      ? no_hint
-      : ((hint == not_taken) ? taken : not_taken);
-}
-
-
 // -----------------------------------------------------------------------------
 // Machine instruction Immediates
 
@@ -482,6 +465,7 @@
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
+
    public:
     explicit Scope(CpuFeature f) {
       uint64_t mask = static_cast<uint64_t>(1) << f;
@@ -501,10 +485,12 @@
         isolate_->set_enabled_cpu_features(old_enabled_);
       }
     }
+
    private:
     Isolate* isolate_;
     uint64_t old_enabled_;
 #else
+
    public:
     explicit Scope(CpuFeature f) {}
 #endif
@@ -676,6 +662,7 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
+  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -843,30 +830,30 @@
   // but it may be bound only once.
 
   void bind(Label* L);  // binds an unbound label L to the current code position
-  void bind(NearLabel* L);
 
   // Calls
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
+  int CallSize(const Operand& adr);
   void call(const Operand& adr);
-  void call(Handle<Code> code, RelocInfo::Mode rmode);
+  int CallSize(Handle<Code> code, RelocInfo::Mode mode);
+  void call(Handle<Code> code,
+            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            unsigned ast_id = kNoASTId);
 
   // Jumps
-  void jmp(Label* L);  // unconditional jump to L
+  // unconditional jump to L
+  void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
-  // Short jump
-  void jmp(NearLabel* L);
-
   // Conditional jumps
-  void j(Condition cc, Label* L, Hint hint = no_hint);
-  void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
-  void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
-
-  // Conditional short jump
-  void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+  void j(Condition cc,
+         Label* L,
+         Label::Distance distance = Label::kFar);
+  void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+  void j(Condition cc, Handle<Code> code);
 
   // Floating-point operations
   void fld(int i);
@@ -951,11 +938,22 @@
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void andpd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
+
+  enum RoundingMode {
+    kRoundToNearest = 0x0,
+    kRoundDown      = 0x1,
+    kRoundUp        = 0x2,
+    kRoundToZero    = 0x3
+  };
+
+  void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
   void movmskpd(Register dst, XMMRegister src);
 
   void cmpltsd(XMMRegister dst, XMMRegister src);
@@ -1005,7 +1003,9 @@
   void Print();
 
   // Check the code size generated from label to here.
-  int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+  int SizeOfCodeGeneratedSince(Label* label) {
+    return pc_offset() - label->pos();
+  }
 
   // Mark address of the ExitJSFrame code.
   void RecordJSReturn();
@@ -1071,7 +1071,9 @@
   void GrowBuffer();
   inline void emit(uint32_t x);
   inline void emit(Handle<Object> handle);
-  inline void emit(uint32_t x, RelocInfo::Mode rmode);
+  inline void emit(uint32_t x,
+                   RelocInfo::Mode rmode,
+                   unsigned ast_id = kNoASTId);
   inline void emit(const Immediate& x);
   inline void emit_w(const Immediate& x);
 
@@ -1099,6 +1101,7 @@
   inline Displacement disp_at(Label* L);
   inline void disp_at_put(Label* L, Displacement disp);
   inline void emit_disp(Label* L, Displacement::Type type);
+  inline void emit_near_disp(Label* L);
 
   // record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -1117,9 +1120,6 @@
   byte* pc_;  // the program counter; moves forward
   RelocInfoWriter reloc_info_writer;
 
-  // push-pop elimination
-  byte* last_pc_;
-
   PositionsRecorder positions_recorder_;
 
   bool emit_debug_code_;
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 29c67b5..310ea3d 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -82,8 +82,7 @@
 
   Label non_function_call;
   // Check that function is not a smi.
-  __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &non_function_call);
+  __ JumpIfSmi(edi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
   __ j(not_equal, &non_function_call);
@@ -102,6 +101,7 @@
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   Handle<Code> arguments_adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+  __ SetCallKind(ecx, CALL_AS_METHOD);
   __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
 }
 
@@ -139,8 +139,7 @@
     // edi: constructor
     __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &rt_call);
+    __ JumpIfSmi(eax, &rt_call);
     // edi: constructor
     // eax: initial map (if proven valid below)
     __ CmpObjectType(eax, MAP_TYPE, ebx);
@@ -339,11 +338,12 @@
     Handle<Code> code =
         masm->isolate()->builtins()->HandleApiCallConstruct();
     ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
   } else {
     ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Restore context from the frame.
@@ -355,13 +355,12 @@
   Label use_receiver, exit;
 
   // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &use_receiver, not_taken);
+  __ JumpIfSmi(eax, &use_receiver);
 
   // If the type of the result (stored in its map) is less than
-  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-  __ j(above_equal, &exit, not_taken);
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+  __ j(above_equal, &exit);
 
   // Throw away the result of the constructor invocation and use the
   // on-stack receiver as the result.
@@ -374,7 +373,7 @@
   __ LeaveConstructFrame();
 
   // Remove caller arguments from the stack and return.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
   __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
   __ push(ecx);
@@ -442,7 +441,8 @@
             RelocInfo::CODE_TARGET);
   } else {
     ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Exit the JS frame. Notice that this also removes the empty
@@ -467,19 +467,25 @@
   // Enter an internal frame.
   __ EnterInternalFrame();
 
-  // Push a copy of the function onto the stack.
+  // Push a copy of the function.
   __ push(edi);
+  // Push call kind information.
+  __ push(ecx);
 
   __ push(edi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyCompile, 1);
+
+  // Restore call kind information.
+  __ pop(ecx);
+  // Restore function.
   __ pop(edi);
 
   // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(ecx));
+  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(eax));
 }
 
 
@@ -489,17 +495,23 @@
 
   // Push a copy of the function onto the stack.
   __ push(edi);
+  // Push call kind information.
+  __ push(ecx);
 
   __ push(edi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore function and tear down temporary frame.
+  // Restore call kind information.
+  __ pop(ecx);
+  // Restore function.
   __ pop(edi);
+
+  // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(ecx));
+  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(eax));
 }
 
 
@@ -520,15 +532,15 @@
   __ SmiUntag(ecx);
 
   // Switch on the state.
-  NearLabel not_no_registers, not_tos_eax;
+  Label not_no_registers, not_tos_eax;
   __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
-  __ j(not_equal, &not_no_registers);
+  __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   __ cmp(ecx, FullCodeGenerator::TOS_REG);
-  __ j(not_equal, &not_tos_eax);
+  __ j(not_equal, &not_tos_eax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, eax.
 
   __ bind(&not_tos_eax);
@@ -568,7 +580,7 @@
   // 1. Make sure we have at least one argument.
   { Label done;
     __ test(eax, Operand(eax));
-    __ j(not_zero, &done, taken);
+    __ j(not_zero, &done);
     __ pop(ebx);
     __ push(Immediate(factory->undefined_value()));
     __ push(ebx);
@@ -578,17 +590,17 @@
 
   // 2. Get the function to call (passed as receiver) from the stack, check
   //    if it is a function.
-  Label non_function;
+  Label slow, non_function;
   // 1 ~ return address.
   __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
-  __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &non_function, not_taken);
+  __ JumpIfSmi(edi, &non_function);
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &non_function, not_taken);
+  __ j(not_equal, &slow);
 
 
   // 3a. Patch the first argument if necessary when calling a function.
   Label shift_arguments;
+  __ Set(edx, Immediate(0));  // indicate regular JS_FUNCTION
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -599,22 +611,24 @@
               1 << SharedFunctionInfo::kStrictModeBitWithinByte);
     __ j(not_equal, &shift_arguments);
 
+    // Do not transform the receiver for natives (shared already in ebx).
+    __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
+              1 << SharedFunctionInfo::kNativeBitWithinByte);
+    __ j(not_equal, &shift_arguments);
+
     // Compute the receiver in non-strict mode.
     __ mov(ebx, Operand(esp, eax, times_4, 0));  // First argument.
-    __ test(ebx, Immediate(kSmiTagMask));
-    __ j(zero, &convert_to_object);
 
+    // Call ToObject on the receiver if it is not an object, or use the
+    // global object if it is null or undefined.
+    __ JumpIfSmi(ebx, &convert_to_object);
     __ cmp(ebx, factory->null_value());
     __ j(equal, &use_global_receiver);
     __ cmp(ebx, factory->undefined_value());
     __ j(equal, &use_global_receiver);
-
-    // We don't use IsObjectJSObjectType here because we jump on success.
-    __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-    __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
-    __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-    __ j(below_equal, &shift_arguments);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
     __ EnterInternalFrame();  // In order to preserve argument count.
@@ -624,6 +638,7 @@
     __ push(ebx);
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
     __ mov(ebx, eax);
+    __ Set(edx, Immediate(0));  // restore the regular JS_FUNCTION marker
 
     __ pop(eax);
     __ SmiUntag(eax);
@@ -648,14 +663,19 @@
     __ jmp(&shift_arguments);
   }
 
-  // 3b. Patch the first argument when calling a non-function.  The
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ Set(edx, Immediate(1));  // indicate function proxy
+  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+  __ j(equal, &shift_arguments);
+  __ bind(&non_function);
+  __ Set(edx, Immediate(2));  // indicate non-function
+
+  // 3c. Patch the first argument when calling a non-function.  The
   //     CALL_NON_FUNCTION builtin expects the non-function callee as
   //     receiver, so overwrite the first argument which will ultimately
   //     become the receiver.
-  __ bind(&non_function);
   __ mov(Operand(esp, eax, times_4, 0), edi);
-  // Clear edi to indicate a non-function being called.
-  __ Set(edi, Immediate(0));
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
@@ -672,11 +692,25 @@
     __ dec(eax);  // One fewer argument (first argument is new receiver).
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
-  { Label function;
-    __ test(edi, Operand(edi));
-    __ j(not_zero, &function, taken);
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
+  { Label function, non_proxy;
+    __ test(edx, Operand(edx));
+    __ j(zero, &function);
     __ Set(ebx, Immediate(0));
+    __ SetCallKind(ecx, CALL_AS_METHOD);
+    __ cmp(Operand(edx), Immediate(1));
+    __ j(not_equal, &non_proxy);
+
+    __ pop(edx);   // return address
+    __ push(edi);  // re-add proxy object as additional argument
+    __ push(edx);
+    __ inc(eax);
+    __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+    __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+           RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
     __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
@@ -691,23 +725,29 @@
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ SmiUntag(ebx);
+  __ SetCallKind(ecx, CALL_AS_METHOD);
   __ cmp(eax, Operand(ebx));
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
 
   ParameterCount expected(0);
-  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
+  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
 void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  static const int kArgumentsOffset = 2 * kPointerSize;
+  static const int kReceiverOffset = 3 * kPointerSize;
+  static const int kFunctionOffset = 4 * kPointerSize;
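(A hedged note, not part of the patch: once EnterInternalFrame below has set up
ebp, these offsets correspond to the stack layout
  [ebp + 1 * kPointerSize]  return address
  [ebp + 2 * kPointerSize]  arguments array       (kArgumentsOffset)
  [ebp + 3 * kPointerSize]  receiver / thisArg    (kReceiverOffset)
  [ebp + 4 * kPointerSize]  the applied function  (kFunctionOffset)
with the internal frame's own bookkeeping living below ebp.)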
+
   __ EnterInternalFrame();
 
-  __ push(Operand(ebp, 4 * kPointerSize));  // push this
-  __ push(Operand(ebp, 2 * kPointerSize));  // push arguments
+  __ push(Operand(ebp, kFunctionOffset));  // push this
+  __ push(Operand(ebp, kArgumentsOffset));  // push arguments
   __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying need to catch
+  // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
@@ -724,7 +764,7 @@
   __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
   // Check if the arguments will overflow the stack.
   __ cmp(ecx, Operand(edx));
-  __ j(greater, &okay, taken);  // Signed comparison.
+  __ j(greater, &okay);  // Signed comparison.
 
   // Out of stack space.
   __ push(Operand(ebp, 4 * kPointerSize));  // push this
@@ -740,40 +780,45 @@
   __ push(eax);  // limit
   __ push(Immediate(0));  // index
 
-  // Change context eagerly to get the right global object if
-  // necessary.
-  __ mov(edi, Operand(ebp, 4 * kPointerSize));
+  // Get the receiver.
+  __ mov(ebx, Operand(ebp, kReceiverOffset));
+
+  // Check that the function is a JS function (otherwise it must be a proxy).
+  Label push_receiver;
+  __ mov(edi, Operand(ebp, kFunctionOffset));
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &push_receiver);
+
+  // Change context eagerly to get the right global object if necessary.
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Compute the receiver.
-  Label call_to_object, use_global_receiver, push_receiver;
-  __ mov(ebx, Operand(ebp, 3 * kPointerSize));
-
   // Do not transform the receiver for strict mode functions.
+  Label call_to_object, use_global_receiver;
   __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
             1 << SharedFunctionInfo::kStrictModeBitWithinByte);
   __ j(not_equal, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  __ test(ebx, Immediate(kSmiTagMask));
-  __ j(zero, &call_to_object);
   Factory* factory = masm->isolate()->factory();
+
+  // Do not transform the receiver for natives (shared already in ecx).
+  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+            1 << SharedFunctionInfo::kNativeBitWithinByte);
+  __ j(not_equal, &push_receiver);
+
+  // Compute the receiver in non-strict mode.
+  // Call ToObject on the receiver if it is not an object, or use the
+  // global object if it is null or undefined.
+  __ JumpIfSmi(ebx, &call_to_object);
   __ cmp(ebx, factory->null_value());
   __ j(equal, &use_global_receiver);
   __ cmp(ebx, factory->undefined_value());
   __ j(equal, &use_global_receiver);
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+  __ j(above_equal, &push_receiver);
 
-  // If given receiver is already a JavaScript object then there's no
-  // reason for converting it.
-  // We don't use IsObjectJSObjectType here because we jump on success.
-  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
-  __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-  __ j(below_equal, &push_receiver);
-
-  // Convert the receiver to an object.
   __ bind(&call_to_object);
   __ push(ebx);
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -798,7 +843,7 @@
   __ mov(eax, Operand(ebp, kIndexOffset));
   __ jmp(&entry);
   __ bind(&loop);
-  __ mov(edx, Operand(ebp, 2 * kPointerSize));  // load arguments
+  __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
 
   // Use inline caching to speed up access to arguments.
   Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
@@ -821,10 +866,27 @@
   __ j(not_equal, &loop);
 
   // Invoke the function.
+  Label call_proxy;
   ParameterCount actual(eax);
   __ SmiUntag(eax);
-  __ mov(edi, Operand(ebp, 4 * kPointerSize));
-  __ InvokeFunction(edi, actual, CALL_FUNCTION);
+  __ mov(edi, Operand(ebp, kFunctionOffset));
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &call_proxy);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
+
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+
+  // Invoke the function proxy.
+  __ bind(&call_proxy);
+  __ push(edi);  // add function proxy as last argument
+  __ inc(eax);
+  __ Set(ebx, Immediate(0));
+  __ SetCallKind(ecx, CALL_AS_METHOD);
+  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
 
   __ LeaveInternalFrame();
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
@@ -906,7 +968,7 @@
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
-  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+  STATIC_ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
   if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
@@ -958,7 +1020,7 @@
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
                         times_half_pointer_size,  // array_size is a smi.
                         array_size,
@@ -1083,7 +1145,7 @@
   __ bind(&argc_one_or_more);
   __ cmp(eax, 1);
   __ j(not_equal, &argc_two_or_more);
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
   __ test(ecx, Operand(ecx));
   __ j(not_zero, &not_empty_array);
@@ -1138,7 +1200,7 @@
 
   // Handle construction of an array from a list of arguments.
   __ bind(&argc_two_or_more);
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ SmiTag(eax);  // Convert argc to a smi.
   // eax: array_size (smi)
   // edi: constructor
@@ -1367,8 +1429,7 @@
   Label convert_argument;
   __ bind(&not_cached);
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &convert_argument);
+  __ JumpIfSmi(eax, &convert_argument);
   Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
   __ j(NegateCondition(is_string), &convert_argument);
   __ mov(ebx, eax);
@@ -1418,12 +1479,12 @@
   // Push the function on the stack.
   __ push(edi);
 
-  // Preserve the number of arguments on the stack. Must preserve both
-  // eax and ebx because these registers are used when copying the
+  // Preserve the number of arguments on the stack. Must preserve eax,
+  // ebx and ecx because these registers are used when copying the
   // arguments and the receiver.
-  ASSERT(kSmiTagSize == 1);
-  __ lea(ecx, Operand(eax, eax, times_1, kSmiTag));
-  __ push(ecx);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
+  __ push(edi);
 }
 
 
@@ -1435,7 +1496,7 @@
   __ leave();
 
   // Remove caller arguments from the stack.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
   __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
   __ push(ecx);
@@ -1446,6 +1507,7 @@
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
   //  -- ebx : expected number of arguments
+  //  -- ecx : call kind information
   //  -- edx : code entry to call
   // -----------------------------------
 
@@ -1465,14 +1527,14 @@
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(eax, Operand(ebp, eax, times_4, offset));
-    __ mov(ecx, -1);  // account for receiver
+    __ mov(edi, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
-    __ inc(ecx);
+    __ inc(edi);
     __ push(Operand(eax, 0));
     __ sub(Operand(eax), Immediate(kPointerSize));
-    __ cmp(ecx, Operand(ebx));
+    __ cmp(edi, Operand(ebx));
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1484,30 +1546,33 @@
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(edi, Operand(ebp, eax, times_4, offset));
-    __ mov(ecx, -1);  // account for receiver
+    // ebx = expected - actual.
+    __ sub(ebx, Operand(eax));
+    // eax = -actual - 1
+    __ neg(eax);
+    __ sub(Operand(eax), Immediate(1));
 
     Label copy;
     __ bind(&copy);
-    __ inc(ecx);
+    __ inc(eax);
     __ push(Operand(edi, 0));
     __ sub(Operand(edi), Immediate(kPointerSize));
-    __ cmp(ecx, Operand(eax));
-    __ j(less, &copy);
+    __ test(eax, Operand(eax));
+    __ j(not_zero, &copy);
 
     // Fill remaining expected arguments with undefined values.
     Label fill;
     __ bind(&fill);
-    __ inc(ecx);
+    __ inc(eax);
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
-    __ cmp(ecx, Operand(ebx));
+    __ cmp(eax, Operand(ebx));
     __ j(less, &fill);
-
-    // Restore function pointer.
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   }
 
   // Call the entry point.
   __ bind(&invoke);
+  // Restore function pointer.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ call(Operand(edx));
 
   // Leave frame and return.
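A worked trace of the copy/fill loops above, as a standalone simulation under
assumed semantics (not V8 code): with actual = 1 and expected = 3 the copy loop
pushes the receiver plus the one actual argument, and the fill loop pads with
two undefined values, leaving expected + 1 slots before the code entry is
called.

  #include <cassert>
  #include <string>
  #include <vector>
  int main() {
    const int actual = 1, expected = 3;
    std::vector<std::string> frame;
    const int ebx = expected - actual;         // ebx = expected - actual
    int eax = -actual - 1;                     // eax = -actual - 1
    do {                                       // copy loop: receiver + actual args
      ++eax;
      frame.push_back("receiver-or-arg");
    } while (eax != 0);
    do {                                       // fill loop: pad with undefined
      ++eax;
      frame.push_back("undefined");
    } while (eax < ebx);
    assert(static_cast<int>(frame.size()) == expected + 1);
  }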
@@ -1558,19 +1623,19 @@
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
-  NearLabel skip;
+  Label skip;
   __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
-  __ j(not_equal, &skip);
+  __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
   // If we decide not to perform on-stack replacement we perform a
   // stack guard check to enable interrupts.
   __ bind(&stack_check);
-  NearLabel ok;
+  Label ok;
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(masm->isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &ok, taken);
+  __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ TailCallStub(&stub);
   __ Abort("Unreachable code: returned from tail call.");
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 5d32095..8a5bd50 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -29,10 +29,10 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "code-stubs.h"
 #include "bootstrapper.h"
-#include "jsregexp.h"
+#include "code-stubs.h"
 #include "isolate.h"
+#include "jsregexp.h"
 #include "regexp-macro-assembler.h"
 
 namespace v8 {
@@ -42,16 +42,15 @@
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in eax.
-  NearLabel check_heap_number, call_builtin;
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_heap_number);
+  Label check_heap_number, call_builtin;
+  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
   __ ret(0);
 
   __ bind(&check_heap_number);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
   __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
-  __ j(not_equal, &call_builtin);
+  __ j(not_equal, &call_builtin, Label::kNear);
   __ ret(0);
 
   __ bind(&call_builtin);
@@ -129,22 +128,19 @@
 
   // Setup the object header.
   Factory* factory = masm->isolate()->factory();
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->context_map());
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+         factory->function_context_map());
   __ mov(FieldOperand(eax, Context::kLengthOffset),
          Immediate(Smi::FromInt(length)));
 
   // Setup the fixed slots.
   __ Set(ebx, Immediate(0));  // Set to NULL.
   __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
-  __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
-  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
   __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
 
-  // Copy the global object from the surrounding context. We go through the
-  // context in the function (ecx) to match the allocation behavior we have
-  // in the runtime system (see Heap::AllocateFunctionContext).
-  __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
-  __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Copy the global object from the previous context.
+  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
 
   // Initialize the rest of the slots to undefined.
@@ -159,7 +155,7 @@
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
-  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
 }
 
 
@@ -240,60 +236,146 @@
 }
 
 
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub expects its argument on the stack and returns its result in tos_:
+// zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  NearLabel false_result, true_result, not_string;
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
-
-  // 'null' => false.
+  Label patch;
   Factory* factory = masm->isolate()->factory();
-  __ cmp(eax, factory->null_value());
-  __ j(equal, &false_result);
+  const Register argument = eax;
+  const Register map = edx;
 
-  // Get the map and type of the heap object.
-  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  if (!types_.IsEmpty()) {
+    __ mov(argument, Operand(esp, 1 * kPointerSize));
+  }
 
-  // Undetectable => false.
-  __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
-  __ j(not_zero, &false_result);
+  // undefined -> false
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
 
-  // JavaScript object => true.
-  __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
-  __ j(above_equal, &true_result);
+  // Boolean -> its value
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
 
-  // String value => false iff empty.
-  __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
-  __ j(above_equal, &not_string);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
-  __ j(zero, &false_result);
-  __ jmp(&true_result);
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
 
-  __ bind(&not_string);
-  // HeapNumber => false iff +0, -0, or NaN.
-  __ cmp(edx, factory->heap_number_map());
-  __ j(not_equal, &true_result);
-  __ fldz();
-  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ FCmp();
-  __ j(zero, &false_result);
-  // Fall through to |true_result|.
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all others -> true
+    Label not_smi;
+    __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
+    // argument contains the correct return value already.
+    if (!tos_.is(argument)) {
+      __ mov(tos_, argument);
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&not_smi);
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(argument, &patch, Label::kNear);
+  }
 
-  // Return 1/0 for true/false in eax.
-  __ bind(&true_result);
-  __ mov(eax, 1);
-  __ ret(1 * kPointerSize);
-  __ bind(&false_result);
-  __ mov(eax, 0);
-  __ ret(1 * kPointerSize);
+  if (types_.NeedsMap()) {
+    __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
+
+    if (types_.CanBeUndetectable()) {
+      __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+                1 << Map::kIsUndetectable);
+      // Undetectable -> false.
+      Label not_undetectable;
+      __ j(zero, &not_undetectable, Label::kNear);
+      __ Set(tos_, Immediate(0));
+      __ ret(1 * kPointerSize);
+      __ bind(&not_undetectable);
+    }
+  }
+
+  if (types_.Contains(SPEC_OBJECT)) {
+    // spec object -> true.
+    Label not_js_object;
+    __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+    __ j(below, &not_js_object, Label::kNear);
+    // argument contains the correct return value already.
+    if (!tos_.is(argument)) {
+      __ Set(tos_, Immediate(1));
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&not_js_object);
+  }
+
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    Label not_string;
+    __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+    __ j(above_equal, &not_string, Label::kNear);
+    __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
+    __ ret(1 * kPointerSize);  // the string length is OK as the return value
+    __ bind(&not_string);
+  }
+
+  if (types_.Contains(HEAP_NUMBER)) {
+    // heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number, false_result;
+    __ cmp(map, factory->heap_number_map());
+    __ j(not_equal, &not_heap_number, Label::kNear);
+    __ fldz();
+    __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
+    __ FCmp();
+    __ j(zero, &false_result, Label::kNear);
+    // argument contains the correct return value already.
+    if (!tos_.is(argument)) {
+      __ Set(tos_, Immediate(1));
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&false_result);
+    __ Set(tos_, Immediate(0));
+    __ ret(1 * kPointerSize);
+    __ bind(&not_heap_number);
+  }
+
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
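For reference, the truth table this specialized stub encodes follows ECMAScript
ToBoolean; a plain C++ sketch of the interesting cases (illustrative only, the
helper names are ours, not V8 code):

  #include <cassert>
  #include <cmath>
  #include <string>
  static bool ToBooleanNumber(double d) { return d != 0.0 && !std::isnan(d); }
  static bool ToBooleanString(const std::string& s) { return !s.empty(); }
  int main() {
    assert(!ToBooleanNumber(0.0) && !ToBooleanNumber(-0.0));   // +0, -0 -> false
    assert(!ToBooleanNumber(NAN) && ToBooleanNumber(1.5));     // NaN -> false
    assert(!ToBooleanString("") && ToBooleanString("x"));      // empty -> false
    // undefined, null, false -> false; true, smis other than 0, and
    // non-undetectable spec objects -> true.
  }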
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  const Register argument = eax;
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value in tos_.
+    Label different_value;
+    __ CompareRoot(argument, value);
+    __ j(not_equal, &different_value, Label::kNear);
+    if (!result) {
+      // If we have to return zero, there is no way around clearing tos_.
+      __ Set(tos_, Immediate(0));
+    } else if (!tos_.is(argument)) {
+      // If we have to return non-zero, we can re-use the argument if it is the
+      // same register as the result, because we never see Smi-zero here.
+      __ Set(tos_, Immediate(1));
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&different_value);
+  }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(ecx);  // Get return address, operand is now on top of stack.
+  __ push(Immediate(Smi::FromInt(tos_.code())));
+  __ push(Immediate(Smi::FromInt(types_.ToByte())));
+  __ push(ecx);  // Push return address.
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
 }
 
 
 class FloatingPointHelper : public AllStatic {
  public:
-
   enum ArgLocation {
     ARGS_ON_STACK,
     ARGS_IN_REGISTERS
@@ -331,14 +413,6 @@
 
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
-  static void LoadAsIntegers(MacroAssembler* masm,
-                             TypeInfo type_info,
-                             bool use_sse3,
-                             Label* operand_conversion_failure);
-  static void LoadNumbersAsIntegers(MacroAssembler* masm,
-                                    TypeInfo type_info,
-                                    bool use_sse3,
-                                    Label* operand_conversion_failure);
   static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                      bool use_sse3,
                                      Label* operand_conversion_failure);
@@ -374,15 +448,459 @@
 };
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
-  return stub.GetCode();
+// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
+// trashed registers.
+static void IntegerConvert(MacroAssembler* masm,
+                           Register source,
+                           bool use_sse3,
+                           Label* conversion_failure) {
+  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
+  Label done, right_exponent, normal_exponent;
+  Register scratch = ebx;
+  Register scratch2 = edi;
+  // Get exponent word.
+  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+  // Get exponent alone in scratch2.
+  __ mov(scratch2, scratch);
+  __ and_(scratch2, HeapNumber::kExponentMask);
+  if (use_sse3) {
+    CpuFeatures::Scope scope(SSE3);
+    // Check whether the exponent is too big for a 64 bit signed integer.
+    static const uint32_t kTooBigExponent =
+        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+    __ j(greater_equal, conversion_failure);
+    // Load x87 register with heap number.
+    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+    // Reserve space for 64 bit answer.
+    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    // Do conversion, which cannot fail because we checked the exponent.
+    __ fisttp_d(Operand(esp, 0));
+    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
+    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+  } else {
+    // Load ecx with zero.  We use this either for the final shift or
+    // for the answer.
+    __ xor_(ecx, Operand(ecx));
+    // Check whether the exponent matches a 32 bit signed int that cannot be
+    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
+    // exponent is 30 (biased).  This is the exponent that we are fastest at and
+    // also the highest exponent we can handle here.
+    const uint32_t non_smi_exponent =
+        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+    // If we have a match of the int32-but-not-Smi exponent then skip some
+    // logic.
+    __ j(equal, &right_exponent, Label::kNear);
+    // If the exponent is higher than that then go to slow case.  This catches
+    // numbers that don't fit in a signed int32, infinities and NaNs.
+    __ j(less, &normal_exponent, Label::kNear);
+
+    {
+      // Handle a big exponent.  The only reason we have this code is that the
+      // >>> operator has a tendency to generate numbers with an exponent of 31.
+      const uint32_t big_non_smi_exponent =
+          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+      __ j(not_equal, conversion_failure);
+      // We have the big exponent, typically from >>>.  This means the number is
+      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
+      __ mov(scratch2, scratch);
+      __ and_(scratch2, HeapNumber::kMantissaMask);
+      // Put back the implicit 1.
+      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+      // Shift up the mantissa bits to take up the space the exponent used to
+      // take. We just OR'ed in the implicit bit, so that took care of one,
+      // and we want to use the full unsigned range, so we subtract 1 bit from
+      // the shift distance.
+      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+      __ shl(scratch2, big_shift_distance);
+      // Get the second half of the double.
+      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+      // Shift down 21 bits to get the most significant 11 bits of the low
+      // mantissa word.
+      __ shr(ecx, 32 - big_shift_distance);
+      __ or_(ecx, Operand(scratch2));
+      // We have the answer in ecx, but we may need to negate it.
+      __ test(scratch, Operand(scratch));
+      __ j(positive, &done, Label::kNear);
+      __ neg(ecx);
+      __ jmp(&done, Label::kNear);
+    }
+
+    __ bind(&normal_exponent);
+    // Exponent word in scratch, exponent part of exponent word in scratch2.
+    // Zero in ecx.
+    // We know the exponent is smaller than 30 (biased).  If it is less than
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0,
+    // i.e. it rounds to zero.
+    const uint32_t zero_exponent =
+        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+    __ sub(Operand(scratch2), Immediate(zero_exponent));
+    // ecx already has a Smi zero.
+    __ j(less, &done, Label::kNear);
+
+    // We have a shifted exponent between 0 and 30 in scratch2.
+    __ shr(scratch2, HeapNumber::kExponentShift);
+    __ mov(ecx, Immediate(30));
+    __ sub(ecx, Operand(scratch2));
+
+    __ bind(&right_exponent);
+    // Here ecx is the shift, scratch is the exponent word.
+    // Get the top bits of the mantissa.
+    __ and_(scratch, HeapNumber::kMantissaMask);
+    // Put back the implicit 1.
+    __ or_(scratch, 1 << HeapNumber::kExponentShift);
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We have kExponentShift + 1 significant bits in the low end of the
+    // word.  Shift them to the top bits.
+    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+    __ shl(scratch, shift_distance);
+    // Get the second half of the double. For some exponents we don't
+    // actually need this because the bits get shifted out again, but
+    // it's probably slower to test than just to do it.
+    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the most significant 10 bits of the low
+    // mantissa word.
+    __ shr(scratch2, 32 - shift_distance);
+    __ or_(scratch2, Operand(scratch));
+    // Move down according to the exponent.
+    __ shr_cl(scratch2);
+    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
+    // we may need to fix the sign.
+    Label negative;
+    __ xor_(ecx, Operand(ecx));
+    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+    __ j(greater, &negative, Label::kNear);
+    __ mov(ecx, scratch2);
+    __ jmp(&done, Label::kNear);
+    __ bind(&negative);
+    __ sub(ecx, Operand(scratch2));
+    __ bind(&done);
+  }
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+void UnaryOpStub::PrintName(StringStream* stream) {
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+  stream->Add("UnaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              UnaryOpIC::GetName(operand_type_));
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+
+  __ push(eax);  // the operand
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(mode_)));
+  __ push(Immediate(Smi::FromInt(operand_type_)));
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, undo, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
+                     Label::kNear, Label::kNear, Label::kNear);
+  __ bind(&undo);
+  GenerateSmiCodeUndo(masm);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* undo,
+                                     Label* slow,
+                                     Label::Distance non_smi_near,
+                                     Label::Distance undo_near,
+                                     Label::Distance slow_near) {
+  // Check whether the value is a smi.
+  __ JumpIfNotSmi(eax, non_smi, non_smi_near);
+
+  // We can't handle -0 with smis, so use a type transition for that case.
+  __ test(eax, Operand(eax));
+  __ j(zero, slow, slow_near);
+
+  // Try optimistic subtraction '0 - value', saving operand in edx for undo.
+  __ mov(edx, Operand(eax));
+  __ Set(eax, Immediate(0));
+  __ sub(eax, Operand(edx));
+  __ j(overflow, undo, undo_near);
+  __ ret(0);
+}
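Why the smi fast path above needs the slow and undo exits (an illustrative
note, not part of the patch): negating smi zero would produce -0, which has no
smi encoding, and negating the most negative smi overflows the 31-bit payload
used on ia32.

  #include <cassert>
  #include <cstdint>
  int main() {
    const int32_t kSmiMin = -(1 << 30);            // ia32 smi payload is 31 bits
    const int32_t kSmiMax = (1 << 30) - 1;
    assert(-static_cast<int64_t>(kSmiMin) > kSmiMax);  // 2^30 is out of range
  }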
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(
+    MacroAssembler* masm,
+    Label* non_smi,
+    Label::Distance non_smi_near) {
+  // Check whether the value is a smi.
+  __ JumpIfNotSmi(eax, non_smi, non_smi_near);
+
+  // Flip bits and revert inverted smi-tag.
+  __ not_(eax);
+  __ and_(eax, ~kSmiTagMask);
+  __ ret(0);
+}
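A quick check of the BIT_NOT identity used above (illustrative): for the smi
encoding 2*v, two's complement gives ~(2*v) == -2*v - 1, and clearing the tag
bit yields -2*v - 2 == 2*(~v), i.e. the smi encoding of ~v.

  #include <cassert>
  #include <cstdint>
  int main() {
    for (int32_t v = -5; v <= 5; ++v) {
      int32_t smi = v * 2;              // smi encoding, tag bit 0
      int32_t result = (~smi) & ~1;     // not_ followed by and_ with ~kSmiTagMask
      assert(result == (~v) * 2);       // smi encoding of ~v
    }
  }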
+
+
+void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
+  __ mov(eax, Operand(edx));
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, undo, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&undo);
+  GenerateSmiCodeUndo(masm);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(
+    MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, slow);
+
+  if (mode_ == UNARY_OVERWRITE) {
+    __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
+            Immediate(HeapNumber::kSignMask));  // Flip sign.
+  } else {
+    __ mov(edx, Operand(eax));
+    // edx: operand
+
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated, Label::kNear);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(edx);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ pop(edx);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+    // eax: allocated 'empty' number
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+  }
+  __ ret(0);
+}
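The overwrite path above negates a heap number just by flipping the IEEE-754
sign bit in its exponent word; a standalone illustration (not V8 code):

  #include <cassert>
  #include <cstdint>
  #include <cstring>
  int main() {
    double d = 2.5;
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    bits ^= 1ULL << 63;                  // the sign bit (HeapNumber::kSignMask
                                         // applied to the upper word)
    std::memcpy(&d, &bits, sizeof(d));
    assert(d == -2.5);
  }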
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
+                                               Label* slow) {
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, slow);
+
+  // Convert the heap number in eax to an untagged integer in ecx.
+  IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ not_(ecx);
+  __ cmp(ecx, 0xc0000000);
+  __ j(sign, &try_float, Label::kNear);
+
+  // Tag the result as a smi and we're done.
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ lea(eax, Operand(ecx, times_2, kSmiTag));
+  __ ret(0);
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ mov(ebx, eax);
+    __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    // Push the original HeapNumber on the stack. The integer value can't
+    // be stored since it's untagged and not in the smi range (so we can't
+    // smi-tag it). We'll recalculate the value after the GC instead.
+    __ push(ebx);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    // New HeapNumber is in eax.
+    __ pop(edx);
+    __ LeaveInternalFrame();
+    // IntegerConvert uses ebx and edi as scratch registers.
+    // This conversion won't go slow-case.
+    IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
+    __ not_(ecx);
+
+    __ bind(&heapnumber_allocated);
+  }
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ cvtsi2sd(xmm0, Operand(ecx));
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ push(ecx);
+    __ fild_s(Operand(esp, 0));
+    __ pop(ecx);
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  }
+  __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, undo, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&undo);
+  GenerateSmiCodeUndo(masm);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+  // Handle the slow case by jumping to the corresponding JavaScript builtin.
+  __ pop(ecx);  // pop return address.
+  __ push(eax);
+  __ push(ecx);  // push return address
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ pop(ecx);  // Save return address.
   __ push(edx);
   __ push(eax);
@@ -398,7 +916,7 @@
   // Patch the caller to an appropriate specialized stub and return the
   // operation result to the caller of the stub.
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
@@ -407,8 +925,7 @@
 
 // Prepare for a type transition runtime call when the args are already on
 // the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
-    MacroAssembler* masm) {
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
   __ pop(ecx);  // Save return address.
   // Left and right arguments are already on top of the stack.
   // Push this stub's key. Although the operation and the type info are
@@ -422,34 +939,37 @@
   // Patch the caller to an appropriate specialized stub and return the
   // operation result to the caller of the stub.
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
   switch (operands_type_) {
-    case TRBinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
       break;
-    case TRBinaryOpIC::SMI:
+    case BinaryOpIC::SMI:
       GenerateSmiStub(masm);
       break;
-    case TRBinaryOpIC::INT32:
+    case BinaryOpIC::INT32:
       GenerateInt32Stub(masm);
       break;
-    case TRBinaryOpIC::HEAP_NUMBER:
+    case BinaryOpIC::HEAP_NUMBER:
       GenerateHeapNumberStub(masm);
       break;
-    case TRBinaryOpIC::ODDBALL:
+    case BinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
-    case TRBinaryOpIC::STRING:
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
-    case TRBinaryOpIC::GENERIC:
+    case BinaryOpIC::GENERIC:
       GenerateGeneric(masm);
       break;
     default:
@@ -458,12 +978,7 @@
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name;
   switch (mode_) {
@@ -472,17 +987,15 @@
     case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
     default: overwrite_name = "UnknownOverwrite"; break;
   }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "TypeRecordingBinaryOpStub_%s_%s_%s",
-               op_name,
-               overwrite_name,
-               TRBinaryOpIC::GetName(operands_type_));
-  return name_;
+  stream->Add("BinaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              BinaryOpIC::GetName(operands_type_));
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
   // 1. Move arguments into edx, eax except for DIV and MOD, which need the
@@ -541,8 +1054,7 @@
 
   // 3. Perform the smi check of the operands.
   STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
-  __ test(combined, Immediate(kSmiTagMask));
-  __ j(not_zero, &not_smis, not_taken);
+  __ JumpIfNotSmi(combined, &not_smis);
 
   // 4. Operands are both smis, perform the operation leaving the result in
   // eax and check the result if necessary.
@@ -571,7 +1083,7 @@
       __ shl_cl(left);
       // Check that the *signed* result fits in a smi.
       __ cmp(left, 0xc0000000);
-      __ j(sign, &use_fp_on_smis, not_taken);
+      __ j(sign, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(left);
       __ mov(eax, left);
@@ -601,7 +1113,7 @@
       // Smi tagging these two cases can only happen with shifts
       // by 0 or 1 when handed a valid smi.
       __ test(left, Immediate(0xc0000000));
-      __ j(not_zero, slow, not_taken);
+      __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(left);
       __ mov(eax, left);
@@ -610,12 +1122,12 @@
     case Token::ADD:
       ASSERT(right.is(eax));
       __ add(right, Operand(left));  // Addition is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ j(overflow, &use_fp_on_smis);
       break;
 
     case Token::SUB:
       __ sub(left, Operand(right));
-      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ j(overflow, &use_fp_on_smis);
       __ mov(eax, left);
       break;
 
@@ -629,7 +1141,7 @@
       __ SmiUntag(right);
       // Do multiplication.
       __ imul(right, Operand(left));  // Multiplication is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ j(overflow, &use_fp_on_smis);
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(right, combined, &use_fp_on_smis);
       break;
@@ -640,7 +1152,7 @@
       __ mov(edi, left);
       // Check for 0 divisor.
       __ test(right, Operand(right));
-      __ j(zero, &use_fp_on_smis, not_taken);
+      __ j(zero, &use_fp_on_smis);
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
       __ cdq();
@@ -664,7 +1176,7 @@
     case Token::MOD:
       // Check for 0 divisor.
       __ test(right, Operand(right));
-      __ j(zero, &not_smis, not_taken);
+      __ j(zero, &not_smis);
 
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
@@ -737,26 +1249,35 @@
   } else {
     ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
     switch (op_) {
-      case Token::SHL: {
+      case Token::SHL:
+      case Token::SHR: {
         Comment perform_float(masm, "-- Perform float operation on smis");
         __ bind(&use_fp_on_smis);
         // Result we want is in left == edx, so we can put the allocated heap
         // number in eax.
         __ AllocateHeapNumber(eax, ecx, ebx, slow);
         // Store the result in the HeapNumber and return.
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(left));
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        } else {
-          // It's OK to overwrite the right argument on the stack because we
-          // are about to return.
+        // It's OK to overwrite the arguments on the stack because we
+        // are about to return.
+        if (op_ == Token::SHR) {
           __ mov(Operand(esp, 1 * kPointerSize), left);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
+          __ fild_d(Operand(esp, 1 * kPointerSize));
           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        } else {
+          ASSERT_EQ(Token::SHL, op_);
+          if (CpuFeatures::IsSupported(SSE2)) {
+            CpuFeatures::Scope use_sse2(SSE2);
+            __ cvtsi2sd(xmm0, Operand(left));
+            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          } else {
+            __ mov(Operand(esp, 1 * kPointerSize), left);
+            __ fild_s(Operand(esp, 1 * kPointerSize));
+            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          }
         }
-      __ ret(2 * kPointerSize);
-      break;
+        __ ret(2 * kPointerSize);
+        break;
       }
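The reason SHR joins SHL in the heap-number path above but takes the fild_d
route (illustrative note): an unsigned shift can produce values up to 2^32 - 1,
which fit neither a smi nor a signed 32-bit load, so the result is widened to
64 bits with a zero high word before being stored as a double.

  #include <cassert>
  #include <cstdint>
  int main() {
    uint32_t shifted = 0xFFFFFFFFu >> 0;                  // e.g. (-1 >>> 0) in JS
    assert(shifted > static_cast<uint32_t>(INT32_MAX));   // not a valid int32/smi
    double stored = static_cast<double>(static_cast<uint64_t>(shifted));
    assert(stored == 4294967295.0);
  }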
 
       case Token::ADD:
@@ -848,7 +1369,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label call_runtime;
 
   switch (op_) {
@@ -870,8 +1391,8 @@
       UNREACHABLE();
   }
 
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
     GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
   } else {
     GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -899,19 +1420,49 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   // Try to add arguments as strings, otherwise, transition to the generic
-  // TRBinaryOpIC type.
+  // BinaryOpIC type.
   GenerateAddStrings(masm);
   GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
   Label call_runtime;
-  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime, Label::kNear);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime, Label::kNear);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime, Label::kNear);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime, Label::kNear);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::INT32);
 
   // Floating point case.
   switch (op_) {
@@ -933,7 +1484,7 @@
           default: UNREACHABLE();
         }
         // Check result type if it is currently Int32.
-        if (result_type_ <= TRBinaryOpIC::INT32) {
+        if (result_type_ <= BinaryOpIC::INT32) {
           __ cvttsd2si(ecx, Operand(xmm0));
           __ cvtsi2sd(xmm2, Operand(ecx));
           __ ucomisd(xmm0, xmm2);
@@ -1012,7 +1563,7 @@
       } else {
         // Check if result fits in a smi.
         __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
+        __ j(negative, &non_smi_result, Label::kNear);
       }
       // Tag smi result and return.
       __ SmiTag(eax);
@@ -1024,7 +1575,7 @@
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ mov(ebx, Operand(eax));  // ebx: result
-        NearLabel skip_allocation;
+        Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -1032,8 +1583,7 @@
             // allocation of a heap number.
             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
+            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
             // Fall through!
           case NO_OVERWRITE:
             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1111,32 +1661,32 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
-  Label call_runtime;
-
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   if (op_ == Token::ADD) {
     // Handle string addition here, because it is the only operation
     // that does not do a ToNumber conversion on the operands.
     GenerateAddStrings(masm);
   }
 
+  Factory* factory = masm->isolate()->factory();
+
   // Convert odd ball arguments to numbers.
-  NearLabel check, done;
-  __ cmp(edx, FACTORY->undefined_value());
-  __ j(not_equal, &check);
+  Label check, done;
+  __ cmp(edx, factory->undefined_value());
+  __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(edx, Operand(edx));
   } else {
-    __ mov(edx, Immediate(FACTORY->nan_value()));
+    __ mov(edx, Immediate(factory->nan_value()));
   }
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&check);
-  __ cmp(eax, FACTORY->undefined_value());
-  __ j(not_equal, &done);
+  __ cmp(eax, factory->undefined_value());
+  __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(eax, Operand(eax));
   } else {
-    __ mov(eax, Immediate(FACTORY->nan_value()));
+    __ mov(eax, Immediate(factory->nan_value()));
   }
   __ bind(&done);
 
@@ -1144,7 +1694,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label call_runtime;
 
   // Floating point case.
@@ -1227,7 +1777,7 @@
       } else {
         // Check if result fits in a smi.
         __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
+        __ j(negative, &non_smi_result, Label::kNear);
       }
       // Tag smi result and return.
       __ SmiTag(eax);
@@ -1239,7 +1789,7 @@
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ mov(ebx, Operand(eax));  // ebx: result
-        NearLabel skip_allocation;
+        Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -1247,8 +1797,7 @@
             // allocation of a heap number.
             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
+            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
             // Fall through!
           case NO_OVERWRITE:
             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1325,7 +1874,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime;
 
   Counters* counters = masm->isolate()->counters();
@@ -1427,7 +1976,7 @@
       } else {
         // Check if result fits in a smi.
         __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
+        __ j(negative, &non_smi_result, Label::kNear);
       }
       // Tag smi result and return.
       __ SmiTag(eax);
@@ -1439,7 +1988,7 @@
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ mov(ebx, Operand(eax));  // ebx: result
-        NearLabel skip_allocation;
+        Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -1447,8 +1996,7 @@
               // allocation of a heap number.
             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
+            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
             // Fall through!
           case NO_OVERWRITE:
             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1522,19 +2070,18 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD);
-  NearLabel left_not_string, call_runtime;
+  Label left_not_string, call_runtime;
 
   // Registers containing left and right operands respectively.
   Register left = edx;
   Register right = eax;
 
   // Test if left operand is a string.
-  __ test(left, Immediate(kSmiTagMask));
-  __ j(zero, &left_not_string);
+  __ JumpIfSmi(left, &left_not_string, Label::kNear);
   __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &left_not_string);
+  __ j(above_equal, &left_not_string, Label::kNear);
 
   StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
   GenerateRegisterArgsPush(masm);
@@ -1542,10 +2089,9 @@
 
   // Left operand is not a string, test right.
   __ bind(&left_not_string);
-  __ test(right, Immediate(kSmiTagMask));
-  __ j(zero, &call_runtime);
+  __ JumpIfSmi(right, &call_runtime, Label::kNear);
   __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &call_runtime);
+  __ j(above_equal, &call_runtime, Label::kNear);
 
   StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
   GenerateRegisterArgsPush(masm);
@@ -1556,7 +2102,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+void BinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Label* alloc_failure) {
   Label skip_allocation;
@@ -1565,8 +2111,7 @@
     case OVERWRITE_LEFT: {
       // If the argument in edx is already an object, we skip the
       // allocation of a heap number.
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
+      __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
       // Allocate a heap number for the result. Keep eax and edx intact
       // for the possible runtime call.
       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
@@ -1581,8 +2126,7 @@
     case OVERWRITE_RIGHT:
       // If the argument in eax is already an object, we skip the
       // allocation of a heap number.
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
+      __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
       // Fall through!
     case NO_OVERWRITE:
       // Allocate a heap number for the result. Keep eax and edx intact
@@ -1598,7 +2142,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   __ pop(ecx);
   __ push(edx);
   __ push(eax);
@@ -1626,11 +2170,10 @@
   const bool tagged = (argument_type_ == TAGGED);
   if (tagged) {
     // Test that eax is a number.
-    NearLabel input_not_smi;
-    NearLabel loaded;
+    Label input_not_smi;
+    Label loaded;
     __ mov(eax, Operand(esp, kPointerSize));
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(not_zero, &input_not_smi);
+    __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
     // Input is a smi. Untag and load it onto the FPU stack.
     // Then load the low and high words of the double into ebx, edx.
     STATIC_ASSERT(kSmiTagSize == 1);
@@ -1641,7 +2184,7 @@
     __ fst_d(Operand(esp, 0));
     __ pop(edx);
     __ pop(ebx);
-    __ jmp(&loaded);
+    __ jmp(&loaded, Label::kNear);
     __ bind(&input_not_smi);
     // Check if input is a HeapNumber.
     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1715,11 +2258,11 @@
   __ lea(ecx, Operand(ecx, ecx, times_2, 0));
   __ lea(ecx, Operand(eax, ecx, times_4, 0));
   // Check if cache matches: Double value is stored in uint32_t[2] array.
-  NearLabel cache_miss;
+  Label cache_miss;
   __ cmp(ebx, Operand(ecx, 0));
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   __ cmp(edx, Operand(ecx, kIntSize));
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
   __ mov(eax, Operand(ecx, 2 * kIntSize));
   if (tagged) {
@@ -1817,7 +2360,7 @@
     // Both fsin and fcos require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos operation.
-    NearLabel in_range, done;
+    Label in_range, done;
     // If argument is outside the range -2^63..2^63, fsin/cos doesn't
     // work. We must reduce it to the appropriate range.
     __ mov(edi, edx);
@@ -1825,11 +2368,11 @@
     int supported_exponent_limit =
         (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
     __ cmp(Operand(edi), Immediate(supported_exponent_limit));
-    __ j(below, &in_range, taken);
+    __ j(below, &in_range, Label::kNear);
     // Check for infinity and NaN. Both return NaN for sin.
     __ cmp(Operand(edi), Immediate(0x7ff00000));
-    NearLabel non_nan_result;
-    __ j(not_equal, &non_nan_result, taken);
+    Label non_nan_result;
+    __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
     __ fstp(0);
     // NaN is represented by 0x7ff8000000000000.
@@ -1837,7 +2380,7 @@
     __ push(Immediate(0));
     __ fld_d(Operand(esp, 0));
     __ add(Operand(esp), Immediate(2 * kPointerSize));
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&non_nan_result);
 
@@ -1848,19 +2391,19 @@
     __ fld(1);
     // FPU Stack: input, 2*pi, input.
     {
-      NearLabel no_exceptions;
+      Label no_exceptions;
       __ fwait();
       __ fnstsw_ax();
       // Clear if Illegal Operand or Zero Division exceptions are set.
       __ test(Operand(eax), Immediate(5));
-      __ j(zero, &no_exceptions);
+      __ j(zero, &no_exceptions, Label::kNear);
       __ fnclex();
       __ bind(&no_exceptions);
     }
 
     // Compute st(0) % st(1)
     {
-      NearLabel partial_remainder_loop;
+      Label partial_remainder_loop;
       __ bind(&partial_remainder_loop);
       __ fprem1();
       __ fwait();
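
The loop above is the usual x87 idiom for bringing the argument into fsin/fcos range: fprem1 computes an IEEE-style partial remainder against the 2*pi on the FPU stack, and the loop repeats while the status word still signals an incomplete reduction. A rough high-level equivalent of the reduction step, as a sketch only (the stub keeps everything in extended precision and never leaves the FPU stack):

    #include <cmath>

    // Reduce x toward the nearest multiple of 2*pi, the same job the fprem1
    // loop performs with st(0) = x and st(1) = 2*pi. std::remainder is the
    // IEEE remainder, matching fprem1 rather than fmod/fprem.
    double ReduceTrigArgument(double x) {
      const double kTwoPi = 6.283185307179586;
      return std::remainder(x, kTwoPi);
    }
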
@@ -1897,203 +2440,6 @@
 }
 
 
-// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
-                    Register source,
-                    TypeInfo type_info,
-                    bool use_sse3,
-                    Label* conversion_failure) {
-  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
-  Label done, right_exponent, normal_exponent;
-  Register scratch = ebx;
-  Register scratch2 = edi;
-  if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
-    return;
-  }
-  if (!type_info.IsInteger32() || !use_sse3) {
-    // Get exponent word.
-    __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
-    // Get exponent alone in scratch2.
-    __ mov(scratch2, scratch);
-    __ and_(scratch2, HeapNumber::kExponentMask);
-  }
-  if (use_sse3) {
-    CpuFeatures::Scope scope(SSE3);
-    if (!type_info.IsInteger32()) {
-      // Check whether the exponent is too big for a 64 bit signed integer.
-      static const uint32_t kTooBigExponent =
-          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
-      __ j(greater_equal, conversion_failure);
-    }
-    // Load x87 register with heap number.
-    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
-    // Reserve space for 64 bit answer.
-    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
-    // Do conversion, which cannot fail because we checked the exponent.
-    __ fisttp_d(Operand(esp, 0));
-    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
-  } else {
-    // Load ecx with zero.  We use this either for the final shift or
-    // for the answer.
-    __ xor_(ecx, Operand(ecx));
-    // Check whether the exponent matches a 32 bit signed int that cannot be
-    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
-    // exponent is 30 (biased).  This is the exponent that we are fastest at and
-    // also the highest exponent we can handle here.
-    const uint32_t non_smi_exponent =
-        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
-    // If we have a match of the int32-but-not-Smi exponent then skip some
-    // logic.
-    __ j(equal, &right_exponent);
-    // If the exponent is higher than that then go to slow case.  This catches
-    // numbers that don't fit in a signed int32, infinities and NaNs.
-    __ j(less, &normal_exponent);
-
-    {
-      // Handle a big exponent.  The only reason we have this code is that the
-      // >>> operator has a tendency to generate numbers with an exponent of 31.
-      const uint32_t big_non_smi_exponent =
-          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
-      __ j(not_equal, conversion_failure);
-      // We have the big exponent, typically from >>>.  This means the number is
-      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
-      __ mov(scratch2, scratch);
-      __ and_(scratch2, HeapNumber::kMantissaMask);
-      // Put back the implicit 1.
-      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
-      // Shift up the mantissa bits to take up the space the exponent used to
-      // take. We just orred in the implicit bit so that took care of one and
-      // we want to use the full unsigned range so we subtract 1 bit from the
-      // shift distance.
-      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
-      __ shl(scratch2, big_shift_distance);
-      // Get the second half of the double.
-      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
-      // Shift down 21 bits to get the most significant 11 bits or the low
-      // mantissa word.
-      __ shr(ecx, 32 - big_shift_distance);
-      __ or_(ecx, Operand(scratch2));
-      // We have the answer in ecx, but we may need to negate it.
-      __ test(scratch, Operand(scratch));
-      __ j(positive, &done);
-      __ neg(ecx);
-      __ jmp(&done);
-    }
-
-    __ bind(&normal_exponent);
-    // Exponent word in scratch, exponent part of exponent word in scratch2.
-    // Zero in ecx.
-    // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
-    // it rounds to zero.
-    const uint32_t zero_exponent =
-        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ sub(Operand(scratch2), Immediate(zero_exponent));
-    // ecx already has a Smi zero.
-    __ j(less, &done);
-
-    // We have a shifted exponent between 0 and 30 in scratch2.
-    __ shr(scratch2, HeapNumber::kExponentShift);
-    __ mov(ecx, Immediate(30));
-    __ sub(ecx, Operand(scratch2));
-
-    __ bind(&right_exponent);
-    // Here ecx is the shift, scratch is the exponent word.
-    // Get the top bits of the mantissa.
-    __ and_(scratch, HeapNumber::kMantissaMask);
-    // Put back the implicit 1.
-    __ or_(scratch, 1 << HeapNumber::kExponentShift);
-    // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We have kExponentShift + 1 significant bits in the low end of the
-    // word.  Shift them to the top bits.
-    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-    __ shl(scratch, shift_distance);
-    // Get the second half of the double. For some exponents we don't
-    // actually need this because the bits get shifted out again, but
-    // it's probably slower to test than just to do it.
-    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
-    // Shift down 22 bits to get the most significant 10 bits or the low
-    // mantissa word.
-    __ shr(scratch2, 32 - shift_distance);
-    __ or_(scratch2, Operand(scratch));
-    // Move down according to the exponent.
-    __ shr_cl(scratch2);
-    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
-    // we may need to fix the sign.
-    NearLabel negative;
-    __ xor_(ecx, Operand(ecx));
-    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
-    __ j(greater, &negative);
-    __ mov(ecx, scratch2);
-    __ jmp(&done);
-    __ bind(&negative);
-    __ sub(ecx, Operand(scratch2));
-    __ bind(&done);
-  }
-}
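
The removed IntegerConvert recovered the integer part of a heap number straight from its IEEE-754 bit pattern (exponent, implicit leading 1, mantissa shift) rather than through a float-to-int instruction. A compact stand-alone sketch of the normal-exponent path described in those comments, with illustrative names, covering only unbiased exponents in [0, 30] (the big-exponent >>> case and the SSE3 fisttp path are not modelled):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Pull out the biased exponent, put the implicit 1 back on top of the
    // mantissa, shift the mantissa down so only the integer bits remain,
    // then apply the sign bit. Truncates toward zero, like cvttsd2si.
    int32_t IntegerPartFromBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;              // |value| < 1 rounds to zero
      assert(exponent <= 30);                  // caller guarantees the range
      uint64_t mantissa = (bits & 0x000FFFFFFFFFFFFFULL) | (1ULL << 52);
      int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
      return (bits >> 63) ? -magnitude : magnitude;
    }

    // e.g. IntegerPartFromBits(5.75) == 5 and IntegerPartFromBits(-5.75) == -5.
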
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
-                                                TypeInfo type_info,
-                                                bool use_sse3,
-                                                Label* conversion_failure) {
-  // Check float operands.
-  Label arg1_is_object, check_undefined_arg1;
-  Label arg2_is_object, check_undefined_arg2;
-  Label load_arg2, done;
-
-  if (!type_info.IsDouble()) {
-    if (!type_info.IsSmi()) {
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(not_zero, &arg1_is_object);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(edx);
-    }
-    __ SmiUntag(edx);
-    __ jmp(&load_arg2);
-  }
-
-  __ bind(&arg1_is_object);
-
-  // Get the untagged integer version of the edx heap number in ecx.
-  IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
-  __ mov(edx, ecx);
-
-  // Here edx has the untagged integer, eax has a Smi or a heap number.
-  __ bind(&load_arg2);
-  if (!type_info.IsDouble()) {
-    // Test if arg2 is a Smi.
-    if (!type_info.IsSmi()) {
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &arg2_is_object);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(eax);
-    }
-    __ SmiUntag(eax);
-    __ mov(ecx, eax);
-    __ jmp(&done);
-  }
-
-  __ bind(&arg2_is_object);
-
-  // Get the untagged integer version of the eax heap number in ecx.
-  IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
-  __ bind(&done);
-  __ mov(eax, edx);
-}
-
-
 // Input: edx, eax are the left and right objects of a bit op.
 // Output: eax, ecx are left and right integers for a bit op.
 void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
@@ -2105,8 +2451,7 @@
   Label load_arg2, done;
 
   // Test if arg1 is a Smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(not_zero, &arg1_is_object);
+  __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
 
   __ SmiUntag(edx);
   __ jmp(&load_arg2);
@@ -2125,19 +2470,14 @@
   __ j(not_equal, &check_undefined_arg1);
 
   // Get the untagged integer version of the edx heap number in ecx.
-  IntegerConvert(masm,
-                 edx,
-                 TypeInfo::Unknown(),
-                 use_sse3,
-                 conversion_failure);
+  IntegerConvert(masm, edx, use_sse3, conversion_failure);
   __ mov(edx, ecx);
 
   // Here edx has the untagged integer, eax has a Smi or a heap number.
   __ bind(&load_arg2);
 
   // Test if arg2 is a Smi.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &arg2_is_object);
+  __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
 
   __ SmiUntag(eax);
   __ mov(ecx, eax);
@@ -2156,28 +2496,12 @@
   __ j(not_equal, &check_undefined_arg2);
 
   // Get the untagged integer version of the eax heap number in ecx.
-  IntegerConvert(masm,
-                 eax,
-                 TypeInfo::Unknown(),
-                 use_sse3,
-                 conversion_failure);
+  IntegerConvert(masm, eax, use_sse3, conversion_failure);
   __ bind(&done);
   __ mov(eax, edx);
 }
 
 
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         TypeInfo type_info,
-                                         bool use_sse3,
-                                         Label* conversion_failure) {
-  if (type_info.IsNumber()) {
-    LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
-  } else {
-    LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
-  }
-}
-
-
 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                                        bool use_sse3,
                                                        Label* not_int32) {
@@ -2187,12 +2511,11 @@
 
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
-  NearLabel load_smi, done;
+  Label load_smi, done;
 
-  __ test(number, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi, not_taken);
+  __ JumpIfSmi(number, &load_smi, Label::kNear);
   __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_smi);
   __ SmiUntag(number);
@@ -2205,18 +2528,16 @@
 
 
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
-  NearLabel load_smi_edx, load_eax, load_smi_eax, done;
+  Label load_smi_edx, load_eax, load_smi_eax, done;
   // Load operand in edx into xmm0.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
 
   __ bind(&load_eax);
   // Load operand in eax into xmm1.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
   __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
@@ -2235,20 +2556,18 @@
 
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
                                            Label* not_numbers) {
-  NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
   // Load operand in edx into xmm0, or branch to not_numbers.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
   Factory* factory = masm->isolate()->factory();
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
   __ j(not_equal, not_numbers);  // Argument in edx is not a number.
   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
   __ bind(&load_eax);
   // Load operand in eax into xmm1, or branch to not_numbers.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
-  __ j(equal, &load_float_eax);
+  __ j(equal, &load_float_eax, Label::kNear);
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
@@ -2259,7 +2578,7 @@
   __ SmiUntag(eax);  // Untag smi before converting to float.
   __ cvtsi2sd(xmm1, Operand(eax));
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
   __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
   __ bind(&done);
@@ -2300,14 +2619,13 @@
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch,
                                             ArgLocation arg_location) {
-  NearLabel load_smi_1, load_smi_2, done_load_1, done;
+  Label load_smi_1, load_smi_2, done_load_1, done;
   if (arg_location == ARGS_IN_REGISTERS) {
     __ mov(scratch, edx);
   } else {
     __ mov(scratch, Operand(esp, 2 * kPointerSize));
   }
-  __ test(scratch, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_1, not_taken);
+  __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
   __ bind(&done_load_1);
 
@@ -2316,10 +2634,9 @@
   } else {
     __ mov(scratch, Operand(esp, 1 * kPointerSize));
   }
-  __ test(scratch, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_2, not_taken);
+  __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_smi_1);
   __ SmiUntag(scratch);
@@ -2359,19 +2676,17 @@
 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                              Label* non_float,
                                              Register scratch) {
-  NearLabel test_other, done;
+  Label test_other, done;
   // Test if both operands are floats or smi -> scratch=k_is_float;
   // Otherwise scratch = k_not_float.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &test_other, not_taken);  // argument in edx is OK
+  __ JumpIfSmi(edx, &test_other, Label::kNear);
   __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
   __ cmp(scratch, factory->heap_number_map());
   __ j(not_equal, non_float);  // argument in edx is not a number -> NaN
 
   __ bind(&test_other);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);  // argument in eax is OK
+  __ JumpIfSmi(eax, &done, Label::kNear);
   __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
   __ cmp(scratch, factory->heap_number_map());
   __ j(not_equal, non_float);  // argument in eax is not a number -> NaN
@@ -2387,140 +2702,6 @@
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  Label slow, done, undo;
-
-  if (op_ == Token::SUB) {
-    if (include_smi_code_) {
-      // Check whether the value is a smi.
-      NearLabel try_float;
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &try_float, not_taken);
-
-      if (negative_zero_ == kStrictNegativeZero) {
-        // Go slow case if the value of the expression is zero
-        // to make sure that we switch between 0 and -0.
-        __ test(eax, Operand(eax));
-        __ j(zero, &slow, not_taken);
-      }
-
-      // The value of the expression is a smi that is not zero.  Try
-      // optimistic subtraction '0 - value'.
-      __ mov(edx, Operand(eax));
-      __ Set(eax, Immediate(0));
-      __ sub(eax, Operand(edx));
-      __ j(overflow, &undo, not_taken);
-      __ StubReturn(1);
-
-      // Try floating point case.
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(eax);
-    }
-
-    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-    __ cmp(edx, masm->isolate()->factory()->heap_number_map());
-    __ j(not_equal, &slow);
-    if (overwrite_ == UNARY_OVERWRITE) {
-      __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
-      __ xor_(edx, HeapNumber::kSignMask);  // Flip sign.
-      __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
-    } else {
-      __ mov(edx, Operand(eax));
-      // edx: operand
-      __ AllocateHeapNumber(eax, ebx, ecx, &undo);
-      // eax: allocated 'empty' number
-      __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
-      __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
-      __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
-      __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
-      __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
-    }
-  } else if (op_ == Token::BIT_NOT) {
-    if (include_smi_code_) {
-      Label non_smi;
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &non_smi);
-      __ not_(eax);
-      __ and_(eax, ~kSmiTagMask);  // Remove inverted smi-tag.
-      __ ret(0);
-      __ bind(&non_smi);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(eax);
-    }
-
-    // Check if the operand is a heap number.
-    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-    __ cmp(edx, masm->isolate()->factory()->heap_number_map());
-    __ j(not_equal, &slow, not_taken);
-
-    // Convert the heap number in eax to an untagged integer in ecx.
-    IntegerConvert(masm,
-                   eax,
-                   TypeInfo::Unknown(),
-                   CpuFeatures::IsSupported(SSE3),
-                   &slow);
-
-    // Do the bitwise operation and check if the result fits in a smi.
-    NearLabel try_float;
-    __ not_(ecx);
-    __ cmp(ecx, 0xc0000000);
-    __ j(sign, &try_float, not_taken);
-
-    // Tag the result as a smi and we're done.
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ lea(eax, Operand(ecx, times_2, kSmiTag));
-    __ jmp(&done);
-
-    // Try to store the result in a heap number.
-    __ bind(&try_float);
-    if (overwrite_ == UNARY_NO_OVERWRITE) {
-      // Allocate a fresh heap number, but don't overwrite eax until
-      // we're sure we can do it without going through the slow case
-      // that needs the value in eax.
-      __ AllocateHeapNumber(ebx, edx, edi, &slow);
-      __ mov(eax, Operand(ebx));
-    }
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope use_sse2(SSE2);
-      __ cvtsi2sd(xmm0, Operand(ecx));
-      __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-    } else {
-      __ push(ecx);
-      __ fild_s(Operand(esp, 0));
-      __ pop(ecx);
-      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    }
-  } else {
-    UNIMPLEMENTED();
-  }
-
-  // Return from the stub.
-  __ bind(&done);
-  __ StubReturn(1);
-
-  // Restore eax and go slow case.
-  __ bind(&undo);
-  __ mov(eax, Operand(edx));
-
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ bind(&slow);
-  __ pop(ecx);  // pop return address.
-  __ push(eax);
-  __ push(ecx);  // push return address
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
   // Registers are used as follows:
   // edx = base
@@ -2541,10 +2722,8 @@
   Label exponent_nonsmi;
   Label base_nonsmi;
   // If the exponent is a heap number go to that specific case.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &exponent_nonsmi);
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(not_zero, &base_nonsmi);
+  __ JumpIfNotSmi(eax, &exponent_nonsmi);
+  __ JumpIfNotSmi(edx, &base_nonsmi);
 
   // Optimized version when both exponent and base are smis.
   Label powi;
@@ -2570,20 +2749,20 @@
   __ mov(edx, eax);
 
   // Get absolute value of exponent.
-  NearLabel no_neg;
+  Label no_neg;
   __ cmp(eax, 0);
-  __ j(greater_equal, &no_neg);
+  __ j(greater_equal, &no_neg, Label::kNear);
   __ neg(eax);
   __ bind(&no_neg);
 
   // Load xmm1 with 1.
   __ movsd(xmm1, xmm3);
-  NearLabel while_true;
-  NearLabel no_multiply;
+  Label while_true;
+  Label no_multiply;
 
   __ bind(&while_true);
   __ shr(eax, 1);
-  __ j(not_carry, &no_multiply);
+  __ j(not_carry, &no_multiply, Label::kNear);
   __ mulsd(xmm1, xmm0);
   __ bind(&no_multiply);
   __ mulsd(xmm0, xmm0);
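
The shr/mulsd loop above is binary exponentiation over the absolute value of the smi exponent: each iteration squares the base and multiplies it into the accumulator whenever the bit shifted out into the carry flag is set. A plain C++ sketch of the same loop (illustrative; the stub's separate negative-exponent fix-up is folded into the final division here):

    // xmm1 plays the role of `result`, xmm0 the running square; `shr eax, 1`
    // both halves the exponent and moves its low bit into the carry flag.
    double PowiSketch(double base, int exponent) {
      unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double result = 1.0;
      double square = base;
      while (e != 0) {
        if (e & 1u) result *= square;  // otherwise "no_multiply" is taken
        square *= square;
        e >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }
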
@@ -2614,13 +2793,12 @@
   __ ucomisd(xmm1, xmm1);
   __ j(parity_even, &call_runtime);
 
-  NearLabel base_not_smi;
-  NearLabel handle_special_cases;
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(not_zero, &base_not_smi);
+  Label base_not_smi;
+  Label handle_special_cases;
+  __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
   __ SmiUntag(edx);
   __ cvtsi2sd(xmm0, Operand(edx));
-  __ jmp(&handle_special_cases);
+  __ jmp(&handle_special_cases, Label::kNear);
 
   __ bind(&base_not_smi);
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
@@ -2635,7 +2813,7 @@
 
   // base is in xmm0 and exponent is in xmm1.
   __ bind(&handle_special_cases);
-  NearLabel not_minus_half;
+  Label not_minus_half;
   // Test for -0.5.
   // Load xmm2 with -0.5.
   __ mov(ecx, Immediate(0xBF000000));
@@ -2643,11 +2821,11 @@
   __ cvtss2sd(xmm2, xmm2);
   // xmm2 now has -0.5.
   __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half);
+  __ j(not_equal, &not_minus_half, Label::kNear);
 
   // Calculates reciprocal of square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
   __ divsd(xmm3, xmm1);
@@ -2664,7 +2842,7 @@
   __ j(not_equal, &call_runtime);
   // Calculates square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
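
The xorps/addsd pair before each sqrtsd implements the comment above: sqrtsd(-0) yields -0, but ECMAScript requires Math.pow(-0, 0.5) to be +0, and adding +0 first normalizes -0 to +0. A quick host-side check of that identity, using only the standard library:

    #include <cassert>
    #include <cmath>

    int main() {
      double minus_zero = -0.0;
      assert(std::signbit(std::sqrt(minus_zero)));         // sqrt keeps the sign of -0
      assert(!std::signbit(std::sqrt(minus_zero + 0.0)));  // -0 + (+0) == +0, so sqrt gives +0
      return 0;
    }
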
 
@@ -2689,21 +2867,20 @@
 
   // Check that the key is a smi.
   Label slow;
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow, not_taken);
+  __ JumpIfNotSmi(edx, &slow, Label::kNear);
 
   // Check if the calling frame is an arguments adaptor frame.
-  NearLabel adaptor;
+  Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
   __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor);
+  __ j(equal, &adaptor, Label::kNear);
 
   // Check index against formal parameters count limit passed in
   // through register eax. Use unsigned comparison to get negative
   // check for free.
   __ cmp(edx, Operand(eax));
-  __ j(above_equal, &slow, not_taken);
+  __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -2719,7 +2896,7 @@
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ cmp(edx, Operand(ecx));
-  __ j(above_equal, &slow, not_taken);
+  __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -2739,44 +2916,288 @@
 }
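
The argument-index checks above lean on a single unsigned compare doing two jobs: a negative index reinterpreted as unsigned is larger than any plausible argument count, so above_equal rejects both too-large and negative indices at once. A one-line sketch of the idiom, with an illustrative helper name:

    #include <cstdint>

    // above_equal on cmp(index, count) with unsigned semantics is exactly this.
    inline bool IndexOutOfRange(int32_t index, int32_t count) {
      return static_cast<uint32_t>(index) >= static_cast<uint32_t>(count);
    }
    // e.g. IndexOutOfRange(-1, 4) is true because 0xFFFFFFFFu >= 4u.
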
 
 
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
   // esp[0] : return address
   // esp[4] : number of parameters
   // esp[8] : receiver displacement
-  // esp[16] : function
+  // esp[12] : function
 
-  // The displacement is used for skipping the return address and the
-  // frame pointer on the stack. It is the offset of the last
-  // parameter (if any) relative to the frame pointer.
-  static const int kDisplacement = 2 * kPointerSize;
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &runtime, Label::kNear);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ mov(Operand(esp, 1 * kPointerSize), ecx);
+  __ lea(edx, Operand(edx, ecx, times_2,
+              StandardFrameConstants::kCallerSPOffset));
+  __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+  // esp[0] : return address
+  // esp[4] : number of parameters (tagged)
+  // esp[8] : receiver displacement
+  // esp[12] : function
+
+  // ebx = parameter count (tagged)
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  // TODO(rossberg): Factor out some of the bits that are shared with the other
+  // Generate* functions.
+  Label runtime;
+  Label adaptor_frame, try_allocate;
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor_frame, Label::kNear);
+
+  // No adaptor, parameter count = argument count.
+  __ mov(ecx, ebx);
+  __ jmp(&try_allocate, Label::kNear);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ lea(edx, Operand(edx, ecx, times_2,
+                      StandardFrameConstants::kCallerSPOffset));
+  __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+  // ebx = parameter count (tagged)
+  // ecx = argument count (tagged)
+  // esp[4] = parameter count (tagged)
+  // esp[8] = address of receiver argument
+  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+  __ cmp(ebx, Operand(ecx));
+  __ j(less_equal, &try_allocate, Label::kNear);
+  __ mov(ebx, ecx);
+
+  __ bind(&try_allocate);
+
+  // Save mapped parameter count.
+  __ push(ebx);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  Label no_parameter_map;
+  __ test(ebx, Operand(ebx));
+  __ j(zero, &no_parameter_map, Label::kNear);
+  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+  __ bind(&no_parameter_map);
+
+  // 2. Backing store.
+  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+
+  // Do the allocation of all three objects in one go.
+  __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+
+  // eax = address of new object(s) (tagged)
+  // ecx = argument count (tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[8] = parameter count (tagged)
+  // esp[12] = address of receiver argument
+  // Get the arguments boilerplate from the current (global) context into edi.
+  Label has_mapped_parameters, copy;
+  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+  __ mov(ebx, Operand(esp, 0 * kPointerSize));
+  __ test(ebx, Operand(ebx));
+  __ j(not_zero, &has_mapped_parameters, Label::kNear);
+  __ mov(edi, Operand(edi,
+         Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
+  __ jmp(&copy, Label::kNear);
+
+  __ bind(&has_mapped_parameters);
+  __ mov(edi, Operand(edi,
+            Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
+  __ bind(&copy);
+
+  // eax = address of new object (tagged)
+  // ebx = mapped parameter count (tagged)
+  // ecx = argument count (tagged)
+  // edi = address of boilerplate object (tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[8] = parameter count (tagged)
+  // esp[12] = address of receiver argument
+  // Copy the JS object part.
+  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+    __ mov(edx, FieldOperand(edi, i));
+    __ mov(FieldOperand(eax, i), edx);
+  }
+
+  // Setup the callee in-object property.
+  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+  __ mov(edx, Operand(esp, 4 * kPointerSize));
+  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+                      Heap::kArgumentsCalleeIndex * kPointerSize),
+         edx);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+                      Heap::kArgumentsLengthIndex * kPointerSize),
+         ecx);
+
+  // Setup the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, edi will point there, otherwise to the
+  // backing store.
+  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+  // eax = address of new object (tagged)
+  // ebx = mapped parameter count (tagged)
+  // ecx = argument count (tagged)
+  // edi = address of parameter map or backing store (tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[8] = parameter count (tagged)
+  // esp[12] = address of receiver argument
+  // Free a register.
+  __ push(eax);
+
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ test(ebx, Operand(ebx));
+  __ j(zero, &skip_parameter_map);
+
+  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+         Immediate(FACTORY->non_strict_arguments_elements_map()));
+  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+  __ push(ecx);
+  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ add(ebx, Operand(esp, 4 * kPointerSize));
+  __ sub(ebx, Operand(eax));
+  __ mov(ecx, FACTORY->the_hole_value());
+  __ mov(edx, edi);
+  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+  // eax = loop variable (tagged)
+  // ebx = mapping index (tagged)
+  // ecx = the hole value
+  // edx = address of parameter map (tagged)
+  // edi = address of backing store (tagged)
+  // esp[0] = argument count (tagged)
+  // esp[4] = address of new object (tagged)
+  // esp[8] = mapped parameter count (tagged)
+  // esp[16] = parameter count (tagged)
+  // esp[20] = address of receiver argument
+  __ jmp(&parameters_test, Label::kNear);
+
+  __ bind(&parameters_loop);
+  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ test(eax, Operand(eax));
+  __ j(not_zero, &parameters_loop, Label::kNear);
+  __ pop(ecx);
+
+  __ bind(&skip_parameter_map);
+
+  // ecx = argument count (tagged)
+  // edi = address of backing store (tagged)
+  // esp[0] = address of new object (tagged)
+  // esp[4] = mapped parameter count (tagged)
+  // esp[12] = parameter count (tagged)
+  // esp[16] = address of receiver argument
+  // Copy arguments header and remaining slots (if there are any).
+  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+         Immediate(FACTORY->fixed_array_map()));
+  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+  Label arguments_loop, arguments_test;
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+  __ mov(edx, Operand(esp, 4 * kPointerSize));
+  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(Operand(edx), ebx);
+  __ jmp(&arguments_test, Label::kNear);
+
+  __ bind(&arguments_loop);
+  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ mov(eax, Operand(edx, 0));
+  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ cmp(ebx, Operand(ecx));
+  __ j(less, &arguments_loop, Label::kNear);
+
+  // Restore.
+  __ pop(eax);  // Address of arguments object.
+  __ pop(ebx);  // Parameter count.
+
+  // Return and remove the on-stack parameters.
+  __ ret(3 * kPointerSize);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ pop(eax);  // Remove saved parameter count.
+  __ mov(Operand(esp, 1 * kPointerSize), ecx);  // Patch argument count.
+  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
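
The fast non-strict path above adds up three pieces before its single AllocateInNewSpace call: an optional parameter map (a FixedArray header plus two extra slots for the context and the backing store, plus one slot per mapped parameter), the backing store for the actual arguments, and the arguments JSObject itself. A stand-alone sketch of that arithmetic, with the V8-internal constants passed in explicitly since their concrete values are not spelled out here:

    #include <cstddef>

    // mapped_count is min(formal parameter count, argument count), exactly
    // what the stub computes into ebx before "try_allocate".
    size_t ArgumentsAllocationSize(size_t mapped_count,
                                   size_t argument_count,
                                   size_t pointer_size,
                                   size_t fixed_array_header_size,
                                   size_t arguments_object_size) {
      size_t size = 0;
      // 1. Parameter map: header + 2 extra slots (context, backing store) +
      //    one slot per mapped parameter; skipped when nothing is mapped.
      if (mapped_count != 0) {
        size += fixed_array_header_size + (2 + mapped_count) * pointer_size;
      }
      // 2. Backing store: header + one slot per actual argument.
      size += fixed_array_header_size + argument_count * pointer_size;
      // 3. The arguments JSObject.
      size += arguments_object_size;
      return size;
    }
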
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+  // esp[0] : return address
+  // esp[4] : number of parameters
+  // esp[8] : receiver displacement
+  // esp[12] : function
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
   __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor_frame);
+  __ j(equal, &adaptor_frame, Label::kNear);
 
   // Get the length from the frame.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
-  __ jmp(&try_allocate);
+  __ jmp(&try_allocate, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
   __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ mov(Operand(esp, 1 * kPointerSize), ecx);
-  __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+  __ lea(edx, Operand(edx, ecx, times_2,
+                      StandardFrameConstants::kCallerSPOffset));
   __ mov(Operand(esp, 2 * kPointerSize), edx);
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
-  NearLabel add_arguments_object;
+  Label add_arguments_object;
   __ bind(&try_allocate);
   __ test(ecx, Operand(ecx));
-  __ j(zero, &add_arguments_object);
+  __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ add(Operand(ecx), Immediate(GetArgumentsObjectSize()));
+  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -2784,8 +3205,9 @@
   // Get the arguments boilerplate from the current (global) context.
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
-  __ mov(edi, Operand(edi,
-                      Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+  const int offset =
+      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+  __ mov(edi, Operand(edi, offset));
 
   // Copy the JS object part.
   for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
@@ -2793,43 +3215,34 @@
     __ mov(FieldOperand(eax, i), ebx);
   }
 
-  if (type_ == NEW_NON_STRICT) {
-    // Setup the callee in-object property.
-    STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-    __ mov(ebx, Operand(esp, 3 * kPointerSize));
-    __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                             Heap::kArgumentsCalleeIndex * kPointerSize),
-           ebx);
-  }
-
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                           Heap::kArgumentsLengthIndex * kPointerSize),
+                      Heap::kArgumentsLengthIndex * kPointerSize),
          ecx);
 
   // If there are no actual arguments, we're done.
   Label done;
   __ test(ecx, Operand(ecx));
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // Get the parameters pointer from the stack.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
 
   // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
-  __ lea(edi, Operand(eax, GetArgumentsObjectSize()));
+  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
   __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_array_map()));
+         Immediate(FACTORY->fixed_array_map()));
 
   __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
   // Untag the length for the loop below.
   __ SmiUntag(ecx);
 
   // Copy the fixed array slots.
-  NearLabel loop;
+  Label loop;
   __ bind(&loop);
   __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
   __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
@@ -2844,7 +3257,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
 }
 
 
@@ -2882,13 +3295,12 @@
       ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
   __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
   __ test(ebx, Operand(ebx));
-  __ j(zero, &runtime, not_taken);
+  __ j(zero, &runtime);
 
   // Check that the first argument is a JSRegExp object.
   __ mov(eax, Operand(esp, kJSRegExpOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &runtime);
+  __ JumpIfSmi(eax, &runtime);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
   __ j(not_equal, &runtime);
   // Check that the RegExp has been compiled (data contains a fixed array).
@@ -2922,8 +3334,7 @@
   // edx: Number of capture registers
   // Check that the second argument is a string.
   __ mov(eax, Operand(esp, kSubjectOffset));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &runtime);
+  __ JumpIfSmi(eax, &runtime);
   Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
   __ j(NegateCondition(is_string), &runtime);
   // Get the length of the string to ebx.
@@ -2935,8 +3346,7 @@
   // Check that the third argument is a positive smi less than the subject
   // string length. A negative value will be greater (unsigned comparison).
   __ mov(eax, Operand(esp, kPreviousIndexOffset));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &runtime);
+  __ JumpIfNotSmi(eax, &runtime);
   __ cmp(eax, Operand(ebx));
   __ j(above_equal, &runtime);
 
@@ -2944,8 +3354,7 @@
   // edx: Number of capture registers
   // Check that the fourth object is a JSArray object.
   __ mov(eax, Operand(esp, kLastMatchInfoOffset));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &runtime);
+  __ JumpIfSmi(eax, &runtime);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
   __ j(not_equal, &runtime);
   // Check that the JSArray is in fast case.
@@ -2962,6 +3371,8 @@
   __ cmp(edx, Operand(eax));
   __ j(greater, &runtime);
 
+  // Reset offset for possibly sliced string.
+  __ Set(edi, Immediate(0));
   // ecx: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
   Label seq_ascii_string, seq_two_byte_string, check_code;
@@ -2972,36 +3383,45 @@
   __ and_(ebx,
           kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be a flat ascii string.
-  __ test(Operand(ebx),
+  __ and_(Operand(ebx),
           Immediate(kIsNotStringMask | kStringRepresentationMask));
-  __ j(zero, &seq_ascii_string);
+  __ j(zero, &seq_ascii_string, Label::kNear);
 
-  // Check for flat cons string.
+  // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
-  __ test(Operand(ebx),
-          Immediate(kIsNotStringMask | kExternalStringTag));
-  __ j(not_zero, &runtime);
-  // String is a cons string.
-  __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
-  __ cmp(Operand(edx), factory->empty_string());
+  // In the case of a sliced string its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+  __ j(less, &cons_string);
+  __ j(equal, &runtime);
+
+  // String is sliced.
+  __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
+  __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
+  // edi: offset of sliced string, smi-tagged.
+  // eax: parent string.
+  __ jmp(&check_encoding, Label::kNear);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
+  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
   __ j(not_equal, &runtime);
   __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+  __ bind(&check_encoding);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  // String is a cons string with empty second part.
-  // eax: first part of cons string.
-  // ebx: map of first part of cons string.
-  // Is first part a flat two byte string?
+  // eax: first part of cons string or parent of sliced string.
+  // ebx: map of first part of cons string or map of parent of sliced string.
+  // Is first part of cons or parent of slice a flat two byte string?
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be ascii.
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask);
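
The new checks above fold sliced strings into the existing flat-cons handling: a cons string whose second half is the empty string stands for its first half, while a sliced string stands for a window of its parent beginning at a stored offset that later has to be added to the match indices (kept in edi). A simplified stand-alone sketch of that unwrapping, using made-up types rather than V8's string classes (external strings and non-flat cons strings simply bail out, as the stub bails to the runtime):

    #include <string>

    // Illustrative shapes only: SEQUENTIAL owns characters, CONS points at two
    // halves, SLICED points at a parent plus a character offset.
    struct SubjectString {
      enum Kind { SEQUENTIAL, CONS, SLICED } kind;
      const SubjectString* first;   // CONS: first half, SLICED: parent
      const SubjectString* second;  // CONS: second half (empty => flat)
      int offset;                   // SLICED: start offset into the parent
      std::string chars;            // SEQUENTIAL: the characters
    };

    // Returns the string that actually holds the characters and records the
    // slice offset the caller must add to any index handed to the RegExp code;
    // returns nullptr when the shape is not handled on the fast path.
    const SubjectString* UnwrapSubject(const SubjectString* s, int* extra_offset) {
      *extra_offset = 0;
      if (s->kind == SubjectString::CONS) {
        if (!s->second->chars.empty()) return nullptr;  // not flat: runtime
        return s->first;                                // flat cons: first half
      }
      if (s->kind == SubjectString::SLICED) {
        *extra_offset = s->offset;                      // remember slice start
        return s->first;                                // chars live in parent
      }
      return s;                                         // already sequential
    }
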
@@ -3011,25 +3431,24 @@
   // eax: subject string (flat ascii)
   // ecx: RegExp data (FixedArray)
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(edi, Immediate(1));  // Type is ascii.
-  __ jmp(&check_code);
+  __ Set(ecx, Immediate(1));  // Type is ascii.
+  __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
   // eax: subject string (flat two byte)
   // ecx: RegExp data (FixedArray)
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
-  __ Set(edi, Immediate(0));  // Type is two byte.
+  __ Set(ecx, Immediate(0));  // Type is two byte.
 
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
-  // the hole.
-  __ CmpObjectType(edx, CODE_TYPE, ebx);
-  __ j(not_equal, &runtime);
+  // a smi (code flushing support).
+  __ JumpIfSmi(edx, &runtime);
 
   // eax: subject string
   // edx: code
-  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
+  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ mov(ebx, Operand(esp, kPreviousIndexOffset));
@@ -3038,7 +3457,7 @@
   // eax: subject string
   // ebx: previous index
   // edx: code
-  // edi: encoding of subject string (1 if ascii 0 if two_byte);
+  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3055,44 +3474,62 @@
   __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
 
   // Argument 6: Start (high end) of backtracking stack memory area.
-  __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
-  __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ mov(Operand(esp, 5 * kPointerSize), ecx);
+  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+  __ mov(Operand(esp, 5 * kPointerSize), esi);
 
   // Argument 5: static offsets vector buffer.
   __ mov(Operand(esp, 4 * kPointerSize),
          Immediate(ExternalReference::address_of_static_offsets_vector(
              masm->isolate())));
 
+  // Argument 2: Previous index.
+  __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+  // Argument 1: Original subject string.
+  // The original subject is in the previous stack frame. Therefore we have to
+  // use ebp, which points exactly to one pointer size below the previous esp.
+  // (Because creating a new stack frame pushes the previous ebp onto the stack
+  // and thereby moves up esp by one kPointerSize.)
+  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
+  __ mov(Operand(esp, 0 * kPointerSize), esi);
+
+  // esi: original subject string
+  // eax: underlying subject string
+  // ebx: previous index
+  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+  // edx: code
   // Argument 4: End of string data
   // Argument 3: Start of string data
-  NearLabel setup_two_byte, setup_rest;
-  __ test(edi, Operand(edi));
-  __ mov(edi, FieldOperand(eax, String::kLengthOffset));
-  __ j(zero, &setup_two_byte);
+  // Prepare start and end index of the input.
+  // Load the length from the original sliced string if that is the case.
+  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
+  __ add(esi, Operand(edi));  // Calculate input end wrt offset.
   __ SmiUntag(edi);
-  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+  __ add(ebx, Operand(edi));  // Calculate input start wrt offset.
+
+  // ebx: start index of the input string
+  // esi: end index of the input string
+  Label setup_two_byte, setup_rest;
+  __ test(ecx, Operand(ecx));
+  __ j(zero, &setup_two_byte, Label::kNear);
+  __ SmiUntag(esi);
+  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
   __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
-  __ jmp(&setup_rest);
+  __ jmp(&setup_rest, Label::kNear);
 
   __ bind(&setup_two_byte);
   STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);  // edi is smi (powered by 2).
-  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
+  STATIC_ASSERT(kSmiTagSize == 1);  // esi is smi (powered by 2).
+  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
   __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
 
   __ bind(&setup_rest);
 
-  // Argument 2: Previous index.
-  __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
-  // Argument 1: Subject string.
-  __ mov(Operand(esp, 0 * kPointerSize), eax);
-
   // Locate the code entry and call it.
   __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(Operand(edx));
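A hedged C++ sketch of how Arguments 3 and 4 above are computed for the ASCII
case: both pointers are formed on the underlying sequential string, shifted by
the slice offset (zero when the subject is not a sliced string). All names
below are illustrative rather than V8 identifiers.

    struct RegExpInputRange { const char* start; const char* end; };

    RegExpInputRange ComputeInputRange(const char* underlying_chars,  // first character of the flat string
                                       int slice_offset,     // 0 unless the subject was sliced
                                       int subject_length,   // length of the original subject
                                       int previous_index) {
      RegExpInputRange range;
      range.start = underlying_chars + slice_offset + previous_index;  // Argument 3
      range.end   = underlying_chars + slice_offset + subject_length;  // Argument 4
      return range;
    }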
@@ -3103,10 +3540,10 @@
   // Check the result.
   Label success;
   __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
-  __ j(equal, &success, taken);
+  __ j(equal, &success);
   Label failure;
   __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
-  __ j(equal, &failure, taken);
+  __ j(equal, &failure);
   __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
   // If not exception it can only be retry. Handle that in the runtime system.
   __ j(not_equal, &runtime);
@@ -3114,7 +3551,7 @@
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  ExternalReference pending_exception(Isolate::k_pending_exception_address,
+  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
   __ mov(edx,
          Operand::StaticVariable(ExternalReference::the_hole_value_location(
@@ -3131,7 +3568,7 @@
   // by javascript code.
   __ cmp(eax, factory->termination_exception());
   Label throw_termination_exception;
-  __ j(equal, &throw_termination_exception);
+  __ j(equal, &throw_termination_exception, Label::kNear);
 
   // Handle normal exception by following handler chain.
   __ Throw(eax);
@@ -3183,12 +3620,12 @@
   // ebx: last_match_info backing store (FixedArray)
   // ecx: offsets vector
   // edx: number of capture registers
-  NearLabel next_capture, done;
+  Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
   __ sub(Operand(edx), Immediate(1));
-  __ j(negative, &done);
+  __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer.
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
   __ SmiTag(edi);
@@ -3215,10 +3652,9 @@
 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   const int kMaxInlineLength = 100;
   Label slowcase;
-  NearLabel done;
+  Label done;
   __ mov(ebx, Operand(esp, kPointerSize * 3));
-  __ test(ebx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slowcase);
+  __ JumpIfNotSmi(ebx, &slowcase);
   __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
   __ j(above, &slowcase);
   // Smi-tagging is equivalent to multiplying by 2.
@@ -3281,7 +3717,7 @@
   Label loop;
   __ test(ecx, Operand(ecx));
   __ bind(&loop);
-  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+  __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
   __ sub(Operand(ecx), Immediate(1));
   __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
   __ jmp(&loop);
@@ -3322,19 +3758,18 @@
   // number string cache for smis is just the smi value, and the hash for
   // doubles is the xor of the upper and lower words. See
   // Heap::GetNumberStringCache.
-  NearLabel smi_hash_calculated;
-  NearLabel load_result_from_cache;
+  Label smi_hash_calculated;
+  Label load_result_from_cache;
   if (object_is_smi) {
     __ mov(scratch, object);
     __ SmiUntag(scratch);
   } else {
-    NearLabel not_smi, hash_calculated;
+    Label not_smi;
     STATIC_ASSERT(kSmiTag == 0);
-    __ test(object, Immediate(kSmiTagMask));
-    __ j(not_zero, &not_smi);
+    __ JumpIfNotSmi(object, &not_smi, Label::kNear);
     __ mov(scratch, object);
     __ SmiUntag(scratch);
-    __ jmp(&smi_hash_calculated);
+    __ jmp(&smi_hash_calculated, Label::kNear);
     __ bind(&not_smi);
     __ cmp(FieldOperand(object, HeapObject::kMapOffset),
            masm->isolate()->factory()->heap_number_map());
@@ -3351,8 +3786,7 @@
                         index,
                         times_twice_pointer_size,
                         FixedArray::kHeaderSize));
-    __ test(probe, Immediate(kSmiTagMask));
-    __ j(zero, not_found);
+    __ JumpIfSmi(probe, not_found);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
       __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
@@ -3365,7 +3799,7 @@
     }
     __ j(parity_even, not_found);  // Bail out if NaN is involved.
     __ j(not_equal, not_found);  // The cache did not contain this value.
-    __ jmp(&load_result_from_cache);
+    __ jmp(&load_result_from_cache, Label::kNear);
   }
 
   __ bind(&smi_hash_calculated);
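As the comment above notes, the number string cache hashes a smi by its value
and a double by xor-ing its upper and lower 32-bit words. A minimal sketch of
the double case, assuming the cache index is the hash masked to the cache size
(the mask parameter and function name are illustrative):

    #include <cstdint>
    #include <cstring>

    uint32_t DoubleNumberCacheHash(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);          // reinterpret the double's bits
      uint32_t lo = static_cast<uint32_t>(bits);        // lower word
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // upper word
      return (lo ^ hi) & mask;  // xor of upper and lower words, masked to cache size
    }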
@@ -3417,17 +3851,16 @@
 void CompareStub::Generate(MacroAssembler* masm) {
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
-  Label check_unequal_objects, done;
+  Label check_unequal_objects;
 
   // Compare two smis if required.
   if (include_smi_compare_) {
     Label non_smi, smi_done;
     __ mov(ecx, Operand(edx));
     __ or_(ecx, Operand(eax));
-    __ test(ecx, Immediate(kSmiTagMask));
-    __ j(not_zero, &non_smi, not_taken);
+    __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
     __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
-    __ j(no_overflow, &smi_done);
+    __ j(no_overflow, &smi_done, Label::kNear);
     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
     __ mov(eax, edx);
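The overflow fix-up above works because only the sign of the difference is
consumed afterwards: on overflow the computed value has the wrong sign and is
never zero, so a bitwise NOT flips it back. A hedged sketch using a GCC/Clang
builtin (the function name is illustrative):

    #include <cstdint>

    int32_t SmiCompareResult(int32_t lhs, int32_t rhs) {
      int32_t diff;
      if (__builtin_sub_overflow(lhs, rhs, &diff)) {
        diff = ~diff;  // wrong sign, never 0 on overflow: NOT restores the sign
      }
      return diff;  // negative, zero or positive, like the value left in eax
    }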
@@ -3453,9 +3886,9 @@
     if (cc_ != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
-      NearLabel check_for_nan;
+      Label check_for_nan;
       __ cmp(edx, masm->isolate()->factory()->undefined_value());
-      __ j(not_equal, &check_for_nan);
+      __ j(not_equal, &check_for_nan, Label::kNear);
       __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
       __ ret(0);
       __ bind(&check_for_nan);
@@ -3468,13 +3901,13 @@
       __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
       __ ret(0);
     } else {
-      NearLabel heap_number;
+      Label heap_number;
       __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
              Immediate(masm->isolate()->factory()->heap_number_map()));
-      __ j(equal, &heap_number);
+      __ j(equal, &heap_number, Label::kNear);
       if (cc_ != equal) {
         // Call runtime on identical JSObjects.  Otherwise return equal.
-        __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+        __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
         __ j(above_equal, &not_identical);
       }
       __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -3503,8 +3936,8 @@
         __ setcc(above_equal, eax);
         __ ret(0);
       } else {
-        NearLabel nan;
-        __ j(above_equal, &nan);
+        Label nan;
+        __ j(above_equal, &nan, Label::kNear);
         __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
         __ ret(0);
         __ bind(&nan);
@@ -3520,7 +3953,7 @@
   // Non-strict object equality is slower, so it is handled later in the stub.
   if (cc_ == equal && strict_) {
     Label slow;  // Fallthrough label.
-    NearLabel not_smis;
+    Label not_smis;
     // If we're doing a strict equality comparison, we don't have to do
     // type conversion, so we generate code to do fast comparison for objects
     // and oddballs. Non-smi numbers and strings still go through the usual
@@ -3532,7 +3965,7 @@
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, Operand(eax));
     __ test(ecx, Operand(edx));
-    __ j(not_zero, &not_smis);
+    __ j(not_zero, &not_smis, Label::kNear);
     // One operand is a smi.
 
     // Check whether the non-smi is a heap number.
@@ -3549,7 +3982,7 @@
     __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
            Immediate(masm->isolate()->factory()->heap_number_map()));
     // If heap number, handle it in the slow case.
-    __ j(equal, &slow);
+    __ j(equal, &slow, Label::kNear);
     // Return non-equal (ebx is not zero)
     __ mov(eax, ebx);
     __ ret(0);
@@ -3561,13 +3994,13 @@
 
     // Get the type of the first operand.
     // If the first object is a JS object, we have done pointer comparison.
-    NearLabel first_non_object;
-    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-    __ j(below, &first_non_object);
+    Label first_non_object;
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(below, &first_non_object, Label::kNear);
 
     // Return non-zero (eax is not zero)
-    NearLabel return_not_equal;
+    Label return_not_equal;
     STATIC_ASSERT(kHeapObjectTag != 0);
     __ bind(&return_not_equal);
     __ ret(0);
@@ -3577,7 +4010,7 @@
     __ CmpInstanceType(ecx, ODDBALL_TYPE);
     __ j(equal, &return_not_equal);
 
-    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
     __ j(above_equal, &return_not_equal);
 
     // Check for oddballs: true, false, null, undefined.
@@ -3600,7 +4033,7 @@
       __ ucomisd(xmm0, xmm1);
 
       // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, not_taken);
+      __ j(parity_even, &unordered, Label::kNear);
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ mov(eax, 0);  // equal
       __ mov(ecx, Immediate(Smi::FromInt(1)));
@@ -3616,12 +4049,12 @@
       __ FCmp();
 
       // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, not_taken);
+      __ j(parity_even, &unordered, Label::kNear);
 
-      NearLabel below_label, above_label;
+      Label below_label, above_label;
       // Return a result of -1, 0, or 1, based on EFLAGS.
-      __ j(below, &below_label, not_taken);
-      __ j(above, &above_label, not_taken);
+      __ j(below, &below_label, Label::kNear);
+      __ j(above, &above_label, Label::kNear);
 
       __ Set(eax, Immediate(0));
       __ ret(0);
@@ -3668,12 +4101,20 @@
                                          &check_unequal_objects);
 
   // Inline comparison of ascii strings.
-  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+  if (cc_ == equal) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      edx,
                                                      eax,
                                                      ecx,
-                                                     ebx,
-                                                     edi);
+                                                     ebx);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       edx,
+                                                       eax,
+                                                       ecx,
+                                                       ebx,
+                                                       edi);
+  }
 #ifdef DEBUG
   __ Abort("Unexpected fall-through from string comparison");
 #endif
@@ -3683,8 +4124,8 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    NearLabel not_both_objects;
-    NearLabel return_unequal;
+    Label not_both_objects;
+    Label return_unequal;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -3692,20 +4133,20 @@
     STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
-    __ j(not_zero, &not_both_objects);
-    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-    __ j(below, &not_both_objects);
-    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
-    __ j(below, &not_both_objects);
+    __ j(not_zero, &not_both_objects, Label::kNear);
+    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(below, &not_both_objects, Label::kNear);
+    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+    __ j(below, &not_both_objects, Label::kNear);
     // We do not bail out after this point.  Both are JSObjects, and
     // they are equal if and only if both are undetectable.
     // The and of the undetectable flags is 1 if and only if they are equal.
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     // The objects are both undetectable, so they both compare as the value
     // undefined, and are equal.
     __ Set(eax, Immediate(EQUAL));
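A minimal sketch of the tag trick described above, assuming kSmiTag == 0 and
kSmiTagMask == 1 as the STATIC_ASSERTs state; it only holds under the stated
precondition that at most one of the two operands can be a smi:

    #include <cstdint>

    bool BothAreHeapObjects(uintptr_t tagged_a, uintptr_t tagged_b) {
      const uintptr_t kSmiTagMask = 1;
      // smi + heap object leaves the low bit set; heap object + heap object
      // leaves it clear (given that at most one operand is a smi).
      return ((tagged_a + tagged_b) & kSmiTagMask) == 0;
    }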
@@ -3743,8 +4184,7 @@
                                     Label* label,
                                     Register object,
                                     Register scratch) {
-  __ test(object, Immediate(kSmiTagMask));
-  __ j(zero, label);
+  __ JumpIfSmi(object, label);
   __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
   __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
   __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
@@ -3759,33 +4199,24 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  Label slow;
+  Label slow, non_function;
 
-  // If the receiver might be a value (string, number or boolean) check for this
-  // and box it if it is.
-  if (ReceiverMightBeValue()) {
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
     // Get the receiver from the stack.
     // +1 ~ return address
-    Label receiver_is_value, receiver_is_js_object;
     __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
-
-    // Check if receiver is a smi (which is a number value).
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &receiver_is_value, not_taken);
-
-    // Check if the receiver is a valid JS object.
-    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
-    __ j(above_equal, &receiver_is_js_object);
-
-    // Call the runtime to box the value.
-    __ bind(&receiver_is_value);
-    __ EnterInternalFrame();
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ LeaveInternalFrame();
-    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
-
-    __ bind(&receiver_is_js_object);
+    // Call as function is indicated with the hole.
+    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ j(not_equal, &call, Label::kNear);
+    // Patch the receiver on the stack with the global receiver object.
+    __ mov(ebx, GlobalObjectOperand());
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
+    __ bind(&call);
   }
 
   // Get the function to call from the stack.
@@ -3793,23 +4224,56 @@
   __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
 
   // Check that the function really is a JavaScript function.
-  __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ JumpIfSmi(edi, &non_function);
   // Goto slow case if we do not have a function.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &slow, not_taken);
+  __ j(not_equal, &slow);
 
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ j(equal, &call_as_function);
+    __ InvokeFunction(edi,
+                      actual,
+                      JUMP_FUNCTION,
+                      NullCallWrapper(),
+                      CALL_AS_METHOD);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(edi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // Check for function proxy.
+  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function);
+  __ pop(ecx);
+  __ push(edi);  // Put proxy as an additional argument under return address.
+  __ push(ecx);
+  __ Set(eax, Immediate(argc_ + 1));
+  __ Set(ebx, Immediate(0));
+  __ SetCallKind(ecx, CALL_AS_FUNCTION);
+  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+  {
+    Handle<Code> adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ jmp(adaptor, RelocInfo::CODE_TARGET);
+  }
+
   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   // of the original receiver from the call site).
+  __ bind(&non_function);
   __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
   __ Set(eax, Immediate(argc_));
   __ Set(ebx, Immediate(0));
+  __ SetCallKind(ecx, CALL_AS_METHOD);
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
   Handle<Code> adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
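The dispatch implemented by the rewritten CallFunctionStub can be summarized
as follows. This is a hedged control-flow sketch; the enums and helper are
illustrative stand-ins, not V8 identifiers.

    enum CallKind { kCallAsMethod, kCallAsFunction };
    enum CallTarget { kInvokeFunction, kCallFunctionProxyBuiltin, kCallNonFunctionBuiltin };

    CallTarget DispatchCall(bool callee_is_function,
                            bool callee_is_function_proxy,
                            bool receiver_is_the_hole,  // implicit (global) receiver
                            CallKind* call_kind) {
      if (callee_is_function) {
        *call_kind = receiver_is_the_hole ? kCallAsFunction : kCallAsMethod;
        return kInvokeFunction;
      }
      if (callee_is_function_proxy) {
        *call_kind = kCallAsFunction;
        return kCallFunctionProxyBuiltin;  // proxy pushed as extra argument
      }
      *call_kind = kCallAsMethod;
      return kCallNonFunctionBuiltin;  // callee becomes the receiver
    }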
@@ -3878,9 +4342,9 @@
   // Make sure we're not trying to return 'the hole' from the runtime
   // call as this may lead to crashes in the IC code later.
   if (FLAG_debug_code) {
-    NearLabel okay;
+    Label okay;
     __ cmp(eax, masm->isolate()->factory()->the_hole_value());
-    __ j(not_equal, &okay);
+    __ j(not_equal, &okay, Label::kNear);
     __ int3();
     __ bind(&okay);
   }
@@ -3891,10 +4355,10 @@
   __ lea(ecx, Operand(eax, 1));
   // Lower 2 bits of ecx are 0 iff eax has failure tag.
   __ test(ecx, Immediate(kFailureTagMask));
-  __ j(zero, &failure_returned, not_taken);
+  __ j(zero, &failure_returned);
 
   ExternalReference pending_exception_address(
-      Isolate::k_pending_exception_address, masm->isolate());
+      Isolate::kPendingExceptionAddress, masm->isolate());
 
   // Check that there is no pending exception, otherwise we
   // should have returned some failure value.
@@ -3902,10 +4366,10 @@
     __ push(edx);
     __ mov(edx, Operand::StaticVariable(
         ExternalReference::the_hole_value_location(masm->isolate())));
-    NearLabel okay;
+    Label okay;
     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
     // Cannot use check here as it attempts to generate call into runtime.
-    __ j(equal, &okay);
+    __ j(equal, &okay, Label::kNear);
     __ int3();
     __ bind(&okay);
     __ pop(edx);
@@ -3922,7 +4386,7 @@
   // If the returned exception is RETRY_AFTER_GC continue at retry label
   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
-  __ j(zero, &retry, taken);
+  __ j(zero, &retry, Label::kNear);
 
   // Special handling of out of memory exceptions.
   __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
@@ -4019,9 +4483,7 @@
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Label not_outermost_js, not_outermost_js_2;
-#endif
 
   // Setup frame.
   __ push(ebp);
@@ -4037,30 +4499,28 @@
   __ push(ebx);
 
   // Save copies of the top frame descriptor on the stack.
-  ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate());
+  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
   __ push(Operand::StaticVariable(c_entry_fp));
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // If this is the outermost JS call, set js_entry_sp value.
-  ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
                                 masm->isolate());
   __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
-  __ j(not_equal, &not_outermost_js);
+  __ j(not_equal, &not_outermost_js, Label::kNear);
   __ mov(Operand::StaticVariable(js_entry_sp), ebp);
   __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   Label cont;
-  __ jmp(&cont);
+  __ jmp(&cont, Label::kNear);
   __ bind(&not_outermost_js);
   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   __ bind(&cont);
-#endif
 
   // Call a faked try-block that does the invoke.
   __ call(&invoke);
 
   // Caught exception: Store result (exception) in the pending
   // exception field in the JSEnv and return a failure sentinel.
-  ExternalReference pending_exception(Isolate::k_pending_exception_address,
+  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
   __ mov(Operand::StaticVariable(pending_exception), eax);
   __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
@@ -4101,7 +4561,6 @@
   __ PopTryHandler();
 
   __ bind(&exit);
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // Check if the current stack frame is marked as the outermost JS frame.
   __ pop(ebx);
   __ cmp(Operand(ebx),
@@ -4109,11 +4568,10 @@
   __ j(not_equal, &not_outermost_js_2);
   __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
   __ bind(&not_outermost_js_2);
-#endif
 
   // Restore the top frame descriptor from the stack.
   __ pop(Operand::StaticVariable(ExternalReference(
-      Isolate::k_c_entry_fp_address,
+      Isolate::kCEntryFPAddress,
       masm->isolate())));
 
   // Restore callee-saved registers (C calling conventions).
@@ -4177,23 +4635,22 @@
   }
 
   // Check that the left hand is a JS object.
-  __ test(object, Immediate(kSmiTagMask));
-  __ j(zero, &not_js_object, not_taken);
+  __ JumpIfSmi(object, &not_js_object);
   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     // Look up the function and the map in the instanceof cache.
-    NearLabel miss;
+    Label miss;
     __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
     __ cmp(function,
            Operand::StaticArray(scratch, times_pointer_size, roots_address));
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
     __ cmp(map, Operand::StaticArray(
         scratch, times_pointer_size, roots_address));
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(eax, Operand::StaticArray(
         scratch, times_pointer_size, roots_address));
@@ -4205,8 +4662,7 @@
   __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
-  __ test(prototype, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ JumpIfSmi(prototype, &slow);
   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
 
   // Update the global instanceof or call site inlined cache with the current
@@ -4236,13 +4692,13 @@
   // Loop through the prototype chain of the object looking for the function
   // prototype.
   __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
-  NearLabel loop, is_instance, is_not_instance;
+  Label loop, is_instance, is_not_instance;
   __ bind(&loop);
   __ cmp(scratch, Operand(prototype));
-  __ j(equal, &is_instance);
+  __ j(equal, &is_instance, Label::kNear);
   Factory* factory = masm->isolate()->factory();
   __ cmp(Operand(scratch), Immediate(factory->null_value()));
-  __ j(equal, &is_not_instance);
+  __ j(equal, &is_not_instance, Label::kNear);
   __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
   __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
@@ -4295,28 +4751,26 @@
   __ bind(&not_js_object);
   // Before null, smi and string value checks, check that the rhs is a function
   // as for a non-function rhs an exception needs to be thrown.
-  __ test(function, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ JumpIfSmi(function, &slow, Label::kNear);
   __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
-  __ j(not_equal, &slow, not_taken);
+  __ j(not_equal, &slow, Label::kNear);
 
   // Null is not instance of anything.
   __ cmp(object, factory->null_value());
-  __ j(not_equal, &object_not_null);
+  __ j(not_equal, &object_not_null, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(1)));
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
-  __ test(object, Immediate(kSmiTagMask));
-  __ j(not_zero, &object_not_null_or_smi, not_taken);
+  __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(1)));
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
-  __ j(NegateCondition(is_string), &slow);
+  __ j(NegateCondition(is_string), &slow, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(1)));
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
@@ -4339,11 +4793,11 @@
     __ push(function);
     __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
     __ LeaveInternalFrame();
-    NearLabel true_value, done;
+    Label true_value, done;
     __ test(eax, Operand(eax));
-    __ j(zero, &true_value);
+    __ j(zero, &true_value, Label::kNear);
     __ mov(eax, factory->false_value());
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&true_value);
     __ mov(eax, factory->true_value());
     __ bind(&done);
@@ -4375,15 +4829,8 @@
 
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-
   const char* cc_name;
   switch (cc_) {
     case less: cc_name = "LT"; break;
@@ -4394,35 +4841,12 @@
     case not_equal: cc_name = "NE"; break;
     default: cc_name = "UnknownCondition"; break;
   }
-
-  const char* strict_name = "";
-  if (strict_ && (cc_ == equal || cc_ == not_equal)) {
-    strict_name = "_STRICT";
-  }
-
-  const char* never_nan_nan_name = "";
-  if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
-    never_nan_nan_name = "_NO_NAN";
-  }
-
-  const char* include_number_compare_name = "";
-  if (!include_number_compare_) {
-    include_number_compare_name = "_NO_NUMBER";
-  }
-
-  const char* include_smi_compare_name = "";
-  if (!include_smi_compare_) {
-    include_smi_compare_name = "_NO_SMI";
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "CompareStub_%s%s%s%s%s",
-               cc_name,
-               strict_name,
-               never_nan_nan_name,
-               include_number_compare_name,
-               include_smi_compare_name);
-  return name_;
+  bool is_equality = cc_ == equal || cc_ == not_equal;
+  stream->Add("CompareStub_%s", cc_name);
+  if (strict_ && is_equality) stream->Add("_STRICT");
+  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+  if (!include_number_compare_) stream->Add("_NO_NUMBER");
+  if (!include_smi_compare_) stream->Add("_NO_SMI");
 }
 
 
@@ -4433,11 +4857,11 @@
   Label flat_string;
   Label ascii_string;
   Label got_char_code;
+  Label sliced_string;
 
   // If the receiver is a smi trigger the non-string case.
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(object_, Immediate(kSmiTagMask));
-  __ j(zero, receiver_not_string_);
+  __ JumpIfSmi(object_, receiver_not_string_);
 
   // Fetch the instance type of the receiver into result register.
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -4448,8 +4872,7 @@
 
   // If the index is non-smi trigger the non-smi case.
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(index_, Immediate(kSmiTagMask));
-  __ j(not_zero, &index_not_smi_);
+  __ JumpIfNotSmi(index_, &index_not_smi_);
 
   // Put smi-tagged index into scratch register.
   __ mov(scratch_, index_);
@@ -4465,31 +4888,46 @@
   __ j(zero, &flat_string);
 
   // Handle non-flat strings.
-  __ test(result_, Immediate(kIsConsStringMask));
-  __ j(zero, &call_runtime_);
+  __ and_(result_, kStringRepresentationMask);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmp(result_, kExternalStringTag);
+  __ j(greater, &sliced_string, Label::kNear);
+  __ j(equal, &call_runtime_);
 
   // ConsString.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  Label assure_seq_string;
   __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
          Immediate(masm->isolate()->factory()->empty_string()));
   __ j(not_equal, &call_runtime_);
   // Get the first of the two strings and load its instance type.
   __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string, Label::kNear);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
+  __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
+
+  // Assure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ test(result_, Immediate(kStringRepresentationMask));
   __ j(not_zero, &call_runtime_);
+  __ jmp(&flat_string, Label::kNear);
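A minimal sketch of the SlicedString unpacking step above: the character index
is shifted by the slice offset and the load continues on the parent string.
The struct is illustrative; only the offset-then-parent indexing mirrors the
stub.

    struct SliceView {
      const char* parent_chars;  // characters of the underlying string
      int offset;                // SlicedString offset field equivalent
    };

    char SliceCharAt(const SliceView& slice, int index) {
      return slice.parent_chars[slice.offset + index];  // add offset, read parent
    }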
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
-  STATIC_ASSERT(kAsciiStringTag != 0);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(result_, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
+  __ j(not_zero, &ascii_string, Label::kNear);
 
   // 2-byte string.
   // Load the 2-byte character code into the result register.
@@ -4497,7 +4935,7 @@
   __ movzx_w(result_, FieldOperand(object_,
                                    scratch_, times_1,  // Scratch is smi-tagged.
                                    SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
+  __ jmp(&got_char_code, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
@@ -4522,7 +4960,7 @@
   __ CheckMap(index_,
               masm->isolate()->factory()->heap_number_map(),
               index_not_number_,
-              true);
+              DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
   __ push(index_);
@@ -4547,8 +4985,7 @@
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(scratch_, Immediate(kSmiTagMask));
-  __ j(not_zero, index_out_of_range_);
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4581,7 +5018,7 @@
   __ test(code_,
           Immediate(kSmiTagMask |
                     ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
-  __ j(not_zero, &slow_case_, not_taken);
+  __ j(not_zero, &slow_case_);
 
   Factory* factory = masm->isolate()->factory();
   __ Set(result_, Immediate(factory->single_character_string_cache()));
@@ -4593,7 +5030,7 @@
                                code_, times_half_pointer_size,
                                FixedArray::kHeaderSize));
   __ cmp(result_, factory->undefined_value());
-  __ j(equal, &slow_case_, not_taken);
+  __ j(equal, &slow_case_);
   __ bind(&exit_);
 }
 
@@ -4642,14 +5079,12 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &string_add_runtime);
+    __ JumpIfSmi(eax, &string_add_runtime);
     __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
     __ j(above_equal, &string_add_runtime);
 
     // First argument is a string, test second.
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &string_add_runtime);
+    __ JumpIfSmi(edx, &string_add_runtime);
     __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
     __ j(above_equal, &string_add_runtime);
   } else {
@@ -4672,11 +5107,11 @@
   // eax: first string
   // edx: second string
   // Check if either of the strings are empty. In that case return the other.
-  NearLabel second_not_zero_length, both_not_zero_length;
+  Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
   __ test(ecx, Operand(ecx));
-  __ j(not_zero, &second_not_zero_length);
+  __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in eax.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -4685,7 +5120,7 @@
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
   __ test(ebx, Operand(ebx));
-  __ j(not_zero, &both_not_zero_length);
+  __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in edx.
   __ mov(eax, edx);
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -4761,8 +5196,9 @@
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ and_(ecx, Operand(edi));
-  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
-  __ test(ecx, Immediate(kAsciiStringTag));
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ test(ecx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
   // Allocate an ascii cons string.
@@ -4793,7 +5229,7 @@
   __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
-  __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
+  __ AllocateTwoByteConsString(ecx, edi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
 
   // Handle creating a flat result. First check that both strings are not
@@ -4812,17 +5248,20 @@
   __ and_(ecx, kStringRepresentationMask);
   __ cmp(ecx, kExternalStringTag);
   __ j(equal, &string_add_runtime);
+  // We cannot encounter sliced strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
   // Now check if both strings are ascii strings.
   // eax: first string
   // ebx: length of resulting flat string as a smi
   // edx: second string
   Label non_ascii_string_add_flat_result;
-  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
   __ j(zero, &non_ascii_string_add_flat_result);
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
   __ j(zero, &string_add_runtime);
 
   // Both strings are ascii strings.  As they are short they are both flat.
@@ -4862,7 +5301,7 @@
   // edx: second string
   __ bind(&non_ascii_string_add_flat_result);
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
   __ j(not_zero, &string_add_runtime);
   // Both strings are two byte strings. As they are short they are both
   // flat.
@@ -4917,8 +5356,7 @@
                                             Label* slow) {
   // First check if the argument is already a string.
   Label not_string, done;
-  __ test(arg, Immediate(kSmiTagMask));
-  __ j(zero, &not_string);
+  __ JumpIfSmi(arg, &not_string);
   __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
   __ j(below, &done);
 
@@ -4939,8 +5377,7 @@
 
   // Check if the argument is a safe string wrapper.
   __ bind(&not_cached);
-  __ test(arg, Immediate(kSmiTagMask));
-  __ j(zero, slow);
+  __ JumpIfSmi(arg, slow);
   __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
   __ j(not_equal, slow);
   __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
@@ -4959,7 +5396,7 @@
                                           Register count,
                                           Register scratch,
                                           bool ascii) {
-  NearLabel loop;
+  Label loop;
   __ bind(&loop);
   // This loop just copies one character at a time, as it is only used for very
   // short strings.
@@ -5006,9 +5443,9 @@
   }
 
   // Don't enter the rep movs if there are less than 4 bytes to copy.
-  NearLabel last_bytes;
+  Label last_bytes;
   __ test(count, Immediate(~3));
-  __ j(zero, &last_bytes);
+  __ j(zero, &last_bytes, Label::kNear);
 
   // Copy from edi to esi using rep movs instruction.
   __ mov(scratch, count);
@@ -5026,7 +5463,7 @@
   __ j(zero, &done);
 
   // Copy remaining characters.
-  NearLabel loop;
+  Label loop;
   __ bind(&loop);
   __ mov_b(scratch, Operand(src, 0));
   __ mov_b(Operand(dest, 0), scratch);
@@ -5052,11 +5489,11 @@
 
   // Make sure that both characters are not digits as such strings have a
   // different hash algorithm. Don't try to look for these in the symbol table.
-  NearLabel not_array_index;
+  Label not_array_index;
   __ mov(scratch, c1);
   __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
   __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
-  __ j(above, &not_array_index);
+  __ j(above, &not_array_index, Label::kNear);
   __ mov(scratch, c2);
   __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
   __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
@@ -5102,6 +5539,7 @@
   static const int kProbes = 4;
   Label found_in_symbol_table;
   Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+  Register candidate = scratch;  // Scratch register contains candidate.
   for (int i = 0; i < kProbes; i++) {
     // Calculate entry in symbol table.
     __ mov(scratch, hash);
@@ -5111,7 +5549,6 @@
     __ and_(scratch, Operand(mask));
 
     // Load the entry from the symbol table.
-    Register candidate = scratch;  // Scratch register contains candidate.
     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
     __ mov(candidate,
            FieldOperand(symbol_table,
@@ -5156,7 +5593,7 @@
   __ jmp(not_found);
 
   // Scratch register contains result when we fall through to here.
-  Register result = scratch;
+  Register result = candidate;
   __ bind(&found_in_symbol_table);
   __ pop(mask);  // Pop saved mask from the stack.
   if (!result.is(eax)) {
@@ -5169,13 +5606,28 @@
                                     Register hash,
                                     Register character,
                                     Register scratch) {
-  // hash = character + (character << 10);
-  __ mov(hash, character);
-  __ shl(hash, 10);
-  __ add(hash, Operand(character));
+  // hash = (seed + character) + ((seed + character) << 10);
+  if (Serializer::enabled()) {
+    ExternalReference roots_address =
+        ExternalReference::roots_address(masm->isolate());
+    __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+    __ mov(scratch, Operand::StaticArray(scratch,
+                                         times_pointer_size,
+                                         roots_address));
+    __ SmiUntag(scratch);
+    __ add(scratch, Operand(character));
+    __ mov(hash, scratch);
+    __ shl(scratch, 10);
+    __ add(hash, Operand(scratch));
+  } else {
+    int32_t seed = masm->isolate()->heap()->HashSeed();
+    __ lea(scratch, Operand(character, seed));
+    __ shl(scratch, 10);
+    __ lea(hash, Operand(scratch, character, times_1, seed));
+  }
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
-  __ sar(scratch, 6);
+  __ shr(scratch, 6);
   __ xor_(hash, Operand(scratch));
 }
 
@@ -5192,7 +5644,7 @@
   __ add(hash, Operand(scratch));
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
-  __ sar(scratch, 6);
+  __ shr(scratch, 6);
   __ xor_(hash, Operand(scratch));
 }
 
@@ -5206,18 +5658,19 @@
   __ add(hash, Operand(scratch));
   // hash ^= hash >> 11;
   __ mov(scratch, hash);
-  __ sar(scratch, 11);
+  __ shr(scratch, 11);
   __ xor_(hash, Operand(scratch));
   // hash += hash << 15;
   __ mov(scratch, hash);
   __ shl(scratch, 15);
   __ add(hash, Operand(scratch));
 
+  __ and_(hash, String::kHashBitMask);
+
   // if (hash == 0) hash = 27;
-  NearLabel hash_not_zero;
-  __ test(hash, Operand(hash));
-  __ j(not_zero, &hash_not_zero);
-  __ mov(hash, Immediate(27));
+  Label hash_not_zero;
+  __ j(not_zero, &hash_not_zero, Label::kNear);
+  __ mov(hash, Immediate(StringHasher::kZeroHash));
   __ bind(&hash_not_zero);
 }
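Taken together, the three hash helpers above implement a seeded, Jenkins-style
one-at-a-time hash. The following is a hedged C++ reconstruction based on the
comments in these hunks; steps outside the visible context are assumed to
follow the same pattern, kHashBitMask stands in for String::kHashBitMask, and
27 is StringHasher::kZeroHash per the comment above. Unsigned arithmetic gives
the logical right shifts (shr) used here.

    #include <cstdint>

    uint32_t HashInit(uint32_t seed, uint32_t c) {
      uint32_t hash = (seed + c) + ((seed + c) << 10);
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
      hash += c;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashGetHash(uint32_t hash, uint32_t kHashBitMask) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= kHashBitMask;
      return hash == 0 ? 27 : hash;  // StringHasher::kZeroHash
    }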
 
@@ -5234,8 +5687,7 @@
   // Make sure first argument is a string.
   __ mov(eax, Operand(esp, 3 * kPointerSize));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &runtime);
+  __ JumpIfSmi(eax, &runtime);
   Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
   __ j(NegateCondition(is_string), &runtime);
 
@@ -5245,11 +5697,9 @@
   // Calculate length of sub string using the smi values.
   Label result_longer_than_two;
   __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &runtime);
+  __ JumpIfNotSmi(ecx, &runtime);
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(not_zero, &runtime);
+  __ JumpIfNotSmi(edx, &runtime);
   __ sub(ecx, Operand(edx));
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
   Label return_eax;
@@ -5289,7 +5739,84 @@
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
   __ Set(ecx, Immediate(2));
 
-  __ bind(&result_longer_than_two);
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    // If coming from the make_two_character_string path, the string
+    // is too short to be sliced anyway.
+    STATIC_ASSERT(2 < SlicedString::kMinLength);
+    __ jmp(&copy_routine);
+    __ bind(&result_longer_than_two);
+
+    // eax: string
+    // ebx: instance type
+    // ecx: sub string length
+    // edx: from index (smi)
+    Label allocate_slice, sliced_string, seq_string;
+    __ cmp(ecx, SlicedString::kMinLength);
+    // Short slice.  Copy instead of slicing.
+    __ j(less, &copy_routine);
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ test(ebx, Immediate(kStringRepresentationMask));
+    __ j(zero, &seq_string, Label::kNear);
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ test(ebx, Immediate(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ j(zero, &runtime);
+
+    Factory* factory = masm->isolate()->factory();
+    __ test(ebx, Immediate(kSlicedNotConsMask));
+    __ j(not_zero, &sliced_string, Label::kNear);
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+           factory->empty_string());
+    __ j(not_equal, &runtime);
+    __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+    __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ mov(edi, eax);
+
+    __ bind(&allocate_slice);
+    // edi: underlying subject string
+    // ebx: instance type of original subject string
+    // edx: offset
+    // ecx: length
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ test(ebx, Immediate(kStringEncodingMask));
+    __ j(zero, &two_byte_slice, Label::kNear);
+    __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
+    __ jmp(&set_slice_header, Label::kNear);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
+    __ bind(&set_slice_header);
+    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
+    __ SmiTag(ecx);
+    __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
+    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
+    __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
+           Immediate(String::kEmptyHashField));
+    __ jmp(&return_eax);
+
+    __ bind(&copy_routine);
+  } else {
+    __ bind(&result_longer_than_two);
+  }
+
   // eax: string
   // ebx: instance type
   // ecx: result string length
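A hedged sketch of the slicing decision above: results shorter than
SlicedString::kMinLength are copied, longer ones become a SlicedString that
records parent, offset and length instead of copying characters. The struct
and names are illustrative; 'from' already includes the input's own offset for
a sliced input, matching the code above.

    struct NewSlice { const void* parent; int offset; int length; };

    bool TryMakeSlice(const void* underlying,  // seq string, cons first part, or slice parent
                      int from, int length, int min_slice_length,
                      NewSlice* out) {
      if (length < min_slice_length) return false;  // short: fall back to copying
      out->parent = underlying;
      out->offset = from;
      out->length = length;
      return true;
    }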
@@ -5371,28 +5898,60 @@
 }
 
 
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ mov(length, FieldOperand(left, String::kLengthOffset));
+  __ cmp(length, FieldOperand(right, String::kLengthOffset));
+  __ j(equal, &check_zero_length, Label::kNear);
+  __ bind(&strings_not_equal);
+  __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+  __ ret(0);
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ test(length, Operand(length));
+  __ j(not_zero, &compare_chars, Label::kNear);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ ret(0);
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+                                &strings_not_equal, Label::kNear);
+
+  // Characters are equal.
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ ret(0);
+}
+
+
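The equals-only fast path above reduces to: compare lengths, shortcut the
zero-length case, then compare characters. A minimal sketch in which memcmp
stands in for the inline character loop:

    #include <cstring>

    bool FlatAsciiStringEquals(const char* left, int left_length,
                               const char* right, int right_length) {
      if (left_length != right_length) return false;  // strings_not_equal
      if (left_length == 0) return true;              // check_zero_length
      return std::memcmp(left, right, left_length) == 0;  // compare_chars
    }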
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                         Register left,
                                                         Register right,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3) {
-  Label result_not_equal;
-  Label result_greater;
-  Label compare_lengths;
-
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_compare_native(), 1);
 
   // Find minimum length.
-  NearLabel left_shorter;
+  Label left_shorter;
   __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
   __ mov(scratch3, scratch1);
   __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
 
   Register length_delta = scratch3;
 
-  __ j(less_equal, &left_shorter);
+  __ j(less_equal, &left_shorter, Label::kNear);
   // Right string is shorter. Change scratch1 to be length of right string.
   __ sub(scratch1, Operand(length_delta));
   __ bind(&left_shorter);
@@ -5400,41 +5959,19 @@
   Register min_length = scratch1;
 
   // If either length is zero, just compare lengths.
+  Label compare_lengths;
   __ test(min_length, Operand(min_length));
-  __ j(zero, &compare_lengths);
+  __ j(zero, &compare_lengths, Label::kNear);
 
-  // Change index to run from -min_length to -1 by adding min_length
-  // to string start. This means that loop ends when index reaches zero,
-  // which doesn't need an additional compare.
-  __ SmiUntag(min_length);
-  __ lea(left,
-         FieldOperand(left,
-                      min_length, times_1,
-                      SeqAsciiString::kHeaderSize));
-  __ lea(right,
-         FieldOperand(right,
-                      min_length, times_1,
-                      SeqAsciiString::kHeaderSize));
-  __ neg(min_length);
-
-  Register index = min_length;  // index = -min_length;
-
-  {
-    // Compare loop.
-    NearLabel loop;
-    __ bind(&loop);
-    // Compare characters.
-    __ mov_b(scratch2, Operand(left, index, times_1, 0));
-    __ cmpb(scratch2, Operand(right, index, times_1, 0));
-    __ j(not_equal, &result_not_equal);
-    __ add(Operand(index), Immediate(1));
-    __ j(not_zero, &loop);
-  }
+  // Compare characters.
+  Label result_not_equal;
+  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                &result_not_equal, Label::kNear);
 
   // Compare lengths -  strings up to min-length are equal.
   __ bind(&compare_lengths);
   __ test(length_delta, Operand(length_delta));
-  __ j(not_zero, &result_not_equal);
+  __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
   STATIC_ASSERT(EQUAL == 0);
@@ -5442,8 +5979,9 @@
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(0);
 
+  Label result_greater;
   __ bind(&result_not_equal);
-  __ j(greater, &result_greater);
+  __ j(greater, &result_greater, Label::kNear);
 
   // Result is LESS.
   __ Set(eax, Immediate(Smi::FromInt(LESS)));
@@ -5456,6 +5994,36 @@
 }
 
 
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch,
+    Label* chars_not_equal,
+    Label::Distance chars_not_equal_near) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ lea(left,
+         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(right,
+         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+  __ neg(length);
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ mov_b(scratch, Operand(left, index, times_1, 0));
+  __ cmpb(scratch, Operand(right, index, times_1, 0));
+  __ j(not_equal, chars_not_equal, chars_not_equal_near);
+  __ add(Operand(index), Immediate(1));
+  __ j(not_zero, &loop);
+}
+
+
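A hedged C++ rendering of the negative-index loop above: the index runs from
-length up to 0, so reaching zero ends the loop without a separate bounds
check. It assumes length > 0, which the callers guarantee; the return value is
a sketch of the outcome rather than the stub's flag-based signalling.

    int CompareAsciiChars(const unsigned char* left,
                          const unsigned char* right,
                          int length) {
      left += length;       // point one past the compared range
      right += length;
      int index = -length;  // index = -length
      do {
        if (left[index] != right[index]) {
          return left[index] < right[index] ? -1 : 1;  // chars_not_equal
        }
        index++;
      } while (index != 0);
      return 0;  // all characters equal
    }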
 void StringCompareStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -5467,9 +6035,9 @@
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
 
-  NearLabel not_same;
+  Label not_same;
   __ cmp(edx, Operand(eax));
-  __ j(not_equal, &not_same);
+  __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -5497,19 +6065,18 @@
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
-  NearLabel miss;
+  Label miss;
   __ mov(ecx, Operand(edx));
   __ or_(ecx, Operand(eax));
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
+  __ JumpIfNotSmi(ecx, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
     __ sub(eax, Operand(edx));
   } else {
-    NearLabel done;
+    Label done;
     __ sub(edx, Operand(eax));
-    __ j(no_overflow, &done);
+    __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ not_(edx);
     __ bind(&done);
@@ -5525,18 +6092,17 @@
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
-  NearLabel generic_stub;
-  NearLabel unordered;
-  NearLabel miss;
+  Label generic_stub;
+  Label unordered;
+  Label miss;
   __ mov(ecx, Operand(edx));
   __ and_(ecx, Operand(eax));
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &generic_stub, not_taken);
+  __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
 
   __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or SSE2 or CMOV is unsupported.
@@ -5552,7 +6118,7 @@
     __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
-    __ j(parity_even, &unordered, not_taken);
+    __ j(parity_even, &unordered, Label::kNear);
 
     // Return a result of -1, 0, or 1, based on EFLAGS.
     // Performing mov, because xor would destroy the flag register.
@@ -5575,18 +6141,138 @@
 }
 
 
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  ASSERT(GetCondition() == equal);
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+  Register tmp1 = ecx;
+  Register tmp2 = ebx;
+
+  // Check that both operands are heap objects.
+  Label miss;
+  __ mov(tmp1, Operand(left));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ and_(tmp1, Operand(right));
+  __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+  // Check that both operands are symbols.
+  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, Operand(tmp2));
+  __ test(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &miss, Label::kNear);
+
+  // Symbols are compared by identity.
+  Label done;
+  __ cmp(left, Operand(right));
+  // Make sure eax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(eax));
+  __ j(not_equal, &done, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ bind(&done);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
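A sketch of the result convention GenerateSymbols relies on: Smi::FromInt(EQUAL)
is 0, and a heap-object pointer left in eax is always non-zero, so only the
equal case needs an explicit store. The function below is illustrative.

    #include <cstdint>

    intptr_t SymbolCompareResult(const void* left, const void* right) {
      // 'right' is a heap object and therefore never null/zero.
      return left == right ? 0  // EQUAL
                           : reinterpret_cast<intptr_t>(right);  // any non-zero value
    }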
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(GetCondition() == equal);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+  Register tmp1 = ecx;
+  Register tmp2 = ebx;
+  Register tmp3 = edi;
+
+  // Check that both operands are heap objects.
+  __ mov(tmp1, Operand(left));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ and_(tmp1, Operand(right));
+  __ JumpIfSmi(tmp1, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  __ mov(tmp3, tmp1);
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ or_(tmp3, Operand(tmp2));
+  __ test(tmp3, Immediate(kIsNotStringMask));
+  __ j(not_zero, &miss);
+
+  // Fast check for identical strings.
+  Label not_same;
+  __ cmp(left, Operand(right));
+  __ j(not_equal, &not_same, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ ret(0);
+
+  // Handle not identical strings.
+  __ bind(&not_same);
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  Label do_compare;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, Operand(tmp2));
+  __ test(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &do_compare, Label::kNear);
+  // Make sure eax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(eax));
+  __ ret(0);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ bind(&do_compare);
+  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ pop(tmp1);  // Return address.
+  __ push(left);
+  __ push(right);
+  __ push(tmp1);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
-  NearLabel miss;
+  Label miss;
   __ mov(ecx, Operand(edx));
   __ and_(ecx, Operand(eax));
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(ecx, &miss, Label::kNear);
 
   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
   __ sub(eax, Operand(edx));
@@ -5628,6 +6314,218 @@
 }
 
 
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register properties,
+    String* name,
+    Register r0) {
+  ASSERT(name->IsSymbol());
+
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+  // not equal to the name and kProbes-th slot is not used (its name is the
+  // undefined value), it guarantees the hash table doesn't contain the
+  // property. It's true even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = r0;
+    // Capacity is smi 2^n.
+    __ mov(index, FieldOperand(properties, kCapacityOffset));
+    __ dec(index);
+    __ and_(Operand(index),
+           Immediate(Smi::FromInt(name->Hash() +
+                                   StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
+    Register entity_name = r0;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+                                kElementsStartOffset - kHeapObjectTag));
+    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
+    __ j(equal, done);
+
+    // Stop if we found the property.
+    __ cmp(entity_name, Handle<String>(name));
+    __ j(equal, miss);
+
+    // Check if the entry name is not a symbol.
+    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+              kIsSymbolMask);
+    __ j(zero, miss);
+  }
+
+  StringDictionaryLookupStub stub(properties,
+                                  r0,
+                                  r0,
+                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+  __ push(Immediate(Handle<Object>(name)));
+  __ push(Immediate(name->Hash()));
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ test(r0, Operand(r0));
+  __ j(not_zero, miss);
+  __ jmp(done);
+  return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found, leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label
+// otherwise.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register r0,
+                                                        Register r1) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  __ mov(r1, FieldOperand(elements, kCapacityOffset));
+  __ shr(r1, kSmiTagSize);  // convert smi to int
+  __ dec(r1);
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+    __ shr(r0, String::kHashShift);
+    if (i > 0) {
+      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r0, Operand(r1));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
+
+    // Check if the key is identical to the name.
+    __ cmp(name, Operand(elements,
+                         r0,
+                         times_4,
+                         kElementsStartOffset - kHeapObjectTag));
+    __ j(equal, done);
+  }
+
+  StringDictionaryLookupStub stub(elements,
+                                  r1,
+                                  r0,
+                                  POSITIVE_LOOKUP);
+  __ push(name);
+  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+  __ shr(r0, String::kHashShift);
+  __ push(r0);
+  __ CallStub(&stub);
+
+  __ test(r1, Operand(r1));
+  __ j(zero, miss);
+  __ jmp(done);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Stack frame on entry:
+  //  esp[0 * kPointerSize]: return address.
+  //  esp[1 * kPointerSize]: key's hash.
+  //  esp[2 * kPointerSize]: key.
+  // Registers:
+  //  dictionary_: StringDictionary to probe.
+  //  result_: used as scratch.
+  //  index_: will hold an index of entry if lookup is successful.
+  //          might alias with result_.
+  // Returns:
+  //  result_ is zero if lookup failed, non-zero otherwise.
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  Register scratch = result_;
+
+  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+  __ dec(scratch);
+  __ SmiUntag(scratch);
+  __ push(scratch);
+
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+  // not equal to the name and kProbes-th slot is not used (its name is the
+  // undefined value), it guarantees the hash table doesn't contain the
+  // property. It's true even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ mov(scratch, Operand(esp, 2 * kPointerSize));
+    if (i > 0) {
+      __ add(Operand(scratch),
+             Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(scratch, Operand(esp, 0));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
+
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ mov(scratch, Operand(dictionary_,
+                            index_,
+                            times_pointer_size,
+                            kElementsStartOffset - kHeapObjectTag));
+    __ cmp(scratch, masm->isolate()->factory()->undefined_value());
+    __ j(equal, &not_in_dictionary);
+
+    // Stop if we found the property.
+    __ cmp(scratch, Operand(esp, 3 * kPointerSize));
+    __ j(equal, &in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // If we hit a non-symbol key during negative lookup
+      // we have to bail out, as this key might be equal to the
+      // key we are looking for.
+
+      // Check if the entry name is not a symbol.
+      __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+      __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
+                kIsSymbolMask);
+      __ j(zero, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ mov(result_, Immediate(0));
+    __ Drop(1);
+    __ ret(2 * kPointerSize);
+  }
+
+  __ bind(&in_dictionary);
+  __ mov(result_, Immediate(1));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&not_in_dictionary);
+  __ mov(result_, Immediate(0));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
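
The probing scheme that GenerateNegativeLookup, GeneratePositiveLookup and
StringDictionaryLookupStub::Generate spell out in assembly above is quadratic probing over a
power-of-two table: masked index (hash + i + i*i) & mask, entries three words wide, a few
probes inlined and the rest handled by the stub. A minimal C++ sketch of the same lookup,
assuming a flat array layout; Entry, kNotFound and the empty-string sentinel are illustrative
stand-ins, not the real StringDictionary API:

    #include <cstdint>
    #include <string>
    #include <vector>

    // One dictionary entry: key, value, details (kEntrySize == 3 in the stub).
    struct Entry {
      std::string key;   // empty key plays the role of the undefined sentinel
      int value = 0;
      int details = 0;
    };

    static const int kNotFound = -1;

    // Probe the table the way the stub does: index_i = (hash + i + i*i) & mask,
    // stopping on an empty slot (name not present) or a matching key.
    int Lookup(const std::vector<Entry>& table, const std::string& name,
               uint32_t hash, int max_probes) {
      uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity is 2^n
      for (int i = 0; i < max_probes; i++) {
        uint32_t index = (hash + i + i * i) & mask;
        const Entry& entry = table[index];
        if (entry.key.empty()) return kNotFound;                 // undefined slot
        if (entry.key == name) return static_cast<int>(index);   // found
      }
      // Probing failure; the stub treats this as found or not found depending on
      // whether it is doing a NEGATIVE_LOOKUP or a POSITIVE_LOOKUP.
      return kNotFound;
    }
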
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 80a75cd..fa255da 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -60,40 +60,100 @@
 };
 
 
-class ToBooleanStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  ToBooleanStub() { }
-
-  void Generate(MacroAssembler* masm);
+  UnaryOpStub(Token::Value op,
+              UnaryOverwriteMode mode,
+              UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
+      : op_(op),
+        mode_(mode),
+        operand_type_(operand_type) {
+  }
 
  private:
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return 0; }
+  Token::Value op_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
+  virtual void PrintName(StringStream* stream);
+
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
+  }
+
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switches more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm,
+                          Label* non_smi,
+                          Label* undo,
+                          Label* slow,
+                          Label::Distance non_smi_near = Label::kFar,
+                          Label::Distance undo_near = Label::kFar,
+                          Label::Distance slow_near = Label::kFar);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm,
+                             Label* non_smi,
+                             Label::Distance non_smi_near = Label::kFar);
+  void GenerateSmiCodeUndo(MacroAssembler* masm);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
 };
 
 
-class TypeRecordingBinaryOpStub: public CodeStub {
+class BinaryOpStub: public CodeStub {
  public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
-        name_(NULL) {
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_sse3_(SSE3Bits::decode(key)),
         operands_type_(operands_type),
-        result_type_(result_type),
-        name_(NULL) { }
+        result_type_(result_type) { }
 
  private:
   enum SmiCodeGenerateHeapNumberResults {
@@ -106,32 +166,19 @@
   bool use_sse3_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
-           "(mode %d, runtime_type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
-  }
-#endif
+  virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class SSE3Bits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -153,6 +200,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
 
@@ -161,15 +209,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -283,11 +331,9 @@
 
 class StringCompareStub: public CodeStub {
  public:
-  explicit StringCompareStub() {
-  }
+  StringCompareStub() { }
 
-  // Compare two flat ascii strings and returns result in eax after popping two
-  // arguments from the stack.
+  // Compares two flat ASCII strings and returns result in eax.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -295,11 +341,27 @@
                                               Register scratch2,
                                               Register scratch3);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns result
+  // in eax.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(
+      MacroAssembler* masm,
+      Register left,
+      Register right,
+      Register length,
+      Register scratch,
+      Label* chars_not_equal,
+      Label::Distance chars_not_equal_near = Label::kFar);
 };
 
 
@@ -325,16 +387,70 @@
   int MinorKey() { return 0; }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("NumberToStringStub\n");
-  }
-#endif
 };
 
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  StringDictionaryLookupStub(Register dictionary,
+                             Register result,
+                             Register index,
+                             LookupMode mode)
+      : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register properties,
+      String* name,
+      Register r0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return DictionaryBits::encode(dictionary_.code()) |
+        ResultBits::encode(result_.code()) |
+        IndexBits::encode(index_.code()) |
+        LookupModeBits::encode(mode_);
+  }
+
+  class DictionaryBits: public BitField<int, 0, 3> {};
+  class ResultBits: public BitField<int, 3, 3> {};
+  class IndexBits: public BitField<int, 6, 3> {};
+  class LookupModeBits: public BitField<LookupMode, 9, 1> {};
+
+  Register dictionary_;
+  Register result_;
+  Register index_;
+  LookupMode mode_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
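
The renamed BinaryOpStub keeps the 16-bit minor-key layout RRRTTTSOOOOOOOMM noted in the
header above: overwrite mode in bits 0-1, token in bits 2-8, the SSE3 flag in bit 9, and the
operand/result type infos in bits 10-12 and 13-15. A standalone sketch of that packing with
explicit shifts and masks; the plain ints stand in for the real enum types, and the struct
and function names are made up for illustration:

    #include <cstdint>

    struct BinaryOpKey {
      int mode;          // OverwriteMode, 2 bits
      int op;            // Token::Value, 7 bits
      bool use_sse3;     // 1 bit
      int operand_type;  // BinaryOpIC::TypeInfo, 3 bits
      int result_type;   // BinaryOpIC::TypeInfo, 3 bits
    };

    inline uint16_t EncodeMinorKey(const BinaryOpKey& k) {
      return static_cast<uint16_t>(((k.mode & 0x3) << 0) |
                                   ((k.op & 0x7F) << 2) |
                                   ((k.use_sse3 ? 1 : 0) << 9) |
                                   ((k.operand_type & 0x7) << 10) |
                                   ((k.result_type & 0x7) << 13));
    }

    inline BinaryOpKey DecodeMinorKey(uint16_t key) {
      BinaryOpKey k;
      k.mode = key & 0x3;
      k.op = (key >> 2) & 0x7F;
      k.use_sse3 = ((key >> 9) & 0x1) != 0;
      k.operand_type = (key >> 10) & 0x7;
      k.result_type = (key >> 13) & 0x7;
      return k;
    }
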
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 572c36c..3a657bd 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -255,6 +255,7 @@
   ASSERT(desc.reloc_size == 0);
 
   CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
 }
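
The one-line codegen-ia32.cc change above write-protects the freshly assembled MemCopy stub
right after its instruction-cache flush: the buffer is writable only while code is being
emitted into it, then becomes read+execute. A POSIX-only sketch of that general pattern
(an illustration of the idea, not how OS::ProtectCode is implemented):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstring>

    // Returns a read+execute mapping holding a copy of `code`, or nullptr.
    void* MakeExecutableCopy(const void* code, size_t size) {
      void* buffer = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (buffer == MAP_FAILED) return nullptr;
      std::memcpy(buffer, code, size);  // emit while the page is writable
      // Drop write permission before anything is allowed to execute it.
      if (mprotect(buffer, size, PROT_READ | PROT_EXEC) != 0) {
        munmap(buffer, size);
        return nullptr;
      }
      return buffer;
    }
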
 
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 8f090b1..c85fa83 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -53,9 +53,7 @@
   // Print the code after compiling it.
   static void PrintCode(Handle<Code> code, CompilationInfo* info);
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
-#endif
 
   static bool RecordPositions(MacroAssembler* masm,
                               int pos,
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 615dbfe..57e66df 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,7 +67,8 @@
   // solution is to run valgrind with --smc-check=all, but this comes at a big
   // performance cost.  We can notify valgrind to invalidate its cache.
 #ifdef VALGRIND_DISCARD_TRANSLATIONS
-  VALGRIND_DISCARD_TRANSLATIONS(start, size);
+  unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+  USE(res);
 #endif
 }
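
The cpu-ia32.cc hunk above assigns the VALGRIND_DISCARD_TRANSLATIONS result to a local and
feeds it to USE() purely to silence an unused-result warning. A minimal stand-in for that
helper (only illustrative; the real one lives in V8's common headers):

    // No-op that "consumes" a value so the compiler does not warn about a
    // result that is computed but never otherwise used.
    template <typename T>
    static inline void USE(T) { }

    // Usage, mirroring the diff:
    //   unsigned res = SOME_MACRO_THAT_RETURNS_A_VALUE(start, size);
    //   USE(res);
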
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 72fdac8..080ad64 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -37,7 +37,7 @@
 namespace v8 {
 namespace internal {
 
-int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 10;
 
 
 int Deoptimizer::patch_size() {
@@ -45,16 +45,6 @@
 }
 
 
-static void ZapCodeRange(Address start, Address end) {
-#ifdef DEBUG
-  ASSERT(start <= end);
-  int size = end - start;
-  CodePatcher destroyer(start, size);
-  while (size-- > 0) destroyer.masm()->int3();
-#endif
-}
-
-
 void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
   Isolate* isolate = code->GetIsolate();
   HandleScope scope(isolate);
@@ -62,30 +52,23 @@
   // Compute the size of relocation information needed for the code
   // patching in Deoptimizer::DeoptimizeFunction.
   int min_reloc_size = 0;
-  Address prev_reloc_address = code->instruction_start();
-  Address code_start_address = code->instruction_start();
-  SafepointTable table(*code);
-  for (unsigned i = 0; i < table.length(); ++i) {
-    Address curr_reloc_address = code_start_address + table.GetPcOffset(i);
-    ASSERT_GE(curr_reloc_address, prev_reloc_address);
-    SafepointEntry safepoint_entry = table.GetEntry(i);
-    int deoptimization_index = safepoint_entry.deoptimization_index();
-    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      // The gap code is needed to get to the state expected at the
-      // bailout and we need to skip the call opcode to get to the
-      // address that needs reloc.
-      curr_reloc_address += safepoint_entry.gap_code_size() + 1;
-      int pc_delta = curr_reloc_address - prev_reloc_address;
-      // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
-      // if encodable with small pc delta encoding and up to 6 bytes
-      // otherwise.
-      if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
-        min_reloc_size += 2;
-      } else {
-        min_reloc_size += 6;
-      }
-      prev_reloc_address = curr_reloc_address;
+  int prev_pc_offset = 0;
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    int pc_offset = deopt_data->Pc(i)->value();
+    if (pc_offset == -1) continue;
+    ASSERT_GE(pc_offset, prev_pc_offset);
+    int pc_delta = pc_offset - prev_pc_offset;
+    // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
+    // if encodable with small pc delta encoding and up to 6 bytes
+    // otherwise.
+    if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
+      min_reloc_size += 2;
+    } else {
+      min_reloc_size += 6;
     }
+    prev_pc_offset = pc_offset;
   }
 
   // If the relocation information is not big enough we create a new
@@ -150,40 +133,40 @@
   Address reloc_end_address = reloc_info->address() + reloc_info->Size();
   RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
 
-  // For each return after a safepoint insert a call to the corresponding
-  // deoptimization entry.  Since the call is a relative encoding, write new
+  // For each LLazyBailout instruction insert a call to the corresponding
+  // deoptimization entry.
+
+  // Since the call is a relative encoding, write new
   // reloc info.  We do not need any of the existing reloc info because the
   // existing code will not be used again (we zap it in debug builds).
-  SafepointTable table(code);
-  Address prev_address = code_start_address;
-  for (unsigned i = 0; i < table.length(); ++i) {
-    Address curr_address = code_start_address + table.GetPcOffset(i);
-    ASSERT_GE(curr_address, prev_address);
-    ZapCodeRange(prev_address, curr_address);
-
-    SafepointEntry safepoint_entry = table.GetEntry(i);
-    int deoptimization_index = safepoint_entry.deoptimization_index();
-    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      // The gap code is needed to get to the state expected at the bailout.
-      curr_address += safepoint_entry.gap_code_size();
-
-      CodePatcher patcher(curr_address, patch_size());
-      Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
-      patcher.masm()->call(deopt_entry, RelocInfo::NONE);
-
-      // We use RUNTIME_ENTRY for deoptimization bailouts.
-      RelocInfo rinfo(curr_address + 1,  // 1 after the call opcode.
-                      RelocInfo::RUNTIME_ENTRY,
-                      reinterpret_cast<intptr_t>(deopt_entry));
-      reloc_info_writer.Write(&rinfo);
-      ASSERT_GE(reloc_info_writer.pos(),
-                reloc_info->address() + ByteArray::kHeaderSize);
-      curr_address += patch_size();
-    }
-    prev_address = curr_address;
+  //
+  // Emit call to lazy deoptimization at all lazy deopt points.
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+  Address prev_call_address = NULL;
+#endif
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    // Patch lazy deoptimization entry.
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    CodePatcher patcher(call_address, patch_size());
+    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+    patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+    // We use RUNTIME_ENTRY for deoptimization bailouts.
+    RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
+                    RelocInfo::RUNTIME_ENTRY,
+                    reinterpret_cast<intptr_t>(deopt_entry));
+    reloc_info_writer.Write(&rinfo);
+    ASSERT_GE(reloc_info_writer.pos(),
+              reloc_info->address() + ByteArray::kHeaderSize);
+    ASSERT(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
   }
-  ZapCodeRange(prev_address,
-               code_start_address + code->safepoint_table_offset());
 
   // Move the relocation info to the beginning of the byte array.
   int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
@@ -212,11 +195,6 @@
     PrintF("[forced deoptimization: ");
     function->PrintName();
     PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
-    if (FLAG_print_code) {
-      code->PrintLn();
-    }
-#endif
   }
 }
 
@@ -348,6 +326,9 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -461,6 +442,9 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -587,7 +571,7 @@
   output_frame->SetState(Smi::FromInt(state));
 
   // Set the continuation for the topmost frame.
-  if (is_topmost) {
+  if (is_topmost && bailout_type_ != DEBUGGER) {
     Builtins* builtins = isolate_->builtins();
     Code* continuation = (bailout_type_ == EAGER)
         ? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -595,8 +579,27 @@
     output_frame->SetContinuation(
         reinterpret_cast<uint32_t>(continuation->entry()));
   }
+}
 
-  if (output_count_ - 1 == frame_index) iterator->Done();
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  // Set the register values. The values are not important as there are no
+  // callee saved registers in JavaScript frames, so all registers are
+  // spilled. Registers ebp and esp are set to the correct values though.
+
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+  }
 }
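
The rewritten EnsureRelocSpaceForLazyDeoptimization above sizes the relocation buffer from
the lazy-deopt pc offsets in the DeoptimizationInputData rather than from the safepoint
table, charging 2 bytes per RUNTIME_ENTRY entry when the pc delta fits the small encoding
and 6 bytes otherwise. That cost model, restated as a standalone function (kMaxSmallPCDelta
is passed in here instead of using the real RelocInfo constant):

    #include <vector>

    // Worst-case relocation size for one RUNTIME_ENTRY per lazy-deopt point.
    // Offsets of -1 mean "no lazy deopt at this entry" and are skipped.
    int MinRelocSize(const std::vector<int>& pc_offsets, int kMaxSmallPCDelta) {
      int size = 0;
      int prev_pc_offset = 0;
      for (int pc_offset : pc_offsets) {
        if (pc_offset == -1) continue;
        int pc_delta = pc_offset - prev_pc_offset;
        size += (pc_delta <= kMaxSmallPCDelta) ? 2 : 6;
        prev_pc_offset = pc_offset;
      }
      return size;
    }
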
 
 
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index d1c869a..a936277 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -54,7 +54,7 @@
 };
 
 
-static ByteMnemonic two_operands_instr[] = {
+static const ByteMnemonic two_operands_instr[] = {
   {0x03, "add", REG_OPER_OP_ORDER},
   {0x09, "or", OPER_REG_OP_ORDER},
   {0x0B, "or", REG_OPER_OP_ORDER},
@@ -79,7 +79,7 @@
 };
 
 
-static ByteMnemonic zero_operands_instr[] = {
+static const ByteMnemonic zero_operands_instr[] = {
   {0xC3, "ret", UNSET_OP_ORDER},
   {0xC9, "leave", UNSET_OP_ORDER},
   {0x90, "nop", UNSET_OP_ORDER},
@@ -98,14 +98,14 @@
 };
 
 
-static ByteMnemonic call_jump_instr[] = {
+static const ByteMnemonic call_jump_instr[] = {
   {0xE8, "call", UNSET_OP_ORDER},
   {0xE9, "jmp", UNSET_OP_ORDER},
   {-1, "", UNSET_OP_ORDER}
 };
 
 
-static ByteMnemonic short_immediate_instr[] = {
+static const ByteMnemonic short_immediate_instr[] = {
   {0x05, "add", UNSET_OP_ORDER},
   {0x0D, "or", UNSET_OP_ORDER},
   {0x15, "adc", UNSET_OP_ORDER},
@@ -117,7 +117,7 @@
 };
 
 
-static const char* jump_conditional_mnem[] = {
+static const char* const jump_conditional_mnem[] = {
   /*0*/ "jo", "jno", "jc", "jnc",
   /*4*/ "jz", "jnz", "jna", "ja",
   /*8*/ "js", "jns", "jpe", "jpo",
@@ -125,7 +125,7 @@
 };
 
 
-static const char* set_conditional_mnem[] = {
+static const char* const set_conditional_mnem[] = {
   /*0*/ "seto", "setno", "setc", "setnc",
   /*4*/ "setz", "setnz", "setna", "seta",
   /*8*/ "sets", "setns", "setpe", "setpo",
@@ -133,7 +133,7 @@
 };
 
 
-static const char* conditional_move_mnem[] = {
+static const char* const conditional_move_mnem[] = {
   /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
   /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
   /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
@@ -169,7 +169,7 @@
   InstructionDesc instructions_[256];
   void Clear();
   void Init();
-  void CopyTable(ByteMnemonic bm[], InstructionType type);
+  void CopyTable(const ByteMnemonic bm[], InstructionType type);
   void SetTableRange(InstructionType type,
                      byte start,
                      byte end,
@@ -208,7 +208,8 @@
 }
 
 
-void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
+void InstructionTable::CopyTable(const ByteMnemonic bm[],
+                                 InstructionType type) {
   for (int i = 0; bm[i].b >= 0; i++) {
     InstructionDesc* id = &instructions_[bm[i].b];
     id->mnem = bm[i].mnem;
@@ -981,6 +982,14 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (f0byte == 0x57) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("xorps %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if ((f0byte & 0xF0) == 0x80) {
             data += JumpConditional(data, branch_hint);
           } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1132,7 +1141,17 @@
             }
           } else if (*data == 0x3A) {
             data++;
-            if (*data == 0x16) {
+            if (*data == 0x0B) {
+              data++;
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              int8_t imm8 = static_cast<int8_t>(data[1]);
+              AppendToBuffer("roundsd %s,%s,%d",
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm),
+                             static_cast<int>(imm8));
+              data += 2;
+            } else if (*data == 0x16) {
               data++;
               int mod, regop, rm;
               get_modrm(*data, &mod, &regop, &rm);
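
Both new disassembler cases above (0x0F 0x57 for xorps and 0x0F 0x3A 0x0B for roundsd) read
a ModR/M byte through get_modrm before printing the XMM register names. The split is the
standard x86 ModR/M field layout and can be shown standalone (GetModRM is an illustrative
free function, not the disassembler's member):

    // Decompose an x86 ModR/M byte: mod (bits 7-6), reg/opcode (bits 5-3),
    // r/m (bits 2-0).
    inline void GetModRM(unsigned char data, int* mod, int* regop, int* rm) {
      *mod = (data >> 6) & 0x3;
      *regop = (data >> 3) & 0x7;
      *rm = data & 0x7;
    }
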
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 0f95abd..2f1b2a9 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -58,10 +58,11 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset  = 0 * kPointerSize;
-  static const int kFPOffset    = 1 * kPointerSize;
-  static const int kStateOffset = 2 * kPointerSize;
-  static const int kPCOffset    = 3 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kContextOffset = 1 * kPointerSize;
+  static const int kFPOffset      = 2 * kPointerSize;
+  static const int kStateOffset   = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
   static const int kSize = kPCOffset + kPointerSize;
 };
@@ -80,8 +81,8 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kCodeOffset      = -2 * kPointerSize;
-  static const int kSPOffset        = -1 * kPointerSize;
+  static const int kCodeOffset     = -2 * kPointerSize;
+  static const int kSPOffset       = -1 * kPointerSize;
 
   static const int kCallerFPOffset =  0 * kPointerSize;
   static const int kCallerPCOffset = +1 * kPointerSize;
@@ -94,7 +95,9 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
-  static const int kFixedFrameSize    =  4;
+  // StandardFrame::IterateExpressions assumes that kContextOffset is the last
+  // object pointer.
+  static const int kFixedFrameSize    =  4;  // Currently unused.
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
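
The StackHandlerConstants hunk above adds a context word to every stack handler, growing it
from four to five pointers (kSize = kPCOffset + kPointerSize). The new layout, mirrored as a
plain struct purely for illustration; real handlers are raw stack slots pushed by the macro
assembler, not a C++ object:

    #include <cstdint>

    // One ia32 stack handler after this change, lowest address first
    // (offsets in units of kPointerSize == 4).
    struct StackHandlerLayout {
      uint32_t next;     // kNextOffset    = 0 * kPointerSize
      uint32_t context;  // kContextOffset = 1 * kPointerSize (new)
      uint32_t fp;       // kFPOffset      = 2 * kPointerSize
      uint32_t state;    // kStateOffset   = 3 * kPointerSize
      uint32_t pc;       // kPCOffset      = 4 * kPointerSize
    };
    static_assert(sizeof(StackHandlerLayout) == 5 * 4,
                  "a handler occupies five 32-bit words on ia32");
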
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5d153a8..ca6ce6e 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -41,10 +41,14 @@
 namespace v8 {
 namespace internal {
 
-
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -57,34 +61,40 @@
     ASSERT(patch_site_.is_bound() == info_emitted_);
   }
 
-  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfNotSmi(Register reg,
+                        Label* target,
+                        Label::Distance distance = Label::kFar) {
     __ test(reg, Immediate(kSmiTagMask));
-    EmitJump(not_carry, target);  // Always taken before patched.
+    EmitJump(not_carry, target, distance);  // Always taken before patched.
   }
 
-  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfSmi(Register reg,
+                     Label* target,
+                     Label::Distance distance = Label::kFar) {
     __ test(reg, Immediate(kSmiTagMask));
-    EmitJump(carry, target);  // Never taken before patched.
+    EmitJump(carry, target, distance);  // Never taken before patched.
   }
 
   void EmitPatchInfo() {
-    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
-    ASSERT(is_int8(delta_to_patch_site));
-    __ test(eax, Immediate(delta_to_patch_site));
+    if (patch_site_.is_bound()) {
+      int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+      ASSERT(is_int8(delta_to_patch_site));
+      __ test(eax, Immediate(delta_to_patch_site));
 #ifdef DEBUG
-    info_emitted_ = true;
+      info_emitted_ = true;
 #endif
+    } else {
+      __ nop();  // Signals no inlined code.
+    }
   }
 
-  bool is_bound() const { return patch_site_.is_bound(); }
-
  private:
   // jc will be patched with jz, jnc will become jnz.
-  void EmitJump(Condition cc, NearLabel* target) {
+  void EmitJump(Condition cc, Label* target, Label::Distance distance) {
     ASSERT(!patch_site_.is_bound() && !info_emitted_);
     ASSERT(cc == carry || cc == not_carry);
     __ bind(&patch_site_);
-    __ j(cc, target);
+    __ j(cc, target, distance);
   }
 
   MacroAssembler* masm_;
@@ -111,6 +121,7 @@
 void FullCodeGenerator::Generate(CompilationInfo* info) {
   ASSERT(info_ == NULL);
   info_ = info;
+  scope_ = info->scope();
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -121,13 +132,28 @@
   }
 #endif
 
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). ecx is zero for method calls and non-zero for
+  // function calls.
+  if (info->is_strict_mode() || info->is_native()) {
+    Label ok;
+    __ test(ecx, Operand(ecx));
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+    __ mov(Operand(esp, receiver_offset),
+           Immediate(isolate()->factory()->undefined_value()));
+    __ bind(&ok);
+  }
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
   __ push(edi);  // Callee's JS Function.
 
   { Comment cmnt(masm_, "[ Allocate locals");
-    int locals_count = scope()->num_stack_slots();
+    int locals_count = info->scope()->num_stack_slots();
     if (locals_count == 1) {
       __ push(Immediate(isolate()->factory()->undefined_value()));
     } else if (locals_count > 1) {
@@ -138,10 +164,15 @@
     }
   }
 
+  set_stack_height(2 + scope()->num_stack_slots());
+  if (FLAG_verify_stack_height) {
+    verify_stack_height();
+  }
+
   bool function_in_register = true;
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
@@ -150,7 +181,7 @@
       FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
-      __ CallRuntime(Runtime::kNewContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in both eax and esi.  It replaces the context
@@ -158,16 +189,16 @@
     __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
 
     // Copy parameters into context if necessary.
-    int num_parameters = scope()->num_parameters();
+    int num_parameters = info->scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->AsSlot();
-      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
         // Load parameter from stack.
         __ mov(eax, Operand(ebp, parameter_offset));
         // Store it in the context.
-        int context_offset = Context::SlotOffset(slot->index());
+        int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers all involved
         // registers, so we have use a third register to avoid
@@ -188,26 +219,28 @@
       __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     }
     // Receiver is just before the parameters on the caller's stack.
-    int offset = scope()->num_parameters() * kPointerSize;
+    int num_parameters = info->scope()->num_parameters();
+    int offset = num_parameters * kPointerSize;
     __ lea(edx,
            Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
     __ push(edx);
-    __ SafePush(Immediate(Smi::FromInt(scope()->num_parameters())));
-    // Arguments to ArgumentsAccessStub:
+    __ SafePush(Immediate(Smi::FromInt(num_parameters)));
+    // Arguments to ArgumentsAccessStub and/or New...:
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
-    ArgumentsAccessStub stub(
-        is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
-                         : ArgumentsAccessStub::NEW_NON_STRICT);
+    ArgumentsAccessStub::Type type;
+    if (is_strict_mode()) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+    }
+    ArgumentsAccessStub stub(type);
     __ CallStub(&stub);
 
-    Variable* arguments_shadow = scope()->arguments_shadow();
-    if (arguments_shadow != NULL) {
-      __ mov(ecx, eax);  // Duplicate result.
-      Move(arguments_shadow->AsSlot(), ecx, ebx, edx);
-    }
-    Move(arguments->AsSlot(), eax, ebx, edx);
+    SetVar(arguments, eax, ebx, edx);
   }
 
   if (FLAG_trace) {
@@ -221,22 +254,24 @@
     scope()->VisitIllegalRedeclaration(this);
 
   } else {
+    PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
     { Comment cmnt(masm_, "[ Declarations");
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
-      NearLabel ok;
+      PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+      Label ok;
       ExternalReference stack_limit =
           ExternalReference::address_of_stack_limit(isolate());
       __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(above_equal, &ok, taken);
+      __ j(above_equal, &ok, Label::kNear);
       StackCheckStub stub;
       __ CallStub(&stub);
       __ bind(&ok);
@@ -265,11 +300,11 @@
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
-  NearLabel ok;
+  Label ok;
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &ok, taken);
+  __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
@@ -316,7 +351,7 @@
     __ mov(esp, ebp);
     __ pop(ebp);
 
-    int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+    int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
     __ Ret(arguments_bytes, ecx);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Check that the size of the code used for returning is large enough
@@ -328,28 +363,40 @@
 }
 
 
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+void FullCodeGenerator::verify_stack_height() {
+  ASSERT(FLAG_verify_stack_height);
+  __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
+  __ cmp(ebp, Operand(esp));
+  __ Assert(equal, "Full codegen stack height not as expected.");
+  __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
 }
 
 
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
-  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
-  __ mov(result_register(), slot_operand);
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
-  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
-  __ push(slot_operand);
+  __ push(operand);
+  codegen()->increment_stack_height();
 }
 
 
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
-  codegen()->Move(result_register(), slot);
+  codegen()->GetVar(result_register(), var);
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
 }
 
 
@@ -395,6 +442,7 @@
   } else {
     __ push(Immediate(lit));
   }
+  codegen()->increment_stack_height();
 }
 
 
@@ -423,7 +471,7 @@
   } else {
     // For simplicity we always test the accumulator register.
     __ mov(result_register(), lit);
-    codegen()->DoTest(true_label_, false_label_, fall_through_);
+    codegen()->DoTest(this);
   }
 }
 
@@ -432,6 +480,7 @@
                                                    Register reg) const {
   ASSERT(count > 0);
   __ Drop(count);
+  codegen()->decrement_stack_height(count);
 }
 
 
@@ -441,6 +490,7 @@
   ASSERT(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->decrement_stack_height(count);
 }
 
 
@@ -449,6 +499,7 @@
   ASSERT(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ mov(Operand(esp, 0), reg);
+  codegen()->decrement_stack_height(count - 1);
 }
 
 
@@ -459,7 +510,8 @@
   __ Drop(count);
   __ Move(result_register(), reg);
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
+  codegen()->decrement_stack_height(count);
 }
 
 
@@ -473,10 +525,10 @@
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ mov(result_register(), isolate()->factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ mov(result_register(), isolate()->factory()->false_value());
   __ bind(&done);
@@ -486,13 +538,14 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ push(Immediate(isolate()->factory()->true_value()));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ push(Immediate(isolate()->factory()->false_value()));
   __ bind(&done);
+  codegen()->increment_stack_height();
 }
 
 
@@ -520,6 +573,7 @@
       ? isolate()->factory()->true_value()
       : isolate()->factory()->false_value();
   __ push(Immediate(value));
+  codegen()->increment_stack_height();
 }
 
 
@@ -536,28 +590,14 @@
 }
 
 
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+                               Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  // Emit the inlined tests assumed by the stub.
-  __ cmp(result_register(), isolate()->factory()->undefined_value());
-  __ j(equal, if_false);
-  __ cmp(result_register(), isolate()->factory()->true_value());
-  __ j(equal, if_true);
-  __ cmp(result_register(), isolate()->factory()->false_value());
-  __ j(equal, if_false);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ test(result_register(), Operand(result_register()));
-  __ j(zero, if_false);
-  __ test(result_register(), Immediate(kSmiTagMask));
-  __ j(zero, if_true);
-
-  // Call the ToBoolean stub for all other cases.
-  ToBooleanStub stub;
+  ToBooleanStub stub(result_register());
   __ push(result_register());
-  __ CallStub(&stub);
-  __ test(eax, Operand(eax));
-
+  __ CallStub(&stub, condition->test_id());
+  __ test(result_register(), Operand(result_register()));
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
@@ -578,44 +618,54 @@
 }
 
 
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-    case Slot::LOCAL:
-      return Operand(ebp, SlotOffset(slot));
-    case Slot::CONTEXT: {
-      int context_chain_length =
-          scope()->ContextChainLength(slot->var()->scope());
-      __ LoadContext(scratch, context_chain_length);
-      return ContextOperand(scratch, slot->index());
-    }
-    case Slot::LOOKUP:
-      UNREACHABLE();
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  ASSERT(var->IsStackAllocated());
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
   }
-  UNREACHABLE();
-  return Operand(eax, 0);
+  return Operand(ebp, offset);
 }
 
 
-void FullCodeGenerator::Move(Register destination, Slot* source) {
-  MemOperand location = EmitSlotSearch(source, destination);
-  __ mov(destination, location);
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
 }
 
 
-void FullCodeGenerator::Move(Slot* dst,
-                             Register src,
-                             Register scratch1,
-                             Register scratch2) {
-  ASSERT(dst->type() != Slot::LOOKUP);  // Not yet implemented.
-  ASSERT(!scratch1.is(src) && !scratch2.is(src));
-  MemOperand location = EmitSlotSearch(dst, scratch1);
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  MemOperand location = VarOperand(var, dest);
+  __ mov(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+                               Register src,
+                               Register scratch0,
+                               Register scratch1) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  ASSERT(!scratch0.is(src));
+  ASSERT(!scratch0.is(scratch1));
+  ASSERT(!scratch1.is(src));
+  MemOperand location = VarOperand(var, scratch0);
   __ mov(location, src);
   // Emit the write barrier code if the location is in the heap.
-  if (dst->type() == Slot::CONTEXT) {
-    int offset = Context::SlotOffset(dst->index());
-    ASSERT(!scratch1.is(esi) && !src.is(esi) && !scratch2.is(esi));
-    __ RecordWrite(scratch1, offset, src, scratch2);
+  if (var->IsContextSlot()) {
+    int offset = Context::SlotOffset(var->index());
+    ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
+    __ RecordWrite(scratch0, offset, src, scratch1);
   }
 }
 
@@ -629,8 +679,8 @@
   // preparation to avoid preparing with the same AST id twice.
   if (!context()->IsTest() || !info_->IsOptimizable()) return;
 
-  NearLabel skip;
-  if (should_normalize) __ jmp(&skip);
+  Label skip;
+  if (should_normalize) __ jmp(&skip, Label::kNear);
 
   ForwardBailoutStack* current = forward_bailout_stack_;
   while (current != NULL) {
@@ -646,120 +696,102 @@
 }
 
 
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
+void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
                                         Variable::Mode mode,
-                                        FunctionLiteral* function) {
-  Comment cmnt(masm_, "[ Declaration");
-  ASSERT(variable != NULL);  // Must have been resolved.
-  Slot* slot = variable->AsSlot();
-  Property* prop = variable->AsProperty();
+                                        FunctionLiteral* function,
+                                        int* global_count) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      ++(*global_count);
+      break;
 
-  if (slot != NULL) {
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        if (mode == Variable::CONST) {
-          __ mov(Operand(ebp, SlotOffset(slot)),
-                 Immediate(isolate()->factory()->the_hole_value()));
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ mov(Operand(ebp, SlotOffset(slot)), result_register());
-        }
-        break;
-
-      case Slot::CONTEXT:
-        // We bypass the general EmitSlotSearch because we know more about
-        // this specific context.
-
-        // The variable in the decl always resides in the current function
-        // context.
-        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-        if (FLAG_debug_code) {
-          // Check that we're not inside a 'with'.
-          __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
-          __ cmp(ebx, Operand(esi));
-          __ Check(equal, "Unexpected declaration in current context.");
-        }
-        if (mode == Variable::CONST) {
-          __ mov(ContextOperand(esi, slot->index()),
-                 Immediate(isolate()->factory()->the_hole_value()));
-          // No write barrier since the hole value is in old space.
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ mov(ContextOperand(esi, slot->index()), result_register());
-          int offset = Context::SlotOffset(slot->index());
-          __ mov(ebx, esi);
-          __ RecordWrite(ebx, offset, result_register(), ecx);
-        }
-        break;
-
-      case Slot::LOOKUP: {
-        __ push(esi);
-        __ push(Immediate(variable->name()));
-        // Declaration nodes are always introduced in one of two modes.
-        ASSERT(mode == Variable::VAR || mode == Variable::CONST);
-        PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
-        __ push(Immediate(Smi::FromInt(attr)));
-        // Push initial value, if any.
-        // Note: For variables we must not push an initial value (such as
-        // 'undefined') because we may have a (legal) redeclaration and we
-        // must not destroy the current value.
-        if (mode == Variable::CONST) {
-          __ push(Immediate(isolate()->factory()->the_hole_value()));
-        } else if (function != NULL) {
-          VisitForStackValue(function);
-        } else {
-          __ push(Immediate(Smi::FromInt(0)));  // No initial value!
-        }
-        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-        break;
-      }
-    }
-
-  } else if (prop != NULL) {
-    if (function != NULL || mode == Variable::CONST) {
-      // We are declaring a function or constant that rewrites to a
-      // property.  Use (keyed) IC to set the initial value.  We cannot
-      // visit the rewrite because it's shared and we risk recording
-      // duplicate AST IDs for bailouts from optimized code.
-      ASSERT(prop->obj()->AsVariableProxy() != NULL);
-      { AccumulatorValueContext for_object(this);
-        EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
-      }
-
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
       if (function != NULL) {
-        __ push(eax);
+        Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
-        __ pop(edx);
-      } else {
-        __ mov(edx, eax);
-        __ mov(eax, isolate()->factory()->the_hole_value());
+        __ mov(StackOperand(variable), result_register());
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        Comment cmnt(masm_, "[ Declaration");
+        __ mov(StackOperand(variable),
+               Immediate(isolate()->factory()->the_hole_value()));
       }
-      ASSERT(prop->key()->AsLiteral() != NULL &&
-             prop->key()->AsLiteral()->handle()->IsSmi());
-      __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
+      break;
 
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    case Variable::CONTEXT:
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+        __ cmp(ebx, isolate()->factory()->with_context_map());
+        __ Check(not_equal, "Declaration in with context.");
+        __ cmp(ebx, isolate()->factory()->catch_context_map());
+        __ Check(not_equal, "Declaration in catch context.");
+      }
+      if (function != NULL) {
+        Comment cmnt(masm_, "[ Declaration");
+        VisitForAccumulatorValue(function);
+        __ mov(ContextOperand(esi, variable->index()), result_register());
+        int offset = Context::SlotOffset(variable->index());
+        __ mov(ebx, esi);
+        __ RecordWrite(ebx, offset, result_register(), ecx);
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        Comment cmnt(masm_, "[ Declaration");
+        __ mov(ContextOperand(esi, variable->index()),
+               Immediate(isolate()->factory()->the_hole_value()));
+        // No write barrier since the hole value is in old space.
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      }
+      break;
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ Declaration");
+      __ push(esi);
+      __ push(Immediate(variable->name()));
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ push(Immediate(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      increment_stack_height(3);
+      if (function != NULL) {
+        VisitForStackValue(function);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        __ push(Immediate(isolate()->factory()->the_hole_value()));
+        increment_stack_height();
+      } else {
+        __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
+        increment_stack_height();
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      decrement_stack_height(4);
+      break;
     }
   }
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
-  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
-}
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
 
 
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
   __ push(Immediate(pairs));
-  __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
-  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-  __ CallRuntime(Runtime::kDeclareGlobals, 4);
+  __ push(Immediate(Smi::FromInt(DeclareGlobalsFlags())));
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
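
DeclareGlobals now passes a single flags Smi instead of separate is_eval and strict-mode arguments, so the runtime call shrinks from four arguments to three. A hedged sketch of that kind of packing; the bit layout below is hypothetical, not the actual encoding produced by DeclareGlobalsFlags():

    #include <cassert>

    // Hypothetical encoding: only illustrates folding two small flags into the
    // single Smi that Runtime::kDeclareGlobals now receives.
    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };

    int DeclareGlobalsFlags(bool is_eval, StrictModeFlag strict_mode) {
      return (is_eval ? 1 : 0) | (strict_mode << 1);
    }

    int main() {
      int flags = DeclareGlobalsFlags(true, kStrictMode);
      assert((flags & 1) != 0);         // is_eval bit
      assert(((flags >> 1) & 1) == 1);  // strict-mode bit
      return 0;
    }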
 
@@ -769,6 +801,7 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
 
+  int switch_clause_stack_height = stack_height();
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -800,10 +833,10 @@
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
     JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
-      NearLabel slow_case;
+      Label slow_case;
       __ mov(ecx, edx);
       __ or_(ecx, Operand(eax));
-      patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+      patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
 
       __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
@@ -815,7 +848,8 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    EmitCallIC(ic, &patch_site);
+    __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    patch_site.EmitPatchInfo();
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -827,11 +861,12 @@
   __ bind(&next_test);
   __ Drop(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
-    __ jmp(nested_statement.break_target());
+    __ jmp(nested_statement.break_label());
   } else {
     __ jmp(default_clause->body_target());
   }
 
+  set_stack_height(switch_clause_stack_height);
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
@@ -841,7 +876,7 @@
     VisitStatements(clause->statements());
   }
 
-  __ bind(nested_statement.break_target());
+  __ bind(nested_statement.break_label());
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
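
The switch visitor snapshots stack_height() before pushing the tag and restores it with set_stack_height() before compiling the case bodies, and the same increment/decrement calls are threaded through the rest of this file. A minimal sketch of such a tracker, assuming it only backs consistency checks during code generation (the real members live elsewhere in the full code generator):

    #include <cassert>

    // Hypothetical stand-in for the expression-stack bookkeeping used by the
    // full code generator; the real implementation and its uses differ.
    class StackHeightTracker {
     public:
      int stack_height() const { return height_; }
      void set_stack_height(int h) { height_ = h; }
      void increment_stack_height(int n = 1) { height_ += n; }
      void decrement_stack_height(int n = 1) {
        assert(height_ >= n);  // catch unbalanced pushes/pops at codegen time
        height_ -= n;
      }
     private:
      int height_ = 0;
    };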
 
@@ -864,16 +899,16 @@
   __ j(equal, &exit);
 
   // Convert the object to a JS object.
-  NearLabel convert, done_convert;
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &convert);
-  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-  __ j(above_equal, &done_convert);
+  Label convert, done_convert;
+  __ JumpIfSmi(eax, &convert, Label::kNear);
+  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+  __ j(above_equal, &done_convert, Label::kNear);
   __ bind(&convert);
   __ push(eax);
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ bind(&done_convert);
   __ push(eax);
+  increment_stack_height();
 
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -893,21 +928,19 @@
   // check for an enum cache.  Leave the map in ebx for the subsequent
   // prototype load.
   __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
-  __ cmp(edx, isolate()->factory()->empty_descriptor_array());
-  __ j(equal, &call_runtime);
+  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(edx, &call_runtime);
 
   // Check that there is an enum cache in the non-empty instance
   // descriptors (edx).  This is the case if the next enumeration
   // index field does not contain a smi.
   __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &call_runtime);
+  __ JumpIfSmi(edx, &call_runtime);
 
   // For all objects but the receiver, check that the cache is empty.
-  NearLabel check_prototype;
+  Label check_prototype;
   __ cmp(ecx, Operand(eax));
-  __ j(equal, &check_prototype);
+  __ j(equal, &check_prototype, Label::kNear);
   __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   __ cmp(edx, isolate()->factory()->empty_fixed_array());
   __ j(not_equal, &call_runtime);
@@ -920,9 +953,9 @@
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
-  NearLabel use_cache;
+  Label use_cache;
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  __ jmp(&use_cache);
+  __ jmp(&use_cache, Label::kNear);
 
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
@@ -932,14 +965,14 @@
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
   // to do a slow check.
-  NearLabel fixed_array;
+  Label fixed_array;
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  __ j(not_equal, &fixed_array);
+  __ j(not_equal, &fixed_array, Label::kNear);
 
   // We got a map in register eax. Get the enumeration cache from it.
   __ bind(&use_cache);
-  __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(eax, ecx);
   __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
   __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
@@ -959,11 +992,13 @@
   __ push(eax);  // Fixed array length (as smi).
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
+  // 1 ~ The object has already been pushed.
+  increment_stack_height(ForIn::kElementCount - 1);
   // Generate code for doing the condition check.
   __ bind(&loop);
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
   __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
-  __ j(above_equal, loop_statement.break_target());
+  __ j(above_equal, loop_statement.break_label());
 
   // Get the current entry of the array into register ebx.
   __ mov(ebx, Operand(esp, 2 * kPointerSize));
@@ -975,10 +1010,10 @@
 
   // Check if the expected map still matches that of the enumerable.
   // If not, we have to filter the key.
-  NearLabel update_each;
+  Label update_each;
   __ mov(ecx, Operand(esp, 4 * kPointerSize));
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ j(equal, &update_each);
+  __ j(equal, &update_each, Label::kNear);
 
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
@@ -987,7 +1022,7 @@
   __ push(ebx);  // Current entry.
   __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
   __ test(eax, Operand(eax));
-  __ j(equal, loop_statement.continue_target());
+  __ j(equal, loop_statement.continue_label());
   __ mov(ebx, Operand(eax));
 
   // Update the 'each' property or variable from the possibly filtered
@@ -1004,16 +1039,17 @@
 
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
-  __ bind(loop_statement.continue_target());
+  __ bind(loop_statement.continue_label());
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
 
   EmitStackCheck(stmt);
   __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
-  __ bind(loop_statement.break_target());
+  __ bind(loop_statement.break_label());
   __ add(Operand(esp), Immediate(5 * kPointerSize));
 
+  decrement_stack_height(ForIn::kElementCount);
   // Exit and decrement the loop depth.
   __ bind(&exit);
   decrement_loop_depth();
@@ -1050,14 +1086,13 @@
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  EmitVariableLoad(expr->var());
+  EmitVariableLoad(expr);
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow) {
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+                                                      TypeofState typeof_state,
+                                                      Label* slow) {
   Register context = esi;
   Register temp = edx;
 
@@ -1071,8 +1106,7 @@
         __ j(not_equal, slow);
       }
       // Load next context in chain.
-      __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+      __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
       // Walk the rest of the chain without clobbering esi.
       context = temp;
     }
@@ -1086,7 +1120,7 @@
   if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
-    NearLabel next, fast;
+    Label next, fast;
     if (!context.is(temp)) {
       __ mov(temp, context);
     }
@@ -1094,13 +1128,12 @@
     // Terminate at global context.
     __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
            Immediate(isolate()->factory()->global_context_map()));
-    __ j(equal, &fast);
+    __ j(equal, &fast, Label::kNear);
     // Check that extension is NULL.
     __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
     __ j(not_equal, slow);
     // Load next context in chain.
-    __ mov(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
-    __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+    __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
     __ jmp(&next);
     __ bind(&fast);
   }
@@ -1108,23 +1141,22 @@
   // All extension objects were empty and it is safe to use a global
   // load IC call.
   __ mov(eax, GlobalObjectOperand());
-  __ mov(ecx, slot->var()->name());
+  __ mov(ecx, var->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  EmitCallIC(ic, mode);
+  __ call(ic, mode);
 }
 
 
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Label* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+                                                                Label* slow) {
+  ASSERT(var->IsContextSlot());
   Register context = esi;
   Register temp = ebx;
 
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
       if (s->calls_eval()) {
         // Check that extension is NULL.
@@ -1132,8 +1164,7 @@
                Immediate(0));
         __ j(not_equal, slow);
       }
-      __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+      __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
       // Walk the rest of the chain without clobbering esi.
       context = temp;
     }
@@ -1145,152 +1176,102 @@
   // This function is used only for loads, not stores, so it's safe to
   // return an esi-based operand (the write barrier cannot be allowed to
   // destroy the esi register).
-  return ContextOperand(context, slot->index());
+  return ContextOperand(context, var->index());
 }
 
 
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow,
-    Label* done) {
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+                                                  TypeofState typeof_state,
+                                                  Label* slow,
+                                                  Label* done) {
   // Generate fast-case code for variables that might be shadowed by
   // eval-introduced variables.  Eval is used a lot without
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      __ mov(eax,
-             ContextSlotOperandCheckExtensions(potential_slot, slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ cmp(eax, isolate()->factory()->the_hole_value());
-        __ j(not_equal, done);
-        __ mov(eax, isolate()->factory()->undefined_value());
-      }
-      __ jmp(done);
-    } else if (rewrite != NULL) {
-      // Generate fast case for calls of an argument function.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          __ mov(edx,
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                   slow));
-          __ SafeSet(eax, Immediate(key_literal->handle()));
-          Handle<Code> ic =
-              isolate()->builtins()->KeyedLoadIC_Initialize();
-          EmitCallIC(ic, RelocInfo::CODE_TARGET);
-          __ jmp(done);
-        }
-      }
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+    Variable* local = var->local_if_not_shadowed();
+    __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
+    if (local->mode() == Variable::CONST) {
+      __ cmp(eax, isolate()->factory()->the_hole_value());
+      __ j(not_equal, done);
+      __ mov(eax, isolate()->factory()->undefined_value());
     }
+    __ jmp(done);
   }
 }
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
-  // Four cases: non-this global variables, lookup slots, all other
-  // types of slots, and parameters that rewrite to explicit property
-  // accesses on the arguments object.
-  Slot* slot = var->AsSlot();
-  Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+  // Record position before possible IC call.
+  SetSourcePosition(proxy->position());
+  Variable* var = proxy->var();
 
-  if (var->is_global() && !var->is_this()) {
-    Comment cmnt(masm_, "Global variable");
-    // Use inline caching. Variable name is passed in ecx and the global
-    // object on the stack.
-    __ mov(eax, GlobalObjectOperand());
-    __ mov(ecx, var->name());
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    context()->Plug(eax);
-
-  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    Label done, slow;
-
-    // Generate code for loading from variables potentially shadowed
-    // by eval-introduced variables.
-    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    Comment cmnt(masm_, "Lookup slot");
-    __ push(esi);  // Context.
-    __ push(Immediate(var->name()));
-    __ CallRuntime(Runtime::kLoadContextSlot, 2);
-    __ bind(&done);
-
-    context()->Plug(eax);
-
-  } else if (slot != NULL) {
-    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
-                            ? "Context slot"
-                            : "Stack slot");
-    if (var->mode() == Variable::CONST) {
-      // Constants may be the hole value if they have not been initialized.
-      // Unhole them.
-      NearLabel done;
-      MemOperand slot_operand = EmitSlotSearch(slot, eax);
-      __ mov(eax, slot_operand);
-      __ cmp(eax, isolate()->factory()->the_hole_value());
-      __ j(not_equal, &done);
-      __ mov(eax, isolate()->factory()->undefined_value());
-      __ bind(&done);
+  // Three cases: global variables, lookup variables, and all other types of
+  // variables.
+  switch (var->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "Global variable");
+      // Use inline caching. Variable name is passed in ecx and the global
+      // object in eax.
+      __ mov(eax, GlobalObjectOperand());
+      __ mov(ecx, var->name());
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(eax);
-    } else {
-      context()->Plug(slot);
+      break;
     }
 
-  } else {
-    Comment cmnt(masm_, "Rewritten parameter");
-    ASSERT_NOT_NULL(property);
-    // Rewritten parameter accesses are of the form "slot[literal]".
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, var->IsContextSlot()
+                              ? "Context variable"
+                              : "Stack variable");
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        Label done;
+        GetVar(eax, var);
+        __ cmp(eax, isolate()->factory()->the_hole_value());
+        __ j(not_equal, &done, Label::kNear);
+        if (var->mode() == Variable::LET) {
+          __ push(Immediate(var->name()));
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+        } else {  // Variable::CONST
+          __ mov(eax, isolate()->factory()->undefined_value());
+        }
+        __ bind(&done);
+        context()->Plug(eax);
+      }
+      break;
+    }
 
-    // Assert that the object is in a slot.
-    Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
-    ASSERT_NOT_NULL(object_var);
-    Slot* object_slot = object_var->AsSlot();
-    ASSERT_NOT_NULL(object_slot);
-
-    // Load the object.
-    MemOperand object_loc = EmitSlotSearch(object_slot, eax);
-    __ mov(edx, object_loc);
-
-    // Assert that the key is a smi.
-    Literal* key_literal = property->key()->AsLiteral();
-    ASSERT_NOT_NULL(key_literal);
-    ASSERT(key_literal->handle()->IsSmi());
-
-    // Load the key.
-    __ SafeSet(eax, Immediate(key_literal->handle()));
-
-    // Do a keyed property load.
-    Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
-    // Drop key and object left on the stack by IC.
-    context()->Plug(eax);
+    case Variable::LOOKUP: {
+      Label done, slow;
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      __ bind(&slow);
+      Comment cmnt(masm_, "Lookup variable");
+      __ push(esi);  // Context.
+      __ push(Immediate(var->name()));
+      __ CallRuntime(Runtime::kLoadContextSlot, 2);
+      __ bind(&done);
+      context()->Plug(eax);
+      break;
+    }
   }
 }
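
For let and const bindings, EmitVariableLoad emits a read barrier: a load that sees the hole means the binding is uninitialized, so a let read throws a ReferenceError while a const read produces undefined. A compact model of that decision, with placeholder types standing in for tagged values:

    #include <stdexcept>

    // Placeholder value type; 'the hole' marks an uninitialized binding.
    enum class Value { kTheHole, kUndefined, kSomeObject };
    enum class Mode { kVar, kLet, kConst };

    // Mirrors the hole check emitted for let/const loads.
    Value ReadBinding(Value stored, Mode mode) {
      if (mode == Mode::kVar) return stored;         // no barrier needed
      if (stored != Value::kTheHole) return stored;  // initialized: fast path
      if (mode == Mode::kLet) {
        throw std::runtime_error("ReferenceError");  // Runtime::kThrowReferenceError
      }
      return Value::kUndefined;                      // const read before initialization
    }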
 
 
 void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   Comment cmnt(masm_, "[ RegExpLiteral");
-  NearLabel materialized;
+  Label materialized;
   // Registers will be used as follows:
   // edi = JS function.
   // ecx = literals array.
@@ -1302,7 +1283,7 @@
       FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
   __ mov(ebx, FieldOperand(ecx, literal_offset));
   __ cmp(ebx, isolate()->factory()->undefined_value());
-  __ j(not_equal, &materialized);
+  __ j(not_equal, &materialized, Label::kNear);
 
   // Create regexp literal using runtime function
   // Result will be in eax.
@@ -1379,6 +1360,7 @@
     if (!result_saved) {
       __ push(eax);  // Save result on the stack
       result_saved = true;
+      increment_stack_height();
     }
     switch (property->kind()) {
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1393,7 +1375,7 @@
             Handle<Code> ic = is_strict_mode()
                 ? isolate()->builtins()->StoreIC_Initialize_Strict()
                 : isolate()->builtins()->StoreIC_Initialize();
-            EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            __ call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1403,6 +1385,7 @@
         // Fall through.
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(Operand(esp, 0));  // Duplicate receiver.
+        increment_stack_height();
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1411,16 +1394,20 @@
         } else {
           __ Drop(3);
         }
+        decrement_stack_height(3);
         break;
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
         __ push(Operand(esp, 0));  // Duplicate receiver.
+        increment_stack_height();
         VisitForStackValue(key);
         __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
                           Smi::FromInt(1) :
                           Smi::FromInt(0)));
+        increment_stack_height();
         VisitForStackValue(value);
         __ CallRuntime(Runtime::kDefineAccessor, 4);
+        decrement_stack_height(4);
         break;
       default: UNREACHABLE();
     }
@@ -1483,6 +1470,7 @@
     if (!result_saved) {
       __ push(eax);
       result_saved = true;
+      increment_stack_height();
     }
     VisitForAccumulatorValue(subexpr);
 
@@ -1511,12 +1499,14 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // on the left-hand side.
   if (!expr->target()->IsValidLeftHandSide()) {
-    VisitForEffect(expr->target());
+    ASSERT(expr->target()->AsThrow() != NULL);
+    VisitInCurrentContext(expr->target());  // Throw does not plug the context.
+    context()->Plug(eax);
     return;
   }
 
   // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* property = expr->target()->AsProperty();
@@ -1536,35 +1526,21 @@
         // We need the receiver both on the stack and in the accumulator.
         VisitForAccumulatorValue(property->obj());
         __ push(result_register());
+        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
       }
       break;
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
-        if (property->is_arguments_access()) {
-          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-          MemOperand slot_operand =
-              EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
-          __ push(slot_operand);
-          __ SafeSet(eax, Immediate(property->key()->AsLiteral()->handle()));
-        } else {
-          VisitForStackValue(property->obj());
-          VisitForAccumulatorValue(property->key());
-        }
+        VisitForStackValue(property->obj());
+        VisitForAccumulatorValue(property->key());
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
+        increment_stack_height();
       } else {
-        if (property->is_arguments_access()) {
-          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-          MemOperand slot_operand =
-              EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
-          __ push(slot_operand);
-          __ SafePush(Immediate(property->key()->AsLiteral()->handle()));
-        } else {
-          VisitForStackValue(property->obj());
-          VisitForStackValue(property->key());
-        }
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
       }
       break;
     }
@@ -1573,10 +1549,11 @@
   // For compound assignments we need another deoptimization point after the
   // variable/property load.
   if (expr->is_compound()) {
-    { AccumulatorValueContext context(this);
+    AccumulatorValueContext result_context(this);
+    { AccumulatorValueContext left_operand_context(this);
       switch (assign_type) {
         case VARIABLE:
-          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          EmitVariableLoad(expr->target()->AsVariableProxy());
           PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
@@ -1592,21 +1569,21 @@
 
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
+    increment_stack_height();
     VisitForAccumulatorValue(expr->value());
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
         ? OVERWRITE_RIGHT
         : NO_OVERWRITE;
     SetSourcePosition(expr->position() + 1);
-    AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
-      EmitInlineSmiBinaryOp(expr,
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
                             op,
                             mode,
                             expr->target(),
                             expr->value());
     } else {
-      EmitBinaryOp(op, mode);
+      EmitBinaryOp(expr->binary_operation(), op, mode);
     }
 
     // Deoptimization point in case the binary operation may have side effects.
@@ -1642,36 +1619,38 @@
   ASSERT(!key->handle()->IsSmi());
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Expression* left,
                                               Expression* right) {
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
-  NearLabel done, smi_case, stub_call;
+  Label smi_case, done, stub_call;
   __ pop(edx);
+  decrement_stack_height();
   __ mov(ecx, eax);
   __ or_(eax, Operand(edx));
   JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &smi_case);
+  patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
+  BinaryOpStub stub(op, mode);
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
+  __ jmp(&done, Label::kNear);
 
   // Smi case.
   __ bind(&smi_case);
@@ -1724,7 +1703,7 @@
       __ imul(eax, Operand(ecx));
       __ j(overflow, &stub_call);
       __ test(eax, Operand(eax));
-      __ j(not_zero, &done, taken);
+      __ j(not_zero, &done, Label::kNear);
       __ mov(ebx, edx);
       __ or_(ebx, Operand(ecx));
       __ j(negative, &stub_call);
@@ -1748,11 +1727,15 @@
 }
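
The combined smi check ORs both operands into one register before the patchable jump: since a smi carries tag 0 in its low bit, the OR has a clear low bit exactly when both operands are smis. A tiny standalone check of that property, assuming ia32's one-bit smi tag:

    #include <cassert>

    // Assumes V8's smi tagging on ia32: smis carry tag 0 in the low bit,
    // heap object pointers carry tag 1.
    const int kSmiTagMask = 1;
    const int kSmiTagSize = 1;

    bool BothSmis(int a, int b) {
      // One OR and one test of the tag bit covers both operands at once.
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      int smi_two = 2 << kSmiTagSize;  // the smi encoding of the value 2
      int heap_object = 0x1001;        // stand-in for a tagged heap pointer
      assert(BothSmis(smi_two, smi_two));
      assert(!BothSmis(smi_two, heap_object));
      assert(!BothSmis(heap_object, heap_object));
      return 0;
    }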
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), NULL);  // NULL signals no inlined smi code.
+  decrement_stack_height();
+  BinaryOpStub stub(op, mode);
+  JumpPatchSite patch_site(masm_);  // Unbound, signals no inlined smi code.
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
   context()->Plug(eax);
 }
 
@@ -1761,12 +1744,14 @@
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
-    VisitForEffect(expr);
+    ASSERT(expr->AsThrow() != NULL);
+    VisitInCurrentContext(expr);  // Throw does not plug the context.
+    context()->Plug(eax);
     return;
   }
 
   // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* prop = expr->AsProperty();
@@ -1785,37 +1770,32 @@
     }
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
+      increment_stack_height();
       VisitForAccumulatorValue(prop->obj());
       __ mov(edx, eax);
       __ pop(eax);  // Restore value.
+      decrement_stack_height();
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      if (prop->is_synthetic()) {
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        { AccumulatorValueContext for_object(this);
-          EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
-        }
-        __ mov(edx, eax);
-        __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
-      } else {
-        VisitForStackValue(prop->obj());
-        VisitForAccumulatorValue(prop->key());
-        __ mov(ecx, eax);
-        __ pop(edx);
-      }
+      increment_stack_height();
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ mov(ecx, eax);
+      __ pop(edx);
+      decrement_stack_height();
       __ pop(eax);  // Restore value.
+      decrement_stack_height();
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic);
       break;
     }
   }
@@ -1826,94 +1806,89 @@
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
-  // Left-hand sides that rewrite to explicit property accesses do not reach
-  // here.
-  ASSERT(var != NULL);
-  ASSERT(var->is_global() || var->AsSlot() != NULL);
-
-  if (var->is_global()) {
-    ASSERT(!var->is_this());
-    // Assignment to a global variable.  Use inline caching for the
-    // assignment.  Right-hand-side value is passed in eax, variable name in
-    // ecx, and the global object on the stack.
+  if (var->IsUnallocated()) {
+    // Global var, const, or let.
     __ mov(ecx, var->name());
     __ mov(edx, GlobalObjectOperand());
     Handle<Code> ic = is_strict_mode()
         ? isolate()->builtins()->StoreIC_Initialize_Strict()
         : isolate()->builtins()->StoreIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
-    // Like var declarations, const declarations are hoisted to function
-    // scope.  However, unlike var initializers, const initializers are able
-    // to drill a hole to that function context, even from inside a 'with'
-    // context.  We thus bypass the normal static scope lookup.
-    Slot* slot = var->AsSlot();
-    Label skip;
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-        // No const parameters.
-        UNREACHABLE();
-        break;
-      case Slot::LOCAL:
-        __ mov(edx, Operand(ebp, SlotOffset(slot)));
-        __ cmp(edx, isolate()->factory()->the_hole_value());
-        __ j(not_equal, &skip);
-        __ mov(Operand(ebp, SlotOffset(slot)), eax);
-        break;
-      case Slot::CONTEXT: {
-        __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
-        __ mov(edx, ContextOperand(ecx, slot->index()));
-        __ cmp(edx, isolate()->factory()->the_hole_value());
-        __ j(not_equal, &skip);
-        __ mov(ContextOperand(ecx, slot->index()), eax);
-        int offset = Context::SlotOffset(slot->index());
-        __ mov(edx, eax);  // Preserve the stored value in eax.
-        __ RecordWrite(ecx, offset, edx, ebx);
-        break;
-      }
-      case Slot::LOOKUP:
-        __ push(eax);
-        __ push(esi);
-        __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-        break;
+    // Const initializers need a write barrier.
+    ASSERT(!var->IsParameter());  // No const parameters.
+    if (var->IsStackLocal()) {
+      Label skip;
+      __ mov(edx, StackOperand(var));
+      __ cmp(edx, isolate()->factory()->the_hole_value());
+      __ j(not_equal, &skip);
+      __ mov(StackOperand(var), eax);
+      __ bind(&skip);
+    } else {
+      ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+      // Like var declarations, const declarations are hoisted to function
+      // scope.  However, unlike var initializers, const initializers are
+      // able to drill a hole to that function context, even from inside a
+      // 'with' context.  We thus bypass the normal static scope lookup for
+      // var->IsContextSlot().
+      __ push(eax);
+      __ push(esi);
+      __ push(Immediate(var->name()));
+      __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
-    __ bind(&skip);
+
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+    // Non-initializing assignment to let variable needs a write barrier.
+    if (var->IsLookupSlot()) {
+      __ push(eax);  // Value.
+      __ push(esi);  // Context.
+      __ push(Immediate(var->name()));
+      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
+    } else {
+      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      Label assign;
+      MemOperand location = VarOperand(var, ecx);
+      __ mov(edx, location);
+      __ cmp(edx, isolate()->factory()->the_hole_value());
+      __ j(not_equal, &assign, Label::kNear);
+      __ push(Immediate(var->name()));
+      __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      __ bind(&assign);
+      __ mov(location, eax);
+      if (var->IsContextSlot()) {
+        __ mov(edx, eax);
+        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+      }
+    }
 
   } else if (var->mode() != Variable::CONST) {
-    // Perform the assignment for non-const variables.  Const assignments
-    // are simply skipped.
-    Slot* slot = var->AsSlot();
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        // Perform the assignment.
-        __ mov(Operand(ebp, SlotOffset(slot)), eax);
-        break;
-
-      case Slot::CONTEXT: {
-        MemOperand target = EmitSlotSearch(slot, ecx);
-        // Perform the assignment and issue the write barrier.
-        __ mov(target, eax);
-        // The value of the assignment is in eax.  RecordWrite clobbers its
-        // register arguments.
-        __ mov(edx, eax);
-        int offset = Context::SlotOffset(slot->index());
-        __ RecordWrite(ecx, offset, edx, ebx);
-        break;
+    // Assignment to var or initializing assignment to let.
+    if (var->IsStackAllocated() || var->IsContextSlot()) {
+      MemOperand location = VarOperand(var, ecx);
+      if (FLAG_debug_code && op == Token::INIT_LET) {
+        // Check for an uninitialized let binding.
+        __ mov(edx, location);
+        __ cmp(edx, isolate()->factory()->the_hole_value());
+        __ Check(equal, "Let binding re-initialization.");
       }
-
-      case Slot::LOOKUP:
-        // Call the runtime for the assignment.
-        __ push(eax);  // Value.
-        __ push(esi);  // Context.
-        __ push(Immediate(var->name()));
-        __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-        __ CallRuntime(Runtime::kStoreContextSlot, 4);
-        break;
+      // Perform the assignment.
+      __ mov(location, eax);
+      if (var->IsContextSlot()) {
+        __ mov(edx, eax);
+        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+      }
+    } else {
+      ASSERT(var->IsLookupSlot());
+      __ push(eax);  // Value.
+      __ push(esi);  // Context.
+      __ push(Immediate(var->name()));
+      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
+  // Non-initializing assignments to consts are ignored.
 }
 
 
@@ -1940,11 +1915,12 @@
     __ mov(edx, Operand(esp, 0));
   } else {
     __ pop(edx);
+    decrement_stack_height();
   }
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1953,6 +1929,7 @@
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
     __ Drop(1);
+    decrement_stack_height();
   }
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
@@ -1974,17 +1951,19 @@
   }
 
   __ pop(ecx);
+  decrement_stack_height();
   if (expr->ends_initialization_block()) {
     __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
   } else {
     __ pop(edx);
+    decrement_stack_height();
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1993,6 +1972,7 @@
     __ push(edx);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
+    decrement_stack_height();
   }
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2012,6 +1992,7 @@
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
+    decrement_stack_height();
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
@@ -2032,20 +2013,19 @@
   }
   // Record source position of the IC call.
   SetSourcePosition(expr->position());
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arg_count, in_loop);
-  EmitCallIC(ic, mode);
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+  __ call(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
+                                            Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
@@ -2054,6 +2034,7 @@
   __ pop(ecx);
   __ push(eax);
   __ push(ecx);
+  increment_stack_height();
 
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2065,19 +2046,19 @@
   }
   // Record source position of the IC call.
   SetSourcePosition(expr->position());
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
-      arg_count, in_loop);
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
-  EmitCallIC(ic, mode);
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2088,12 +2069,13 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arg_count, flags);
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);
 }
 
@@ -2108,10 +2090,15 @@
   }
 
   // Push the receiver of the enclosing function.
-  __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
+  __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
 
-  // Push the strict mode flag.
-  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ push(Immediate(Smi::FromInt(strict_mode)));
 
   __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
                  ? Runtime::kResolvePossiblyDirectEvalNoLookup
@@ -2127,36 +2114,35 @@
 #endif
 
   Comment cmnt(masm_, "[ Call");
-  Expression* fun = expr->expression();
-  Variable* var = fun->AsVariableProxy()->AsVariable();
+  Expression* callee = expr->expression();
+  VariableProxy* proxy = callee->AsVariableProxy();
+  Property* property = callee->AsProperty();
 
-  if (var != NULL && var->is_possibly_eval()) {
+  if (proxy != NULL && proxy->var()->is_possibly_eval()) {
     // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
+    // resolve the function we need to call and the receiver of the call.
+    // Then we call the resolved function using the given arguments.
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
     { PreservePositionScope pos_scope(masm()->positions_recorder());
-      VisitForStackValue(fun);
+      VisitForStackValue(callee);
       // Reserved receiver slot.
       __ push(Immediate(isolate()->factory()->undefined_value()));
-
+      increment_stack_height();
       // Push the arguments.
       for (int i = 0; i < arg_count; i++) {
         VisitForStackValue(args->at(i));
       }
 
       // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
+      // variables we attempt to load the global eval function directly in
+      // generated code. If we succeed, there is no need to perform a
       // context lookup in the runtime system.
       Label done;
-      if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
         Label slow;
-        EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
-                                          NOT_INSIDE_TYPEOF,
-                                          &slow);
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
         // Push the function and resolve eval.
         __ push(eax);
         EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
@@ -2164,13 +2150,11 @@
         __ bind(&slow);
       }
 
-      // Push copy of the function (found below the arguments) and
+      // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ push(Operand(esp, (arg_count + 1) * kPointerSize));
       EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      if (done.is_linked()) {
-        __ bind(&done);
-      }
+      __ bind(&done);
 
       // The runtime call returns a pair of values in eax (function) and
       // edx (receiver). Touch up the stack with the right values.
@@ -2179,110 +2163,81 @@
     }
     // Record source position for debugger.
     SetSourcePosition(expr->position());
-    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    decrement_stack_height(arg_count + 1);  // Function is left on the stack.
     context()->DropAndPlug(1, eax);
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
+
+  } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
     __ push(GlobalObjectOperand());
-    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
+    increment_stack_height();
+    EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
+
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
-
     { PreservePositionScope scope(masm()->positions_recorder());
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                      NOT_INSIDE_TYPEOF,
-                                      &slow,
-                                      &done);
+      // Generate code for loading from variables potentially shadowed by
+      // eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
     }
-
     __ bind(&slow);
-    // Call the runtime to find the function to call (returned in eax)
-    // and the object holding it (returned in edx).
+    // Call the runtime to find the function to call (returned in eax) and
+    // the object holding it (returned in edx).
     __ push(context_register());
-    __ push(Immediate(var->name()));
+    __ push(Immediate(proxy->name()));
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ push(eax);  // Function.
     __ push(edx);  // Receiver.
+    increment_stack_height(2);
 
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
+    // If fast case code has been generated, emit code to push the function
+    // and receiver and have the slow path jump around this code.
     if (done.is_linked()) {
       Label call;
-      __ jmp(&call);
+      __ jmp(&call, Label::kNear);
       __ bind(&done);
-      // Push function.
+      // Push function.  Stack height already incremented in slow case
+      // above.
       __ push(eax);
-      // Push global receiver.
-      __ mov(ebx, GlobalObjectOperand());
-      __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+      // The receiver is implicitly the global receiver. Indicate this by
+      // passing the hole to the call function stub.
+      __ push(Immediate(isolate()->factory()->the_hole_value()));
       __ bind(&call);
     }
 
-    EmitCallWithStub(expr);
-  } else if (fun->AsProperty() != NULL) {
-    // Call to an object property.
-    Property* prop = fun->AsProperty();
-    Literal* key = prop->key()->AsLiteral();
-    if (key != NULL && key->handle()->IsSymbol()) {
-      // Call to a named property, use call IC.
-      { PreservePositionScope scope(masm()->positions_recorder());
-        VisitForStackValue(prop->obj());
-      }
-      EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
-    } else {
-      // Call to a keyed property.
-      // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use keyed EmitCallIC.
-      if (prop->is_synthetic()) {
-        // Do not visit the object and key subexpressions (they are shared
-        // by all occurrences of the same rewritten parameter).
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
-        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
-        MemOperand operand = EmitSlotSearch(slot, edx);
-        __ mov(edx, operand);
+    // The receiver is either the global receiver or an object found by
+    // LoadContextSlot. That object could be the hole if the receiver is
+    // implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
 
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
-        __ mov(eax, prop->key()->AsLiteral()->handle());
-
-        // Record source code position for IC call.
-        SetSourcePosition(prop->position());
-
-        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        EmitCallIC(ic, RelocInfo::CODE_TARGET);
-        // Push result (function).
-        __ push(eax);
-        // Push Global receiver.
-        __ mov(ecx, GlobalObjectOperand());
-        __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
-        EmitCallWithStub(expr);
-      } else {
-        { PreservePositionScope scope(masm()->positions_recorder());
-          VisitForStackValue(prop->obj());
-        }
-        EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
-      }
-    }
-  } else {
+  } else if (property != NULL) {
     { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(fun);
+      VisitForStackValue(property->obj());
+    }
+    if (property->key()->IsPropertyName()) {
+      EmitCallWithIC(expr,
+                     property->key()->AsLiteral()->handle(),
+                     RelocInfo::CODE_TARGET);
+    } else {
+      EmitKeyedCallWithIC(expr, property->key());
+    }
+
+  } else {
+    // Call to an arbitrary expression not handled specially above.
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
     }
     // Load global receiver object.
     __ mov(ebx, GlobalObjectOperand());
     __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    increment_stack_height();
     // Emit function call.
-    EmitCallWithStub(expr);
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
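
The eval and lookup-slot branches above push the hole value as the receiver and call the stub with RECEIVER_MIGHT_BE_IMPLICIT. A minimal standalone sketch of that convention, with plain integers standing in for V8's tagged values (Value, kTheHole and the global receiver argument are stand-ins, not the real types):

#include <cstdint>

typedef intptr_t Value;     // Stand-in for a tagged V8 value.
const Value kTheHole = -1;  // Stand-in for the hole sentinel.

// Under RECEIVER_MIGHT_BE_IMPLICIT, the hole in the receiver slot means
// "no explicit receiver"; the stub is expected to substitute the global
// receiver before performing the call.
Value ResolveReceiver(Value pushed_receiver, Value global_receiver) {
  return (pushed_receiver == kTheHole) ? global_receiver : pushed_receiver;
}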
 
 #ifdef DEBUG
@@ -2321,6 +2276,8 @@
   Handle<Code> construct_builtin =
       isolate()->builtins()->JSConstructCall();
   __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
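
The increment_stack_height()/decrement_stack_height() calls threaded through these hunks keep an abstract count of the values the generated code currently has pushed, presumably so that debug builds can assert that pushes and pops balance. A minimal standalone sketch of that kind of bookkeeping, not the real FullCodeGenerator helpers:

#include <cassert>

class StackHeightTracker {
 public:
  StackHeightTracker() : height_(0) {}
  void increment(int delta = 1) { height_ += delta; }
  void decrement(int delta = 1) {
    assert(height_ >= delta);  // Never account for more pops than pushes.
    height_ -= delta;
  }
  int height() const { return height_; }

 private:
  int height_;
};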
 
@@ -2377,8 +2334,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, if_false);
+  __ JumpIfSmi(eax, if_false);
   __ cmp(eax, isolate()->factory()->null_value());
   __ j(equal, if_true);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -2387,9 +2343,9 @@
   __ test(ecx, Immediate(1 << Map::kIsUndetectable));
   __ j(not_zero, if_false);
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   __ j(below, if_false);
-  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
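
Many hunks in this file replace the explicit test against kSmiTagMask plus conditional jump with the JumpIfSmi/JumpIfNotSmi macro-assembler helpers, which presumably expand to the same check. The check itself is a single bit test; a standalone restatement:

#include <cstdint>

// On ia32, small integers ("smis") carry a 0 in the lowest bit, so a value
// is a smi exactly when the bit selected by kSmiTagMask is clear.
const intptr_t kSmiTagMask = 1;

bool IsSmi(intptr_t value) {
  return (value & kSmiTagMask) == 0;
}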
 
@@ -2409,9 +2365,8 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(equal, if_false);
-  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+  __ JumpIfSmi(eax, if_false);
+  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
@@ -2431,8 +2386,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, if_false);
+  __ JumpIfSmi(eax, if_false);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
@@ -2474,7 +2428,7 @@
   // Look for valueOf symbol in the descriptor array, and indicate false if
   // found. The type is not checked, so if it is a transition it is a false
   // negative.
-  __ mov(ebx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(ebx, ebx);
   __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
   // ebx: descriptor array
   // ecx: length of descriptor array
@@ -2506,8 +2460,7 @@
   // If a valueOf property is not found on the object, check that its
   // prototype is the unmodified String prototype. If not, the result is false.
   __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, if_false);
+  __ JumpIfSmi(ecx, if_false);
   __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(edx,
@@ -2539,8 +2492,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, if_false);
+  __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
@@ -2561,8 +2513,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(equal, if_false);
+  __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
@@ -2583,8 +2534,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(equal, if_false);
+  __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
@@ -2640,6 +2590,7 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(ebx);
+  decrement_stack_height();
   __ cmp(eax, Operand(ebx));
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
@@ -2655,7 +2606,7 @@
   // parameter count in eax.
   VisitForAccumulatorValue(args->at(0));
   __ mov(edx, eax);
-  __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+  __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
   context()->Plug(eax);
@@ -2667,7 +2618,7 @@
 
   Label exit;
   // Get the number of formal parameters.
-  __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+  __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
 
   // Check if the calling frame is an arguments adaptor frame.
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2692,21 +2643,22 @@
   VisitForAccumulatorValue(args->at(0));
 
   // If the object is a smi, we return null.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &null);
+  __ JumpIfSmi(eax, &null);
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax);  // Map is now in eax.
+  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
+  // Map is now in eax.
   __ j(below, &null);
 
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-  __ j(equal, &function);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+  __ j(above_equal, &function);
 
   // Check if the constructor in the map is a function.
   __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
@@ -2749,13 +2701,12 @@
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
   ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
     VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
+    decrement_stack_height(2);
   }
-#endif
   // Finally, we're expected to leave a value on the top of the stack.
   __ mov(eax, isolate()->factory()->undefined_value());
   context()->Plug(eax);
@@ -2793,7 +2744,7 @@
     __ movd(xmm1, Operand(ebx));
     __ movd(xmm0, Operand(eax));
     __ cvtss2sd(xmm1, xmm1);
-    __ pxor(xmm0, xmm1);
+    __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
     __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
   } else {
@@ -2820,6 +2771,7 @@
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
+  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
@@ -2833,6 +2785,7 @@
   VisitForStackValue(args->at(2));
   VisitForStackValue(args->at(3));
   __ CallStub(&stub);
+  decrement_stack_height(4);
   context()->Plug(eax);
 }
 
@@ -2842,13 +2795,12 @@
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
-  NearLabel done;
+  Label done;
   // If the object is a smi return the object.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ JumpIfSmi(eax, &done, Label::kNear);
   // If the object is not a value type, return the object.
   __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
 
   __ bind(&done);
@@ -2868,6 +2820,7 @@
   } else {
     __ CallRuntime(Runtime::kMath_pow, 2);
   }
+  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
@@ -2878,15 +2831,15 @@
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(ebx);  // eax = value. ebx = object.
+  decrement_stack_height();
 
-  NearLabel done;
+  Label done;
   // If the object is a smi, return the value.
-  __ test(ebx, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ JumpIfSmi(ebx, &done, Label::kNear);
 
   // If the object is not a value type, return the value.
   __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
 
   // Store the value.
   __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
@@ -2908,6 +2861,7 @@
 
   NumberToStringStub stub;
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -2942,6 +2896,7 @@
   Register result = edx;
 
   __ pop(object);
+  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
@@ -2990,6 +2945,7 @@
   Register result = eax;
 
   __ pop(object);
+  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3034,6 +2990,7 @@
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
+  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
@@ -3046,6 +3003,7 @@
 
   StringCompareStub stub;
   __ CallStub(&stub);
+  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
@@ -3057,6 +3015,7 @@
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3068,6 +3027,7 @@
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3079,6 +3039,7 @@
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3088,6 +3049,7 @@
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3095,18 +3057,19 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; ++i) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in edi. Move it in there.
-  if (!result_register().is(edi)) __ mov(edi, result_register());
+  // InvokeFunction requires the function in edi. Move it in there.
+  __ mov(edi, result_register());
   ParameterCount count(arg_count);
-  __ InvokeFunction(edi, count, CALL_FUNCTION);
+  __ InvokeFunction(edi, count, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
@@ -3119,6 +3082,7 @@
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
+  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
@@ -3156,8 +3120,7 @@
   __ mov(index_2, Operand(esp, 0));
   __ mov(temp, index_1);
   __ or_(temp, Operand(index_2));
-  __ test(temp, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow_case);
+  __ JumpIfNotSmi(temp, &slow_case);
 
   // Check that both indices are valid.
   __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
@@ -3193,6 +3156,7 @@
   __ CallRuntime(Runtime::kSwapElements, 3);
 
   __ bind(&done);
+  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
@@ -3226,7 +3190,7 @@
 
   Label done, not_found;
   // tmp now holds finger offset as a smi.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
   __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
   __ j(not_equal, &not_found);
@@ -3262,8 +3226,7 @@
   // Fail if either is a non-HeapObject.
   __ mov(tmp, left);
   __ and_(Operand(tmp), right);
-  __ test(Operand(tmp), Immediate(kSmiTagMask));
-  __ j(zero, &fail);
+  __ JumpIfSmi(tmp, &fail);
   __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
   __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
   __ j(not_equal, &fail);
@@ -3279,6 +3242,7 @@
   __ mov(eax, Immediate(isolate()->factory()->true_value()));
   __ bind(&done);
 
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3355,15 +3319,12 @@
   __ sub(Operand(esp), Immediate(2 * kPointerSize));
   __ cld();
   // Check that the array is a JSArray
-  __ test(array, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
+  __ JumpIfSmi(array, &bailout);
   __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
   __ j(not_equal, &bailout);
 
   // Check that the array has fast elements.
-  __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
-            1 << Map::kHasFastElements);
-  __ j(zero, &bailout);
+  __ CheckFastElements(scratch, &bailout);
 
   // If the array has length zero, return the empty string.
   __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
@@ -3399,8 +3360,7 @@
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize));
-  __ test(string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
+  __ JumpIfSmi(string, &bailout);
   __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
   __ and_(scratch, Immediate(
@@ -3433,8 +3393,7 @@
 
   // Check that the separator is a flat ASCII string.
   __ mov(string, separator_operand);
-  __ test(string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
+  __ JumpIfSmi(string, &bailout);
   __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
   __ and_(scratch, Immediate(
@@ -3587,6 +3546,7 @@
   __ add(Operand(esp), Immediate(3 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3606,6 +3566,7 @@
     // Prepare for calling JS runtime function.
     __ mov(eax, GlobalObjectOperand());
     __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+    increment_stack_height();
   }
 
   // Push the arguments ("left-to-right").
@@ -3617,16 +3578,21 @@
   if (expr->is_jsruntime()) {
     // Call the JS runtime function via a call IC.
     __ Set(ecx, Immediate(expr->name()));
-    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-        arg_count, in_loop);
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+    Handle<Code> ic =
+        isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+    __ call(ic, mode, expr->id());
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
   }
+  decrement_stack_height(arg_count);
+  if (expr->is_jsruntime()) {
+    decrement_stack_height();
+  }
+
   context()->Plug(eax);
 }
 
@@ -3635,36 +3601,32 @@
   switch (expr->op()) {
     case Token::DELETE: {
       Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
-      Property* prop = expr->expression()->AsProperty();
-      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
 
-      if (prop != NULL) {
-        if (prop->is_synthetic()) {
-          // Result of deleting parameters is false, even when they rewrite
-          // to accesses on the arguments object.
-          context()->Plug(false);
-        } else {
-          VisitForStackValue(prop->obj());
-          VisitForStackValue(prop->key());
-          __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-          context()->Plug(eax);
-        }
-      } else if (var != NULL) {
+      if (property != NULL) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        decrement_stack_height(2);
+        context()->Plug(eax);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
-        // but "delete this" is.
+        // but "delete this" is allowed.
         ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
-        if (var->is_global()) {
+        if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ push(Immediate(var->name()));
           __ push(Immediate(Smi::FromInt(kNonStrictMode)));
           __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
           context()->Plug(eax);
-        } else if (var->AsSlot() != NULL &&
-                   var->AsSlot()->type() != Slot::LOOKUP) {
-          // Result of deleting non-global, non-dynamic variables is false.
-          // The subexpression does not have side effects.
-          context()->Plug(false);
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+          // Result of deleting non-global variables is false.  'this' is
+          // not really a variable, though we implement it as one.  The
+          // subexpression does not have side effects.
+          context()->Plug(var->is_this());
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
@@ -3717,6 +3679,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ CallRuntime(Runtime::kTypeof, 1);
+      decrement_stack_height();
       context()->Plug(eax);
       break;
     }
@@ -3725,8 +3688,7 @@
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
       VisitForAccumulatorValue(expr->expression());
       Label no_conversion;
-      __ test(result_register(), Immediate(kSmiTagMask));
-      __ j(zero, &no_conversion);
+      __ JumpIfSmi(result_register(), &no_conversion);
       ToNumberStub convert_stub;
       __ CallStub(&convert_stub);
       __ bind(&no_conversion);
@@ -3734,48 +3696,13 @@
       break;
     }
 
-    case Token::SUB: {
-      Comment cmt(masm_, "[ UnaryOperation (SUB)");
-      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
-      // GenericUnaryOpStub expects the argument to be in the
-      // accumulator register eax.
-      VisitForAccumulatorValue(expr->expression());
-      __ CallStub(&stub);
-      context()->Plug(eax);
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
       break;
-    }
 
-    case Token::BIT_NOT: {
-      Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
-      // The generic unary operation stub expects the argument to be
-      // in the accumulator register eax.
-      VisitForAccumulatorValue(expr->expression());
-      Label done;
-      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
-      if (inline_smi_case) {
-        NearLabel call_stub;
-        __ test(eax, Immediate(kSmiTagMask));
-        __ j(not_zero, &call_stub);
-        __ lea(eax, Operand(eax, kSmiTagMask));
-        __ not_(eax);
-        __ jmp(&done);
-        __ bind(&call_stub);
-      }
-      bool overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode mode =
-          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      UnaryOpFlags flags = inline_smi_case
-          ? NO_UNARY_SMI_CODE_IN_STUB
-          : NO_UNARY_FLAGS;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
-      __ CallStub(&stub);
-      __ bind(&done);
-      context()->Plug(eax);
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
       break;
-    }
 
     default:
       UNREACHABLE();
@@ -3783,6 +3710,22 @@
 }
 
 
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in the
+  // accumulator register eax.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  context()->Plug(eax);
+}
+
+
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -3790,12 +3733,15 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // as the left-hand side.
   if (!expr->expression()->IsValidLeftHandSide()) {
-    VisitForEffect(expr->expression());
+    ASSERT(expr->expression()->AsThrow() != NULL);
+    VisitInCurrentContext(expr->expression());
+    // Visiting Throw does not plug the context.
+    context()->Plug(eax);
     return;
   }
 
   // Expression can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* prop = expr->expression()->AsProperty();
@@ -3810,30 +3756,25 @@
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
-    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ push(Immediate(Smi::FromInt(0)));
+      increment_stack_height();
     }
     if (assign_type == NAMED_PROPERTY) {
       // Put the object both on the stack and in the accumulator.
       VisitForAccumulatorValue(prop->obj());
       __ push(eax);
+      increment_stack_height();
       EmitNamedPropertyLoad(prop);
     } else {
-      if (prop->is_arguments_access()) {
-        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
-        MemOperand slot_operand =
-            EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
-        __ push(slot_operand);
-        __ SafeSet(eax, Immediate(prop->key()->AsLiteral()->handle()));
-      } else {
-        VisitForStackValue(prop->obj());
-        VisitForAccumulatorValue(prop->key());
-      }
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
+      increment_stack_height();
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -3847,10 +3788,9 @@
   }
 
   // Call ToNumber only if operand is not a smi.
-  NearLabel no_conversion;
+  Label no_conversion;
   if (ShouldInlineSmiCase(expr->op())) {
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &no_conversion);
+    __ JumpIfSmi(eax, &no_conversion, Label::kNear);
   }
   ToNumberStub convert_stub;
   __ CallStub(&convert_stub);
@@ -3865,6 +3805,7 @@
       switch (assign_type) {
         case VARIABLE:
           __ push(eax);
+          increment_stack_height();
           break;
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
@@ -3877,7 +3818,7 @@
   }
 
   // Inline smi case if we are in a loop.
-  NearLabel stub_call, done;
+  Label done, stub_call;
   JumpPatchSite patch_site(masm_);
 
   if (ShouldInlineSmiCase(expr->op())) {
@@ -3886,10 +3827,10 @@
     } else {
       __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
     }
-    __ j(overflow, &stub_call);
+    __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    patch_site.EmitJumpIfSmi(eax, &done);
+    patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
 
     __ bind(&stub_call);
     // Call stub. Undo operation first.
@@ -3906,8 +3847,9 @@
   // Call stub for +1/-1.
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
-  EmitCallIC(stub.GetCode(), &patch_site);
+  BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  patch_site.EmitPatchInfo();
   __ bind(&done);
 
   // Store the value returned in eax.
@@ -3937,10 +3879,11 @@
     case NAMED_PROPERTY: {
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
+      decrement_stack_height();
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3954,10 +3897,12 @@
     case KEYED_PROPERTY: {
       __ pop(ecx);
       __ pop(edx);
+      decrement_stack_height();
+      decrement_stack_height();
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         // Result is on the stack
@@ -3978,25 +3923,22 @@
   ASSERT(!context()->IsEffect());
   ASSERT(!context()->IsTest());
 
-  if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
     __ mov(eax, GlobalObjectOperand());
     __ mov(ecx, Immediate(proxy->name()));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    __ call(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
-  } else if (proxy != NULL &&
-             proxy->var()->AsSlot() != NULL &&
-             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     Label done, slow;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    Slot* slot = proxy->var()->AsSlot();
-    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ push(esi);
@@ -4008,30 +3950,18 @@
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    context()->HandleExpression(expr);
+    VisitInCurrentContext(expr);
   }
 }
 
 
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
-                                          Expression* left,
-                                          Expression* right,
-                                          Label* if_true,
-                                          Label* if_false,
-                                          Label* fall_through) {
-  if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
-  // Check for the pattern: typeof <expression> == <string literal>.
-  Literal* right_literal = right->AsLiteral();
-  if (right_literal == NULL) return false;
-  Handle<Object> right_literal_value = right_literal->handle();
-  if (!right_literal_value->IsString()) return false;
-  UnaryOperation* left_unary = left->AsUnaryOperation();
-  if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
-  Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(left_unary->expression());
+    VisitForTypeofValue(expr);
   }
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
@@ -4053,6 +3983,10 @@
     __ j(equal, if_true);
     __ cmp(eax, isolate()->factory()->false_value());
     Split(equal, if_true, if_false, fall_through);
+  } else if (FLAG_harmony_typeof &&
+             check->Equals(isolate()->heap()->null_symbol())) {
+    __ cmp(eax, isolate()->factory()->null_value());
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
     __ cmp(eax, isolate()->factory()->undefined_value());
     __ j(equal, if_true);
@@ -4064,16 +3998,18 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(eax, if_false);
-    __ CmpObjectType(eax, FIRST_FUNCTION_CLASS_TYPE, edx);
+    __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
     Split(above_equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(eax, if_false);
-    __ cmp(eax, isolate()->factory()->null_value());
-    __ j(equal, if_true);
-    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edx);
+    if (!FLAG_harmony_typeof) {
+      __ cmp(eax, isolate()->factory()->null_value());
+      __ j(equal, if_true);
+    }
+    __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
     __ j(below, if_false);
-    __ CmpInstanceType(edx, FIRST_FUNCTION_CLASS_TYPE);
-    __ j(above_equal, if_false);
+    __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ j(above, if_false);
     // Check for undetectable objects => false.
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
@@ -4081,8 +4017,18 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
+}
 
-  return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  Split(equal, if_true, if_false, fall_through);
 }
 
 
@@ -4102,19 +4048,18 @@
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
-  Token::Value op = expr->op();
-  Expression* left = expr->left();
-  Expression* right = expr->right();
-  if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
     context()->Plug(if_true, if_false);
     return;
   }
 
+  Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (expr->op()) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      decrement_stack_height(2);
       PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
@@ -4124,6 +4069,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
+      decrement_stack_height(2);
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       // The stub returns 0 for true.
@@ -4134,11 +4080,8 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cc = no_condition;
-      bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
-          strict = true;
-          // Fall through
         case Token::EQ:
           cc = equal;
           __ pop(edx);
@@ -4168,14 +4111,15 @@
         default:
           UNREACHABLE();
       }
+      decrement_stack_height();
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
-        NearLabel slow_case;
+        Label slow_case;
         __ mov(ecx, Operand(edx));
         __ or_(ecx, Operand(eax));
-        patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+        patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
         __ cmp(edx, Operand(eax));
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
@@ -4184,7 +4128,8 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      EmitCallIC(ic, &patch_site);
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      patch_site.EmitPatchInfo();
 
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
@@ -4216,8 +4161,7 @@
     __ j(equal, if_true);
     __ cmp(eax, isolate()->factory()->undefined_value());
     __ j(equal, if_true);
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, if_false);
+    __ JumpIfSmi(eax, if_false);
     // It can be an undetectable object.
     __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
     __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
@@ -4244,78 +4188,6 @@
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
-  ASSERT(mode == RelocInfo::CODE_TARGET ||
-         mode == RelocInfo::CODE_TARGET_CONTEXT);
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-      __ IncrementCounter(isolate()->counters()->named_load_full(), 1);
-      break;
-    case Code::KEYED_LOAD_IC:
-      __ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
-      break;
-    case Code::STORE_IC:
-      __ IncrementCounter(isolate()->counters()->named_store_full(), 1);
-      break;
-    case Code::KEYED_STORE_IC:
-      __ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
-    default:
-      break;
-  }
-
-  __ call(ic, mode);
-
-  // Crankshaft doesn't need patching of inlined loads and stores.
-  // When compiling the snapshot we need to produce code that works
-  // with and without Crankshaft.
-  if (V8::UseCrankshaft() && !Serializer::enabled()) {
-    return;
-  }
-
-  // If we're calling a (keyed) load or store stub, we have to mark
-  // the call as containing no inlined code so we will not attempt to
-  // patch it.
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      __ nop();  // Signals no inlined code.
-      break;
-    default:
-      // Do nothing.
-      break;
-  }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
-  Counters* counters = isolate()->counters();
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-      __ IncrementCounter(counters->named_load_full(), 1);
-      break;
-    case Code::KEYED_LOAD_IC:
-      __ IncrementCounter(counters->keyed_load_full(), 1);
-      break;
-    case Code::STORE_IC:
-      __ IncrementCounter(counters->named_store_full(), 1);
-      break;
-    case Code::KEYED_STORE_IC:
-      __ IncrementCounter(counters->keyed_store_full(), 1);
-    default:
-      break;
-  }
-
-  __ call(ic, RelocInfo::CODE_TARGET);
-  if (patch_site != NULL && patch_site->is_bound()) {
-    patch_site->EmitPatchInfo();
-  } else {
-    __ nop();  // Signals no inlined code.
-  }
-}
-
-
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ mov(Operand(ebp, frame_offset), value);
@@ -4327,18 +4199,38 @@
 }
 
 
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* declaration_scope = scope()->DeclarationScope();
+  if (declaration_scope->is_global_scope()) {
+    // Contexts nested in the global context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code.  Pass a smi sentinel and let the runtime look up the empty
+    // function.
+    __ push(Immediate(Smi::FromInt(0)));
+  } else if (declaration_scope->is_eval_scope()) {
+    // Contexts nested inside eval code have the same closure as the context
+    // calling eval, not the anonymous closure containing the eval code.
+    // Fetch it from the context.
+    __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+  } else {
+    ASSERT(declaration_scope->is_function_scope());
+    __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+}
+
+
 // ----------------------------------------------------------------------------
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
   // Cook return address on top of stack (smi encoded Code* delta)
   ASSERT(!result_register().is(edx));
-  __ mov(edx, Operand(esp, 0));
+  __ pop(edx);
   __ sub(Operand(edx), Immediate(masm_->CodeObject()));
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
-  ASSERT_EQ(0, kSmiTag);
-  __ add(edx, Operand(edx));  // Convert to smi.
-  __ mov(Operand(esp, 0), edx);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ SmiTag(edx);
+  __ push(edx);
   // Store result register while executing finally block.
   __ push(result_register());
 }
@@ -4346,15 +4238,40 @@
 
 void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(edx));
-  // Restore result register from stack.
   __ pop(result_register());
   // Uncook return address.
-  __ mov(edx, Operand(esp, 0));
-  __ sar(edx, 1);  // Convert smi to int.
+  __ pop(edx);
+  __ SmiUntag(edx);
   __ add(Operand(edx), Immediate(masm_->CodeObject()));
-  __ mov(Operand(esp, 0), edx);
-  // And return.
-  __ ret(0);
+  __ jmp(Operand(edx));
+}
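
EnterFinallyBlock and ExitFinallyBlock now pop the return address, store it as a smi-tagged delta from the code object ("cooking" it), and reverse the transformation on exit. A standalone sketch of the arithmetic, assuming the one-bit smi tag that the STATIC_ASSERTs above require:

#include <cstdint>

// kSmiTag == 0 and kSmiTagSize + kSmiShiftSize == 1, so tagging is a left
// shift by one and untagging an arithmetic right shift by one.
intptr_t SmiTag(intptr_t value) { return value << 1; }
intptr_t SmiUntag(intptr_t value) { return value >> 1; }

intptr_t CookReturnAddress(intptr_t return_address, intptr_t code_start) {
  return SmiTag(return_address - code_start);  // A tagged offset, not a pointer.
}

intptr_t UncookReturnAddress(intptr_t cooked, intptr_t code_start) {
  return SmiUntag(cooked) + code_start;        // Back to a real address.
}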
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+    int* stack_depth,
+    int* context_length) {
+  // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally
+  // code, we can restore it directly from there for the finally code
+  // rather than iteratively unwinding contexts via their previous
+  // links.
+  __ Drop(*stack_depth);  // Down to the handler block.
+  if (*context_length > 0) {
+    // Restore the context to its dedicated register and the stack.
+    __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
+    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+  }
+  __ PopTryHandler();
+  __ call(finally_entry_);
+
+  *stack_depth = 0;
+  *context_length = 0;
+  return previous_;
 }
 
 
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index b7af03c..9b5cc56 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -50,11 +50,11 @@
   // Register usage:
   //   type: holds the receiver instance type on entry.
   __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, global_object, not_taken);
+  __ j(equal, global_object);
   __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
-  __ j(equal, global_object, not_taken);
+  __ j(equal, global_object);
   __ cmp(type, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, global_object, not_taken);
+  __ j(equal, global_object);
 }
 
 
@@ -72,17 +72,16 @@
   //   r1: used to hold receivers map.
 
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the receiver is a valid JS object.
   __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
   __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
-  __ cmp(r0, FIRST_JS_OBJECT_TYPE);
-  __ j(below, miss, not_taken);
+  __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
+  __ j(below, miss);
 
   // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
 
   GenerateGlobalInstanceTypeCheck(masm, r0, miss);
 
@@ -90,68 +89,13 @@
   __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
             (1 << Map::kIsAccessCheckNeeded) |
             (1 << Map::kHasNamedInterceptor));
-  __ j(not_zero, miss, not_taken);
+  __ j(not_zero, miss);
 
   __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ CheckMap(r0, FACTORY->hash_table_map(), miss, true);
+  __ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
 }
 
 
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
-                                           Label* miss,
-                                           Label* done,
-                                           Register elements,
-                                           Register name,
-                                           Register r0,
-                                           Register r1) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-  __ mov(r1, FieldOperand(elements, kCapacityOffset));
-  __ shr(r1, kSmiTagSize);  // convert smi to int
-  __ dec(r1);
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-  for (int i = 0; i < kProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
-    __ shr(r0, String::kHashShift);
-    if (i > 0) {
-      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r0, Operand(r1));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
-
-    // Check if the key is identical to the name.
-    __ cmp(name, Operand(elements, r0, times_4,
-                         kElementsStartOffset - kHeapObjectTag));
-    if (i != kProbes - 1) {
-      __ j(equal, done, taken);
-    } else {
-      __ j(not_equal, miss, not_taken);
-    }
-  }
-}
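
The helper removed above, and presumably the StringDictionaryLookupStub::GeneratePositiveLookup calls that replace it below, probe the dictionary a fixed number of times using the masked index described in its comments. A standalone restatement of the index computation, with the entry size taken from the code above and the per-probe offset left abstract (the generated code folds StringDictionary::GetProbeOffset(i) in as an immediate):

#include <cstdint>

// Word offset into the elements area for one probe of a key with the given
// hash; capacity is a power of two.
uint32_t ProbeWordOffset(uint32_t hash, uint32_t probe_offset,
                         uint32_t capacity) {
  const int kEntrySize = 3;                       // key, value, details
  uint32_t mask = capacity - 1;
  uint32_t index = (hash + probe_offset) & mask;  // Masked probe index.
  return index * kEntrySize;                      // Scale by the entry size.
}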
-
-
-
 // Helper function used to load a property from a dictionary backing
 // storage. This function may fail to load a property even though it is
 // in the dictionary, so code at miss_label must always call a backup
@@ -183,13 +127,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 r0,
-                                 r1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     r0,
+                                                     r1);
 
   // If probing finds an entry in the dictionary, r0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -200,8 +144,8 @@
       StringDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
-  __ j(not_zero, miss_label, not_taken);
+          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+  __ j(not_zero, miss_label);
 
   // Get the value at the masked, scaled index.
   const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -238,13 +182,13 @@
 
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 r0,
-                                 r1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     r0,
+                                                     r1);
 
   // If probing finds an entry in the dictionary, r0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -254,12 +198,12 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask
-      = (PropertyDetails::TypeField::mask() |
-         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
   __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
           Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label, not_taken);
+  __ j(not_zero, miss_label);
 
   // Store the value at the masked, scaled index.
   const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -272,111 +216,6 @@
 }
 
 
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
-                                         Label* miss,
-                                         Register elements,
-                                         Register key,
-                                         Register r0,
-                                         Register r1,
-                                         Register r2,
-                                         Register result) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver and is unchanged.
-  //
-  // key      - holds the smi key on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // r1 - used to hold the capacity mask of the dictionary
-  //
-  // r2 - used for the index into the dictionary.
-  //
-  // result - holds the result on exit if the load succeeds and we fall through.
-
-  Label done;
-
-  // Compute the hash code from the untagged key.  This must be kept in sync
-  // with ComputeIntegerHash in utils.h.
-  //
-  // hash = ~hash + (hash << 15);
-  __ mov(r1, r0);
-  __ not_(r0);
-  __ shl(r1, 15);
-  __ add(r0, Operand(r1));
-  // hash = hash ^ (hash >> 12);
-  __ mov(r1, r0);
-  __ shr(r1, 12);
-  __ xor_(r0, Operand(r1));
-  // hash = hash + (hash << 2);
-  __ lea(r0, Operand(r0, r0, times_4, 0));
-  // hash = hash ^ (hash >> 4);
-  __ mov(r1, r0);
-  __ shr(r1, 4);
-  __ xor_(r0, Operand(r1));
-  // hash = hash * 2057;
-  __ imul(r0, r0, 2057);
-  // hash = hash ^ (hash >> 16);
-  __ mov(r1, r0);
-  __ shr(r1, 16);
-  __ xor_(r0, Operand(r1));
-
-  // Compute capacity mask.
-  __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
-  __ shr(r1, kSmiTagSize);  // convert smi to int
-  __ dec(r1);
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
-    // Use r2 for index calculations and keep the hash intact in r0.
-    __ mov(r2, r0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r2, Operand(r1));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(NumberDictionary::kEntrySize == 3);
-    __ lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
-
-    // Check if the key matches.
-    __ cmp(key, FieldOperand(elements,
-                             r2,
-                             times_pointer_size,
-                             NumberDictionary::kElementsStartOffset));
-    if (i != (kProbes - 1)) {
-      __ j(equal, &done, taken);
-    } else {
-      __ j(not_equal, miss, not_taken);
-    }
-  }
-
-  __ bind(&done);
-  // Check that the value is a normal propety.
-  const int kDetailsOffset =
-      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  ASSERT_EQ(NORMAL, 0);
-  __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
-          Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
-  __ j(not_zero, miss);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset =
-      NumberDictionary::kElementsStartOffset + kPointerSize;
-  __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
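
The code removed above emitted, instruction by instruction, the bit-mixing sequence spelled out in its comments, and per those comments it had to stay in sync with ComputeIntegerHash in utils.h. A standalone restatement of the same sequence, for readability only:

#include <cstdint>

// Same steps, in the same order, as the hash computation emitted above.
uint32_t ComputeIntegerHashSketch(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}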
-
-
-// The offset from the inlined patch site to the start of the
-// inlined load instruction.  It is 7 bytes (test eax, imm) plus
-// 6 bytes (jne slow_label).
-const int LoadIC::kOffsetToLoadInstruction = 13;
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
@@ -434,8 +273,7 @@
   //   map - used to hold the map of the receiver.
 
   // Check that the object isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, slow, not_taken);
+  __ JumpIfSmi(receiver, slow);
 
   // Get the map of the receiver.
   __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -443,7 +281,7 @@
   // Check bit field.
   __ test_b(FieldOperand(map, Map::kBitFieldOffset),
             (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
-  __ j(not_zero, slow, not_taken);
+  __ j(not_zero, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
   // we enter the runtime system to make sure that indexing
@@ -451,7 +289,7 @@
   ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
 
   __ CmpInstanceType(map, JS_OBJECT_TYPE);
-  __ j(below, slow, not_taken);
+  __ j(below, slow);
 }
 
 
@@ -475,7 +313,10 @@
   __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
   if (not_fast_array != NULL) {
     // Check that the object is in fast mode and writable.
-    __ CheckMap(scratch, FACTORY->fixed_array_map(), not_fast_array, true);
+    __ CheckMap(scratch,
+                FACTORY->fixed_array_map(),
+                not_fast_array,
+                DONT_DO_SMI_CHECK);
   } else {
     __ AssertFastElements(scratch);
   }
@@ -483,7 +324,7 @@
   __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
   __ j(above_equal, out_of_range);
   // Fast case: Do the load.
-  ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
   __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
   __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
@@ -514,12 +355,89 @@
   // Is the string an array index, with cached numeric value?
   __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
   __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string, not_taken);
+  __ j(zero, index_string);
 
   // Is the string a symbol?
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(kSymbolTag != 0);
   __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
-  __ j(zero, not_symbol, not_taken);
+  __ j(zero, not_symbol);
+}
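
Likewise, the string-key classification above can be summarised as a small decision function; the two masks below are illustrative placeholders for String::kContainsCachedArrayIndexMask and kIsSymbolMask:

    #include <cstdint>

    enum KeyKind { kHandleAsArrayIndex, kSymbolKey, kSlowKey };

    // Illustrative placeholders for the real String constants.
    const uint32_t kContainsCachedArrayIndexMask = 0xFFu << 24;  // assumption
    const uint8_t kIsSymbolMask = 0x40;                          // assumption

    KeyKind ClassifyStringKey(uint32_t hash_field, uint8_t instance_type) {
      // All mask bits clear means the hash field caches an array index.
      if ((hash_field & kContainsCachedArrayIndexMask) == 0) {
        return kHandleAsArrayIndex;
      }
      if ((instance_type & kIsSymbolMask) == 0) return kSlowKey;  // not a symbol
      return kSymbolKey;
    }
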
+
+
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+                                             Register object,
+                                             Register key,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* unmapped_case,
+                                             Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+  Factory* factory = masm->isolate()->factory();
+
+  // Check that the receiver is a JSObject. Because of the elements
+  // map check later, we do not need to check for interceptors or
+  // whether it requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+  __ j(below, slow_case);
+
+  // Check that the key is a positive smi.
+  __ test(key, Immediate(0x80000001));
+  __ j(not_zero, slow_case);
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+  __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+  __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
+  __ cmp(key, Operand(scratch2));
+  __ j(greater_equal, unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+  __ mov(scratch2, FieldOperand(scratch1,
+                                key,
+                                times_half_pointer_size,
+                                kHeaderSize));
+  __ cmp(scratch2, factory->the_hole_value());
+  __ j(equal, unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  const int kContextOffset = FixedArray::kHeaderSize;
+  __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+  return FieldOperand(scratch1,
+                      scratch2,
+                      times_half_pointer_size,
+                      Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                               Register key,
+                                               Register parameter_map,
+                                               Register scratch,
+                                               Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+  __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmp(key, Operand(scratch));
+  __ j(greater_equal, slow_case);
+  return FieldOperand(backing_store,
+                      key,
+                      times_half_pointer_size,
+                      FixedArray::kHeaderSize);
 }
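
Together these two helpers implement the lookup for aliased (non-strict) arguments objects: the elements array is a parameter map whose first two entries hold the context and the arguments backing store, and whose remaining entries are either a context slot index (parameter still aliased) or the hole (unmapped, so the value lives in the backing store). A simplified C++ sketch of that two-level lookup, with plain structs standing in for the tagged objects:

    #include <cstddef>
    #include <vector>

    // Simplified stand-ins for the tagged structures the stubs walk.
    struct Context { std::vector<int> slots; };
    struct ParameterMap {
      Context* context;            // entry 0 of the parameter map
      std::vector<int>* backing;   // entry 1: the arguments backing store
      std::vector<int> mapped;     // remaining entries: context slot, or -1 (hole)
    };

    // Returns where arguments[key] lives, or NULL to signal the slow case.
    int* ArgumentsLookup(ParameterMap& map, size_t key) {
      if (key < map.mapped.size() && map.mapped[key] != -1) {
        // Mapped case: the parameter is still aliased to a context slot.
        return &map.context->slots[map.mapped[key]];
      }
      // Unmapped case: fall back to the backing store, bounds-checked.
      if (key >= map.backing->size()) return NULL;   // slow case in the stub
      return &(*map.backing)[key];
    }
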
 
 
@@ -533,8 +451,7 @@
   Label probe_dictionary, check_number_dictionary;
 
   // Check that the key is a smi.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_string, not_taken);
+  __ JumpIfNotSmi(eax, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from
   // where a numeric string is converted to a smi.
@@ -542,11 +459,8 @@
   GenerateKeyedLoadReceiverCheck(
       masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
 
-  // Check the "has fast elements" bit in the receiver's map which is
-  // now in ecx.
-  __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
-            1 << Map::kHasFastElements);
-  __ j(zero, &check_number_dictionary, not_taken);
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(ecx, &check_number_dictionary);
 
   GenerateFastArrayLoad(masm,
                         edx,
@@ -570,19 +484,21 @@
   // ebx: untagged index
   // eax: key
   // ecx: elements
-  __ CheckMap(ecx, isolate->factory()->hash_table_map(), &slow, true);
+  __ CheckMap(ecx,
+              isolate->factory()->hash_table_map(),
+              &slow,
+              DONT_DO_SMI_CHECK);
   Label slow_pop_receiver;
   // Push receiver on the stack to free up a register for the dictionary
   // probing.
   __ push(edx);
-  GenerateNumberDictionaryLoad(masm,
-                               &slow_pop_receiver,
-                               ecx,
-                               eax,
-                               ebx,
-                               edx,
-                               edi,
-                               eax);
+  __ LoadFromNumberDictionary(&slow_pop_receiver,
+                              ecx,
+                              eax,
+                              ebx,
+                              edx,
+                              edi,
+                              eax);
   // Pop receiver before returning.
   __ pop(edx);
   __ ret(0);
@@ -710,7 +626,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -723,12 +639,11 @@
   Label slow;
 
   // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ JumpIfSmi(edx, &slow);
 
   // Check that the key is an array index, that is Uint32.
   __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
 
   // Get the map of the receiver.
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
@@ -738,7 +653,7 @@
   __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
   __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
   __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
 
   // Everything is fine, call runtime.
   __ pop(ecx);
@@ -753,7 +668,61 @@
   __ TailCallExternalReference(ref, 2, 1);
 
   __ bind(&slow);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
+}
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label slow, notin;
+  Factory* factory = masm->isolate()->factory();
+  Operand mapped_location =
+      GenerateMappedArgumentsLookup(masm, edx, eax, ebx, ecx, &notin, &slow);
+  __ mov(eax, mapped_location);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in ebx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, eax, ebx, ecx, &slow);
+  __ cmp(unmapped_location, factory->the_hole_value());
+  __ j(equal, &slow);
+  __ mov(eax, unmapped_location);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label slow, notin;
+  Operand mapped_location =
+      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
+  __ mov(mapped_location, eax);
+  __ lea(ecx, mapped_location);
+  __ mov(edx, eax);
+  __ RecordWrite(ebx, ecx, edx);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in ebx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
+  __ mov(unmapped_location, eax);
+  __ lea(edi, unmapped_location);
+  __ mov(edx, eax);
+  __ RecordWrite(ebx, edi, edx);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
@@ -768,23 +737,25 @@
   Label slow, fast, array, extra;
 
   // Check that the object isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ JumpIfSmi(edx, &slow);
   // Get the map from the receiver.
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
             1 << Map::kIsAccessCheckNeeded);
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
   // Check that the key is a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow, not_taken);
+  __ JumpIfNotSmi(ecx, &slow);
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
   __ j(equal, &array);
-  // Check that the object is some kind of JS object.
-  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
-  __ j(below, &slow, not_taken);
+  // Check that the object is some kind of JSObject.
+  __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
+  __ j(below, &slow);
+  __ CmpInstanceType(edi, JS_PROXY_TYPE);
+  __ j(equal, &slow);
+  __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
+  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // eax: value
@@ -792,9 +763,9 @@
   // ecx: key (a smi)
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
   // Check that the object is in fast mode and writable.
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
   __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-  __ j(below, &fast, taken);
+  __ j(below, &fast);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -809,9 +780,10 @@
   // ecx: key, a smi.
   // edi: receiver->elements, a FixedArray
   // flags: compare (ecx, edx.length())
-  __ j(not_equal, &slow, not_taken);  // do not leave holes in the array
+  // do not leave holes in the array:
+  __ j(not_equal, &slow);
   __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow, not_taken);
+  __ j(above_equal, &slow);
   // Add 1 to receiver->length, and go to fast array write.
   __ add(FieldOperand(edx, JSArray::kLengthOffset),
          Immediate(Smi::FromInt(1)));
@@ -825,12 +797,12 @@
   // edx: receiver, a JSArray
   // ecx: key, a smi.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
   __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra, not_taken);
+  __ j(above_equal, &extra);
 
   // Fast case: Do the store.
   __ bind(&fast);
@@ -850,7 +822,8 @@
 // The generated code falls through if both probes miss.
 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                           int argc,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- edx                 : receiver
@@ -859,9 +832,8 @@
 
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
@@ -873,10 +845,9 @@
   // to probe.
   //
   // Check for number.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &number, not_taken);
+  __ JumpIfSmi(edx, &number);
   __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
-  __ j(not_equal, &non_number, taken);
+  __ j(not_equal, &non_number);
   __ bind(&number);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
       masm, Context::NUMBER_FUNCTION_INDEX, edx);
@@ -885,7 +856,7 @@
   // Check for string.
   __ bind(&non_number);
   __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
-  __ j(above_equal, &non_string, taken);
+  __ j(above_equal, &non_string);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
       masm, Context::STRING_FUNCTION_INDEX, edx);
   __ jmp(&probe);
@@ -893,9 +864,9 @@
   // Check for boolean.
   __ bind(&non_string);
   __ cmp(edx, FACTORY->true_value());
-  __ j(equal, &boolean, not_taken);
+  __ j(equal, &boolean);
   __ cmp(edx, FACTORY->false_value());
-  __ j(not_equal, &miss, taken);
+  __ j(not_equal, &miss);
   __ bind(&boolean);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
       masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@@ -921,16 +892,16 @@
   // -----------------------------------
 
   // Check that the result is not a smi.
-  __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ JumpIfSmi(edi, miss);
 
   // Check that the value is a JavaScript function, fetching its map into eax.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-  __ j(not_equal, miss, not_taken);
+  __ j(not_equal, miss);
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 }
 
 // The generated code falls through if the call should be handled by runtime.
@@ -960,7 +931,8 @@
 
 static void GenerateCallMiss(MacroAssembler* masm,
                              int argc,
-                             IC::UtilityId id) {
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1001,14 +973,13 @@
   if (id == IC::kCallIC_Miss) {
     Label invoke, global;
     __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));  // receiver
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &invoke, not_taken);
+    __ JumpIfSmi(edx, &invoke, Label::kNear);
     __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
     __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
     __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
-    __ j(equal, &global);
+    __ j(equal, &global, Label::kNear);
     __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
-    __ j(not_equal, &invoke);
+    __ j(not_equal, &invoke, Label::kNear);
 
     // Patch the receiver on the stack.
     __ bind(&global);
@@ -1018,12 +989,21 @@
   }
 
   // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   ParameterCount actual(argc);
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(edi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
 }
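
The new extra_ic_state argument is how the call kind reaches the miss handler: a contextual-call flag encoded in the extra IC state selects CALL_AS_FUNCTION instead of CALL_AS_METHOD. A hedged sketch of that encode/decode pattern, using a hand-rolled single bit rather than V8's BitField template:

    #include <cstdint>

    // Illustration only: pack the "contextual call" flag into bit 0 of the
    // extra IC state, the way a one-bit BitField would.
    typedef uint32_t ExtraICState;

    enum CallKind { CALL_AS_METHOD, CALL_AS_FUNCTION };

    ExtraICState EncodeContextual(bool contextual) {
      return contextual ? 1u : 0u;                  // bit 0 carries the flag
    }

    CallKind DecodeCallKind(ExtraICState state) {
      return (state & 1u) != 0 ? CALL_AS_FUNCTION : CALL_AS_METHOD;
    }
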
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1034,8 +1014,9 @@
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
-  GenerateMiss(masm, argc);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
@@ -1049,11 +1030,13 @@
   // -----------------------------------
 
   GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1062,7 +1045,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
 }
 
 
@@ -1083,8 +1066,7 @@
   Label index_smi, index_string;
 
   // Check that the key is a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_string, not_taken);
+  __ JumpIfNotSmi(ecx, &check_string);
 
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from
@@ -1109,13 +1091,16 @@
   // eax: elements
   // ecx: smi key
   // Check whether the elements is a number dictionary.
-  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow_load, true);
+  __ CheckMap(eax,
+              isolate->factory()->hash_table_map(),
+              &slow_load,
+              DONT_DO_SMI_CHECK);
   __ mov(ebx, ecx);
   __ SmiUntag(ebx);
   // ebx: untagged index
   // Receiver in edx will be clobbered, need to reload it on miss.
-  GenerateNumberDictionaryLoad(
-      masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
+  __ LoadFromNumberDictionary(
+      &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
   __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
   __ jmp(&do_call);
 
@@ -1150,7 +1135,7 @@
   __ CheckMap(ebx,
               isolate->factory()->hash_table_map(),
               &lookup_monomorphic_cache,
-              true);
+              DONT_DO_SMI_CHECK);
 
   GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
   __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
@@ -1158,7 +1143,10 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
-  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -1178,6 +1166,35 @@
 }
 
 
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+                                             int argc) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+  Label slow, notin;
+  Factory* factory = masm->isolate()->factory();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  Operand mapped_location =
+      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
+  __ mov(edi, mapped_location);
+  GenerateFunctionTailCall(masm, argc, &slow);
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in ebx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
+  __ cmp(unmapped_location, factory->the_hole_value());
+  __ j(equal, &slow);
+  __ mov(edi, unmapped_location);
+  GenerateFunctionTailCall(masm, argc, &slow);
+  __ bind(&slow);
+  GenerateMiss(masm, argc);
+}
+
+
 void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
@@ -1189,8 +1206,7 @@
 
   // Check if the name is a string.
   Label miss;
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(ecx, &miss);
   Condition cond = masm->IsObjectStringType(ecx, eax, eax);
   __ j(NegateCondition(cond), &miss);
   GenerateCallNormal(masm, argc);
@@ -1208,7 +1224,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
@@ -1220,9 +1236,7 @@
   // -----------------------------------
 
   // Probe the stub cache.
-  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
-                                         NOT_IN_LOOP,
-                                         MONOMORPHIC);
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, eax, ecx, ebx,
                                                   edx);
 
@@ -1273,173 +1287,7 @@
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test eax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 4 bytes of the 7-byte
-  // operand-immediate compare instruction, so we add 3 to get the
-  // offset to the last 4 bytes.
-  Address map_address = test_instruction_address + delta + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The offset is in the last 4 bytes of a six byte
-  // memory-to-register move instruction, so we add 2 to get the
-  // offset to the last 4 bytes.
-  Address offset_address =
-      test_instruction_address + delta + kOffsetToLoadInstruction + 2;
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-  return true;
-}
-
-
-// One byte opcode for mov ecx,0xXXXXXXXX.
-// Marks inlined contextual loads using all kinds of cells. Generated
-// code has the hole check:
-//   mov reg, <cell>
-//   mov reg, (<cell>, value offset)
-//   cmp reg, <the hole>
-//   je  slow
-//   ;; use reg
-static const byte kMovEcxByte = 0xB9;
-
-// One byte opcode for mov edx,0xXXXXXXXX.
-// Marks inlined contextual loads using only "don't delete"
-// cells. Generated code doesn't have the hole check:
-//   mov reg, <cell>
-//   mov reg, (<cell>, value offset)
-//   ;; use reg
-static const byte kMovEdxByte = 0xBA;
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address mov_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a mov ecx/edx,
-  // nothing was inlined.
-  byte b = *mov_instruction_address;
-  if (b != kMovEcxByte && b != kMovEdxByte) return false;
-  // If we don't have the hole check generated, we can only support
-  // "don't delete" cells.
-  if (b == kMovEdxByte && !is_dont_delete) return false;
-
-  Address delta_address = mov_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 4 bytes of the 7-byte
-  // operand-immediate compare instruction, so we add 3 to get the
-  // offset to the last 4 bytes.
-  Address map_address = mov_instruction_address + delta + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The cell is in the last 4 bytes of a five byte mov reg, imm32
-  // instruction, so we add 1 to get the offset to the last 4 bytes.
-  Address offset_address =
-      mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
-  *reinterpret_cast<Object**>(offset_address) = cell;
-  return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test eax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Extract the encoded deltas from the test eax instruction.
-  Address encoded_offsets_address = test_instruction_address + 1;
-  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
-  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-  int delta_to_record_write = encoded_offsets >> 16;
-
-  // Patch the map to check. The map address is the last 4 bytes of
-  // the 7-byte operand-immediate compare instruction.
-  Address map_check_address = test_instruction_address + delta_to_map_check;
-  Address map_address = map_check_address + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // Patch the offset in the store instruction. The offset is in the
-  // last 4 bytes of a six byte register-to-memory move instruction.
-  Address offset_address =
-      map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
-  // The offset should have initial value (kMaxInt - 1), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  // Patch the offset in the write-barrier code. The offset is the
-  // last 4 bytes of a six byte lea instruction.
-  offset_address = map_check_address + delta_to_record_write + 2;
-  // The offset should have initial value (kMaxInt), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // The keyed load has a fast inlined case if the IC call instruction
-  // is immediately followed by a test instruction.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Fetch the offset from the test instruction to the map cmp
-  // instruction.  This offset is stored in the last 4 bytes of the 5
-  // byte test instruction.
-  Address delta_address = test_instruction_address + 1;
-  int delta = *reinterpret_cast<int*>(delta_address);
-  // Compute the map address.  The map address is in the last 4 bytes
-  // of the 7-byte operand-immediate compare instruction, so we add 3
-  // to the offset to get the map address.
-  Address map_address = test_instruction_address + delta + 3;
-  // Patch the map check.
-  *(reinterpret_cast<Object**>(map_address)) = map;
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -1454,8 +1302,10 @@
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1486,10 +1336,8 @@
   //  -- esp[0] : return address
   // -----------------------------------
 
-  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
-                                         NOT_IN_LOOP,
-                                         MONOMORPHIC,
-                                         strict_mode);
+  Code::Flags flags =
+      Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
                                                   no_reg);
 
@@ -1519,12 +1367,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.  It is 7 bytes (test reg, imm) plus 6 bytes (jne
-// slow_label).
-const int StoreIC::kOffsetToStoreInstruction = 13;
-
-
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -1545,23 +1387,21 @@
   Register scratch = ebx;
 
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(receiver, &miss);
 
   // Check that the object is a JS array.
   __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Check that elements are FixedArray.
   // We rely on StoreIC_ArrayLength below to deal with all types of
   // fast elements (including COW).
   __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Check that value is a smi.
-  __ test(value, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
+  __ JumpIfNotSmi(value, &miss);
 
   // Prepare tail call to StoreIC_ArrayLength.
   __ pop(scratch);
@@ -1653,7 +1493,7 @@
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -1668,8 +1508,30 @@
   __ push(ebx);
 
   // Do tail-call to runtime routine.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);   // return address
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 8bcce33..d5a4fe6 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -40,24 +40,26 @@
 
 // When invoking builtins, we need to record the safepoint in the middle of
 // the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
-                     int deoptimization_index)
+                     Safepoint::DeoptMode mode)
       : codegen_(codegen),
         pointers_(pointers),
-        deoptimization_index_(deoptimization_index) {}
+        deopt_mode_(mode) {}
   virtual ~SafepointGenerator() { }
 
-  virtual void Generate() {
-    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  virtual void BeforeCall(int call_size) const {}
+
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
  private:
   LCodeGen* codegen_;
   LPointerMap* pointers_;
-  int deoptimization_index_;
+  Safepoint::DeoptMode deopt_mode_;
 };
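
SafepointGenerator now plugs into the macro assembler through the CallWrapper interface, which brackets an emitted call with BeforeCall/AfterCall hooks; only the AfterCall hook is needed to record the safepoint at the return address. A minimal stand-alone sketch of that contract (the names here are illustrative, not the V8 classes):

    #include <cstdio>

    // Hypothetical rendering of the CallWrapper contract: the code emitting a
    // call invokes BeforeCall/AfterCall around it, and a concrete wrapper
    // decides what bookkeeping happens at each hook.
    class CallWrapperLike {
     public:
      virtual ~CallWrapperLike() {}
      virtual void BeforeCall(int call_size) const = 0;
      virtual void AfterCall() const = 0;
    };

    class SafepointRecorder : public CallWrapperLike {
     public:
      virtual void BeforeCall(int) const {}       // nothing to do up front
      virtual void AfterCall() const {
        // The safepoint refers to the pc immediately after the call.
        std::printf("record safepoint with lazy-deopt mode\n");
      }
    };

    void EmitCall(const CallWrapperLike& wrapper) {
      wrapper.BeforeCall(5);     // size of the call instruction, if needed
      // ... emit the call itself ...
      wrapper.AfterCall();
    }
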
 
 
@@ -77,7 +79,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -86,7 +88,8 @@
 
 void LCodeGen::Abort(const char* format, ...) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
     PrintF("Aborting LCodeGen in @\"%s\": ", *name);
     va_list arguments;
     va_start(arguments, format);
@@ -126,13 +129,28 @@
   }
 #endif
 
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). ecx is zero for method calls and non-zero for
+  // function calls.
+  if (info_->is_strict_mode() || info_->is_native()) {
+    Label ok;
+    __ test(ecx, Operand(ecx));
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ mov(Operand(esp, receiver_offset),
+           Immediate(isolate()->factory()->undefined_value()));
+    __ bind(&ok);
+  }
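
For instance, a function declared with two parameters finds its receiver at esp + (2 + 1) * kPointerSize = esp + 12 at this point in the prologue: two argument slots plus the return address separate it from esp, since ebp has not been pushed yet.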
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
   __ push(edi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ mov(Operand(eax), Immediate(slots));
@@ -167,9 +185,9 @@
       FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
-      __ CallRuntime(Runtime::kNewContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
-    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in both eax and esi.  It replaces the context
     // passed to us.  It's saved in the stack and kept live in esi.
     __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
@@ -177,14 +195,14 @@
     // Copy parameters into context if necessary.
     int num_parameters = scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->AsSlot();
-      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
         // Load parameter from stack.
         __ mov(eax, Operand(ebp, parameter_offset));
         // Store it in the context.
-        int context_offset = Context::SlotOffset(slot->index());
+        int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers all involved
         // registers, so we have to use a third register to avoid
@@ -223,26 +241,20 @@
       instr->CompileToNative(this);
     }
   }
+  EnsureSpaceForLazyDeopt();
   return !is_aborted();
 }
 
 
-LInstruction* LCodeGen::GetNextInstruction() {
-  if (current_instruction_ < instructions_->length() - 1) {
-    return instructions_->at(current_instruction_ + 1);
-  } else {
-    return NULL;
-  }
-}
-
-
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
-  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
-    LDeferredCode* code = deferred_[i];
-    __ bind(code->entry());
-    code->Generate();
-    __ jmp(code->exit());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+      __ bind(code->entry());
+      code->Generate();
+      __ jmp(code->exit());
+    }
   }
 
   // Deferred code is the last part of the instruction sequence. Mark
@@ -254,7 +266,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -386,7 +398,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -411,22 +423,16 @@
 void LCodeGen::CallCodeGeneric(Handle<Code> code,
                                RelocInfo::Mode mode,
                                LInstruction* instr,
-                               ContextMode context_mode,
                                SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
-
-  if (context_mode == RESTORE_CONTEXT) {
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
   __ call(code, mode);
-
-  RegisterLazyDeoptimization(instr, safepoint_mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
-  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+  if (code->kind() == Code::BINARY_OP_IC ||
       code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
@@ -435,67 +441,47 @@
 
 void LCodeGen::CallCode(Handle<Code> code,
                         RelocInfo::Mode mode,
-                        LInstruction* instr,
-                        ContextMode context_mode) {
-  CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
 void LCodeGen::CallRuntime(const Runtime::Function* fun,
                            int argc,
-                           LInstruction* instr,
-                           ContextMode context_mode) {
+                           LInstruction* instr) {
   ASSERT(instr != NULL);
   ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (context_mode == RESTORE_CONTEXT) {
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
   __ CallRuntime(fun, argc);
 
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                        int argc,
-                                       LInstruction* instr) {
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  ASSERT(context->IsRegister() || context->IsStackSlot());
+  if (context->IsRegister()) {
+    if (!ToRegister(context).is(esi)) {
+      __ mov(esi, ToRegister(context));
+    }
+  } else {
+    // Context is stack slot.
+    __ mov(esi, ToOperand(context));
+  }
+
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
-      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
-                                          SafepointMode safepoint_mode) {
-  // Create the environment to bailout to. If the call has side effects
-  // execution has to continue after the call otherwise execution can continue
-  // from a previous bailout point repeating the call.
-  LEnvironment* deoptimization_environment;
-  if (instr->HasDeoptimizationEnvironment()) {
-    deoptimization_environment = instr->deoptimization_environment();
-  } else {
-    deoptimization_environment = instr->environment();
-  }
-
-  RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
-    RecordSafepoint(instr->pointer_map(),
-                    deoptimization_environment->deoptimization_index());
-  } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(),
-        0,
-        deoptimization_environment->deoptimization_index());
-  }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(
+    LEnvironment* environment, Safepoint::DeoptMode mode) {
   if (!environment->HasBeenRegistered()) {
     // Physical stack frame layout:
     // -x ............. -4  0 ..................................... y
@@ -517,14 +503,17 @@
     Translation translation(&translations_, frame_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
-    environment->Register(deoptimization_index, translation.index());
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
     deoptimizations_.Add(environment);
   }
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -543,7 +532,7 @@
     __ mov(ebx, shared);
     __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
     __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
-    __ j(not_zero, &no_deopt);
+    __ j(not_zero, &no_deopt, Label::kNear);
     if (FLAG_trap_on_deopt) __ int3();
     __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
     __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
@@ -564,13 +553,13 @@
     __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     if (FLAG_trap_on_deopt) {
-      NearLabel done;
-      __ j(NegateCondition(cc), &done);
+      Label done;
+      __ j(NegateCondition(cc), &done, Label::kNear);
       __ int3();
       __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
       __ bind(&done);
     } else {
-      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
 }
@@ -604,6 +593,7 @@
     data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
     data->SetArgumentsStackHeight(i,
                                   Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
   }
   code->set_deoptimization_data(*data);
 }
@@ -635,15 +625,27 @@
 }
 
 
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
 void LCodeGen::RecordSafepoint(
     LPointerMap* pointers,
     Safepoint::Kind kind,
     int arguments,
-    int deoptimization_index) {
+    Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
   const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-      kind, arguments, deoptimization_index);
+  Safepoint safepoint =
+      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
@@ -656,27 +658,26 @@
 
 
 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+                               Safepoint::DeoptMode mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
 }
 
 
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
   LPointerMap empty_pointers(RelocInfo::kNoPosition);
-  RecordSafepoint(&empty_pointers, deoptimization_index);
+  RecordSafepoint(&empty_pointers, mode);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
-                                            int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
-      deoptimization_index);
+                                            Safepoint::DeoptMode mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
 }
 
 
 void LCodeGen::RecordPosition(int position) {
-  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
 }
 
@@ -689,7 +690,7 @@
   }
   __ bind(label->label());
   current_block_ = label->block_id();
-  LCodeGen::DoGap(label);
+  DoGap(label);
 }
 
 
@@ -706,12 +707,11 @@
     LParallelMove* move = gap->GetParallelMove(inner_pos);
     if (move != NULL) DoParallelMove(move);
   }
+}
 
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
 }
 
 
@@ -726,38 +726,38 @@
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpConstructResult: {
       RegExpConstructResultStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::RegExpExec: {
       RegExpExecStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::SubString: {
       SubStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::NumberToString: {
       NumberToStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::StringAdd: {
       StringAddStub stub(NO_STRING_ADD_FLAGS);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::StringCompare: {
       StringCompareStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::TranscendentalCache: {
       TranscendentalCacheStub stub(instr->transcendental_type(),
                                    TranscendentalCacheStub::TAGGED);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     default:
@@ -780,50 +780,91 @@
 
     if (divisor < 0) divisor = -divisor;
 
-    NearLabel positive_dividend, done;
+    Label positive_dividend, done;
     __ test(dividend, Operand(dividend));
-    __ j(not_sign, &positive_dividend);
+    __ j(not_sign, &positive_dividend, Label::kNear);
     __ neg(dividend);
     __ and_(dividend, divisor - 1);
     __ neg(dividend);
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       DeoptimizeIf(no_condition, instr->environment());
     } else {
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
     __ bind(&positive_dividend);
     __ and_(dividend, divisor - 1);
     __ bind(&done);
   } else {
-    LOperand* right = instr->InputAt(1);
-    ASSERT(ToRegister(instr->InputAt(0)).is(eax));
-    ASSERT(ToRegister(instr->result()).is(edx));
+    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+    Register left_reg = ToRegister(instr->InputAt(0));
+    Register right_reg = ToRegister(instr->InputAt(1));
+    Register result_reg = ToRegister(instr->result());
 
-    Register right_reg = ToRegister(right);
+    ASSERT(left_reg.is(eax));
+    ASSERT(result_reg.is(edx));
     ASSERT(!right_reg.is(eax));
     ASSERT(!right_reg.is(edx));
 
     // Check for x % 0.
     if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-      __ test(right_reg, ToOperand(right));
+      __ test(right_reg, Operand(right_reg));
       DeoptimizeIf(zero, instr->environment());
     }
 
+    __ test(left_reg, Operand(left_reg));
+    __ j(zero, &remainder_eq_dividend, Label::kNear);
+    __ j(sign, &slow, Label::kNear);
+
+    __ test(right_reg, Operand(right_reg));
+    __ j(not_sign, &both_positive, Label::kNear);
+    // The sign of the divisor doesn't matter.
+    __ neg(right_reg);
+
+    __ bind(&both_positive);
+    // If the dividend is smaller than the nonnegative
+    // divisor, the dividend is the result.
+    __ cmp(left_reg, Operand(right_reg));
+    __ j(less, &remainder_eq_dividend, Label::kNear);
+
+    // Check if the divisor is a PowerOfTwo integer.
+    Register scratch = ToRegister(instr->TempAt(0));
+    __ mov(scratch, right_reg);
+    __ sub(Operand(scratch), Immediate(1));
+    __ test(scratch, Operand(right_reg));
+    __ j(not_zero, &do_subtraction, Label::kNear);
+    __ and_(left_reg, Operand(scratch));
+    __ jmp(&remainder_eq_dividend, Label::kNear);
+
+    __ bind(&do_subtraction);
+    const int kUnfolds = 3;
+    // Try a few subtractions of the dividend.
+    __ mov(scratch, left_reg);
+    for (int i = 0; i < kUnfolds; i++) {
+      // Reduce the dividend by the divisor.
+      __ sub(left_reg, Operand(right_reg));
+      // Check if the dividend is less than the divisor.
+      __ cmp(left_reg, Operand(right_reg));
+      __ j(less, &remainder_eq_dividend, Label::kNear);
+    }
+    __ mov(left_reg, scratch);
+
+    // Slow case, using idiv instruction.
+    __ bind(&slow);
     // Sign extend to edx.
     __ cdq();
 
     // Check for (0 % -x) that will produce negative zero.
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      NearLabel positive_left;
-      NearLabel done;
-      __ test(eax, Operand(eax));
-      __ j(not_sign, &positive_left);
+      Label positive_left;
+      Label done;
+      __ test(left_reg, Operand(left_reg));
+      __ j(not_sign, &positive_left, Label::kNear);
       __ idiv(right_reg);
 
       // Test the remainder for 0, because then the result would be -0.
-      __ test(edx, Operand(edx));
-      __ j(not_zero, &done);
+      __ test(result_reg, Operand(result_reg));
+      __ j(not_zero, &done, Label::kNear);
 
       DeoptimizeIf(no_condition, instr->environment());
       __ bind(&positive_left);
@@ -832,6 +873,12 @@
     } else {
       __ idiv(right_reg);
     }
+    __ jmp(&done, Label::kNear);
+
+    __ bind(&remainder_eq_dividend);
+    __ mov(result_reg, left_reg);
+
+    __ bind(&done);
   }
 }
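
The rewritten general case of DoModI tries several cheap reductions before resorting to idiv: a zero or already-small dividend is its own remainder, a power-of-two divisor becomes a mask, and a handful of trial subtractions handle small quotients. A C++ sketch of the same strategy (kUnfolds mirrors the constant in the generated code; the divide-by-zero and kMinInt corner cases, which deoptimize in the real code, are not modelled):

    // Sketch of the fast paths tried before falling back to idiv.
    int FastishMod(int dividend, int divisor) {
      if (dividend == 0) return 0;                  // remainder_eq_dividend
      if (dividend < 0) return dividend % divisor;  // slow path: idiv
      if (divisor < 0) divisor = -divisor;          // divisor sign is irrelevant
      if (dividend < divisor) return dividend;      // already reduced
      if ((divisor & (divisor - 1)) == 0) {
        return dividend & (divisor - 1);            // power-of-two divisor
      }
      const int kUnfolds = 3;
      int reduced = dividend;
      for (int i = 0; i < kUnfolds; i++) {          // a few trial subtractions
        reduced -= divisor;
        if (reduced < divisor) return reduced;
      }
      return dividend % divisor;                    // give up and divide
    }
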
 
@@ -854,9 +901,9 @@
 
   // Check for (0 / -x) that will produce negative zero.
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    NearLabel left_not_zero;
+    Label left_not_zero;
     __ test(left_reg, Operand(left_reg));
-    __ j(not_zero, &left_not_zero);
+    __ j(not_zero, &left_not_zero, Label::kNear);
     __ test(right_reg, ToOperand(right));
     DeoptimizeIf(sign, instr->environment());
     __ bind(&left_not_zero);
@@ -864,9 +911,9 @@
 
   // Check for (-kMinInt / -1).
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    NearLabel left_not_min_int;
+    Label left_not_min_int;
     __ cmp(left_reg, kMinInt);
-    __ j(not_zero, &left_not_min_int);
+    __ j(not_zero, &left_not_min_int, Label::kNear);
     __ cmp(right_reg, -1);
     DeoptimizeIf(zero, instr->environment());
     __ bind(&left_not_min_int);
@@ -944,9 +991,9 @@
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Bail out if the result is supposed to be negative zero.
-    NearLabel done;
+    Label done;
     __ test(left, Operand(left));
-    __ j(not_zero, &done);
+    __ j(not_zero, &done, Label::kNear);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
         DeoptimizeIf(no_condition, instr->environment());
@@ -1087,7 +1134,7 @@
   // Use xor to produce +0.0 in a fast and compact way, but avoid doing
   // so if the constant is -0.0.
   if (BitCast<uint64_t, double>(v) == 0) {
-    __ xorpd(res, res);
+    __ xorps(res, res);
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     uint64_t int_val = BitCast<uint64_t, double>(v);
@@ -1101,7 +1148,7 @@
         __ Set(temp, Immediate(upper));
         __ pinsrd(res, Operand(temp), 1);
       } else {
-        __ xorpd(res, res);
+        __ xorps(res, res);
         __ Set(temp, Immediate(upper));
         __ pinsrd(res, Operand(temp), 1);
       }
@@ -1132,17 +1179,26 @@
 }
 
 
-void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
+void LCodeGen::DoFixedArrayBaseLength(
+    LFixedArrayBaseLength* instr) {
   Register result = ToRegister(instr->result());
   Register array = ToRegister(instr->InputAt(0));
-  __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
+  __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
 }
 
 
-void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
   Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->InputAt(0));
-  __ mov(result, FieldOperand(array, ExternalArray::kLengthOffset));
+  Register input = ToRegister(instr->InputAt(0));
+
+  // Load map into |result|.
+  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
+  // Load the map's "bit field 2" into |result|. We only need the first byte,
+  // but the following masking takes care of that anyway.
+  __ mov(result, FieldOperand(result, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ and_(result, Map::kElementsKindMask);
+  __ shr(result, Map::kElementsKindShift);
 }
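
DoElementsKind just decodes a small enum packed into the map's second bit field, i.e. (bit_field2 & kElementsKindMask) >> kElementsKindShift. As a plain-integer sketch with assumed mask and shift values:

    #include <cstdint>

    // Assumed layout: elements kind in bits 2..4 of bit field 2.
    const int kElementsKindShift = 2;                             // assumption
    const uint8_t kElementsKindMask = 0x7 << kElementsKindShift;  // assumption

    uint8_t ElementsKindOf(uint8_t bit_field2) {
      return static_cast<uint8_t>(
          (bit_field2 & kElementsKindMask) >> kElementsKindShift);
    }
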
 
 
@@ -1151,14 +1207,13 @@
   Register result = ToRegister(instr->result());
   Register map = ToRegister(instr->TempAt(0));
   ASSERT(input.is(result));
-  NearLabel done;
+  Label done;
   // If the object is a smi return the object.
-  __ test(input, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ JumpIfSmi(input, &done, Label::kNear);
 
   // If the object is not a value type, return the object.
   __ CmpObjectType(input, JS_VALUE_TYPE, map);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   __ mov(result, FieldOperand(input, JSValue::kValueOffset));
 
   __ bind(&done);
@@ -1173,8 +1228,9 @@
 
 
 void LCodeGen::DoThrow(LThrow* instr) {
-  __ push(ToOperand(instr->InputAt(0)));
-  CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
+  __ push(ToOperand(instr->value()));
+  ASSERT(ToRegister(instr->context()).is(esi));
+  CallRuntime(Runtime::kThrow, 1, instr);
 
   if (FLAG_debug_code) {
     Comment("Unreachable code.");
@@ -1244,12 +1300,13 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->InputAt(0)).is(edx));
-  ASSERT(ToRegister(instr->InputAt(1)).is(eax));
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->left()).is(edx));
+  ASSERT(ToRegister(instr->right()).is(eax));
   ASSERT(ToRegister(instr->result()).is(eax));
 
-  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
 
@@ -1285,102 +1342,127 @@
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  Representation r = instr->hydrogen()->representation();
+  Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
     Register reg = ToRegister(instr->InputAt(0));
     __ test(reg, Operand(reg));
     EmitBranch(true_block, false_block, not_zero);
   } else if (r.IsDouble()) {
     XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(xmm0, xmm0);
+    __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
     EmitBranch(true_block, false_block, not_equal);
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->InputAt(0));
-    if (instr->hydrogen()->type().IsBoolean()) {
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
       __ cmp(reg, factory()->true_value());
       EmitBranch(true_block, false_block, equal);
+    } else if (type.IsSmi()) {
+      __ test(reg, Operand(reg));
+      EmitBranch(true_block, false_block, not_equal);
     } else {
       Label* true_label = chunk_->GetAssemblyLabel(true_block);
       Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-      __ cmp(reg, factory()->undefined_value());
-      __ j(equal, false_label);
-      __ cmp(reg, factory()->true_value());
-      __ j(equal, true_label);
-      __ cmp(reg, factory()->false_value());
-      __ j(equal, false_label);
-      __ test(reg, Operand(reg));
-      __ j(equal, false_label);
-      __ test(reg, Immediate(kSmiTagMask));
-      __ j(zero, true_label);
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
 
-      // Test for double values. Zero is false.
-      NearLabel call_stub;
-      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-             factory()->heap_number_map());
-      __ j(not_equal, &call_stub);
-      __ fldz();
-      __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
-      __ FCmp();
-      __ j(zero, false_label);
-      __ jmp(true_label);
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ cmp(reg, factory()->undefined_value());
+        __ j(equal, false_label);
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // true -> true.
+        __ cmp(reg, factory()->true_value());
+        __ j(equal, true_label);
+        // false -> false.
+        __ cmp(reg, factory()->false_value());
+        __ j(equal, false_label);
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ cmp(reg, factory()->null_value());
+        __ j(equal, false_label);
+      }
 
-      // The conversion stub doesn't cause garbage collections so it's
-      // safe to not record a safepoint after the call.
-      __ bind(&call_stub);
-      ToBooleanStub stub;
-      __ pushad();
-      __ push(reg);
-      __ CallStub(&stub);
-      __ test(eax, Operand(eax));
-      __ popad();
-      EmitBranch(true_block, false_block, not_zero);
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ test(reg, Operand(reg));
+        __ j(equal, false_label);
+        __ JumpIfSmi(reg, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ test(reg, Immediate(kSmiTagMask));
+        DeoptimizeIf(zero, instr->environment());
+      }
+
+      Register map = no_reg;  // Keep the compiler happy.
+      if (expected.NeedsMap()) {
+        map = ToRegister(instr->TempAt(0));
+        ASSERT(!map.is(reg));
+        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+                    1 << Map::kIsUndetectable);
+          __ j(not_zero, false_label);
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+        __ j(above_equal, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+        __ j(above_equal, &not_string, Label::kNear);
+        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+        __ j(not_zero, true_label);
+        __ jmp(false_label);
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+               factory()->heap_number_map());
+        __ j(not_equal, &not_heap_number, Label::kNear);
+        __ fldz();
+        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+        __ FCmp();
+        __ j(zero, false_label);
+        __ jmp(true_label);
+        __ bind(&not_heap_number);
+      }
+
+      // We've seen something for the first time -> deopt.
+      DeoptimizeIf(no_condition, instr->environment());
     }
   }
 }
 
 
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
   block = chunk_->LookupDestination(block);
   int next_block = GetNextEmittedBlock(current_block_);
   if (block != next_block) {
-    // Perform stack overflow check if this goto needs it before jumping.
-    if (deferred_stack_check != NULL) {
-      ExternalReference stack_limit =
-          ExternalReference::address_of_stack_limit(isolate());
-      __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(above_equal, chunk_->GetAssemblyLabel(block));
-      __ jmp(deferred_stack_check->entry());
-      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
-    } else {
-      __ jmp(chunk_->GetAssemblyLabel(block));
-    }
+    __ jmp(chunk_->GetAssemblyLabel(block));
   }
 }
 
 
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  PushSafepointRegistersScope scope(this);
-  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
 void LCodeGen::DoGoto(LGoto* instr) {
-  class DeferredStackCheck: public LDeferredCode {
-   public:
-    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
-   private:
-    LGoto* instr_;
-  };
-
-  DeferredStackCheck* deferred = NULL;
-  if (instr->include_stack_check()) {
-    deferred = new DeferredStackCheck(this, instr);
-  }
-  EmitGoto(instr->block_id(), deferred);
+  EmitGoto(instr->block_id());
 }
 
 
@@ -1421,32 +1503,6 @@
 }
 
 
-void LCodeGen::DoCmpID(LCmpID* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  LOperand* result = instr->result();
-
-  NearLabel unordered;
-  if (instr->is_double()) {
-    // Don't base result on EFLAGS when a NaN is involved. Instead
-    // jump to the unordered case, which produces a false value.
-    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, &unordered, not_taken);
-  } else {
-    EmitCmpI(left, right);
-  }
-
-  NearLabel done;
-  Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  __ mov(ToRegister(result), factory()->true_value());
-  __ j(cc, &done);
-
-  __ bind(&unordered);
-  __ mov(ToRegister(result), factory()->false_value());
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
@@ -1467,23 +1523,9 @@
 }
 
 
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   Register left = ToRegister(instr->InputAt(0));
-  Register right = ToRegister(instr->InputAt(1));
-  Register result = ToRegister(instr->result());
-
-  __ cmp(left, Operand(right));
-  __ mov(result, factory()->true_value());
-  NearLabel done;
-  __ j(equal, &done);
-  __ mov(result, factory()->false_value());
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
-  Register left = ToRegister(instr->InputAt(0));
-  Register right = ToRegister(instr->InputAt(1));
+  Operand right = ToOperand(instr->InputAt(1));
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
 
@@ -1492,41 +1534,13 @@
 }
 
 
-void LCodeGen::DoIsNull(LIsNull* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Materialize false.
-
-  __ cmp(reg, factory()->null_value());
-  if (instr->is_strict()) {
-    __ mov(result, factory()->true_value());
-    NearLabel done;
-    __ j(equal, &done);
-    __ mov(result, factory()->false_value());
-    __ bind(&done);
-  } else {
-    NearLabel true_value, false_value, done;
-    __ j(equal, &true_value);
-    __ cmp(reg, factory()->undefined_value());
-    __ j(equal, &true_value);
-    __ test(reg, Immediate(kSmiTagMask));
-    __ j(zero, &false_value);
-    // Check for undetectable objects by looking in the bit field in
-    // the map. The object has already been smi checked.
-    Register scratch = result;
-    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
-    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
-    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
-    __ j(not_zero, &true_value);
-    __ bind(&false_value);
-    __ mov(result, factory()->false_value());
-    __ jmp(&done);
-    __ bind(&true_value);
-    __ mov(result, factory()->true_value());
-    __ bind(&done);
-  }
+  __ cmp(left, instr->hydrogen()->right());
+  EmitBranch(true_block, false_block, equal);
 }
 
 
@@ -1548,8 +1562,7 @@
     __ j(equal, true_label);
     __ cmp(reg, factory()->undefined_value());
     __ j(equal, true_label);
-    __ test(reg, Immediate(kSmiTagMask));
-    __ j(zero, false_label);
+    __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
     // the map. The object has already been smi checked.
     Register scratch = ToRegister(instr->TempAt(0));
@@ -1563,83 +1576,42 @@
 
 Condition LCodeGen::EmitIsObject(Register input,
                                  Register temp1,
-                                 Register temp2,
                                  Label* is_not_object,
                                  Label* is_object) {
-  ASSERT(!input.is(temp1));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp1.is(temp2));
-
-  __ test(input, Immediate(kSmiTagMask));
-  __ j(equal, is_not_object);
+  __ JumpIfSmi(input, is_not_object);
 
   __ cmp(input, isolate()->factory()->null_value());
   __ j(equal, is_object);
 
   __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
   // Undetectable objects behave like undefined.
-  __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
-  __ test(temp2, Immediate(1 << Map::kIsUndetectable));
+  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
+            1 << Map::kIsUndetectable);
   __ j(not_zero, is_not_object);
 
-  __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
-  __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
+  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
+  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   __ j(below, is_not_object);
-  __ cmp(temp2, LAST_JS_OBJECT_TYPE);
+  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
   return below_equal;
 }
 
 
-void LCodeGen::DoIsObject(LIsObject* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register temp = ToRegister(instr->TempAt(0));
-  Label is_false, is_true, done;
-
-  Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
-  __ j(true_cond, &is_true);
-
-  __ bind(&is_false);
-  __ mov(result, factory()->false_value());
-  __ jmp(&done);
-
-  __ bind(&is_true);
-  __ mov(result, factory()->true_value());
-
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
-  Register temp2 = ToRegister(instr->TempAt(1));
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+  Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
 
   EmitBranch(true_block, false_block, true_cond);
 }
 
 
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
-  Operand input = ToOperand(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  __ test(input, Immediate(kSmiTagMask));
-  __ mov(result, factory()->true_value());
-  NearLabel done;
-  __ j(zero, &done);
-  __ mov(result, factory()->false_value());
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   Operand input = ToOperand(instr->InputAt(0));
 
@@ -1651,7 +1623,23 @@
 }
 
 
-static InstanceType TestType(HHasInstanceType* instr) {
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+            1 << Map::kIsUndetectable);
+  EmitBranch(true_block, false_block, not_zero);
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
@@ -1660,7 +1648,7 @@
 }
 
 
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == to) return equal;
@@ -1671,24 +1659,6 @@
 }
 
 
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  __ test(input, Immediate(kSmiTagMask));
-  NearLabel done, is_false;
-  __ j(zero, &is_false);
-  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
-  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
-  __ mov(result, factory()->true_value());
-  __ jmp(&done);
-  __ bind(&is_false);
-  __ mov(result, factory()->false_value());
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
@@ -1698,8 +1668,7 @@
 
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  __ test(input, Immediate(kSmiTagMask));
-  __ j(zero, false_label);
+  __ JumpIfSmi(input, false_label);
 
   __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
   EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
@@ -1719,21 +1688,6 @@
 }
 
 
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  __ mov(result, factory()->true_value());
-  __ test(FieldOperand(input, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  NearLabel done;
-  __ j(zero, &done);
-  __ mov(result, factory()->false_value());
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoHasCachedArrayIndexAndBranch(
     LHasCachedArrayIndexAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
@@ -1757,28 +1711,28 @@
                                Register temp2) {
   ASSERT(!input.is(temp));
   ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
-  __ test(input, Immediate(kSmiTagMask));
-  __ j(zero, is_false);
-  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+  __ JumpIfSmi(input, is_false);
+  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
   __ j(below, is_false);
 
   // Map is now in temp.
   // Functions have class 'Function'.
-  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    __ j(equal, is_false);
+    __ j(above_equal, is_false);
   }
 
   // Check if the constructor in the map is a function.
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
 
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
 
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
@@ -1804,29 +1758,6 @@
 }
 
 
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  ASSERT(input.is(result));
-  Register temp = ToRegister(instr->TempAt(0));
-  Handle<String> class_name = instr->hydrogen()->class_name();
-  NearLabel done;
-  Label is_true, is_false;
-
-  EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
-
-  __ j(not_equal, &is_false);
-
-  __ bind(&is_true);
-  __ mov(result, factory()->true_value());
-  __ jmp(&done);
-
-  __ bind(&is_false);
-  __ mov(result, factory()->false_value());
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
@@ -1865,31 +1796,19 @@
   // Object and function are in fixed registers defined by the stub.
   ASSERT(ToRegister(instr->context()).is(esi));
   InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 
-  NearLabel true_value, done;
+  Label true_value, done;
   __ test(eax, Operand(eax));
-  __ j(zero, &true_value);
+  __ j(zero, &true_value, Label::kNear);
   __ mov(ToRegister(instr->result()), factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ mov(ToRegister(instr->result()), factory()->true_value());
   __ bind(&done);
 }
 
 
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
-  __ test(eax, Operand(eax));
-  EmitBranch(true_block, false_block, zero);
-}
-
-
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   class DeferredInstanceOfKnownGlobal: public LDeferredCode {
    public:
@@ -1897,7 +1816,7 @@
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() {
-      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
 
     Label* map_check() { return &map_check_; }
@@ -1911,22 +1830,21 @@
   deferred = new DeferredInstanceOfKnownGlobal(this, instr);
 
   Label done, false_result;
-  Register object = ToRegister(instr->InputAt(0));
+  Register object = ToRegister(instr->InputAt(1));
   Register temp = ToRegister(instr->TempAt(0));
 
   // A Smi is not an instance of anything.
-  __ test(object, Immediate(kSmiTagMask));
-  __ j(zero, &false_result, not_taken);
+  __ JumpIfSmi(object, &false_result);
 
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
   // instanceof stub.
-  NearLabel cache_miss;
+  Label cache_miss;
   Register map = ToRegister(instr->TempAt(0));
   __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
   __ cmp(map, factory()->the_hole_value());  // Patched to cached map.
-  __ j(not_equal, &cache_miss, not_taken);
+  __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
   __ jmp(&done);
 
@@ -1954,8 +1872,8 @@
 }
 
 
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                                Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
   PushSafepointRegistersScope scope(this);
 
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
@@ -1974,15 +1892,18 @@
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
   __ mov(InstanceofStub::right(), Immediate(instr->function()));
-  static const int kAdditionalDelta = 16;
+  static const int kAdditionalDelta = 13;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
   __ mov(temp, Immediate(delta));
   __ StoreToSafepointRegisterSlot(temp, temp);
   CallCodeGeneric(stub.GetCode(),
                   RelocInfo::CODE_TARGET,
                   instr,
-                  RESTORE_CONTEXT,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LEnvironment* env = instr->deoptimization_environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
   // Put the result value into the eax slot and restore all registers.
   __ StoreToSafepointRegisterSlot(eax, eax);
 }
@@ -2012,42 +1933,23 @@
   Token::Value op = instr->op();
 
   Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
   if (op == Token::GT || op == Token::LTE) {
     condition = ReverseCondition(condition);
   }
-  NearLabel true_value, done;
+  Label true_value, done;
   __ test(eax, Operand(eax));
-  __ j(condition, &true_value);
+  __ j(condition, &true_value, Label::kNear);
   __ mov(ToRegister(instr->result()), factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ mov(ToRegister(instr->result()), factory()->true_value());
   __ bind(&done);
 }
 
 
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
-  Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
-
-  // The compare stub expects compare condition and the input operands
-  // reversed for GT and LTE.
-  Condition condition = ComputeCompareCondition(op);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
-  __ test(eax, Operand(eax));
-  EmitBranch(true_block, false_block, condition);
-}
-
-
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace) {
     // Preserve the return value on the stack and rely on the runtime call
@@ -2060,7 +1962,7 @@
   }
   __ mov(esp, ebp);
   __ pop(ebp);
-  __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
 }
 
 
@@ -2083,7 +1985,7 @@
   RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                                RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, mode, instr);
 }
 
 
@@ -2114,7 +2016,7 @@
   Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
 }
 
 
@@ -2149,23 +2051,29 @@
 }
 
 
-void LCodeGen::EmitLoadField(Register result,
-                             Register object,
-                             Handle<Map> type,
-                             Handle<String> name) {
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
   LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
-  int index = lookup.GetLocalFieldIndexFromMap(*type);
-  int offset = index * kPointerSize;
-  if (index < 0) {
-    // Negative property indices are in-object properties, indexed
-    // from the end of the fixed part of the object.
-    __ mov(result, FieldOperand(object, offset + type->instance_size()));
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ mov(result, FieldOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+      __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    }
   } else {
-    // Non-negative property indices are in the properties array.
-    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2180,32 +2088,32 @@
     ASSERT(instr->hydrogen()->need_generic());
     __ mov(ecx, name);
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
   } else {
-    NearLabel done;
+    Label done;
     for (int i = 0; i < map_count - 1; ++i) {
       Handle<Map> map = instr->hydrogen()->types()->at(i);
-      NearLabel next;
+      Label next;
       __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-      __ j(not_equal, &next);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      __ j(not_equal, &next, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
     Handle<Map> map = instr->hydrogen()->types()->last();
     __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
     if (instr->hydrogen()->need_generic()) {
-      NearLabel generic;
-      __ j(not_equal, &generic);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      Label generic;
+      __ j(not_equal, &generic, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&generic);
       __ mov(ecx, name);
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+      CallCode(ic, RelocInfo::CODE_TARGET, instr);
     } else {
       DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
     }
     __ bind(&done);
   }
@@ -2219,7 +2127,7 @@
 
   __ mov(ecx, instr->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2233,10 +2141,10 @@
   DeoptimizeIf(not_equal, instr->environment());
 
   // Check whether the function has an instance prototype.
-  NearLabel non_instance;
+  Label non_instance;
   __ test_b(FieldOperand(result, Map::kBitFieldOffset),
             1 << Map::kHasNonInstancePrototype);
-  __ j(not_zero, &non_instance);
+  __ j(not_zero, &non_instance, Label::kNear);
 
   // Get the prototype or initial map from the function.
   __ mov(result,
@@ -2247,13 +2155,13 @@
   DeoptimizeIf(equal, instr->environment());
 
   // If the function does not have an initial map, we're done.
-  NearLabel done;
+  Label done;
   __ CmpObjectType(result, MAP_TYPE, temp);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Non-instance prototype: Fetch prototype from constructor field
   // in the function's map.
@@ -2270,21 +2178,29 @@
   Register input = ToRegister(instr->InputAt(0));
   __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
   if (FLAG_debug_code) {
-    NearLabel done;
+    Label done, ok, fail;
     __ cmp(FieldOperand(result, HeapObject::kMapOffset),
            Immediate(factory()->fixed_array_map()));
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     __ cmp(FieldOperand(result, HeapObject::kMapOffset),
            Immediate(factory()->fixed_cow_array_map()));
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     Register temp((result.is(eax)) ? ebx : eax);
     __ push(temp);
     __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
-    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
-    __ sub(Operand(temp), Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
-    __ cmp(Operand(temp), Immediate(kExternalArrayTypeCount));
+    __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
+    __ and_(temp, Map::kElementsKindMask);
+    __ shr(temp, Map::kElementsKindShift);
+    __ cmp(temp, FAST_ELEMENTS);
+    __ j(equal, &ok, Label::kNear);
+    __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+    __ j(less, &fail, Label::kNear);
+    __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+    __ j(less_equal, &ok, Label::kNear);
+    __ bind(&fail);
+    __ Abort("Check for fast or external elements failed.");
+    __ bind(&ok);
     __ pop(temp);
-    __ Check(below, "Check for fast elements or pixel array failed.");
     __ bind(&done);
   }
 }
@@ -2315,60 +2231,109 @@
 
 
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
-  Register elements = ToRegister(instr->elements());
-  Register key = ToRegister(instr->key());
   Register result = ToRegister(instr->result());
-  ASSERT(result.is(elements));
 
   // Load the result.
-  __ mov(result, FieldOperand(elements,
-                              key,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
+  __ mov(result,
+         BuildFastArrayOperand(instr->elements(), instr->key(),
+                               FAST_ELEMENTS,
+                               FixedArray::kHeaderSize - kHeapObjectTag));
 
   // Check for the hole value.
-  __ cmp(result, factory()->the_hole_value());
-  DeoptimizeIf(equal, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(result, factory()->the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+    LLoadKeyedFastDoubleElement* instr) {
+  XMMRegister result = ToDoubleRegister(instr->result());
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+        sizeof(kHoleNanLower32);
+    Operand hole_check_operand = BuildFastArrayOperand(
+        instr->elements(), instr->key(),
+        FAST_DOUBLE_ELEMENTS,
+        offset);
+    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+    DeoptimizeIf(equal, instr->environment());
+  }
+
+  Operand double_load_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+  __ movdbl(result, double_load_operand);
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+    LOperand* elements_pointer,
+    LOperand* key,
+    ElementsKind elements_kind,
+    uint32_t offset) {
+  Register elements_pointer_reg = ToRegister(elements_pointer);
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+  if (key->IsConstantOperand()) {
+    int constant_value = ToInteger32(LConstantOperand::cast(key));
+    if (constant_value & 0xF0000000) {
+      Abort("array index constant value too big");
+    }
+    return Operand(elements_pointer_reg,
+                   constant_value * (1 << shift_size) + offset);
+  } else {
+    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+    return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset);
+  }
 }
 
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
-  ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
+  ElementsKind elements_kind = instr->elements_kind();
+  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+                                        instr->key(), elements_kind, 0));
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister result(ToDoubleRegister(instr->result()));
-    __ movss(result, Operand(external_pointer, key, times_4, 0));
+    __ movss(result, operand);
     __ cvtss2sd(result, result);
+  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    __ movdbl(ToDoubleRegister(instr->result()), operand);
   } else {
     Register result(ToRegister(instr->result()));
-    switch (array_type) {
-      case kExternalByteArray:
-        __ movsx_b(result, Operand(external_pointer, key, times_1, 0));
+    switch (elements_kind) {
+      case EXTERNAL_BYTE_ELEMENTS:
+        __ movsx_b(result, operand);
         break;
-      case kExternalUnsignedByteArray:
-      case kExternalPixelArray:
-        __ movzx_b(result, Operand(external_pointer, key, times_1, 0));
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ movzx_b(result, operand);
         break;
-      case kExternalShortArray:
-        __ movsx_w(result, Operand(external_pointer, key, times_2, 0));
+      case EXTERNAL_SHORT_ELEMENTS:
+        __ movsx_w(result, operand);
         break;
-      case kExternalUnsignedShortArray:
-        __ movzx_w(result, Operand(external_pointer, key, times_2, 0));
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ movzx_w(result, operand);
         break;
-      case kExternalIntArray:
-        __ mov(result, Operand(external_pointer, key, times_4, 0));
+      case EXTERNAL_INT_ELEMENTS:
+        __ mov(result, operand);
         break;
-      case kExternalUnsignedIntArray:
-        __ mov(result, Operand(external_pointer, key, times_4, 0));
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ mov(result, operand);
         __ test(result, Operand(result));
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
         // happens, and generate code that returns a double rather than int.
         DeoptimizeIf(negative, instr->environment());
         break;
-      case kExternalFloatArray:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -2382,7 +2347,7 @@
   ASSERT(ToRegister(instr->key()).is(eax));
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2390,16 +2355,16 @@
   Register result = ToRegister(instr->result());
 
   // Check for arguments adapter frame.
-  NearLabel done, adapted;
+  Label done, adapted;
   __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
   __ cmp(Operand(result),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adapted);
+  __ j(equal, &adapted, Label::kNear);
 
   // No arguments adaptor frame.
   __ mov(result, Operand(ebp));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Arguments adaptor frame present.
   __ bind(&adapted);
@@ -2415,12 +2380,12 @@
   Operand elem = ToOperand(instr->InputAt(0));
   Register result = ToRegister(instr->result());
 
-  NearLabel done;
+  Label done;
 
   // If no arguments adaptor frame the number of arguments is fixed.
   __ cmp(ebp, elem);
   __ mov(result, Immediate(scope()->num_parameters()));
-  __ j(equal, &done);
+  __ j(equal, &done, Label::kNear);
 
   // Arguments adaptor frame present. Get argument length from there.
   __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2443,20 +2408,36 @@
   ASSERT(function.is(edi));  // Required by InvokeFunction.
   ASSERT(ToRegister(instr->result()).is(eax));
 
-  // If the receiver is null or undefined, we have to pass the global object
-  // as a receiver.
-  NearLabel global_object, receiver_ok;
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, receiver_ok;
+
+  // Do not transform the receiver to object for strict mode
+  // functions.
+  __ mov(scratch,
+         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+  __ j(not_equal, &receiver_ok, Label::kNear);
+
+  // Do not transform the receiver to object for builtins.
+  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+            1 << SharedFunctionInfo::kNativeBitWithinByte);
+  __ j(not_equal, &receiver_ok, Label::kNear);
+
+  // Normal function. Replace undefined or null with global receiver.
   __ cmp(receiver, factory()->null_value());
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
   __ cmp(receiver, factory()->undefined_value());
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
   DeoptimizeIf(equal, instr->environment());
-  __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
+  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
   DeoptimizeIf(below, instr->environment());
-  __ jmp(&receiver_ok);
+  __ jmp(&receiver_ok, Label::kNear);
 
   __ bind(&global_object);
   // TODO(kmillikin): We have a hydrogen value for the global object.  See
@@ -2464,6 +2445,8 @@
   // here.
   __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+  __ mov(receiver,
+         FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
 
   // Copy the arguments to this function possibly from the
@@ -2477,10 +2460,10 @@
 
   // Loop through the arguments pushing them onto the execution
   // stack.
-  NearLabel invoke, loop;
+  Label invoke, loop;
   // length is a small non-negative integer, due to the test above.
   __ test(length, Operand(length));
-  __ j(zero, &invoke);
+  __ j(zero, &invoke, Label::kNear);
   __ bind(&loop);
   __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
   __ dec(length);
@@ -2490,14 +2473,12 @@
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
-  v8::internal::ParameterCount actual(eax);
-  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount actual(eax);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
 }
 
 
@@ -2511,6 +2492,12 @@
 }
 
 
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
 void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2520,8 +2507,8 @@
 void LCodeGen::DoOuterContext(LOuterContext* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
-  __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+  __ mov(result,
+         Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
 }
 
 
@@ -2541,7 +2528,8 @@
 
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
-                                 LInstruction* instr) {
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
   // Change context if needed.
   bool change_context =
       (info()->closure()->context() != function->context()) ||
@@ -2563,26 +2551,29 @@
   RecordPosition(pointers->position());
 
   // Invoke function.
+  __ SetCallKind(ecx, call_kind);
   if (*function == *info()->closure()) {
     __ CallSelf();
   } else {
     __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
   }
 
-  // Setup deoptimization.
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
   __ mov(edi, instr->function());
-  CallKnownFunction(instr->function(), instr->arity(), instr);
+  CallKnownFunction(instr->function(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_METHOD);
 }
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
-  Register input_reg = ToRegister(instr->InputAt(0));
+  Register input_reg = ToRegister(instr->value());
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
          factory()->heap_number_map());
   DeoptimizeIf(not_equal, instr->environment());
@@ -2613,7 +2604,8 @@
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
 
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+                          instr, instr->context());
 
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -2634,7 +2626,7 @@
 
 
 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
-  Register input_reg = ToRegister(instr->InputAt(0));
+  Register input_reg = ToRegister(instr->value());
   __ test(input_reg, Operand(input_reg));
   Label is_positive;
   __ j(not_sign, &is_positive);
@@ -2659,13 +2651,13 @@
     LUnaryMathOperation* instr_;
   };
 
-  ASSERT(instr->InputAt(0)->Equals(instr->result()));
+  ASSERT(instr->value()->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
 
   if (r.IsDouble()) {
     XMMRegister  scratch = xmm0;
-    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-    __ pxor(scratch, scratch);
+    XMMRegister input_reg = ToDoubleRegister(instr->value());
+    __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
     __ pand(input_reg, scratch);
   } else if (r.IsInteger32()) {
@@ -2673,10 +2665,9 @@
   } else {  // Tagged case.
     DeferredMathAbsTaggedHeapNumber* deferred =
         new DeferredMathAbsTaggedHeapNumber(this, instr);
-    Register input_reg = ToRegister(instr->InputAt(0));
+    Register input_reg = ToRegister(instr->value());
     // Smi check.
-    __ test(input_reg, Immediate(kSmiTagMask));
-    __ j(not_zero, deferred->entry());
+    __ JumpIfNotSmi(input_reg, deferred->entry());
     EmitIntegerMathAbs(instr);
     __ bind(deferred->exit());
   }
@@ -2686,62 +2677,102 @@
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
-  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-  __ ucomisd(input_reg, xmm_scratch);
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
 
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(below_equal, instr->environment());
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatures::Scope scope(SSE4_1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Deoptimize on negative zero.
+      Label non_zero;
+      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+      __ ucomisd(input_reg, xmm_scratch);
+      __ j(not_equal, &non_zero, Label::kNear);
+      __ movmskpd(output_reg, input_reg);
+      __ test(output_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr->environment());
+      __ bind(&non_zero);
+    }
+    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+    __ cvttsd2si(output_reg, Operand(xmm_scratch));
+    // Overflow is signalled with minint.
+    __ cmp(output_reg, 0x80000000u);
+    DeoptimizeIf(equal, instr->environment());
   } else {
+    Label done;
+    // Deoptimize on negative numbers.
+    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
     DeoptimizeIf(below, instr->environment());
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Check for negative zero.
+      Label positive_sign;
+      __ j(above, &positive_sign, Label::kNear);
+      __ movmskpd(output_reg, input_reg);
+      __ test(output_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr->environment());
+      __ Set(output_reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+      __ bind(&positive_sign);
+    }
+
+    // Use truncating instruction (OK because input is positive).
+    __ cvttsd2si(output_reg, Operand(input_reg));
+
+    // Overflow is signalled with minint.
+    __ cmp(output_reg, 0x80000000u);
+    DeoptimizeIf(equal, instr->environment());
+    __ bind(&done);
   }
-
-  // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, Operand(input_reg));
-
-  // Overflow is signalled with minint.
-  __ cmp(output_reg, 0x80000000u);
-  DeoptimizeIf(equal, instr->environment());
 }
 
-
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
-  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
 
+  Label below_half, done;
   // xmm_scratch = 0.5
   ExternalReference one_half = ExternalReference::address_of_one_half();
   __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
-
-  // input = input + 0.5
-  __ addsd(input_reg, xmm_scratch);
-
-  // We need to return -0 for the input range [-0.5, 0[, otherwise
-  // compute Math.floor(value + 0.5).
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below_equal, instr->environment());
-  } else {
-    // If we don't need to bailout on -0, we check only bailout
-    // on negative inputs.
-    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
-  }
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(above, &below_half);
+  // xmm_scratch = input + 0.5
+  __ addsd(xmm_scratch, input_reg);
 
   // Compute Math.floor(value + 0.5).
   // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, Operand(input_reg));
+  __ cvttsd2si(output_reg, Operand(xmm_scratch));
 
   // Overflow is signalled with minint.
   __ cmp(output_reg, 0x80000000u);
   DeoptimizeIf(equal, instr->environment());
+  __ jmp(&done);
+
+  __ bind(&below_half);
+
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ movmskpd(output_reg, input_reg);
+    __ test(output_reg, Immediate(1));
+    DeoptimizeIf(not_zero, instr->environment());
+  } else {
+    // If the input is >= -0.5, we return +0.
+    __ mov(output_reg, Immediate(0xBF000000));
+    __ movd(xmm_scratch, Operand(output_reg));
+    __ cvtss2sd(xmm_scratch, xmm_scratch);
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+  __ Set(output_reg, Immediate(0));
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   __ sqrtsd(input_reg, input_reg);
 }
@@ -2749,9 +2780,9 @@
 
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
   XMMRegister xmm_scratch = xmm0;
-  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-  __ xorpd(xmm_scratch, xmm_scratch);
+  __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
 }
@@ -2786,8 +2817,7 @@
     Register right_reg = ToRegister(right);
 
     Label non_smi, call;
-    __ test(right_reg, Immediate(kSmiTagMask));
-    __ j(not_zero, &non_smi);
+    __ JumpIfNotSmi(right_reg, &non_smi);
     __ SmiUntag(right_reg);
     __ cvtsi2sd(result_reg, Operand(right_reg));
     __ jmp(&call);
@@ -2818,22 +2848,23 @@
 
 
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
-  ASSERT(instr->InputAt(0)->Equals(instr->result()));
-  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  NearLabel positive, done, zero, negative;
-  __ xorpd(xmm0, xmm0);
+  ASSERT(instr->value()->Equals(instr->result()));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  Label positive, done, zero;
+  __ xorps(xmm0, xmm0);
   __ ucomisd(input_reg, xmm0);
-  __ j(above, &positive);
-  __ j(equal, &zero);
-  ExternalReference nan = ExternalReference::address_of_nan();
+  __ j(above, &positive, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
+  ExternalReference nan =
+      ExternalReference::address_of_canonical_non_hole_nan();
   __ movdbl(input_reg, Operand::StaticVariable(nan));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&zero);
   __ push(Immediate(0xFFF00000));
   __ push(Immediate(0));
   __ movdbl(input_reg, Operand(esp, 0));
   __ add(Operand(esp), Immediate(kDoubleSize));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&positive);
   __ fldln2();
   __ sub(Operand(esp), Immediate(kDoubleSize));
@@ -2851,7 +2882,7 @@
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2859,7 +2890,7 @@
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2896,15 +2927,29 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->function()).is(edi));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->
-      ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2913,10 +2958,11 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->
-      ComputeCallInitialize(arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, mode, instr);
 }
 
 
@@ -2925,8 +2971,8 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ Drop(1);
 }
 
@@ -2936,17 +2982,18 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->
-      ComputeCallInitialize(arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, mode, instr);
 }
 
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
   __ mov(edi, instr->target());
-  CallKnownFunction(instr->target(), instr->arity(), instr);
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
 
@@ -2957,12 +3004,12 @@
 
   Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
   __ Set(eax, Immediate(instr->arity()));
-  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
+  CallRuntime(instr->function(), instr->arity(), instr);
 }
 
 
@@ -3005,58 +3052,54 @@
   Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
-  DeoptimizeIf(above_equal, instr->environment());
+  if (instr->index()->IsConstantOperand()) {
+    __ cmp(ToOperand(instr->length()),
+           ToImmediate(LConstantOperand::cast(instr->index())));
+    DeoptimizeIf(below_equal, instr->environment());
+  } else {
+    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+    DeoptimizeIf(above_equal, instr->environment());
+  }
 }
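
The flipped comparison in DoBoundsCheck above keeps the immediate as the second cmp operand when the index is a compile-time constant. A minimal sketch of the two equivalent unsigned conditions, in plain C++ rather than V8 macros and not part of the patch:

#include <cstdint>

// Both helpers encode "deoptimize when index >= length" as an unsigned
// compare, mirroring the two branches of DoBoundsCheck above.
static bool DeoptForRegisterIndex(uint32_t index, uint32_t length) {
  return index >= length;        // cmp(index, length); j(above_equal, ...)
}

static bool DeoptForConstantIndex(uint32_t length, uint32_t const_index) {
  return length <= const_index;  // cmp(length, imm); j(below_equal, ...)
}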
 
 
 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
-  ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
+  ElementsKind elements_kind = instr->elements_kind();
+  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+                                        instr->key(), elements_kind, 0));
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
-    __ movss(Operand(external_pointer, key, times_4, 0), xmm0);
+    __ movss(operand, xmm0);
+  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    __ movdbl(operand, ToDoubleRegister(instr->value()));
   } else {
     Register value = ToRegister(instr->value());
-    switch (array_type) {
-      case kExternalPixelArray: {
-        // Clamp the value to [0..255].
-        Register temp = ToRegister(instr->TempAt(0));
-        // The dec_b below requires that the clamped value is in a byte
-        // register. eax is an arbitrary choice to satisfy this requirement, we
-        // hinted the register allocator to give us eax when building the
-        // instruction.
-        ASSERT(temp.is(eax));
-        __ mov(temp, ToRegister(instr->value()));
-        NearLabel done;
-        __ test(temp, Immediate(0xFFFFFF00));
-        __ j(zero, &done);
-        __ setcc(negative, temp);  // 1 if negative, 0 if positive.
-        __ dec_b(temp);  // 0 if negative, 255 if positive.
-        __ bind(&done);
-        __ mov_b(Operand(external_pointer, key, times_1, 0), temp);
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+        __ mov_b(operand, value);
         break;
-      }
-      case kExternalByteArray:
-      case kExternalUnsignedByteArray:
-        __ mov_b(Operand(external_pointer, key, times_1, 0), value);
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ mov_w(operand, value);
         break;
-      case kExternalShortArray:
-      case kExternalUnsignedShortArray:
-        __ mov_w(Operand(external_pointer, key, times_2, 0), value);
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ mov(operand, value);
         break;
-      case kExternalIntArray:
-      case kExternalUnsignedIntArray:
-        __ mov(Operand(external_pointer, key, times_4, 0), value);
-        break;
-      case kExternalFloatArray:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3096,6 +3139,26 @@
 }
 
 
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+    LStoreKeyedFastDoubleElement* instr) {
+  XMMRegister value = ToDoubleRegister(instr->value());
+  Label have_value;
+
+  __ ucomisd(value, value);
+  __ j(parity_odd, &have_value);  // Ordered (not NaN): keep the value as is.
+
+  ExternalReference canonical_nan_reference =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+  __ bind(&have_value);
+
+  Operand double_store_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+  __ movdbl(double_store_operand, value);
+}
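
The self-compare ucomisd(value, value) above falls through only for NaN, which is then replaced by a single canonical non-hole NaN bit pattern before the store. A rough C++ sketch of the idea; the bit pattern here is an assumption for illustration, not necessarily the one V8 uses:

#include <cstdint>
#include <cstring>

// NaN is the only value for which x != x, so a self-compare picks out
// exactly the values that need canonicalizing before a double store.
static double CanonicalizeNaNForStore(double value) {
  if (value != value) {
    const uint64_t kCanonicalNonHoleNaN = 0x7FF8000000000000ULL;  // assumed
    std::memcpy(&value, &kCanonicalNonHoleNaN, sizeof(value));
  }
  return value;
}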
+
+
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->object()).is(edx));
@@ -3105,7 +3168,7 @@
   Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -3120,95 +3183,80 @@
   };
 
   Register string = ToRegister(instr->string());
-  Register index = no_reg;
-  int const_index = -1;
-  if (instr->index()->IsConstantOperand()) {
-    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
-    if (!Smi::IsValid(const_index)) {
-      // Guaranteed to be out of bounds because of the assert above.
-      // So the bounds check that must dominate this instruction must
-      // have deoptimized already.
-      if (FLAG_debug_code) {
-        __ Abort("StringCharCodeAt: out of bounds index.");
-      }
-      // No code needs to be generated.
-      return;
-    }
-  } else {
-    index = ToRegister(instr->index());
-  }
+  Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
 
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  NearLabel flat_string, ascii_string, done;
-
   // Fetch the instance type of the receiver into result register.
   __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ test(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
 
-  // Handle non-flat strings.
-  __ test(result, Immediate(kIsConsStringMask));
-  __ j(zero, deferred->entry());
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ test(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
 
-  // ConsString.
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ SmiUntag(result);
+  __ add(index, Operand(result));
+  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle conses.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  __ bind(&cons_string);
   __ cmp(FieldOperand(string, ConsString::kSecondOffset),
          Immediate(factory()->empty_string()));
   __ j(not_equal, deferred->entry());
-  // Get the first of the two strings and load its instance type.
   __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
   __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
   STATIC_ASSERT(kSeqStringTag == 0);
   __ test(result, Immediate(kStringRepresentationMask));
   __ j(not_zero, deferred->entry());
 
-  // Check for ASCII or two-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT(kAsciiStringTag != 0);
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
+  __ j(not_zero, &ascii_string, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
+  Label done;
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzx_w(result,
-               FieldOperand(string,
-                            SeqTwoByteString::kHeaderSize +
-                            (kUC16Size * const_index)));
-  } else {
-    __ movzx_w(result, FieldOperand(string,
-                                    index,
-                                    times_2,
-                                    SeqTwoByteString::kHeaderSize));
-  }
-  __ jmp(&done);
+  __ movzx_w(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
   __ bind(&ascii_string);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzx_b(result, FieldOperand(string,
-                                    SeqAsciiString::kHeaderSize + const_index));
-  } else {
-    __ movzx_b(result, FieldOperand(string,
-                                    index,
-                                    times_1,
-                                    SeqAsciiString::kHeaderSize));
-  }
+  __ movzx_b(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
   __ bind(&done);
   __ bind(deferred->exit());
 }
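
The rewritten DoStringCharCodeAt handles the two indirect string shapes before the sequential fast path: a slice adds its stored offset and continues with its parent, a cons whose second half is the empty string continues with its first half, and anything else goes to the deferred runtime call. A toy sketch of that control flow with made-up types, not V8's heap layout (the generated code unwraps a single level and re-checks; the toy simply recurses):

#include <string>

struct ToyString {
  enum Kind { kSeq, kSlice, kCons } kind;
  std::string chars;                  // kSeq only
  const ToyString* target = nullptr;  // slice parent / cons first
  const ToyString* second = nullptr;  // cons second
  int offset = 0;                     // slice start offset
};

// Returns the character, or '\0' to stand in for the runtime fallback.
static char ToyCharAt(const ToyString* s, int index) {
  if (s->kind == ToyString::kSlice) {
    return ToyCharAt(s->target, index + s->offset);
  }
  if (s->kind == ToyString::kCons) {
    if (!s->second->chars.empty()) return '\0';  // non-flat cons: runtime
    return ToyCharAt(s->target, index);
  }
  return s->chars[index];             // sequential string: direct load
}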
@@ -3236,7 +3284,8 @@
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
+                          instr, instr->context());
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(eax);
   }
@@ -3287,7 +3336,7 @@
   PushSafepointRegistersScope scope(this);
   __ SmiTag(char_code);
   __ push(char_code);
-  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
 }
 
@@ -3299,6 +3348,22 @@
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  if (instr->left()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->left()));
+  } else {
+    __ push(ToOperand(instr->left()));
+  }
+  if (instr->right()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->right()));
+  } else {
+    __ push(ToOperand(instr->right()));
+  }
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -3340,13 +3405,13 @@
   // There was overflow, so bits 30 and 31 of the original integer
   // disagree. Try to allocate a heap number in new space and store
   // the value in there. If that fails, call the runtime system.
-  NearLabel done;
+  Label done;
   __ SmiUntag(reg);
   __ xor_(reg, 0x80000000);
   __ cvtsi2sd(xmm0, Operand(reg));
   if (FLAG_inline_new) {
     __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
   }
 
   // Slow case: Call the runtime system to do the number allocation.
@@ -3356,8 +3421,15 @@
   // register is stored, as this register is in the pointer map, but contains an
   // integer value.
   __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   if (!reg.is(eax)) __ mov(reg, eax);
 
   // Done. Put the value in xmm0 into the value of the allocated heap
@@ -3401,7 +3473,15 @@
   __ Set(reg, Immediate(0));
 
   PushSafepointRegistersScope scope(this);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ StoreToSafepointRegisterSlot(reg, eax);
 }
 
@@ -3429,11 +3509,10 @@
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 LEnvironment* env) {
-  NearLabel load_smi, done;
+  Label load_smi, done;
 
   // Smi check.
-  __ test(input_reg, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi, not_taken);
+  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
 
   // Heap number map check.
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3441,21 +3520,23 @@
   if (deoptimize_on_undefined) {
     DeoptimizeIf(not_equal, env);
   } else {
-    NearLabel heap_number;
-    __ j(equal, &heap_number);
+    Label heap_number;
+    __ j(equal, &heap_number, Label::kNear);
+
     __ cmp(input_reg, factory()->undefined_value());
     DeoptimizeIf(not_equal, env);
 
     // Convert undefined to NaN.
-    ExternalReference nan = ExternalReference::address_of_nan();
+    ExternalReference nan =
+        ExternalReference::address_of_canonical_non_hole_nan();
     __ movdbl(result_reg, Operand::StaticVariable(nan));
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
   }
   // Heap number to XMM conversion.
   __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
   __ bind(&load_smi);
@@ -3477,7 +3558,7 @@
 
 
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  NearLabel done, heap_number;
+  Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
 
   // Heap number map check.
@@ -3485,18 +3566,18 @@
          factory()->heap_number_map());
 
   if (instr->truncating()) {
-    __ j(equal, &heap_number);
+    __ j(equal, &heap_number, Label::kNear);
     // Check for undefined. Undefined is converted to zero for truncating
     // conversions.
     __ cmp(input_reg, factory()->undefined_value());
     DeoptimizeIf(not_equal, instr->environment());
     __ mov(input_reg, 0);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
     if (CpuFeatures::IsSupported(SSE3)) {
       CpuFeatures::Scope scope(SSE3);
-      NearLabel convert;
+      Label convert;
       // Use more powerful conversion when sse3 is available.
       // Load x87 register with heap number.
       __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
@@ -3506,7 +3587,7 @@
       const uint32_t kTooBigExponent =
           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
-      __ j(less, &convert);
+      __ j(less, &convert, Label::kNear);
       // Pop FPU stack before deoptimizing.
       __ ffree(0);
       __ fincstp();
@@ -3520,7 +3601,6 @@
       __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
       __ add(Operand(esp), Immediate(kDoubleSize));
     } else {
-      NearLabel deopt;
       XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
       __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
       __ cvttsd2si(input_reg, Operand(xmm0));
@@ -3567,8 +3647,7 @@
   DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
 
   // Smi check.
-  __ test(input_reg, Immediate(kSmiTagMask));
-  __ j(not_zero, deferred->entry());
+  __ JumpIfNotSmi(input_reg, deferred->entry());
 
   // Smi to int32 conversion
   __ SmiUntag(input_reg);  // Untag smi.
@@ -3609,8 +3688,8 @@
     if (CpuFeatures::IsSupported(SSE3)) {
       // This will deoptimize if the exponent of the input is out of range.
       CpuFeatures::Scope scope(SSE3);
-      NearLabel convert, done;
-      __ j(not_equal, &done);
+      Label convert, done;
+      __ j(not_equal, &done, Label::kNear);
       __ sub(Operand(esp), Immediate(kDoubleSize));
       __ movdbl(Operand(esp, 0), input_reg);
       // Get exponent alone and check for too-big exponent.
@@ -3619,7 +3698,7 @@
       const uint32_t kTooBigExponent =
           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
       __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
-      __ j(less, &convert);
+      __ j(less, &convert, Label::kNear);
       __ add(Operand(esp), Immediate(kDoubleSize));
       DeoptimizeIf(no_condition, instr->environment());
       __ bind(&convert);
@@ -3630,13 +3709,13 @@
       __ add(Operand(esp), Immediate(kDoubleSize));
       __ bind(&done);
     } else {
-      NearLabel done;
+      Label done;
       Register temp_reg = ToRegister(instr->TempAt(0));
       XMMRegister xmm_scratch = xmm0;
 
       // If cvttsd2si succeeded, we're done. Otherwise, we attempt
       // manual conversion.
-      __ j(not_equal, &done);
+      __ j(not_equal, &done, Label::kNear);
 
       // Get high 32 bits of the input in result_reg and temp_reg.
       __ pshufd(xmm_scratch, input_reg, 1);
@@ -3686,7 +3765,7 @@
       __ bind(&done);
     }
   } else {
-    NearLabel done;
+    Label done;
     __ cvttsd2si(result_reg, Operand(input_reg));
     __ cvtsi2sd(xmm0, Operand(result_reg));
     __ ucomisd(xmm0, input_reg);
@@ -3696,7 +3775,7 @@
       // The integer converted back is equal to the original. We
       // only have to test if we got -0 as an input.
       __ test(result_reg, Operand(result_reg));
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       __ movmskpd(result_reg, input_reg);
       // Bit 0 contains the sign of the double in input_reg.
       // If input was positive, we are ok and return 0, otherwise
@@ -3711,14 +3790,14 @@
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->InputAt(0);
-  __ test(ToRegister(input), Immediate(kSmiTagMask));
+  __ test(ToOperand(input), Immediate(kSmiTagMask));
   DeoptimizeIf(not_zero, instr->environment());
 }
 
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   LOperand* input = instr->InputAt(0);
-  __ test(ToRegister(input), Immediate(kSmiTagMask));
+  __ test(ToOperand(input), Immediate(kSmiTagMask));
   DeoptimizeIf(zero, instr->environment());
 }
 
@@ -3726,29 +3805,43 @@
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
-  InstanceType first = instr->hydrogen()->first();
-  InstanceType last = instr->hydrogen()->last();
 
   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
 
-  // If there is only one type in the interval check for equality.
-  if (first == last) {
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
     __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
             static_cast<int8_t>(first));
-    DeoptimizeIf(not_equal, instr->environment());
-  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
-    // String has a dedicated bit in instance type.
-    __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
-    DeoptimizeIf(not_zero, instr->environment());
-  } else  {
-    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-            static_cast<int8_t>(first));
-    DeoptimizeIf(below, instr->environment());
-    // Omit check for the last type.
-    if (last != LAST_TYPE) {
-      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-              static_cast<int8_t>(last));
-      DeoptimizeIf(above, instr->environment());
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(not_equal, instr->environment());
+    } else {
+      DeoptimizeIf(below, instr->environment());
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+                static_cast<int8_t>(last));
+        DeoptimizeIf(above, instr->environment());
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+    } else {
+      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+      __ and_(temp, mask);
+      __ cmpb(Operand(temp), tag);
+      DeoptimizeIf(not_equal, instr->environment());
     }
   }
 }
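
DoCheckInstanceType now distinguishes an interval check from a mask-and-tag check, and uses a single test_b when the mask is a power of two. A compact sketch of the two predicates in plain C++, included for illustration only:

#include <cstdint>

// Interval form: deoptimize unless first <= type <= last.
static bool InstanceTypeInInterval(uint8_t type, uint8_t first, uint8_t last) {
  return type >= first && type <= last;
}

// Mask/tag form: deoptimize unless the selected bits equal the tag.
// With a single-bit mask the tag is either 0 or the mask itself, so a
// bare test instruction suffices, as in the fast path above.
static bool InstanceTypeMatches(uint8_t type, uint8_t mask, uint8_t tag) {
  return static_cast<uint8_t>(type & mask) == tag;
}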
@@ -3756,8 +3849,8 @@
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
   ASSERT(instr->InputAt(0)->IsRegister());
-  Register reg = ToRegister(instr->InputAt(0));
-  __ cmp(reg, instr->hydrogen()->target());
+  Operand operand = ToOperand(instr->InputAt(0));
+  __ cmp(operand, instr->hydrogen()->target());
   DeoptimizeIf(not_equal, instr->environment());
 }
 
@@ -3772,6 +3865,54 @@
 }
 
 
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register value_reg = ToRegister(instr->result());
+  __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register input_reg = ToRegister(instr->unclamped());
+  Label is_smi, done, heap_number;
+
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  __ j(equal, &heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ cmp(input_reg, factory()->undefined_value());
+  DeoptimizeIf(not_equal, instr->environment());
+  __ mov(input_reg, 0);
+  __ jmp(&done, Label::kNear);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+  __ jmp(&done, Label::kNear);
+
+  // smi
+  __ bind(&is_smi);
+  __ SmiUntag(input_reg);
+  __ ClampUint8(input_reg);
+
+  __ bind(&done);
+}
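
DoClampTToUint8 above dispatches on the tagged input: smis are untagged and clamped as integers, heap numbers are clamped as doubles, undefined becomes zero, and anything else deoptimizes. A small illustrative clamp in plain C++; the rounding choice is an assumption, not taken from the macro-assembler helpers:

#include <cmath>
#include <cstdint>

static uint8_t ClampDoubleToByte(double value) {
  if (!(value > 0.0)) return 0;      // NaN and non-positive values -> 0
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::lround(value));  // assumed rounding
}

static uint8_t ClampIntToByte(int value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}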
+
+
 void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
   if (isolate()->heap()->InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
@@ -3811,6 +3952,7 @@
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
   // Setup the parameters to the stub/runtime call.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
@@ -3824,16 +3966,16 @@
     FastCloneShallowArrayStub::Mode mode =
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
         FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
@@ -3855,12 +3997,9 @@
 
   // Pick the right runtime function to call.
   if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
   } else {
-    CallRuntime(Runtime::kCreateObjectLiteralShallow,
-                4,
-                instr,
-                CONTEXT_ADJUSTED);
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
   }
 }
 
@@ -3868,24 +4007,26 @@
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
   ASSERT(ToRegister(instr->InputAt(0)).is(eax));
   __ push(eax);
-  CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
 }
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  NearLabel materialized;
+  ASSERT(ToRegister(instr->context()).is(esi));
+  Label materialized;
   // Registers will be used as follows:
   // edi = JS function.
   // ecx = literals array.
   // ebx = regexp literal.
   // eax = regexp literal clone.
+  // esi = context.
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
   int literal_offset = FixedArray::kHeaderSize +
       instr->hydrogen()->literal_index() * kPointerSize;
   __ mov(ebx, FieldOperand(ecx, literal_offset));
   __ cmp(ebx, factory()->undefined_value());
-  __ j(not_equal, &materialized);
+  __ j(not_equal, &materialized, Label::kNear);
 
   // Create regexp literal using runtime function
   // Result will be in eax.
@@ -3893,7 +4034,7 @@
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
   __ push(Immediate(instr->hydrogen()->pattern()));
   __ push(Immediate(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ mov(ebx, eax);
 
   __ bind(&materialized);
@@ -3905,7 +4046,7 @@
   __ bind(&runtime_allocate);
   __ push(ebx);
   __ push(Immediate(Smi::FromInt(size)));
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ pop(ebx);
 
   __ bind(&allocated);
@@ -3925,6 +4066,7 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
@@ -3933,49 +4075,26 @@
     FastNewClosureStub stub(
         shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ push(Immediate(shared_info));
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
     __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(Immediate(shared_info));
     __ push(Immediate(pretenure
                       ? factory()->true_value()
                       : factory()->false_value()));
-    CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
   }
 }
 
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
-  LOperand* input = instr->InputAt(0);
+  LOperand* input = instr->InputAt(1);
   if (input->IsConstantOperand()) {
     __ push(ToImmediate(input));
   } else {
     __ push(ToOperand(input));
   }
-  CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
-}
-
-
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Label true_label;
-  Label false_label;
-  NearLabel done;
-
-  Condition final_branch_condition = EmitTypeofIs(&true_label,
-                                                  &false_label,
-                                                  input,
-                                                  instr->type_literal());
-  __ j(final_branch_condition, &true_label);
-  __ bind(&false_label);
-  __ mov(result, factory()->false_value());
-  __ jmp(&done);
-
-  __ bind(&true_label);
-  __ mov(result, factory()->true_value());
-
-  __ bind(&done);
+  CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
 
@@ -4020,6 +4139,10 @@
     __ cmp(input, factory()->false_value());
     final_branch_condition = equal;
 
+  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+    __ cmp(input, factory()->null_value());
+    final_branch_condition = equal;
+
   } else if (type_name->Equals(heap()->undefined_symbol())) {
     __ cmp(input, factory()->undefined_value());
     __ j(equal, true_label);
@@ -4031,22 +4154,21 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
-    __ j(equal, true_label);
-    // Regular expressions => 'function' (they are callable).
-    __ CmpInstanceType(input, JS_REGEXP_TYPE);
-    final_branch_condition = equal;
+    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+    final_branch_condition = above_equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
-    __ cmp(input, factory()->null_value());
-    __ j(equal, true_label);
-    // Regular expressions => 'function', not 'object'.
-    __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
+    if (!FLAG_harmony_typeof) {
+      __ cmp(input, factory()->null_value());
+      __ j(equal, true_label);
+    }
+    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
     __ j(below, false_label);
-    __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
-    __ j(above_equal, false_label);
+    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ j(above, false_label);
     // Check for undetectable objects => false.
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
@@ -4062,25 +4184,6 @@
 }
 
 
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
-  Register result = ToRegister(instr->result());
-  NearLabel true_label;
-  NearLabel false_label;
-  NearLabel done;
-
-  EmitIsConstructCall(result);
-  __ j(equal, &true_label);
-
-  __ mov(result, factory()->false_value());
-  __ jmp(&done);
-
-  __ bind(&true_label);
-  __ mov(result, factory()->true_value());
-
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp = ToRegister(instr->TempAt(0));
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4096,10 +4199,10 @@
   __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
 
   // Skip the arguments adaptor frame if it exists.
-  NearLabel check_frame_marker;
+  Label check_frame_marker;
   __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &check_frame_marker);
+  __ j(not_equal, &check_frame_marker, Label::kNear);
   __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
@@ -4109,9 +4212,27 @@
 }
 
 
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  int patch_size = Deoptimizer::patch_size();
+  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+    while (padding_size-- > 0) {
+      __ nop();
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
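
EnsureSpaceForLazyDeopt pads with nops whenever the previous lazy-deopt point is closer than the patch size, so patching one deopt site can never clobber the next. The padding amount reduces to a max-with-zero computation, sketched here outside the patch:

// Number of one-byte nops EnsureSpaceForLazyDeopt has to emit, given the
// current code offset, the offset of the previous lazy-deopt point, and
// the deoptimizer's patch size.
static int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc,
                                 int patch_size) {
  int shortfall = last_lazy_deopt_pc + patch_size - current_pc;
  return shortfall > 0 ? shortfall : 0;
}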
+
+
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  // No code for lazy bailout instruction. Used to capture environment after a
-  // call for populating the safepoint data with deoptimization data.
+  EnsureSpaceForLazyDeopt();
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
@@ -4131,32 +4252,76 @@
   }
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
   // Create safepoint generator that will also ensure enough space in the
   // reloc info for patching in deoptimization (since this is invoking a
   // builtin)
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
   __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  // Perform stack overflow check.
-  NearLabel done;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
-  __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &done);
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LStackCheck* instr_;
+  };
 
-  StackCheckStub stub;
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
-  __ bind(&done);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &done, Label::kNear);
+
+    ASSERT(instr->context()->IsRegister());
+    ASSERT(ToRegister(instr->context()).is(esi));
+    StackCheckStub stub;
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    EnsureSpaceForLazyDeopt();
+    __ bind(&done);
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  } else {
+    ASSERT(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new DeferredStackCheck(this, instr);
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(below, deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt();
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
 }
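
DoStackCheck now emits two shapes: at function entry the limit check falls through inline and only calls StackCheckStub on overflow, while a backwards branch jumps to a deferred Runtime::kStackGuard call. The trigger itself is an unsigned compare against the stack limit, sketched here for illustration:

#include <cstdint>

// Mirrors cmp(esp, stack_limit): the guard fires once the stack pointer
// has grown down past the limit address.
static bool StackGuardNeeded(uintptr_t stack_pointer, uintptr_t stack_limit) {
  return stack_pointer < stack_limit;
}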
 
 
@@ -4171,12 +4336,34 @@
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(osr_pc_offset_ == -1);
   osr_pc_offset_ = masm()->pc_offset();
 }
 
 
+void LCodeGen::DoIn(LIn* instr) {
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  if (key->IsConstantOperand()) {
+    __ push(ToImmediate(key));
+  } else {
+    __ push(ToOperand(key));
+  }
+  if (obj->IsConstantOperand()) {
+    __ push(ToImmediate(obj));
+  } else {
+    __ push(ToOperand(obj));
+  }
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index bdccd3c..d955450 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -60,7 +60,7 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
-        deoptimization_reloc_size(),
+        last_lazy_deopt_pc_(0),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -97,14 +97,15 @@
   void DoDeferredNumberTagI(LNumberTagI* instr);
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
-  void DoDeferredStackCheck(LGoto* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                        Label* map_check);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
 
   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -138,7 +139,6 @@
   HGraph* graph() const { return chunk_->graph(); }
 
   int GetNextEmittedBlock(int block);
-  LInstruction* GetNextInstruction();
 
   void EmitClassOfTest(Label* if_true,
                        Label* if_false,
@@ -147,8 +147,8 @@
                        Register temporary,
                        Register temporary2);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -165,11 +165,6 @@
   bool GenerateRelocPadding();
   bool GenerateSafepointTable();
 
-  enum ContextMode {
-    RESTORE_CONTEXT,
-    CONTEXT_ADJUSTED
-  };
-
   enum SafepointMode {
     RECORD_SIMPLE_SAFEPOINT,
     RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -177,44 +172,43 @@
 
   void CallCode(Handle<Code> code,
                 RelocInfo::Mode mode,
-                LInstruction* instr,
-                ContextMode context_mode);
+                LInstruction* instr);
 
   void CallCodeGeneric(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
-                       ContextMode context_mode,
                        SafepointMode safepoint_mode);
 
   void CallRuntime(const Runtime::Function* fun,
                    int argc,
-                   LInstruction* instr,
-                   ContextMode context_mode);
+                   LInstruction* instr);
 
   void CallRuntime(Runtime::FunctionId id,
                    int argc,
-                   LInstruction* instr,
-                   ContextMode context_mode) {
+                   LInstruction* instr) {
     const Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, argc, instr, context_mode);
+    CallRuntime(function, argc, instr);
   }
 
   void CallRuntimeFromDeferred(Runtime::FunctionId id,
                                int argc,
-                               LInstruction* instr);
+                               LInstruction* instr,
+                               LOperand* context);
 
   // Generate a direct call to a known function.  Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
-                         LInstruction* instr);
+                         LInstruction* instr,
+                         CallKind call_kind);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr,
-                                  SafepointMode safepoint_mode);
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
 
-  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
   void AddToTranslation(Translation* translation,
@@ -228,6 +222,10 @@
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
   int ToInteger32(LConstantOperand* op) const;
+  Operand BuildFastArrayOperand(LOperand* elements_pointer,
+                                LOperand* key,
+                                ElementsKind elements_kind,
+                                uint32_t offset);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -244,16 +242,16 @@
   void RecordSafepoint(LPointerMap* pointers,
                        Safepoint::Kind kind,
                        int arguments,
-                       int deoptimization_index);
-  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
-  void RecordSafepoint(int deoptimization_index);
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
-                                    int deoptimization_index);
+                                    Safepoint::DeoptMode mode);
   void RecordPosition(int position);
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
-  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
@@ -272,7 +270,6 @@
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsObject(Register input,
                          Register temp1,
-                         Register temp2,
                          Label* is_not_object,
                          Label* is_object);
 
@@ -280,10 +277,11 @@
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
 
-  void EmitLoadField(Register result,
-                     Register object,
-                     Handle<Map> type,
-                     Handle<String> name);
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
+  void EnsureSpaceForLazyDeopt();
 
   LChunk* const chunk_;
   MacroAssembler* const masm_;
@@ -300,13 +298,7 @@
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
-
-  struct DeoptimizationRelocSize {
-    int min_size;
-    int last_pc_offset;
-  };
-
-  DeoptimizationRelocSize deoptimization_reloc_size;
+  int last_lazy_deopt_pc_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 3d1da40..fcf1f91 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -305,16 +305,24 @@
   } else if (source->IsConstantOperand()) {
     ASSERT(destination->IsRegister() || destination->IsStackSlot());
     Immediate src = cgen_->ToImmediate(source);
-    Operand dst = cgen_->ToOperand(destination);
-    __ mov(dst, src);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      __ Set(dst, src);
+    } else {
+      Operand dst = cgen_->ToOperand(destination);
+      __ Set(dst, src);
+    }
 
   } else if (source->IsDoubleRegister()) {
-    ASSERT(destination->IsDoubleRegister() ||
-           destination->IsDoubleStackSlot());
     XMMRegister src = cgen_->ToDoubleRegister(source);
-    Operand dst = cgen_->ToOperand(destination);
-    __ movdbl(dst, src);
-
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      __ movaps(dst, src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movdbl(dst, src);
+    }
   } else if (source->IsDoubleStackSlot()) {
     ASSERT(destination->IsDoubleRegister() ||
            destination->IsDoubleStackSlot());
@@ -391,13 +399,19 @@
       __ mov(dst, tmp1);
       __ mov(src, tmp0);
     }
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = cgen_->ToDoubleRegister(source);
+    XMMRegister dst = cgen_->ToDoubleRegister(destination);
+    __ movaps(xmm0, src);
+    __ movaps(src, dst);
+    __ movaps(dst, xmm0);
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
-    // XMM register-register or register-memory.  We rely on having xmm0
+    // XMM register-memory swap.  We rely on having xmm0
     // available as a fixed scratch register.
-    ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
-    ASSERT(destination->IsDoubleRegister() ||
-           destination->IsDoubleStackSlot());
+    ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
     XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                   ? source
                                                   : destination);
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 4b10562..3dc220d 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -71,22 +71,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
-  for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
-  for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -114,21 +113,18 @@
 template<int R, int I, int T>
 void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  inputs_.PrintOperandsTo(stream);
+  for (int i = 0; i < inputs_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    inputs_[i]->PrintTo(stream);
+  }
 }
 
 
 template<int R, int I, int T>
 void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
-  for (int i = 0; i < N; i++) {
+  for (int i = 0; i < results_.length(); i++) {
     if (i > 0) stream->Add(" ");
-    elems_[i]->PrintTo(stream);
+    results_[i]->PrintTo(stream);
   }
 }
 
@@ -240,6 +236,13 @@
 }
 
 
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -264,12 +267,6 @@
 }
 
 
-void LTypeofIs::PrintDataTo(StringStream* stream) {
-  InputAt(0)->PrintTo(stream);
-  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
 void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if typeof ");
   InputAt(0)->PrintTo(stream);
@@ -303,19 +300,28 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" ");
+  InputAt(1)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[ecx] #%d / ", arity());
 }
 
 
 void LCallNamed::PrintDataTo(StringStream* stream) {
-  SmartPointer<char> name_string = name()->ToCString();
+  SmartArrayPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
 
 void LCallGlobal::PrintDataTo(StringStream* stream) {
-  SmartPointer<char> name_string = name()->ToCString();
+  SmartArrayPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
@@ -332,13 +338,6 @@
 }
 
 
-void LClassOfTest::PrintDataTo(StringStream* stream) {
-  stream->Add("= class_of_test(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
 void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
   arguments()->PrintTo(stream);
 
@@ -379,8 +378,7 @@
     LLabel* label = LLabel::cast(first_instr);
     if (last_instr->IsGoto()) {
       LGoto* goto_instr = LGoto::cast(last_instr);
-      if (!goto_instr->include_stack_check() &&
-          label->IsRedundant() &&
+      if (label->IsRedundant() &&
           !label->is_loop_header()) {
         bool can_eliminate = true;
         for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -431,6 +429,15 @@
 }
 
 
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
 void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add("[");
@@ -441,7 +448,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LGap* gap = new LGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -533,7 +540,8 @@
 
 void LChunkBuilder::Abort(const char* format, ...) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
     PrintF("Aborting LChunk building in @\"%s\": ", *name);
     va_list arguments;
     va_start(arguments, format);
@@ -789,6 +797,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new LDeoptimize);
 }
@@ -808,9 +821,10 @@
     ASSERT(instr->left()->representation().IsTagged());
     ASSERT(instr->right()->representation().IsTagged());
 
+    LOperand* context = UseFixed(instr->context(), esi);
     LOperand* left = UseFixed(instr->left(), edx);
     LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
+    LArithmeticT* result = new LArithmeticT(op, context, left, right);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 }
@@ -822,18 +836,19 @@
     ASSERT(instr->left()->representation().IsTagged());
     ASSERT(instr->right()->representation().IsTagged());
 
+    LOperand* context = UseFixed(instr->context(), esi);
     LOperand* left = UseFixed(instr->left(), edx);
     LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
+    LArithmeticT* result = new LArithmeticT(op, context, left, right);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 
   ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
-  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
-  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->left());
 
-  HValue* right_value = instr->OperandAt(1);
+  HValue* right_value = instr->right();
   LOperand* right = NULL;
   int constant_value = 0;
   if (right_value->IsConstant()) {
@@ -844,24 +859,22 @@
     right = UseFixed(right_value, ecx);
   }
 
-  // Shift operations can only deoptimize if we do a logical shift
-  // by 0 and the result cannot be truncated to int32.
-  bool can_deopt = (op == Token::SHR && constant_value == 0);
-  if (can_deopt) {
-    bool can_truncate = true;
-    for (int i = 0; i < instr->uses()->length(); i++) {
-      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
-        can_truncate = false;
+  // Shift operations can only deoptimize if we do a logical shift by 0 and
+  // the result cannot be truncated to int32.
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
         break;
       }
     }
-    can_deopt = !can_truncate;
   }
 
-  LShiftI* result = new LShiftI(op, left, right, can_deopt);
-  return can_deopt
-      ? AssignEnvironment(DefineSameAsFirst(result))
-      : DefineSameAsFirst(result);
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
 }
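
A stand-alone illustration of why the logical-shift case above may deoptimize, as the comment in the hunk describes: `x >>> 0` merely reinterprets an int32 bit pattern as uint32, so the result can exceed INT32_MAX and is only safe to keep in an int32 register if every use truncates back to int32 (the HUseIterator loop checks exactly that flag). The snippet is plain C++, not V8 code.

#include <cstdint>
#include <cstdio>

int main() {
  // x >>> 0 in JavaScript reinterprets the 32-bit pattern as unsigned.
  int32_t x = -1;                                     // bit pattern 0xFFFFFFFF
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;   // 4294967295
  // The value no longer fits into a signed 32-bit register, so compiled code
  // must either prove that every use truncates back to int32 or bail out
  // (deoptimize) to a representation that can hold the full unsigned range.
  bool fits = shifted <= static_cast<uint32_t>(INT32_MAX);
  std::printf("%u fits in int32: %s\n",
              static_cast<unsigned>(shifted), fits ? "yes" : "no");
  return 0;
}
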
 
 
@@ -889,12 +902,15 @@
   HValue* right = instr->right();
   ASSERT(left->representation().IsTagged());
   ASSERT(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
-  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+  LArithmeticT* result =
+      new LArithmeticT(op, context, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
+
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
   ASSERT(is_building());
   current_block_ = block;
@@ -971,18 +987,7 @@
     if (FLAG_stress_environments && !instr->HasEnvironment()) {
       instr = AssignEnvironment(instr);
     }
-    if (current->IsTest() && !instr->IsGoto()) {
-      ASSERT(instr->IsControl());
-      HTest* test = HTest::cast(current);
-      instr->set_hydrogen_value(test->value());
-      HBasicBlock* first = test->FirstSuccessor();
-      HBasicBlock* second = test->SecondSuccessor();
-      ASSERT(first != NULL && second != NULL);
-      instr->SetBranchTargets(first->block_id(), second->block_id());
-    } else {
-      instr->set_hydrogen_value(current);
-    }
-
+    instr->set_hydrogen_value(current);
     chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
@@ -1004,6 +1009,8 @@
                                           outer);
   int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
     HValue* value = hydrogen_env->values()->at(i);
     LOperand* op = NULL;
     if (value->IsArgumentsObject()) {
@@ -1021,116 +1028,27 @@
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
-                            instr->include_stack_check());
-  return (instr->include_stack_check())
-      ? AssignPointerMap(result)
-      : result;
+  return new LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
   HValue* v = instr->value();
   if (v->EmitAtUses()) {
-    if (v->IsClassOfTest()) {
-      HClassOfTest* compare = HClassOfTest::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
-                                       TempRegister(),
-                                       TempRegister());
-    } else if (v->IsCompare()) {
-      HCompare* compare = HCompare::cast(v);
-      Token::Value op = compare->token();
-      HValue* left = compare->left();
-      HValue* right = compare->right();
-      Representation r = compare->GetInputRepresentation();
-      if (r.IsInteger32()) {
-        ASSERT(left->representation().IsInteger32());
-        ASSERT(right->representation().IsInteger32());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseOrConstantAtStart(right));
-      } else if (r.IsDouble()) {
-        ASSERT(left->representation().IsDouble());
-        ASSERT(right->representation().IsDouble());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else {
-        ASSERT(left->representation().IsTagged());
-        ASSERT(right->representation().IsTagged());
-        bool reversed = op == Token::GT || op == Token::LTE;
-        LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
-        LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
-        LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
-                                                    right_operand);
-        return MarkAsCall(result, instr);
-      }
-    } else if (v->IsIsSmi()) {
-      HIsSmi* compare = HIsSmi::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsSmiAndBranch(Use(compare->value()));
-    } else if (v->IsHasInstanceType()) {
-      HHasInstanceType* compare = HHasInstanceType::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
-                                           TempRegister());
-    } else if (v->IsHasCachedArrayIndex()) {
-      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasCachedArrayIndexAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsNull()) {
-      HIsNull* compare = HIsNull::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      // We only need a temp register for non-strict compare.
-      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
-      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
-                                  temp);
-    } else if (v->IsIsObject()) {
-      HIsObject* compare = HIsObject::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      LOperand* temp1 = TempRegister();
-      LOperand* temp2 = TempRegister();
-      return new LIsObjectAndBranch(UseRegister(compare->value()),
-                                    temp1,
-                                    temp2);
-    } else if (v->IsCompareJSObjectEq()) {
-      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
-      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
-                                         UseRegisterAtStart(compare->right()));
-    } else if (v->IsInstanceOf()) {
-      HInstanceOf* instance_of = HInstanceOf::cast(v);
-      LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
-      LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
-      LOperand* context = UseFixed(instance_of->context(), esi);
-      LInstanceOfAndBranch* result =
-          new LInstanceOfAndBranch(context, left, right);
-      return MarkAsCall(result, instr);
-    } else if (v->IsTypeofIs()) {
-      HTypeofIs* typeof_is = HTypeofIs::cast(v);
-      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
-    } else if (v->IsIsConstructCall()) {
-      return new LIsConstructCallAndBranch(TempRegister());
-    } else {
-      if (v->IsConstant()) {
-        if (HConstant::cast(v)->ToBoolean()) {
-          return new LGoto(instr->FirstSuccessor()->block_id());
-        } else {
-          return new LGoto(instr->SecondSuccessor()->block_id());
-        }
-      }
-      Abort("Undefined compare before branch");
-      return NULL;
-    }
+    ASSERT(v->IsConstant());
+    ASSERT(!v->representation().IsDouble());
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
   }
-  return new LBranch(UseRegisterAtStart(v));
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  // We need a temporary register when we have to access the map *or* we have
+  // no type info yet, in which case we handle all cases (including the ones
+  // involving maps).
+  bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
+  LOperand* temp = needs_temp ? TempRegister() : NULL;
+  return AssignEnvironment(new LBranch(UseRegister(v), temp));
 }
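
The temp-register rule in DoBranch above depends on only two queries of the recorded type feedback. The following is a hypothetical stand-alone model of that decision; the real ToBooleanStub::Types is not reproduced here, and the bit names are invented for illustration.

#include <cstdio>

// Invented bit names standing in for observed input types.
enum TypeBit { kSmiSeen = 1 << 0, kStringSeen = 1 << 1, kHeapNumberSeen = 1 << 2 };

struct ExpectedInputTypes {
  unsigned bits;
  bool IsEmpty() const { return bits == 0; }  // no feedback recorded yet
  // Types whose check requires loading the object's map.
  bool NeedsMap() const { return (bits & (kStringSeen | kHeapNumberSeen)) != 0; }
};

static bool NeedsTempRegister(const ExpectedInputTypes& types) {
  // Same rule as the builder: reserve a scratch register when a map access is
  // needed, or when there is no feedback and every case must be handled.
  return types.NeedsMap() || types.IsEmpty();
}

int main() {
  ExpectedInputTypes none = { 0 };
  ExpectedInputTypes smi_only = { kSmiSeen };
  ExpectedInputTypes with_string = { kSmiSeen | kStringSeen };
  std::printf("%d %d %d\n", NeedsTempRegister(none),
              NeedsTempRegister(smi_only), NeedsTempRegister(with_string));
  return 0;
}
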
 
 
@@ -1164,7 +1082,8 @@
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
       new LInstanceOfKnownGlobal(
-          UseFixed(instr->value(), InstanceofStub::left()),
+          UseFixed(instr->context(), esi),
+          UseFixed(instr->left(), InstanceofStub::left()),
           FixedTemp(edi));
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
@@ -1192,8 +1111,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
@@ -1222,21 +1146,33 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog) {
     ASSERT(instr->representation().IsDouble());
     ASSERT(instr->value()->representation().IsDouble());
+    LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
     return DefineSameAsFirst(result);
   } else if (op == kMathSin || op == kMathCos) {
+    LOperand* context = UseFixed(instr->context(), esi);
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    LOperand* context = UseAny(instr->context());  // Deferred use by MathAbs.
+    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1307,7 +1243,8 @@
 
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallRuntime, eax), instr);
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
 }
 
 
@@ -1487,71 +1424,86 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
   Token::Value op = instr->token();
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  bool reversed = (op == Token::GT || op == Token::LTE);
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+  LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+  LCmpT* result = new LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+    HCompareIDAndBranch* instr) {
   Representation r = instr->GetInputRepresentation();
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(left, right));
-  } else if (r.IsDouble()) {
+    return new LCmpIDAndBranch(left, right);
+  } else {
+    ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(left, right));
-  } else {
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-    bool reversed = (op == Token::GT || op == Token::LTE);
-    LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
-    LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
-    LCmpT* result = new LCmpT(left, right);
-    return MarkAsCall(DefineFixed(result, eax), instr);
+    return new LCmpIDAndBranch(left, right);
   }
 }
 
 
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
-    HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
-  LOperand* right = UseRegisterAtStart(instr->right());
-  LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
-  return DefineAsRegister(result);
+  LOperand* right = UseAtStart(instr->right());
+  return new LCmpObjectEqAndBranch(left, right);
 }
 
 
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new LIsNull(value));
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+  HCompareConstantEqAndBranch* instr) {
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegister(instr->value());
-
-  return DefineAsRegister(new LIsObject(value, TempRegister()));
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+  // We only need a temp register for non-strict compare.
+  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseAtStart(instr->value());
-
-  return DefineAsRegister(new LIsSmi(value));
+  LOperand* temp = TempRegister();
+  return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
 }
 
 
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
+  return new LIsSmiAndBranch(Use(instr->value()));
+}
 
-  return DefineAsRegister(new LHasInstanceType(value));
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
+                                       TempRegister());
 }
 
 
@@ -1564,20 +1516,20 @@
 }
 
 
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
-    HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegister(instr->value());
-
-  return DefineAsRegister(new LHasCachedArrayIndex(value));
+  return new LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseTempRegister(instr->value());
-
-  return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister(),
+                                   TempRegister());
 }
 
 
@@ -1587,16 +1539,16 @@
 }
 
 
-LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+    HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayLength(array));
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
 }
 
 
-LInstruction* LChunkBuilder::DoExternalArrayLength(
-    HExternalArrayLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LExternalArrayLength(array));
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+  LOperand* object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LElementsKind(object));
 }
 
 
@@ -1608,8 +1560,9 @@
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
-                                            Use(instr->length())));
+  return AssignEnvironment(new LBoundsCheck(
+      UseRegisterOrConstantAtStart(instr->index()),
+      UseAtStart(instr->length())));
 }
 
 
@@ -1621,8 +1574,22 @@
 
 
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseFixed(instr->value(), eax);
-  return MarkAsCall(new LThrow(value), instr);
+  return MarkAsCall(new LThrow(context, value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
 }
 
 
@@ -1639,8 +1606,9 @@
       LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       if (needs_check) {
+        bool truncating = instr->CanTruncateToInt32();
         LOperand* xmm_temp =
-            (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+            (truncating && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
         LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1660,8 +1628,8 @@
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
-      bool needs_temp = instr->CanTruncateToInt32() &&
-          !CpuFeatures::IsSupported(SSE3);
+      bool truncating = instr->CanTruncateToInt32();
+      bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
       LOperand* value = needs_temp ?
           UseTempRegister(instr->value()) : UseRegister(instr->value());
       LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -1688,7 +1656,7 @@
 
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = UseAtStart(instr->value());
   return AssignEnvironment(new LCheckNonSmi(value));
 }
 
@@ -1709,13 +1677,13 @@
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = UseAtStart(instr->value());
   return AssignEnvironment(new LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = UseAtStart(instr->value());
   return AssignEnvironment(new LCheckFunction(value));
 }
 
@@ -1727,6 +1695,55 @@
 }
 
 
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  if (input_rep.IsDouble()) {
+    LOperand* reg = UseRegister(value);
+    return DefineAsRegister(new LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    LOperand* reg = UseFixed(value, eax);
+    return DefineFixed(new LClampIToUint8(reg), eax);
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* reg = UseFixed(value, eax);
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LOperand* temp = FixedTemp(xmm1);
+    LClampTToUint8* result = new LClampTToUint8(reg, temp);
+    return AssignEnvironment(DefineFixed(result, eax));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+
+  LInstruction* result;
+  if (input_rep.IsDouble()) {
+    LOperand* reg = UseRegister(value);
+    LOperand* temp_reg =
+        CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
+    result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a noop.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* reg = UseRegister(value);
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LOperand* xmm_temp =
+        CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
+    result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+  }
+  return AssignEnvironment(result);
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   return new LReturn(UseFixed(instr->value(), eax));
 }
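
The new clamp instructions added above feed pixel-element stores; conceptually they map any int32 or double onto the range [0, 255]. Below is a stand-alone sketch under the assumption of simple nearest-integer rounding for doubles; the precise rounding mode is handled in the code generator, not in this builder diff, so treat it as illustrative.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Clamp an int32 into [0, 255].
uint8_t ClampIntToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}

// Clamp a double into [0, 255]; NaN maps to 0. Rounding via lround is an
// assumption made for this sketch.
uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0)) return 0;      // also catches NaN
  if (value > 255) return 255;
  return static_cast<uint8_t>(std::lround(value));
}

int main() {
  std::printf("%d %d %d %d\n",
              ClampIntToUint8(-5), ClampIntToUint8(300),
              ClampDoubleToUint8(std::nan("")), ClampDoubleToUint8(127.6));
  return 0;
}
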
@@ -1817,13 +1834,16 @@
 LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
     HLoadNamedFieldPolymorphic* instr) {
   ASSERT(instr->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
   if (instr->need_generic()) {
     LOperand* obj = UseFixed(instr->object(), eax);
-    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result =
+        new LLoadNamedFieldPolymorphic(context, obj);
     return MarkAsCall(DefineFixed(result, eax), instr);
   } else {
     LOperand* obj = UseRegisterAtStart(instr->object());
-    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result =
+        new LLoadNamedFieldPolymorphic(context, obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1863,28 +1883,45 @@
   ASSERT(instr->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
-  return AssignEnvironment(DefineSameAsFirst(result));
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+    HLoadKeyedFastDoubleElement* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LLoadKeyedFastDoubleElement* result =
+      new LLoadKeyedFastDoubleElement(elements, key);
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
-  ExternalArrayType array_type = instr->array_type();
+  ElementsKind elements_kind = instr->elements_kind();
   Representation representation(instr->representation());
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
       new LLoadKeyedSpecializedArrayElement(external_pointer,
                                             key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
-  return (array_type == kExternalUnsignedIntArray)
+  return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS)
       ? AssignEnvironment(load_instr)
       : load_instr;
 }
@@ -1919,29 +1956,40 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+    HStoreKeyedFastDoubleElement* instr) {
+  ASSERT(instr->value()->representation().IsDouble());
+  ASSERT(instr->elements()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* val = UseTempRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
   Representation representation(instr->value()->representation());
-  ExternalArrayType array_type = instr->array_type();
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ElementsKind elements_kind = instr->elements_kind();
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
-  LOperand* temp = NULL;
-
-  if (array_type == kExternalPixelArray) {
-    // The generated code for pixel array stores requires that the clamped value
-    // is in a byte register. eax is an arbitrary choice to satisfy this
-    // requirement.
-    temp = FixedTemp(eax);
-  }
-
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LOperand* val = NULL;
-  if (array_type == kExternalByteArray ||
-      array_type == kExternalUnsignedByteArray) {
+  if (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+      elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+      elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // We need a byte register in this case for the value.
     val = UseFixed(instr->value(), eax);
   } else {
@@ -1950,8 +1998,7 @@
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
-                                                val,
-                                                temp);
+                                                val);
 }
 
 
@@ -2002,17 +2049,28 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseOrConstantAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  LStringAdd* string_add = new LStringAdd(context, left, right);
+  return MarkAsCall(DefineFixed(string_add, eax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
-  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
-  LStringCharFromCode* result = new LStringCharFromCode(char_code);
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
@@ -2024,7 +2082,8 @@
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
 }
 
 
@@ -2035,18 +2094,22 @@
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LRegExpLiteral, eax), instr);
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LFunctionLiteral, eax), instr);
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
-  LDeleteProperty* result =
-      new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object = UseAtStart(instr->object());
+  LOperand* key = UseOrConstantAtStart(instr->key());
+  LDeleteProperty* result = new LDeleteProperty(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2108,18 +2171,21 @@
 
 
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LTypeof* result = new LTypeof(UseAtStart(instr->value()));
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* value = UseAtStart(instr->value());
+  LTypeof* result = new LTypeof(context, value);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
-  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
-  return DefineAsRegister(new LIsConstructCall);
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2156,7 +2222,14 @@
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
-  return MarkAsCall(new LStackCheck, instr);
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), esi);
+    return MarkAsCall(new LStackCheck(context), instr);
+  } else {
+    ASSERT(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+  }
 }
 
 
@@ -2165,8 +2238,8 @@
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->function(),
-                                               false,
-                                               undefined);
+                                               undefined,
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2180,6 +2253,15 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* key = UseOrConstantAtStart(instr->key());
+  LOperand* object = UseOrConstantAtStart(instr->object());
+  LIn* result = new LIn(context, key, object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index be5658b..b0ab6b4 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -32,6 +32,7 @@
 #include "lithium-allocator.h"
 #include "lithium.h"
 #include "safepoint-table.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
@@ -39,12 +40,6 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
-  V(ControlInstruction)                         \
-  V(Call)                                       \
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
@@ -73,15 +68,15 @@
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
-  V(ClassOfTest)                                \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
   V(ClassOfTestAndBranch)                       \
-  V(CmpID)                                      \
   V(CmpIDAndBranch)                             \
-  V(CmpJSObjectEq)                              \
-  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpObjectEqAndBranch)                       \
   V(CmpMapAndBranch)                            \
   V(CmpT)                                       \
-  V(CmpTAndBranch)                              \
+  V(CmpConstantEqAndBranch)                     \
   V(ConstantD)                                  \
   V(ConstantI)                                  \
   V(ConstantT)                                  \
@@ -90,30 +85,26 @@
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
-  V(ExternalArrayLength)                        \
-  V(FixedArrayLength)                           \
+  V(ElementsKind)                               \
+  V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
-  V(Gap)                                        \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
-  V(HasCachedArrayIndex)                        \
   V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceType)                            \
   V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
   V(InstanceOf)                                 \
-  V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
-  V(IsNull)                                     \
-  V(IsNullAndBranch)                            \
-  V(IsObject)                                   \
-  V(IsObjectAndBranch)                          \
-  V(IsSmi)                                      \
-  V(IsSmiAndBranch)                             \
-  V(IsConstructCall)                            \
+  V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
+  V(IsNullAndBranch)                            \
+  V(IsObjectAndBranch)                          \
+  V(IsSmiAndBranch)                             \
+  V(IsUndetectableAndBranch)                    \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -124,6 +115,7 @@
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
+  V(LoadKeyedFastDoubleElement)                 \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
   V(LoadNamedField)                             \
@@ -149,40 +141,38 @@
   V(StoreContextSlot)                           \
   V(StoreGlobalCell)                            \
   V(StoreGlobalGeneric)                         \
+  V(StoreKeyedFastDoubleElement)                \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
   V(SubI)                                       \
   V(TaggedToI)                                  \
+  V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
   V(Typeof)                                     \
-  V(TypeofIs)                                   \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
   V(ValueOf)
 
 
-#define DECLARE_INSTRUCTION(type)                \
-  virtual bool Is##type() const { return true; } \
-  static L##type* cast(LInstruction* instr) {    \
-    ASSERT(instr->Is##type());                   \
-    return reinterpret_cast<L##type*>(instr);    \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
   }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
-  virtual void CompileToNative(LCodeGen* generator);        \
-  virtual const char* Mnemonic() const { return mnemonic; } \
-  DECLARE_INSTRUCTION(type)
-
-
 #define DECLARE_HYDROGEN_ACCESSOR(type)     \
   H##type* hydrogen() const {               \
     return H##type::cast(hydrogen_value()); \
@@ -204,13 +194,27 @@
   virtual void PrintDataTo(StringStream* stream) = 0;
   virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
 
   virtual bool IsControl() const { return false; }
-  virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
 
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
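
The enum-plus-predicate rewrite above is a standard X-macro pattern: a single virtual opcode() replaces one virtual Is##type() per class, and the non-virtual predicates are generated from the same instruction list. A reduced stand-alone sketch with invented instruction names, not the V8 macros themselves:

#include <cstdio>

#define INSTRUCTION_LIST(V)  \
  V(Add)                     \
  V(Branch)                  \
  V(Return)

class Instruction {
 public:
  enum Opcode {
    // One enum value per concrete instruction, generated from the list.
#define DECLARE_OPCODE(type) k##type,
    INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
    kNumberOfInstructions
  };

  virtual ~Instruction() { }
  virtual Opcode opcode() const = 0;

  // Non-virtual type testers, all driven by the single virtual opcode().
#define DECLARE_PREDICATE(type) \
  bool Is##type() const { return opcode() == k##type; }
  INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
};

// Each concrete class only has to report its opcode.
#define DECLARE_CONCRETE(type)                                      \
  class type : public Instruction {                                 \
   public:                                                          \
    virtual Opcode opcode() const { return k##type; }               \
  };
INSTRUCTION_LIST(DECLARE_CONCRETE)
#undef DECLARE_CONCRETE

int main() {
  Branch b;
  Instruction* instr = &b;
  std::printf("IsBranch=%d IsAdd=%d\n", instr->IsBranch(), instr->IsAdd());
  return 0;
}

The payoff is one vtable slot for opcode() instead of one virtual tester per instruction type, with the predicates cheap enough to inline.
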
@@ -266,37 +270,6 @@
 };
 
 
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
-  OperandContainer() {
-    for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
-  }
-  int length() { return NumElements; }
-  ElementType& operator[](int i) {
-    ASSERT(i < length());
-    return elems_[i];
-  }
-  void PrintOperandsTo(StringStream* stream);
-
- private:
-  ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
-  int length() { return 0; }
-  void PrintOperandsTo(StringStream* stream) { }
-  ElementType& operator[](int i) {
-    UNREACHABLE();
-    static ElementType t = 0;
-    return t;
-  }
-};
-
-
 // R = number of result operands (0 or 1).
 // I = number of input operands.
 // T = number of temporary operands.
@@ -319,24 +292,28 @@
   virtual void PrintOutputOperandTo(StringStream* stream);
 
  protected:
-  OperandContainer<LOperand*, R> results_;
-  OperandContainer<LOperand*, I> inputs_;
-  OperandContainer<LOperand*, T> temps_;
+  EmbeddedContainer<LOperand*, R> results_;
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
 };
 
 
 class LGap: public LTemplateInstruction<0, 0, 0> {
  public:
-  explicit LGap(HBasicBlock* block)
-      : block_(block) {
+  explicit LGap(HBasicBlock* block) : block_(block) {
     parallel_moves_[BEFORE] = NULL;
     parallel_moves_[START] = NULL;
     parallel_moves_[END] = NULL;
     parallel_moves_[AFTER] = NULL;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  // Can't use the DECLARE-macro here because of sub-classes.
+  virtual bool IsGap() const { return true; }
   virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
 
   bool IsRedundant() const;
 
@@ -366,37 +343,32 @@
 };
 
 
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
 class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
-  LGoto(int block_id, bool include_stack_check = false)
-    : block_id_(block_id), include_stack_check_(include_stack_check) { }
+  explicit LGoto(int block_id) : block_id_(block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
   virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int block_id() const { return block_id_; }
-  bool include_stack_check() const { return include_stack_check_; }
 
  private:
   int block_id_;
-  bool include_stack_check_;
 };
 
 
 class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
  public:
-  LLazyBailout() : gap_instructions_size_(0) { }
-
   DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
-  void set_gap_instructions_size(int gap_instructions_size) {
-    gap_instructions_size_ = gap_instructions_size;
-  }
-  int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
-  int gap_instructions_size_;
 };
 
 
@@ -460,19 +432,17 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
-  DECLARE_INSTRUCTION(ControlInstruction)
   virtual bool IsControl() const { return true; }
 
-  int true_block_id() const { return true_block_id_; }
-  int false_block_id() const { return false_block_id_; }
-  void SetBranchTargets(int true_block_id, int false_block_id) {
-    true_block_id_ = true_block_id;
-    false_block_id_ = false_block_id;
-  }
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
 
  private:
-  int true_block_id_;
-  int false_block_id_;
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
 };
 
 
@@ -574,23 +544,6 @@
 };
 
 
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
-  LCmpID(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
-
-  Token::Value op() const { return hydrogen()->token(); }
-  bool is_double() const {
-    return hydrogen()->GetInputRepresentation().IsDouble();
-  }
-};
-
-
 class LCmpIDAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -599,7 +552,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
+  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
 
   Token::Value op() const { return hydrogen()->token(); }
   bool is_double() const {
@@ -610,12 +563,16 @@
 };
 
 
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
  public:
-  explicit LUnaryMathOperation(LOperand* value) {
+  LUnaryMathOperation(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
     inputs_[0] = value;
   }
 
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 
@@ -624,39 +581,27 @@
 };
 
 
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
  public:
-  LCmpJSObjectEq(LOperand* left, LOperand* right) {
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+                               "cmp-object-eq-and-branch")
 };
 
 
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
  public:
-  LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+  explicit LCmpConstantEqAndBranch(LOperand* left) {
     inputs_[0] = left;
-    inputs_[1] = right;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
-                               "cmp-jsobject-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LIsNull(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
-  DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+                               "cmp-constant-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
 };
 
 
@@ -668,7 +613,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNull)
+  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
 
   bool is_strict() const { return hydrogen()->is_strict(); }
 
@@ -676,42 +621,19 @@
 };
 
 
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsObject(LOperand* value, LOperand* temp) {
+  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
- public:
-  LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-    temps_[1] = temp2;
-  }
-
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LIsSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
-  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -719,19 +641,23 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
  public:
-  explicit LHasInstanceType(LOperand* value) {
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
 };
 
 
@@ -744,7 +670,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
                                "has-instance-type-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -761,17 +687,6 @@
 };
 
 
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LHasCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
 class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -784,13 +699,6 @@
 };
 
 
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
-  DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
 class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -802,20 +710,6 @@
 };
 
 
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
-  LClassOfTest(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -826,35 +720,22 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
                                "class-of-test-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT: public LTemplateInstruction<1, 3, 0> {
  public:
-  LCmpT(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
-
-  Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
-  LCmpTAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -874,24 +755,11 @@
 };
 
 
-class LInstanceOfAndBranch: public LControlInstruction<3, 0> {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
  public:
-  LInstanceOfAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+  LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
-  LOperand* context() { return inputs_[0]; }
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
-  LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
+    inputs_[1] = value;
     temps_[0] = temp;
   }
 
@@ -997,14 +865,15 @@
 };
 
 
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch: public LControlInstruction<1, 1> {
  public:
-  explicit LBranch(LOperand* value) {
+  LBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-  DECLARE_HYDROGEN_ACCESSOR(Value)
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1042,25 +911,26 @@
 };
 
 
-class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LExternalArrayLength(LOperand* value) {
+  explicit LFixedArrayBaseLength(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+                               "fixed-array-base-length")
+  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
 };
 
 
-class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LFixedArrayLength(LOperand* value) {
+  explicit LElementsKind(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
+  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
 };
 
 
@@ -1076,12 +946,16 @@
 };
 
 
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow: public LTemplateInstruction<0, 2, 0> {
  public:
-  explicit LThrow(LOperand* value) {
-    inputs_[0] = value;
+  LThrow(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
   }
 
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
   DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
 };
 
@@ -1130,6 +1004,7 @@
 
   Token::Value op() const { return op_; }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1138,18 +1013,26 @@
 };
 
 
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
  public:
-  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
       : op_(op) {
-    inputs_[0] = left;
-    inputs_[1] = right;
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
   }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
   Token::Value op() const { return op_; }
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
 
  private:
   Token::Value op_;
@@ -1179,16 +1062,18 @@
 };
 
 
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
  public:
-  explicit LLoadNamedFieldPolymorphic(LOperand* object) {
-    inputs_[0] = object;
+  LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
 
-  LOperand* object() { return inputs_[0]; }
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
 };
 
 
@@ -1258,6 +1143,23 @@
 };
 
 
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastDoubleElement(LOperand* elements,
+                              LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+                               "load-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1272,8 +1174,8 @@
 
   LOperand* external_pointer() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  ExternalArrayType array_type() const {
-    return hydrogen()->array_type();
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
   }
 };
 
@@ -1331,9 +1233,9 @@
 
 class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
  public:
-  explicit LStoreGlobalGeneric(LOperand* context,
-                               LOperand* global_object,
-                               LOperand* value) {
+  LStoreGlobalGeneric(LOperand* context,
+                      LOperand* global_object,
+                      LOperand* value) {
     inputs_[0] = context;
     inputs_[1] = global_object;
     inputs_[2] = value;
@@ -1396,6 +1298,11 @@
 };
 
 
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
 class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1450,6 +1357,25 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
  public:
   LCallKeyed(LOperand* context, LOperand* key) {
@@ -1547,11 +1473,15 @@
 };
 
 
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
+  LOperand* context() { return inputs_[0]; }
   const Runtime::Function* function() const { return hydrogen()->function(); }
   int arity() const { return hydrogen()->argument_count(); }
 };
@@ -1597,7 +1527,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
 
   bool truncating() { return hydrogen()->CanTruncateToInt32(); }
 };
@@ -1612,7 +1542,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
 
   bool truncating() { return hydrogen()->CanTruncateToInt32(); }
 };
@@ -1720,16 +1650,36 @@
 };
 
 
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 1> {
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastDoubleElement(LOperand* elements,
+                               LOperand* key,
+                               LOperand* val) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+                               "store-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
                                      LOperand* key,
-                                     LOperand* val,
-                                     LOperand* temp) {
+                                     LOperand* val) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
     inputs_[2] = val;
-    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
@@ -1739,8 +1689,8 @@
   LOperand* external_pointer() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  ExternalArrayType array_type() const {
-    return hydrogen()->array_type();
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
   }
 };
 
@@ -1770,31 +1720,52 @@
 };
 
 
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd: public LTemplateInstruction<1, 3, 0> {
  public:
-  LStringCharCodeAt(LOperand* string, LOperand* index) {
-    inputs_[0] = string;
-    inputs_[1] = index;
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
   DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
 
-  LOperand* string() { return inputs_[0]; }
-  LOperand* index() { return inputs_[1]; }
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
 };
 
 
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
  public:
-  explicit LStringCharFromCode(LOperand* char_code) {
-    inputs_[0] = char_code;
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
   DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
 
-  LOperand* char_code() { return inputs_[0]; }
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
 };
 
 
@@ -1869,6 +1840,43 @@
 };
 
 
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
 class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
@@ -1879,8 +1887,14 @@
 };
 
 
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LArrayLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
   DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
 };
@@ -1892,22 +1906,34 @@
     inputs_[0] = context;
   }
 
+  LOperand* context() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
   DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-
-  LOperand* context() { return inputs_[0]; }
 };
 
 
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LRegExpLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
   DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
 };
 
 
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LFunctionLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
   DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
 
@@ -1926,31 +1952,17 @@
 };
 
 
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof: public LTemplateInstruction<1, 2, 0> {
  public:
-  explicit LTypeof(LOperand* value) {
-    inputs_[0] = value;
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
 };
 
 
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LTypeofIs(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
-  Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
@@ -1958,7 +1970,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
@@ -1966,17 +1978,19 @@
 };
 
 
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
  public:
-  LDeleteProperty(LOperand* obj, LOperand* key) {
-    inputs_[0] = obj;
-    inputs_[1] = key;
+  LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
+    inputs_[0] = context;
+    inputs_[1] = obj;
+    inputs_[2] = key;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
 
-  LOperand* object() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
 };
 
 
@@ -2003,16 +2017,44 @@
 };
 
 
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck: public LTemplateInstruction<0, 1, 0> {
  public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 3, 0> {
+ public:
+  LIn(LOperand* context, LOperand* key, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = key;
+    inputs_[2] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* object() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
 };
 
 
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(CompilationInfo* info, HGraph* graph)
+  LChunk(CompilationInfo* info, HGraph* graph)
     : spill_slot_count_(0),
       info_(info),
       graph_(graph),
@@ -2185,14 +2227,18 @@
   template<int I, int T>
       LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
                                       XMMRegister reg);
+  // Assigns an environment to an instruction.  An instruction which can
+  // deoptimize must have an environment.
   LInstruction* AssignEnvironment(LInstruction* instr);
+  // Assigns a pointer map to an instruction.  An instruction which can
+  // trigger a GC or a lazy deoptimization must have a pointer map.
   LInstruction* AssignPointerMap(LInstruction* instr);
 
   enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
 
-  // By default we assume that instruction sequences generated for calls
-  // cannot deoptimize eagerly and we do not attach environment to this
-  // instruction.
+  // Marks a call for the register allocator.  Assigns a pointer map to
+  // support GC and lazy deoptimization.  Assigns an environment to support
+  // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
   LInstruction* MarkAsCall(
       LInstruction* instr,
       HInstruction* hinstr,
@@ -2232,7 +2278,6 @@
 };
 
 #undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 13394cb..ce6d6a6 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -69,11 +69,73 @@
 
   // Compute number of region covering addr. See Page::GetRegionNumberForAddress
   // method for more details.
-  and_(addr, Page::kPageAlignmentMask);
   shr(addr, Page::kRegionSizeLog2);
+  and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
 
   // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
+  // Bit tests with a memory operand should be avoided on Intel processors,
+  // as they usually have long latency and multiple uops. We load the bit base
+  // operand into a register first and store it back after setting the bit.
+  mov(scratch, Operand(object, Page::kDirtyFlagOffset));
+  bts(Operand(scratch), addr);
+  mov(Operand(object, Page::kDirtyFlagOffset), scratch);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+                                        XMMRegister scratch_reg,
+                                        Register result_reg) {
+  Label done;
+  ExternalReference zero_ref = ExternalReference::address_of_zero();
+  movdbl(scratch_reg, Operand::StaticVariable(zero_ref));
+  Set(result_reg, Immediate(0));
+  ucomisd(input_reg, scratch_reg);
+  j(below, &done, Label::kNear);
+  ExternalReference half_ref = ExternalReference::address_of_one_half();
+  movdbl(scratch_reg, Operand::StaticVariable(half_ref));
+  addsd(scratch_reg, input_reg);
+  cvttsd2si(result_reg, Operand(scratch_reg));
+  test(result_reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  Set(result_reg, Immediate(255));
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampUint8(Register reg) {
+  Label done;
+  test(reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  setcc(negative, reg);  // 1 if negative, 0 if positive.
+  dec_b(reg);  // 0 if negative, 255 if positive.
+  bind(&done);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch,
+                                Label::Distance branch_near) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(scratch, Operand(object));
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    and_(Operand(scratch),
+         Immediate(ExternalReference::new_space_mask(isolate())));
+    cmp(Operand(scratch),
+        Immediate(ExternalReference::new_space_start(isolate())));
+    j(cc, branch, branch_near);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start(isolate()).address());
+    lea(scratch, Operand(object, -new_space_start));
+    and_(scratch, isolate()->heap()->NewSpaceMask());
+    j(cc, branch, branch_near);
+  }
 }
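
Read off the assembly, ClampDoubleToUint8 and ClampUint8 saturate a value into the byte range [0, 255]: doubles are rounded by adding 0.5 and truncating, while NaN and negative inputs map to 0 and anything that rounds to 256 or more maps to 255. A rough standalone C++ sketch of that behaviour (an approximation of the generated code, not the macro assembler itself):

#include <cassert>
#include <cstdint>

// Sketch of ClampDoubleToUint8: NaN and non-positive inputs clamp to 0,
// values that would round to 256 or more clamp to 255, everything else is
// rounded by adding 0.5 and truncating (the addsd/cvttsd2si sequence above).
uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;
  if (value >= 255.5) return 255;
  return static_cast<uint8_t>(static_cast<int32_t>(value + 0.5));
}

// Sketch of ClampUint8: an int32 already in [0, 255] passes through,
// negative values become 0, values above 255 become 255.
uint8_t ClampInt32ToUint8Sketch(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
  return value < 0 ? 0 : 255;
}

int main() {
  assert(ClampDoubleToUint8Sketch(-3.5) == 0);
  assert(ClampDoubleToUint8Sketch(1.4) == 1);
  assert(ClampDoubleToUint8Sketch(300.0) == 255);
  assert(ClampInt32ToUint8Sketch(-1) == 0);
  assert(ClampInt32ToUint8Sketch(130) == 130);
  assert(ClampInt32ToUint8Sketch(1000) == 255);
  return 0;
}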
 
 
@@ -83,14 +145,13 @@
                                  Register scratch) {
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
-  NearLabel done;
+  Label done;
 
   // Skip barrier if writing a smi.
-  ASSERT_EQ(0, kSmiTag);
-  test(value, Immediate(kSmiTagMask));
-  j(zero, &done);
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpIfSmi(value, &done, Label::kNear);
 
-  InNewSpace(object, value, equal, &done);
+  InNewSpace(object, value, equal, &done, Label::kNear);
 
   // The offset is relative to a tagged or untagged HeapObject pointer,
   // so either offset or offset + kHeapObjectTag must be a
@@ -105,8 +166,8 @@
     // Array access: calculate the destination address in the same manner as
     // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
     // into an array of words.
-    ASSERT_EQ(1, kSmiTagSize);
-    ASSERT_EQ(0, kSmiTag);
+    STATIC_ASSERT(kSmiTagSize == 1);
+    STATIC_ASSERT(kSmiTag == 0);
     lea(dst, Operand(object, dst, times_half_pointer_size,
                      FixedArray::kHeaderSize - kHeapObjectTag));
   }
@@ -132,9 +193,8 @@
   Label done;
 
   // Skip barrier if writing a smi.
-  ASSERT_EQ(0, kSmiTag);
-  test(value, Immediate(kSmiTagMask));
-  j(zero, &done);
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpIfSmi(value, &done, Label::kNear);
 
   InNewSpace(object, value, equal, &done);
 
@@ -203,6 +263,13 @@
 }
 
 
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+  // See the ROOT_ACCESSOR macro in factory.h.
+  Handle<Object> value(&isolate()->heap()->roots_address()[index]);
+  cmp(with, value);
+}
+
+
 void MacroAssembler::CmpObjectType(Register heap_object,
                                    InstanceType type,
                                    Register map) {
@@ -217,25 +284,49 @@
 }
 
 
+void MacroAssembler::CheckFastElements(Register map,
+                                       Label* fail,
+                                       Label::Distance distance) {
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastElementValue);
+  j(above, fail, distance);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
-    test(obj, Immediate(kSmiTagMask));
-    j(zero, fail);
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
   }
   cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
   j(not_equal, fail);
 }
 
 
+void MacroAssembler::DispatchMap(Register obj,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+  j(equal, success);
+
+  bind(&fail);
+}
+
+
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
   mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
-  ASSERT(kNotStringTag != 0);
+  STATIC_ASSERT(kNotStringTag != 0);
   test(instance_type, Immediate(kIsNotStringMask));
   return zero;
 }
@@ -254,8 +345,9 @@
                                             Register scratch,
                                             Label* fail) {
   movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
-  sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
-  cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+  sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  cmp(scratch,
+      LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   j(above, fail);
 }
 
@@ -277,8 +369,7 @@
 
 void MacroAssembler::AbortIfNotNumber(Register object) {
   Label ok;
-  test(object, Immediate(kSmiTagMask));
-  j(zero, &ok);
+  JumpIfSmi(object, &ok);
   cmp(FieldOperand(object, HeapObject::kMapOffset),
       isolate()->factory()->heap_number_map());
   Assert(equal, "Operand not a number");
@@ -346,9 +437,9 @@
   push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
-  ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
+  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                        isolate());
-  ExternalReference context_address(Isolate::k_context_address,
+  ExternalReference context_address(Isolate::kContextAddress,
                                     isolate());
   mov(Operand::StaticVariable(c_entry_fp_address), ebp);
   mov(Operand::StaticVariable(context_address), esi);
@@ -427,14 +518,14 @@
 
 void MacroAssembler::LeaveExitFrameEpilogue() {
   // Restore current context from top and clear it in debug mode.
-  ExternalReference context_address(Isolate::k_context_address, isolate());
+  ExternalReference context_address(Isolate::kContextAddress, isolate());
   mov(esi, Operand::StaticVariable(context_address));
 #ifdef DEBUG
   mov(Operand::StaticVariable(context_address), Immediate(0));
 #endif
 
   // Clear the top frame.
-  ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
+  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                        isolate());
   mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
 }
@@ -451,7 +542,12 @@
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
   // The pc (return address) is already on TOS.
   if (try_location == IN_JAVASCRIPT) {
     if (type == TRY_CATCH_HANDLER) {
@@ -460,6 +556,7 @@
       push(Immediate(StackHandler::TRY_FINALLY));
     }
     push(ebp);
+    push(esi);
   } else {
     ASSERT(try_location == IN_JS_ENTRY);
     // The frame pointer does not point to a JS frame so we save NULL
@@ -467,20 +564,21 @@
     // before dereferencing it to restore the context.
     push(Immediate(StackHandler::ENTRY));
     push(Immediate(0));  // NULL frame pointer.
+    push(Immediate(Smi::FromInt(0)));  // No context.
   }
   // Save the current handler as the next handler.
-  push(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
+  push(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
                                                  isolate())));
   // Link this handler as the new current one.
-  mov(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
+  mov(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
                                                 isolate())),
       esp);
 }
 
 
 void MacroAssembler::PopTryHandler() {
-  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
-  pop(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
                                                 isolate())));
   add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
 }
@@ -488,36 +586,37 @@
 
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
   // eax must hold the exception.
   if (!value.is(eax)) {
     mov(eax, value);
   }
 
   // Drop the sp to the top of the handler.
-  ExternalReference handler_address(Isolate::k_handler_address,
+  ExternalReference handler_address(Isolate::kHandlerAddress,
                                     isolate());
   mov(esp, Operand::StaticVariable(handler_address));
 
-  // Restore next handler and frame pointer, discard handler state.
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  // Restore next handler, context, and frame pointer; discard handler state.
   pop(Operand::StaticVariable(handler_address));
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
-  pop(ebp);
-  pop(edx);  // Remove state.
+  pop(esi);  // Context.
+  pop(ebp);  // Frame pointer.
+  pop(edx);  // State.
 
-  // Before returning we restore the context from the frame pointer if
-  // not NULL.  The frame pointer is NULL in the exception handler of
-  // a JS entry frame.
-  Set(esi, Immediate(0));  // Tentatively set context pointer to NULL.
-  NearLabel skip;
-  cmp(ebp, 0);
-  j(equal, &skip, not_taken);
-  mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  // If the handler is a JS frame, restore the context to the frame.
+  // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
+  // of them.
+  Label skip;
+  cmp(Operand(edx), Immediate(StackHandler::ENTRY));
+  j(equal, &skip, Label::kNear);
+  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
   bind(&skip);
 
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   ret(0);
 }
 
@@ -525,7 +624,12 @@
 void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                       Register value) {
   // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
 
   // eax must hold the exception.
   if (!value.is(eax)) {
@@ -533,17 +637,17 @@
   }
 
   // Drop sp to the top stack handler.
-  ExternalReference handler_address(Isolate::k_handler_address,
+  ExternalReference handler_address(Isolate::kHandlerAddress,
                                     isolate());
   mov(esp, Operand::StaticVariable(handler_address));
 
   // Unwind the handlers until the ENTRY handler is found.
-  NearLabel loop, done;
+  Label loop, done;
   bind(&loop);
   // Load the type of the current stack handler.
   const int kStateOffset = StackHandlerConstants::kStateOffset;
   cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done);
+  j(equal, &done, Label::kNear);
   // Fetch the next handler in the list.
   const int kNextOffset = StackHandlerConstants::kNextOffset;
   mov(esp, Operand(esp, kNextOffset));
@@ -551,33 +655,31 @@
   bind(&done);
 
   // Set the top handler address to next handler past the current ENTRY handler.
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   pop(Operand::StaticVariable(handler_address));
 
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
     ExternalReference external_caught(
-        Isolate::k_external_caught_exception_address,
+        Isolate::kExternalCaughtExceptionAddress,
         isolate());
     mov(eax, false);
     mov(Operand::StaticVariable(external_caught), eax);
 
     // Set pending exception and eax to out of memory exception.
-    ExternalReference pending_exception(Isolate::k_pending_exception_address,
+    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                         isolate());
     mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
     mov(Operand::StaticVariable(pending_exception), eax);
   }
 
-  // Clear the context pointer.
+  // Discard the context saved in the handler and clear the context pointer.
+  pop(edx);
   Set(esi, Immediate(0));
 
   // Restore fp from handler and discard handler state.
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
   pop(ebp);
   pop(edx);  // State.
 
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   ret(0);
 }
 
@@ -614,7 +716,7 @@
 
   // Check if both contexts are the same.
   cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
-  j(equal, &same_contexts, taken);
+  j(equal, &same_contexts);
 
   // Compare security tokens, save holder_reg on the stack so we can use it
   // as a temporary register.
@@ -644,12 +746,132 @@
   mov(scratch, FieldOperand(scratch, token_offset));
   cmp(scratch, FieldOperand(holder_reg, token_offset));
   pop(holder_reg);
-  j(not_equal, miss, not_taken);
+  j(not_equal, miss);
 
   bind(&same_contexts);
 }
 
 
+// Compute the hash code from the untagged key.  This must be kept in sync
+// with ComputeIntegerHash in utils.h.
+//
+// Note: r0 will contain the hash code.
+void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
+  // Xor original key with a seed.
+  if (Serializer::enabled()) {
+    ExternalReference roots_address =
+        ExternalReference::roots_address(isolate());
+    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+    mov(scratch, Operand::StaticArray(scratch,
+                                      times_pointer_size,
+                                      roots_address));
+    SmiUntag(scratch);
+    xor_(r0, Operand(scratch));
+  } else {
+    int32_t seed = isolate()->heap()->HashSeed();
+    xor_(r0, seed);
+  }
+
+  // hash = ~hash + (hash << 15);
+  mov(scratch, r0);
+  not_(r0);
+  shl(scratch, 15);
+  add(r0, Operand(scratch));
+  // hash = hash ^ (hash >> 12);
+  mov(scratch, r0);
+  shr(scratch, 12);
+  xor_(r0, Operand(scratch));
+  // hash = hash + (hash << 2);
+  lea(r0, Operand(r0, r0, times_4, 0));
+  // hash = hash ^ (hash >> 4);
+  mov(scratch, r0);
+  shr(scratch, 4);
+  xor_(r0, Operand(scratch));
+  // hash = hash * 2057;
+  imul(r0, r0, 2057);
+  // hash = hash ^ (hash >> 16);
+  mov(scratch, r0);
+  shr(scratch, 16);
+  xor_(r0, Operand(scratch));
+}
+
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+                                              Register elements,
+                                              Register key,
+                                              Register r0,
+                                              Register r1,
+                                              Register r2,
+                                              Register result) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver and is unchanged.
+  //
+  // key      - holds the smi key on entry and is unchanged.
+  //
+  // Scratch registers:
+  //
+  // r0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // r1 - used to hold the capacity mask of the dictionary
+  //
+  // r2 - used for the index into the dictionary.
+  //
+  // result - holds the result on exit if the load succeeds and we fall through.
+
+  Label done;
+
+  GetNumberHash(r0, r1);
+
+  // Compute capacity mask.
+  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  shr(r1, kSmiTagSize);  // convert smi to int
+  dec(r1);
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use r2 for index calculations and keep the hash intact in r0.
+    mov(r2, r0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      add(Operand(r2), Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+    }
+    and_(r2, Operand(r1));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
+
+    // Check if the key matches.
+    cmp(key, FieldOperand(elements,
+                          r2,
+                          times_pointer_size,
+                          SeededNumberDictionary::kElementsStartOffset));
+    if (i != (kProbes - 1)) {
+      j(equal, &done);
+    } else {
+      j(not_equal, miss);
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a normal property.
+  const int kDetailsOffset =
+      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ASSERT_EQ(NORMAL, 0);
+  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+  j(not_zero, miss);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset =
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
 void MacroAssembler::LoadAllocationTopHelper(Register result,
                                              Register scratch,
                                              AllocationFlags flags) {
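
Per the comment on GetNumberHash, the instruction sequence above mirrors ComputeIntegerHash in utils.h. Transcribed back into C++, the mixing steps are as follows (a sketch of the same arithmetic; the seed parameter stands in for the heap hash seed that the assembly xors in first):

#include <cstdint>
#include <cstdio>

// Sketch of the mixing performed by GetNumberHash above (and, per its
// comment, by ComputeIntegerHash in utils.h).
uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // the lea with times_4 above: hash * 5
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  std::printf("%u\n", static_cast<unsigned>(ComputeIntegerHashSketch(42, 0)));
  return 0;
}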
@@ -732,9 +954,9 @@
     mov(top_reg, result);
   }
   add(Operand(top_reg), Immediate(object_size));
-  j(carry, gc_required, not_taken);
+  j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
-  j(above, gc_required, not_taken);
+  j(above, gc_required);
 
   // Update allocation top.
   UpdateAllocationTopHelper(top_reg, scratch);
@@ -831,9 +1053,9 @@
     mov(result_end, object_size);
   }
   add(result_end, Operand(result));
-  j(carry, gc_required, not_taken);
+  j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
-  j(above, gc_required, not_taken);
+  j(above, gc_required);
 
   // Tag result if requested.
   if ((flags & TAG_OBJECT) != 0) {
@@ -972,7 +1194,7 @@
 }
 
 
-void MacroAssembler::AllocateConsString(Register result,
+void MacroAssembler::AllocateTwoByteConsString(Register result,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* gc_required) {
@@ -1008,6 +1230,42 @@
 }
 
 
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  // Allocate a two-byte sliced string object in new space.
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map. The other fields are left uninitialized.
+  mov(FieldOperand(result, HeapObject::kMapOffset),
+      Immediate(isolate()->factory()->sliced_string_map()));
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  // Allocate an ASCII sliced string object in new space.
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map. The other fields are left uninitialized.
+  mov(FieldOperand(result, HeapObject::kMapOffset),
+      Immediate(isolate()->factory()->sliced_ascii_string_map()));
+}
+
+
 // Copy memory, byte-by-byte, from source to destination.  Not optimized for
 // long or aligned copies.  The contents of scratch and length are destroyed.
 // Source and destination are incremented by length.
@@ -1062,9 +1320,9 @@
                                       Label* then_label) {
   Label ok;
   test(result, Operand(result));
-  j(not_zero, &ok, taken);
+  j(not_zero, &ok);
   test(op, Operand(op));
-  j(sign, then_label, not_taken);
+  j(sign, then_label);
   bind(&ok);
 }
 
@@ -1076,10 +1334,10 @@
                                       Label* then_label) {
   Label ok;
   test(result, Operand(result));
-  j(not_zero, &ok, taken);
+  j(not_zero, &ok);
   mov(scratch, Operand(op1));
   or_(scratch, Operand(op2));
-  j(sign, then_label, not_taken);
+  j(sign, then_label);
   bind(&ok);
 }
 
@@ -1089,18 +1347,17 @@
                                              Register scratch,
                                              Label* miss) {
   // Check that the receiver isn't a smi.
-  test(function, Immediate(kSmiTagMask));
-  j(zero, miss, not_taken);
+  JumpIfSmi(function, miss);
 
   // Check that the function really is a function.
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  j(not_equal, miss, not_taken);
+  j(not_equal, miss);
 
   // Make sure that the function has an instance prototype.
   Label non_instance;
   movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
   test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance, not_taken);
+  j(not_zero, &non_instance);
 
   // Get the prototype or initial map from the function.
   mov(result,
@@ -1110,7 +1367,7 @@
   // simply miss the cache instead. This will allow us to allocate a
   // prototype object on-demand in the runtime system.
   cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
-  j(equal, miss, not_taken);
+  j(equal, miss);
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -1131,9 +1388,9 @@
 }
 
 
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
   ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  call(stub->GetCode(), RelocInfo::CODE_TARGET);
+  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
@@ -1329,32 +1586,30 @@
 }
 
 
-void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
+void MacroAssembler::PrepareCallApiFunction(int argc) {
   if (kReturnHandlesDirectly) {
     EnterApiExitFrame(argc);
     // When handles are returned directly we don't have to allocate extra
     // space for and pass an out parameter.
+    if (emit_debug_code()) {
+      mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+    }
   } else {
     // We allocate two additional slots: return value and pointer to it.
     EnterApiExitFrame(argc + 2);
 
     // The argument slots are filled as follows:
     //
-    //   n + 1: output cell
+    //   n + 1: output slot
     //   n: arg n
     //   ...
     //   1: arg1
-    //   0: pointer to the output cell
-    //
-    // Note that this is one more "argument" than the function expects
-    // so the out cell will have to be popped explicitly after returning
-    // from the function. The out cell contains Handle.
+    //   0: pointer to the output slot
 
-    // pointer to out cell.
-    lea(scratch, Operand(esp, (argc + 1) * kPointerSize));
-    mov(Operand(esp, 0 * kPointerSize), scratch);  // output.
+    lea(esi, Operand(esp, (argc + 1) * kPointerSize));
+    mov(Operand(esp, 0 * kPointerSize), esi);
     if (emit_debug_code()) {
-      mov(Operand(esp, (argc + 1) * kPointerSize), Immediate(0));  // out cell.
+      mov(Operand(esi, 0), Immediate(0));
     }
   }
 }
@@ -1378,9 +1633,9 @@
   call(function->address(), RelocInfo::RUNTIME_ENTRY);
 
   if (!kReturnHandlesDirectly) {
-    // The returned value is a pointer to the handle holding the result.
-    // Dereference this to get to the location.
-    mov(eax, Operand(eax, 0));
+    // PrepareCallApiFunction saved the pointer to the output slot into the
+    // callee-saved register esi.
+    mov(eax, Operand(esi, 0));
   }
 
   Label empty_handle;
@@ -1391,7 +1646,7 @@
 
   // Check if the result handle holds 0.
   test(eax, Operand(eax));
-  j(zero, &empty_handle, not_taken);
+  j(zero, &empty_handle);
   // It was non-zero.  Dereference to get the result value.
   mov(eax, Operand(eax, 0));
   bind(&prologue);
@@ -1401,7 +1656,7 @@
   sub(Operand::StaticVariable(level_address), Immediate(1));
   Assert(above_equal, "Invalid HandleScope level");
   cmp(edi, Operand::StaticVariable(limit_address));
-  j(not_equal, &delete_allocated_handles, not_taken);
+  j(not_equal, &delete_allocated_handles);
   bind(&leave_exit_frame);
 
   // Check if the function scheduled an exception.
@@ -1409,7 +1664,7 @@
       ExternalReference::scheduled_exception_address(isolate());
   cmp(Operand::StaticVariable(scheduled_exception_address),
       Immediate(isolate()->factory()->the_hole_value()));
-  j(not_equal, &promote_scheduled_exception, not_taken);
+  j(not_equal, &promote_scheduled_exception);
   LeaveApiExitFrame();
   ret(stack_space * kPointerSize);
   bind(&promote_scheduled_exception);
@@ -1456,13 +1711,32 @@
 }
 
 
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be ecx to
+  // follow the calling convention, which requires the call kind to be
+  // in ecx.
+  ASSERT(dst.is(ecx));
+  if (call_kind == CALL_AS_FUNCTION) {
+    // Set to some non-zero smi by updating the least significant
+    // byte.
+    mov_b(Operand(dst), 1 << kSmiTagSize);
+  } else {
+    // Set to smi zero by clearing the register.
+    xor_(dst, Operand(dst));
+  }
+}
+
+
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
-                                    NearLabel* done,
+                                    Label* done,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    Label::Distance done_near,
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   bool definitely_matches = false;
   Label invoke;
   if (expected.is_immediate()) {
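
SetCallKind encodes the call kind in ecx as a smi: a non-zero smi for CALL_AS_FUNCTION, smi zero otherwise. A tiny standalone sketch of that encoding, assuming the ia32 smi tag is one bit wide (the names below are invented for illustration):

#include <cassert>
#include <cstdint>

// Assumption: ia32 smis carry a one-bit tag, so Smi(n) has raw value n << 1.
const int kSmiTagSizeSketch = 1;

enum CallKindSketch { CALL_AS_METHOD_SKETCH, CALL_AS_FUNCTION_SKETCH };

// Mirrors SetCallKind: a non-zero smi (raw value 1 << kSmiTagSize) marks
// CALL_AS_FUNCTION, smi zero marks everything else.
int32_t EncodeCallKindSketch(CallKindSketch kind) {
  return kind == CALL_AS_FUNCTION_SKETCH ? (1 << kSmiTagSizeSketch) : 0;
}

int main() {
  assert(EncodeCallKindSketch(CALL_AS_FUNCTION_SKETCH) == 2);
  assert(EncodeCallKindSketch(CALL_AS_METHOD_SKETCH) == 0);
  return 0;
}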
@@ -1512,10 +1786,13 @@
     }
 
     if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+      SetCallKind(ecx, call_kind);
       call(adaptor, RelocInfo::CODE_TARGET);
-      if (post_call_generator != NULL) post_call_generator->Generate();
-      jmp(done);
+      call_wrapper.AfterCall();
+      jmp(done, done_near);
     } else {
+      SetCallKind(ecx, call_kind);
       jmp(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&invoke);
@@ -1527,15 +1804,20 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                PostCallGenerator* post_call_generator) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, flag, post_call_generator);
+                 &done, flag, Label::kNear, call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(ecx, call_kind);
     call(code);
-    if (post_call_generator != NULL) post_call_generator->Generate();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(ecx, call_kind);
     jmp(code);
   }
   bind(&done);
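
The PostCallGenerator hook gives way to a CallWrapper that brackets the emitted call with BeforeCall/AfterCall notifications. A hypothetical sketch of the shape such a wrapper takes, inferred from the uses above rather than copied from the real header:

// Hypothetical sketch inferred from the BeforeCall/AfterCall uses above; the
// real CallWrapper is declared in the shared assembler headers.
class CallWrapperSketch {
 public:
  virtual ~CallWrapperSketch() {}
  // Called just before the call instruction of the given size is emitted,
  // e.g. to record a safepoint position.
  virtual void BeforeCall(int call_size) const {}
  // Called just after the call instruction is emitted.
  virtual void AfterCall() const {}
};

// A do-nothing wrapper, analogous to the old case of passing no
// PostCallGenerator at all.
class NullCallWrapperSketch : public CallWrapperSketch {
 public:
  virtual void BeforeCall(int call_size) const {}
  virtual void AfterCall() const {}
};

int main() { return 0; }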
@@ -1547,16 +1829,20 @@
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
-                                PostCallGenerator* post_call_generator) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   Operand dummy(eax);
-  InvokePrologue(expected, actual, code, dummy, &done,
-                 flag, post_call_generator);
+  InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
+                 call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code, rmode));
+    SetCallKind(ecx, call_kind);
     call(code, rmode);
-    if (post_call_generator != NULL) post_call_generator->Generate();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(ecx, call_kind);
     jmp(code, rmode);
   }
   bind(&done);
@@ -1566,7 +1852,8 @@
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1575,14 +1862,15 @@
 
   ParameterCount expected(ebx);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag, post_call_generator);
+             expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   mov(edi, Immediate(Handle<JSFunction>(function)));
@@ -1594,18 +1882,18 @@
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-               expected, actual, flag, post_call_generator);
+               expected, actual, flag, call_wrapper, call_kind);
   } else {
     Handle<Code> code(function->code());
     InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
-               flag, post_call_generator);
+               flag, call_wrapper, call_kind);
   }
 }
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
-                                   PostCallGenerator* post_call_generator) {
+                                   const CallWrapper& call_wrapper) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -1615,7 +1903,7 @@
   ParameterCount expected(0);
   GetBuiltinFunction(edi, id);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, expected, flag, post_call_generator);
+             expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
 void MacroAssembler::GetBuiltinFunction(Register target,
@@ -1639,12 +1927,9 @@
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
-    mov(dst, Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
-    // Load the function context (which is the incoming, outer context).
-    mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+    mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     for (int i = 1; i < context_chain_length; i++) {
-      mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
-      mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+      mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     }
   } else {
     // Slot is in the current function context.  Move it into the
@@ -1653,14 +1938,14 @@
     mov(dst, esi);
   }
 
-  // We should not have found a 'with' context by walking the context chain
+  // We should not have found a with context by walking the context chain
   // (i.e., the static scope chain and runtime context chain do not agree).
   // A variable occurring in such a scope should have slot type LOOKUP and
   // not CONTEXT.
   if (emit_debug_code()) {
-    cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-    Check(equal, "Yo dawg, I heard you liked function contexts "
-                 "so I put function contexts in all your contexts");
+    cmp(FieldOperand(dst, HeapObject::kMapOffset),
+        isolate()->factory()->with_context_map());
+    Check(not_equal, "Variable resolved to with context.");
   }
 }
 
@@ -1681,7 +1966,7 @@
   mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, isolate()->factory()->meta_map(), &fail, false);
+    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
     jmp(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -1738,8 +2023,6 @@
 }
 
 
-
-
 void MacroAssembler::Drop(int stack_elements) {
   if (stack_elements > 0) {
     add(Operand(esp), Immediate(stack_elements * kPointerSize));
@@ -1835,6 +2118,9 @@
         Immediate(factory->fixed_array_map()));
     j(equal, &ok);
     cmp(FieldOperand(elements, HeapObject::kMapOffset),
+        Immediate(factory->fixed_double_array_map()));
+    j(equal, &ok);
+    cmp(FieldOperand(elements, HeapObject::kMapOffset),
         Immediate(factory->fixed_cow_array_map()));
     j(equal, &ok);
     Abort("JSObject with fast elements map has slow elements");
@@ -1845,7 +2131,7 @@
 
 void MacroAssembler::Check(Condition cc, const char* msg) {
   Label L;
-  j(cc, &L, taken);
+  j(cc, &L);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -1894,56 +2180,14 @@
 }
 
 
-void MacroAssembler::JumpIfNotNumber(Register reg,
-                                     TypeInfo info,
-                                     Label* on_not_number) {
-  if (emit_debug_code()) AbortIfSmi(reg);
-  if (!info.IsNumber()) {
-    cmp(FieldOperand(reg, HeapObject::kMapOffset),
-        isolate()->factory()->heap_number_map());
-    j(not_equal, on_not_number);
-  }
-}
-
-
-void MacroAssembler::ConvertToInt32(Register dst,
-                                    Register source,
-                                    Register scratch,
-                                    TypeInfo info,
-                                    Label* on_not_int32) {
-  if (emit_debug_code()) {
-    AbortIfSmi(source);
-    AbortIfNotNumber(source);
-  }
-  if (info.IsInteger32()) {
-    cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
-  } else {
-    Label done;
-    bool push_pop = (scratch.is(no_reg) && dst.is(source));
-    ASSERT(!scratch.is(source));
-    if (push_pop) {
-      push(dst);
-      scratch = dst;
-    }
-    if (scratch.is(no_reg)) scratch = dst;
-    cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
-    cmp(scratch, 0x80000000u);
-    if (push_pop) {
-      j(not_equal, &done);
-      pop(dst);
-      jmp(on_not_int32);
-    } else {
-      j(equal, on_not_int32);
-    }
-
-    bind(&done);
-    if (push_pop) {
-      add(Operand(esp), Immediate(kPointerSize));  // Pop.
-    }
-    if (!scratch.is(dst)) {
-      mov(dst, scratch);
-    }
-  }
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  mov(descriptors,
+      FieldOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi);
+  mov(descriptors, isolate()->factory()->empty_descriptor_array());
+  bind(&not_smi);
 }
 
 
@@ -1978,11 +2222,10 @@
                                                          Register scratch2,
                                                          Label* failure) {
   // Check that both objects are not smis.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   mov(scratch1, Operand(object1));
   and_(scratch1, Operand(object2));
-  test(scratch1, Immediate(kSmiTagMask));
-  j(zero, failure);
+  JumpIfSmi(scratch1, failure);
 
   // Load instance type for both strings.
   mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
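The .cc changes above also add MacroAssembler::LoadInstanceDescriptors, which handles the case where Map::kInstanceDescriptorsOrBitField3Offset holds a smi instead of a descriptor array. A minimal usage sketch, illustrative only and not part of the patch (the object register eax and the scratch registers ebx/ecx are arbitrary choices, and it assumes the code runs inside a MacroAssembler member):

  // Fetch the instance descriptors of the heap object held in eax.
  mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));  // ebx <- object's map.
  LoadInstanceDescriptors(ebx, ecx);  // ecx <- descriptor array, or the empty
                                      // descriptor array if the slot holds a
                                      // smi instead.
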
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index b986264..8c5f5e9 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -29,7 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
-#include "type-info.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
@@ -45,13 +45,11 @@
   RESULT_CONTAINS_TOP = 1 << 1
 };
 
+
 // Convenience for platform-independent signatures.  We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
 
-// Forward declaration.
-class PostCallGenerator;
-
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -73,11 +71,11 @@
 
   // Check if object is in new space.
   // scratch can be object itself, but it will be clobbered.
-  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,  // equal for new space, not_equal otherwise.
-                  LabelType* branch);
+                  Label* branch,
+                  Label::Distance branch_near = Label::kFar);
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -155,37 +153,46 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Set up call kind marking in ecx. The method takes ecx as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  void SetCallKind(Register dst, CallKind kind);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  PostCallGenerator* post_call_generator = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
                   InvokeFlag flag,
-                  PostCallGenerator* post_call_generator = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
                      InvokeFlag flag,
-                     PostCallGenerator* post_call_generator = NULL);
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -202,6 +209,9 @@
   void SafeSet(Register dst, const Immediate& x);
   void SafePush(const Immediate& x);
 
+  // Compare a register against a known root, e.g. undefined, null, true, ...
+  void CompareRoot(Register with, Heap::RootListIndex index);
+
   // Compare object type for heap object.
   // Incoming register is heap_object and outgoing register is map.
   void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -209,13 +219,27 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if the map of an object is equal to a specified map and
-  // branch to label if not. Skip the smi check if not required
-  // (object is known to be a heap object)
+  // Check if a map for a JSObject indicates that the object has fast elements.
+  // Jump to the specified label if it does not.
+  void CheckFastElements(Register map,
+                         Label* fail,
+                         Label::Distance distance = Label::kFar);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object)
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object)
+  void DispatchMap(Register obj,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
@@ -242,10 +266,17 @@
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
 
+  void ClampUint8(Register reg);
+
+  void ClampDoubleToUint8(XMMRegister input_reg,
+                          XMMRegister scratch_reg,
+                          Register result_reg);
+
+
   // Smi tagging support.
   void SmiTag(Register reg) {
-    ASSERT(kSmiTag == 0);
-    ASSERT(kSmiTagSize == 1);
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
     add(reg, Operand(reg));
   }
   void SmiUntag(Register reg) {
@@ -253,44 +284,36 @@
   }
 
   // Modifies the register even if it does not contain a Smi!
-  void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
-    ASSERT(kSmiTagSize == 1);
-    sar(reg, kSmiTagSize);
-    if (info.IsSmi()) {
-      ASSERT(kSmiTag == 0);
-      j(carry, non_smi);
-    }
-  }
-
-  // Modifies the register even if it does not contain a Smi!
   void SmiUntag(Register reg, Label* is_smi) {
-    ASSERT(kSmiTagSize == 1);
+    STATIC_ASSERT(kSmiTagSize == 1);
     sar(reg, kSmiTagSize);
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     j(not_carry, is_smi);
   }
 
   // Jump if the register contains a smi.
-  inline void JumpIfSmi(Register value, Label* smi_label) {
+  inline void JumpIfSmi(Register value,
+                        Label* smi_label,
+                        Label::Distance distance = Label::kFar) {
     test(value, Immediate(kSmiTagMask));
-    j(zero, smi_label, not_taken);
+    j(zero, smi_label, distance);
+  }
+  // Jump if the operand is a smi.
+  inline void JumpIfSmi(Operand value,
+                        Label* smi_label,
+                        Label::Distance distance = Label::kFar) {
+    test(value, Immediate(kSmiTagMask));
+    j(zero, smi_label, distance);
   }
   // Jump if the register contains a non-smi.
-  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+  inline void JumpIfNotSmi(Register value,
+                           Label* not_smi_label,
+                           Label::Distance distance = Label::kFar) {
     test(value, Immediate(kSmiTagMask));
-    j(not_zero, not_smi_label, not_taken);
+    j(not_zero, not_smi_label, distance);
   }
 
-  // Assumes input is a heap object.
-  void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
-
-  // Assumes input is a heap number.  Jumps on things out of range.  Also jumps
-  // on the min negative int32.  Ignores frational parts.
-  void ConvertToInt32(Register dst,
-                      Register src,      // Can be the same as dst.
-                      Register scratch,  // Can be no_reg or dst, but not src.
-                      TypeInfo info,
-                      Label* on_not_int32);
+  void LoadInstanceDescriptors(Register map, Register descriptors);
 
   void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
 
@@ -331,6 +354,16 @@
                               Register scratch,
                               Label* miss);
 
+  void GetNumberHash(Register r0, Register scratch);
+
+  void LoadFromNumberDictionary(Label* miss,
+                                Register elements,
+                                Register key,
+                                Register r0,
+                                Register r1,
+                                Register r2,
+                                Register result);
+
 
   // ---------------------------------------------------------------------------
   // Allocation support
@@ -405,7 +438,7 @@
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
-  void AllocateConsString(Register result,
+  void AllocateTwoByteConsString(Register result,
                           Register scratch1,
                           Register scratch2,
                           Label* gc_required);
@@ -414,6 +447,17 @@
                                Register scratch2,
                                Label* gc_required);
 
+  // Allocate a raw sliced string object. Only the map field of the result is
+  // initialized.
+  void AllocateTwoByteSlicedString(Register result,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* gc_required);
+  void AllocateAsciiSlicedString(Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
+
   // Copy memory, byte-by-byte, from source to destination.  Not optimized for
   // long or aligned copies.
   // The contents of index and scratch are destroyed.
@@ -457,7 +501,7 @@
   // Runtime calls
 
   // Call a code stub.  Generate the code if necessary.
-  void CallStub(CodeStub* stub);
+  void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
   // Call a code stub and return the code object called.  Try to generate
   // the code if necessary.  Do not perform a GC but instead return a retry
@@ -538,10 +582,10 @@
 
   // Prepares stack to put arguments (aligns and so on). Reserves
   // space for return value if needed (assumes the return value is a handle).
-  // Uses callee-saved esi to restore stack state after call. Arguments must be
-  // stored in ApiParameterOperand(0), ApiParameterOperand(1) etc. Saves
-  // context (esi).
-  void PrepareCallApiFunction(int argc, Register scratch);
+  // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
+  // etc. Saves context (esi). If space was reserved for the return value,
+  // the pointer to the reserved slot is stored in esi.
+  void PrepareCallApiFunction(int argc);
 
   // Calls an API function. Allocates HandleScope, extracts
   // returned value from handle and propagates exceptions.
@@ -582,6 +626,9 @@
 
   void Move(Register target, Handle<Object> value);
 
+  // Push a handle value.
+  void Push(Handle<Object> handle) { push(handle); }
+
   Handle<Object> CodeObject() {
     ASSERT(!code_object_.is_null());
     return code_object_;
@@ -655,9 +702,11 @@
                       const ParameterCount& actual,
                       Handle<Code> code_constant,
                       const Operand& code_operand,
-                      NearLabel* done,
+                      Label* done,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      Label::Distance done_near = Label::kFar,
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -692,33 +741,6 @@
 };
 
 
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                LabelType* branch) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch),
-         Immediate(ExternalReference::new_space_mask(isolate())));
-    cmp(Operand(scratch),
-        Immediate(ExternalReference::new_space_start(isolate())));
-    j(cc, branch);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start(isolate()).address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, isolate()->heap()->NewSpaceMask());
-    j(cc, branch);
-  }
-}
-
-
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -739,17 +761,6 @@
 };
 
 
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
-  PostCallGenerator() { }
-  virtual ~PostCallGenerator() { }
-  virtual void Generate() = 0;
-};
-
-
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 5b2f208..d175d9e 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -305,7 +305,7 @@
   // The length of a capture should not be negative. This can only happen
   // if the end of the capture is unrecorded, or at a point earlier than
   // the start of the capture.
-  BranchOrBacktrack(less, on_no_match, not_taken);
+  BranchOrBacktrack(less, on_no_match);
 
   // If length is zero, either the capture is empty or it is completely
   // uncaptured. In either case succeed immediately.
@@ -348,7 +348,7 @@
     __ add(Operand(edi), Immediate(1));
     // Compare to end of match, and loop if not done.
     __ cmp(edi, Operand(ebx));
-    __ j(below, &loop, taken);
+    __ j(below, &loop);
     __ jmp(&success);
 
     __ bind(&fail);
@@ -687,11 +687,11 @@
   __ mov(ecx, esp);
   __ sub(ecx, Operand::StaticVariable(stack_limit));
   // Handle it if the stack pointer is already below the stack limit.
-  __ j(below_equal, &stack_limit_hit, not_taken);
+  __ j(below_equal, &stack_limit_hit);
   // Check if there is room for the variable number of registers above
   // the stack limit.
   __ cmp(ecx, num_registers_ * kPointerSize);
-  __ j(above_equal, &stack_ok, taken);
+  __ j(above_equal, &stack_ok);
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
   __ mov(eax, EXCEPTION);
@@ -971,9 +971,9 @@
 }
 
 void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by)  {
-  NearLabel after_position;
+  Label after_position;
   __ cmp(edi, -by * char_size());
-  __ j(greater_equal, &after_position);
+  __ j(greater_equal, &after_position, Label::kNear);
   __ mov(edi, -by * char_size());
   // On RegExp code entry (where this operation is used), the character before
   // the current position is expected to be already loaded.
@@ -1065,12 +1065,13 @@
   }
 
   // Prepare for possible GC.
-  HandleScope handles;
+  HandleScope handles(isolate);
   Handle<Code> code_handle(re_code);
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1079,7 +1080,7 @@
   MaybeObject* result = Execution::HandleStackGuardInterrupt();
 
   if (*code_handle != re_code) {  // Return address no longer valid
-    int delta = *code_handle - re_code;
+    int delta = code_handle->address() - re_code->address();
     // Overwrite the return address on the stack.
     *return_address += delta;
   }
@@ -1088,8 +1089,20 @@
     return EXCEPTION;
   }
 
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
   // String might have changed.
-  if (subject->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and a UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1100,8 +1113,8 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject).IsSequential() ||
-      StringShape(*subject).IsExternal());
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
   const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
@@ -1109,13 +1122,14 @@
   // Find the current start address of the same character at the current string
   // position.
   int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject, start_index);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
 
   if (start_address != new_address) {
     // If there is a difference, update the object pointer and start and end
     // addresses in the RegExp stack frame to match the new value.
     const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
-    int byte_length = end_address - start_address;
+    int byte_length = static_cast<int>(end_address - start_address);
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
@@ -1142,8 +1156,7 @@
 
 
 void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
-                                                 Label* to,
-                                                 Hint hint) {
+                                                 Label* to) {
   if (condition < 0) {  // No condition
     if (to == NULL) {
       Backtrack();
@@ -1153,10 +1166,10 @@
     return;
   }
   if (to == NULL) {
-    __ j(condition, &backtrack_label_, hint);
+    __ j(condition, &backtrack_label_);
     return;
   }
-  __ j(condition, to, hint);
+  __ j(condition, to);
 }
 
 
@@ -1209,7 +1222,7 @@
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(masm_->isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above, &no_preempt, taken);
+  __ j(above, &no_preempt);
 
   SafeCall(&check_preempt_label_);
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 70606da..d504470 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -28,6 +28,9 @@
 #ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
 #define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
 
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -168,7 +171,7 @@
 
   // Equivalent to a conditional branch to the label, unless the label
   // is NULL, in which case it is a conditional Backtrack.
-  void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
+  void BranchOrBacktrack(Condition condition, Label* to);
 
   // Call and return internally in the generated code in a way that
   // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index cb660cd..13ddf35 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -56,7 +56,9 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
+    USE(isolate);
     return c_limit;
   }
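The extra Isolate* parameter above presumably keeps JsLimitFromCLimit in sync with the simulator builds on other architectures; on ia32 the C stack limit is still returned unchanged, hence the USE(isolate). An illustrative call, not part of the patch ('isolate' and 'c_limit' stand for whatever the embedding stack-guard code already has in scope):

  uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(isolate, c_limit);
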
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 27d2886..ab62764 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -57,7 +57,7 @@
 
     // Check that the key in the entry matches the name.
     __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
-    __ j(not_equal, &miss, not_taken);
+    __ j(not_equal, &miss);
 
     // Check that the flags match what we're looking for.
     __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
@@ -76,7 +76,7 @@
 
     // Check that the key in the entry matches the name.
     __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
-    __ j(not_equal, &miss, not_taken);
+    __ j(not_equal, &miss);
 
     // Get the code entry from the cache.
     __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
@@ -107,18 +107,17 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             String* name,
-                                             Register r0,
-                                             Register r1) {
+static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                                     Label* miss_label,
+                                                     Register receiver,
+                                                     String* name,
+                                                     Register r0,
+                                                     Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
   __ IncrementCounter(counters->negative_lookups_miss(), 1);
 
-  Label done;
   __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
 
   const int kInterceptorOrAccessCheckNeededMask =
@@ -127,11 +126,11 @@
   // Bail out if the receiver has a named interceptor or requires access checks.
   __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
             kInterceptorOrAccessCheckNeededMask);
-  __ j(not_zero, miss_label, not_taken);
+  __ j(not_zero, miss_label);
 
   // Check that receiver is a JSObject.
-  __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
-  __ j(below, miss_label, not_taken);
+  __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
+  __ j(below, miss_label);
 
   // Load properties array.
   Register properties = r0;
@@ -142,64 +141,20 @@
          Immediate(masm->isolate()->factory()->hash_table_map()));
   __ j(not_equal, miss_label);
 
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kProbes; i++) {
-    // r0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = r1;
-    // Capacity is smi 2^n.
-    __ mov(index, FieldOperand(properties, kCapacityOffset));
-    __ dec(index);
-    __ and_(Operand(index),
-            Immediate(Smi::FromInt(name->Hash() +
-                                   StringDictionary::GetProbeOffset(i))));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
-
-    Register entity_name = r1;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
-                                kElementsStartOffset - kHeapObjectTag));
-    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
-    if (i != kProbes - 1) {
-      __ j(equal, &done, taken);
-
-      // Stop if found the property.
-      __ cmp(entity_name, Handle<String>(name));
-      __ j(equal, miss_label, not_taken);
-
-      // Check if the entry name is not a symbol.
-      __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-      __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                kIsSymbolMask);
-      __ j(zero, miss_label, not_taken);
-    } else {
-      // Give up probing if still not found the undefined value.
-      __ j(not_equal, miss_label, not_taken);
-    }
-  }
+  Label done;
+  MaybeObject* result =
+      StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                         miss_label,
+                                                         &done,
+                                                         properties,
+                                                         name,
+                                                         r1);
+  if (result->IsFailure()) return result;
 
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+  return result;
 }
 
 
@@ -233,8 +188,7 @@
   ASSERT(extra2.is(no_reg));
 
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(receiver, &miss);
 
   // Get the map of the receiver and compute the hash.
   __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -294,12 +248,11 @@
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
+  __ JumpIfSmi(receiver, miss_label);
 
   // Check that the object is a JS array.
   __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, miss_label, not_taken);
+  __ j(not_equal, miss_label);
 
   // Load length directly from the JS array.
   __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
@@ -315,15 +268,14 @@
                                 Label* smi,
                                 Label* non_string_object) {
   // Check that the object isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, smi, not_taken);
+  __ JumpIfSmi(receiver, smi);
 
   // Check that the object is a string.
   __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  ASSERT(kNotStringTag != 0);
+  STATIC_ASSERT(kNotStringTag != 0);
   __ test(scratch, Immediate(kNotStringTag));
-  __ j(not_zero, non_string_object, not_taken);
+  __ j(not_zero, non_string_object);
 }
 
 
@@ -348,7 +300,7 @@
     // Check if the object is a JSValue wrapper.
     __ bind(&check_wrapper);
     __ cmp(scratch1, JS_VALUE_TYPE);
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
 
     // Check if the wrapped value is a string and load the length
     // directly if it is.
@@ -507,7 +459,7 @@
   // it's not controlled by GC.
   const int kApiStackSpace = 4;
 
-  __ PrepareCallApiFunction(kApiArgc + kApiStackSpace, ebx);
+  __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
 
   __ mov(ApiParameterOperand(1), eax);  // v8::Arguments::implicit_args_.
   __ add(Operand(eax), Immediate(argc * kPointerSize));
@@ -533,10 +485,12 @@
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name)
+                          Register name,
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name) {}
+        name_(name),
+        extra_ic_state_(extra_ic_state) {}
 
   MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
@@ -552,8 +506,7 @@
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
-    __ test(receiver, Immediate(kSmiTagMask));
-    __ j(zero, miss, not_taken);
+    __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
 
@@ -661,8 +614,11 @@
           GenerateFastApiCall(masm, optimization, arguments_.immediate());
       if (result->IsFailure()) return result;
     } else {
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+          ? CALL_AS_FUNCTION
+          : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION);
+                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -741,6 +697,7 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
@@ -758,6 +715,14 @@
 }
 
 
+void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -769,13 +734,12 @@
                                       Register scratch,
                                       Label* miss_label) {
   // Check that the object isn't a smi.
-  __ test(receiver_reg, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
+  __ JumpIfSmi(receiver_reg, miss_label);
 
   // Check that the map of the object hasn't changed.
   __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
          Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, miss_label, not_taken);
+  __ j(not_equal, miss_label);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -865,7 +829,7 @@
     __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
            Immediate(masm->isolate()->factory()->the_hole_value()));
   }
-  __ j(not_equal, miss, not_taken);
+  __ j(not_equal, miss);
   return cell;
 }
 
@@ -951,12 +915,17 @@
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(),
-                                       miss,
-                                       reg,
-                                       name,
-                                       scratch1,
-                                       scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
@@ -965,7 +934,7 @@
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
       // Branch on the result of the map check.
-      __ j(not_equal, miss, not_taken);
+      __ j(not_equal, miss);
       // Check access rights to the global object.  This has to happen
       // after the map check so that we know that the object is
       // actually a global object.
@@ -985,7 +954,7 @@
       __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
              Immediate(Handle<Map>(current->map())));
       // Branch on the result of the map check.
-      __ j(not_equal, miss, not_taken);
+      __ j(not_equal, miss);
       // Check access rights to the global object.  This has to happen
       // after the map check so that we know that the object is
       // actually a global object.
@@ -1012,7 +981,7 @@
   // Check the holder map.
   __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
          Immediate(Handle<Map>(holder->map())));
-  __ j(not_equal, miss, not_taken);
+  __ j(not_equal, miss);
 
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1046,8 +1015,7 @@
                                      String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
   Register reg =
@@ -1071,8 +1039,7 @@
                                                 String* name,
                                                 Label* miss) {
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   Register reg =
@@ -1115,7 +1082,7 @@
   const int kStackSpace = 5;
   const int kApiArgc = 2;
 
-  __ PrepareCallApiFunction(kApiArgc, eax);
+  __ PrepareCallApiFunction(kApiArgc);
   __ mov(ApiParameterOperand(0), ebx);  // name.
   __ add(Operand(ebx), Immediate(kPointerSize));
   __ mov(ApiParameterOperand(1), ebx);  // arguments pointer.
@@ -1138,8 +1105,7 @@
                                         String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   CheckPrototypes(object, receiver, holder,
@@ -1165,8 +1131,7 @@
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
 
   // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ JumpIfSmi(receiver, miss);
 
   // So far the most popular follow ups for interceptor loads are FIELD
   // and CALLBACKS, so inline only them, other cases may be added
@@ -1295,7 +1260,7 @@
 void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
     __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
   }
 }
 
@@ -1316,8 +1281,7 @@
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, miss, not_taken);
+    __ JumpIfSmi(edx, miss);
   }
 
   // Check that the maps haven't changed.
@@ -1343,18 +1307,17 @@
     // the nice side effect that multiple closures based on the same
     // function can all use this call IC. Before we load through the
     // function, we have to verify that it still is a function.
-    __ test(edi, Immediate(kSmiTagMask));
-    __ j(zero, miss, not_taken);
+    __ JumpIfSmi(edi, miss);
     __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
 
     // Check the shared function info. Make sure it hasn't changed.
     __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
            Immediate(Handle<SharedFunctionInfo>(function->shared())));
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
   } else {
     __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
   }
 }
 
@@ -1362,7 +1325,8 @@
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
   MaybeObject* maybe_obj =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
-                                                       kind_);
+                                               kind_,
+                                               extra_ic_state_);
   Object* obj;
   if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1391,8 +1355,7 @@
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(edx, &miss);
 
   // Do the right check and compute the holder register.
   Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
@@ -1401,10 +1364,9 @@
   GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
 
   // Check that the function really is a function.
-  __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(edi, &miss);
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Patch the receiver on the stack with the global proxy if
   // necessary.
@@ -1414,7 +1376,11 @@
   }
 
   // Invoke the function.
-  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -1453,8 +1419,7 @@
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(edx, &miss);
 
   CheckPrototypes(JSObject::cast(object), edx,
                   holder, ebx,
@@ -1502,8 +1467,7 @@
       __ mov(Operand(edx, 0), ecx);
 
       // Check if value is a smi.
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &with_write_barrier);
+      __ JumpIfNotSmi(ecx, &with_write_barrier);
 
       __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
@@ -1606,8 +1570,7 @@
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(edx, &miss);
   CheckPrototypes(JSObject::cast(object), edx,
                   holder, ebx,
                   eax, edi, name, &miss);
@@ -1689,7 +1652,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1773,7 +1738,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1862,8 +1829,7 @@
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(edx, &miss);
 
     CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
                     &miss);
@@ -1880,8 +1846,7 @@
   // Check the code is a smi.
   Label slow;
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(code, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow);
+  __ JumpIfNotSmi(code, &slow);
 
   // Convert the smi code to uint16.
   __ and_(code, Immediate(Smi::FromInt(0xffff)));
@@ -1896,7 +1861,11 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   __ bind(&miss);
   // ecx: function name.
@@ -1942,8 +1911,7 @@
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(edx, &miss);
 
     CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
                     &miss);
@@ -1959,12 +1927,11 @@
   // Check if the argument is a smi.
   Label smi;
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi);
+  __ JumpIfSmi(eax, &smi);
 
   // Check if the argument is a heap number and load its value into xmm0.
   Label slow;
-  __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
+  __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
   __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
 
   // Check if the argument is strictly positive. Note this also
@@ -2026,7 +1993,8 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // ecx: function name.
@@ -2066,8 +2034,7 @@
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(edx, &miss);
 
     CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
                     &miss);
@@ -2083,8 +2050,7 @@
   // Check if the argument is a smi.
   Label not_smi;
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &not_smi);
+  __ JumpIfNotSmi(eax, &not_smi);
 
   // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
   // otherwise.
@@ -2108,7 +2074,7 @@
   // Check if the argument is a heap number and load its exponent and
   // sign into ebx.
   __ bind(&not_smi);
-  __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
+  __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
   __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
 
   // Check the sign of the argument. If the argument is positive,
@@ -2131,7 +2097,8 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // ecx: function name.
@@ -2155,6 +2122,7 @@
   // repatch it to global receiver.
   if (object->IsGlobalObject()) return heap()->undefined_value();
   if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
             JSObject::cast(object), holder);
   if (depth == kInvalidProtoDepth) return heap()->undefined_value();
@@ -2168,8 +2136,7 @@
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss_before_stack_reserved, not_taken);
+  __ JumpIfSmi(edx, &miss_before_stack_reserved);
 
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_const(), 1);
@@ -2204,11 +2171,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
-                                                   CheckType check) {
+MaybeObject* CallStubCompiler::CompileCallConstant(
+    Object* object,
+    JSObject* holder,
+    JSFunction* function,
+    String* name,
+    CheckType check) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2236,8 +2204,7 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &miss, not_taken);
+    __ JumpIfSmi(edx, &miss);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
@@ -2269,7 +2236,7 @@
       } else {
         // Check that the object is a string or a symbol.
         __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
-        __ j(above_equal, &miss, not_taken);
+        __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
@@ -2286,10 +2253,9 @@
       } else {
         Label fast;
         // Check that the object is a smi or a heap number.
-        __ test(edx, Immediate(kSmiTagMask));
-        __ j(zero, &fast, taken);
+        __ JumpIfSmi(edx, &fast);
         __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
-        __ j(not_equal, &miss, not_taken);
+        __ j(not_equal, &miss);
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
@@ -2309,9 +2275,9 @@
         Label fast;
         // Check that the object is a boolean.
         __ cmp(edx, factory()->true_value());
-        __ j(equal, &fast, taken);
+        __ j(equal, &fast);
         __ cmp(edx, factory()->false_value());
-        __ j(not_equal, &miss, not_taken);
+        __ j(not_equal, &miss);
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
@@ -2326,7 +2292,11 @@
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2361,7 +2331,7 @@
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), ecx);
+  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_ic_state_);
   MaybeObject* result = compiler.Compile(masm(),
                                          object,
                                          holder,
@@ -2378,10 +2348,9 @@
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
   // Check that the function really is a function.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(eax, &miss);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Patch the receiver on the stack with the global proxy if
   // necessary.
@@ -2392,7 +2361,11 @@
 
   // Invoke the function.
   __ mov(edi, eax);
-  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle load cache miss.
   __ bind(&miss);
@@ -2404,11 +2377,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(
+    JSObject* object,
+    GlobalObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2451,16 +2425,21 @@
   __ IncrementCounter(counters->call_global_inline(), 1);
   ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
     // TODO(kasperl): For now, we always call indirectly through the
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-                  expected, arguments(), JUMP_FUNCTION);
+                  expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
     Handle<Code> code(function->code());
     __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   }
 
   // Handle call cache miss.
@@ -2517,13 +2496,12 @@
   Label miss;
 
   // Check that the object isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(edx, &miss);
 
   // Check that the map of the object hasn't changed.
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
          Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2567,13 +2545,12 @@
   Label miss;
 
   // Check that the object isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(edx, &miss);
 
   // Check that the map of the object hasn't changed.
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
          Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2620,7 +2597,7 @@
   // Check that the map of the global has not changed.
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
          Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
 
   // Compute the cell operand to use.
@@ -2673,7 +2650,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Generate store field code.  Trashes the name register.
   GenerateStoreField(masm(),
@@ -2694,8 +2671,35 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2703,51 +2707,22 @@
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
+  __ JumpIfSmi(edx, &miss);
 
-  // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
-
-  // Check that the map matches.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss, not_taken);
-
-  // Check that the key is a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
-
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
-         Immediate(factory()->fixed_array_map()));
-  __ j(not_equal, &miss, not_taken);
-
-  // Check that the key is within bounds.
-  if (receiver->IsJSArray()) {
-    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
-    __ j(above_equal, &miss, not_taken);
-  } else {
-    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // Compare smis.
-    __ j(above_equal, &miss, not_taken);
+  Register map_reg = ebx;
+  __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    __ cmp(map_reg, map);
+    __ j(equal, Handle<Code>(handler_ics->at(current)));
   }
-
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ mov(edx, Operand(eax));
-  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
-  __ RecordWrite(edi, 0, edx, ecx);
-
-  // Done.
-  __ ret(0);
-
-  // Handle store cache miss.
   __ bind(&miss);
-  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
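The new CompileStoreMegamorphic (and the matching CompileLoadMegamorphic below) replaces the old single-map fast case with a linear map dispatch: load the receiver's map once, compare it against each map in receiver_maps, tail-jump to the matching handler IC, and fall through to the generic miss. A rough, self-contained C++ sketch of the dispatch the generated stub performs (names and types here are illustrative stand-ins, not V8 API):

// Illustrative sketch only: each "handler" models a tail jump to a
// per-map compiled keyed load/store IC; "miss" models KeyedStoreIC_Miss.
#include <cstddef>

struct Map;                       // opaque stand-in for a heap map
typedef void (*Handler)();        // stand-in for a compiled handler IC

inline void DispatchOnMap(const Map* receiver_map,
                          const Map* const* receiver_maps,
                          const Handler* handlers,
                          std::size_t count,
                          Handler miss) {
  for (std::size_t i = 0; i < count; ++i) {
    if (receiver_map == receiver_maps[i]) {
      handlers[i]();              // __ j(equal, handler_ics->at(i))
      return;
    }
  }
  miss();                         // fall through: __ jmp(miss_ic)
}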
 
 
@@ -2762,8 +2737,7 @@
   Label miss;
 
   // Check that the receiver isn't a smi.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ JumpIfSmi(eax, &miss);
 
   ASSERT(last->IsGlobalObject() || last->HasFastProperties());
 
@@ -2915,8 +2889,7 @@
   // object which can only happen for contextual loads. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &miss, not_taken);
+    __ JumpIfSmi(eax, &miss);
   }
 
   // Check that the maps haven't changed.
@@ -2933,7 +2906,7 @@
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
     __ cmp(ebx, factory()->the_hole_value());
-    __ j(equal, &miss, not_taken);
+    __ j(equal, &miss);
   } else if (FLAG_debug_code) {
     __ cmp(ebx, factory()->the_hole_value());
     __ Check(not_equal, "DontDelete cells can't contain the hole");
@@ -2969,7 +2942,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
 
@@ -2999,7 +2972,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
                                              ecx, edi, callback, name, &miss);
@@ -3034,7 +3007,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
                        value, name, &miss);
@@ -3062,7 +3035,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
@@ -3098,7 +3071,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), edx, ecx, &miss);
   __ bind(&miss);
@@ -3123,7 +3096,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
   __ bind(&miss);
@@ -3148,7 +3121,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
   __ bind(&miss);
@@ -3160,48 +3133,53 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
+  __ JumpIfSmi(edx, &miss);
 
-  // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
-
-  // Check that the map matches.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss, not_taken);
-
-  // Check that the key is a smi.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
-
-  // Get the elements array.
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(ecx);
-
-  // Check that the key is within bounds.
-  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss, not_taken);
-
-  // Load the result and make sure it's not the hole.
-  __ mov(ebx, Operand(ecx, eax, times_2,
-                      FixedArray::kHeaderSize - kHeapObjectTag));
-  __ cmp(ebx, factory()->the_hole_value());
-  __ j(equal, &miss, not_taken);
-  __ mov(eax, ebx);
-  __ ret(0);
+  Register map_reg = ebx;
+  __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    __ cmp(map_reg, map);
+    __ j(equal, Handle<Code>(handler_ics->at(current)));
+  }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -3222,14 +3200,13 @@
   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
   __ cmp(ebx, factory()->undefined_value());
-  __ j(not_equal, &generic_stub_call, not_taken);
+  __ j(not_equal, &generic_stub_call);
 #endif
 
   // Load the initial map and verify that it is in fact a map.
   __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
   // Will both indicate a NULL and a Smi.
-  __ test(ebx, Immediate(kSmiTagMask));
-  __ j(zero, &generic_stub_call);
+  __ JumpIfSmi(ebx, &generic_stub_call);
   __ CmpObjectType(ebx, MAP_TYPE, ecx);
   __ j(not_equal, &generic_stub_call);
 
@@ -3344,58 +3321,116 @@
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject*receiver, ExternalArrayType array_type, Code::Flags flags) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+    MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, failed_allocation;
+  Label slow, miss_force_generic;
 
-  // Check that the object isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+  __ JumpIfNotSmi(eax, &miss_force_generic);
+  __ mov(ebx, eax);
+  __ SmiUntag(ebx);
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+
+  // Push receiver on the stack to free up a register for the dictionary
+  // probing.
+  __ push(edx);
+  __ LoadFromNumberDictionary(&slow,
+                              ecx,
+                              eax,
+                              ebx,
+                              edx,
+                              edi,
+                              eax);
+  // Pop receiver before returning.
+  __ pop(edx);
+  __ ret(0);
+
+  __ bind(&slow);
+  __ pop(edx);
+
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> miss_force_generic_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, failed_allocation, slow;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow, not_taken);
+  __ JumpIfNotSmi(eax, &miss_force_generic);
 
-  // Check that the map matches.
-  __ CheckMap(edx, Handle<Map>(receiver->map()), &slow, false);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // eax: key, known to be a smi.
-  // edx: receiver, known to be a JSObject.
-  // ebx: elements object, known to be an external array.
   // Check that the index is in range.
-  __ mov(ecx, eax);
-  __ SmiUntag(ecx);  // Untag the index.
-  __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(eax, FieldOperand(ebx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &slow);
+  __ j(above_equal, &miss_force_generic);
   __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
   // ebx: base pointer of external storage
-  switch (array_type) {
-    case kExternalByteArray:
-      __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+      __ SmiUntag(eax);  // Untag the index.
+      __ movsx_b(eax, Operand(ebx, eax, times_1, 0));
       break;
-    case kExternalUnsignedByteArray:
-    case kExternalPixelArray:
-      __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      __ SmiUntag(eax);  // Untag the index.
+      __ movzx_b(eax, Operand(ebx, eax, times_1, 0));
       break;
-    case kExternalShortArray:
-      __ movsx_w(eax, Operand(ebx, ecx, times_2, 0));
+    case EXTERNAL_SHORT_ELEMENTS:
+      __ movsx_w(eax, Operand(ebx, eax, times_1, 0));
       break;
-    case kExternalUnsignedShortArray:
-      __ movzx_w(eax, Operand(ebx, ecx, times_2, 0));
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ movzx_w(eax, Operand(ebx, eax, times_1, 0));
       break;
-    case kExternalIntArray:
-    case kExternalUnsignedIntArray:
-      __ mov(ecx, Operand(ebx, ecx, times_4, 0));
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+      __ mov(ecx, Operand(ebx, eax, times_2, 0));
       break;
-    case kExternalFloatArray:
-      __ fld_s(Operand(ebx, ecx, times_4, 0));
+    case EXTERNAL_FLOAT_ELEMENTS:
+      __ fld_s(Operand(ebx, eax, times_2, 0));
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      __ fld_d(Operand(ebx, eax, times_4, 0));
       break;
     default:
       UNREACHABLE();
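Note that the scale factors in the switch above assume the key register still holds a smi (on ia32, a value shifted left by one bit): times_1 addresses 16-bit elements, times_2 addresses 32-bit elements, times_4 addresses doubles, and only the one-byte cases untag first. A small sketch of that arithmetic, under the assumption of a one-bit smi tag:

// Sketch of smi-scaled addressing, assuming ia32's one-bit smi tag
// (tagged key == index << 1). Because the key stays tagged, the scale
// factor is element_size / 2, so tagged * scale == index * element_size.
#include <cstddef>
#include <cstdint>

inline uintptr_t ElementAddress(uintptr_t base_pointer,
                                uint32_t index,            // index >= 0
                                std::size_t element_size) {  // 2, 4 or 8
  uint32_t tagged_key = index << 1;                          // smi encoding
  return base_pointer + tagged_key * (element_size / 2);
}
// 16-bit elements: scale 1 (times_1); 32-bit: scale 2 (times_2);
// 64-bit doubles: scale 4 (times_4). One-byte elements untag instead.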
@@ -3407,17 +3442,17 @@
   // For floating-point array type:
   // FP(0): value
 
-  if (array_type == kExternalIntArray ||
-      array_type == kExternalUnsignedIntArray) {
+  if (elements_kind == EXTERNAL_INT_ELEMENTS ||
+      elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
     // For the Int and UnsignedInt array types, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
     Label box_int;
-    if (array_type == kExternalIntArray) {
+    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
       __ cmp(ecx, 0xC0000000);
       __ j(sign, &box_int);
     } else {
-      ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
       // The test is different for unsigned int values. Since we need
       // the value to be in the range of a positive smi, we can't
       // handle either of the top two bits being set in the value.
@@ -3433,12 +3468,12 @@
 
     // Allocate a HeapNumber for the int and perform int-to-double
     // conversion.
-    if (array_type == kExternalIntArray) {
+    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
       __ push(ecx);
       __ fild_s(Operand(esp, 0));
       __ pop(ecx);
     } else {
-      ASSERT(array_type == kExternalUnsignedIntArray);
+      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
       // Need to zero-extend the value.
       // There's no fild variant for unsigned values, so zero-extend
       // to a 64-bit int manually.
@@ -3454,7 +3489,8 @@
     __ mov(eax, ecx);
     __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
     __ ret(0);
-  } else if (array_type == kExternalFloatArray) {
+  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
     __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
@@ -3476,53 +3512,51 @@
 
   // Slow case: Jump to runtime.
   __ bind(&slow);
-  Counters* counters = isolate()->counters();
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
+
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
 
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(eax);  // name
-  __ push(ebx);  // return address
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
-  // Return the generated code.
-  return GetCode(flags);
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
   // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
+  //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, check_heap_number;
 
-  // Check that the object isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  // Miss case: Jump to runtime.
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
 
-  // Check that the map matches.
-  __ CheckMap(edx, Handle<Map>(receiver->map()), &slow, false);
+
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, slow, check_heap_number;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow);
+  __ JumpIfNotSmi(ecx, &miss_force_generic);
 
   // Check that the index is in range.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ mov(ebx, ecx);
-  __ SmiUntag(ebx);
-  __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(edi, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ j(above_equal, &slow);
 
@@ -3532,48 +3566,47 @@
   // edx: receiver
   // ecx: key
   // edi: elements array
-  // ebx: untagged index
-  __ test(eax, Immediate(kSmiTagMask));
-  if (array_type == kExternalPixelArray)
-    __ j(not_equal, &slow);
-  else
-    __ j(not_equal, &check_heap_number);
+  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
+    __ JumpIfNotSmi(eax, &slow);
+  } else {
+    __ JumpIfNotSmi(eax, &check_heap_number);
+  }
 
   // smi case
-  __ mov(ecx, eax);  // Preserve the value in eax.  Key is no longer needed.
-  __ SmiUntag(ecx);
+  __ mov(ebx, eax);  // Preserve the value in eax as the return value.
+  __ SmiUntag(ebx);
   __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
-  // ecx: base pointer of external storage
-  switch (array_type) {
-    case kExternalPixelArray:
-      {  // Clamp the value to [0..255].
-        NearLabel done;
-        __ test(ecx, Immediate(0xFFFFFF00));
-        __ j(zero, &done);
-        __ setcc(negative, ecx);  // 1 if negative, 0 if positive.
-        __ dec_b(ecx);  // 0 if negative, 255 if positive.
-        __ bind(&done);
-      }
-      __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+  // edi: base pointer of external storage
+  switch (elements_kind) {
+    case EXTERNAL_PIXEL_ELEMENTS:
+      __ ClampUint8(ebx);
+      __ SmiUntag(ecx);
+      __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
       break;
-    case kExternalByteArray:
-    case kExternalUnsignedByteArray:
-      __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      __ SmiUntag(ecx);
+      __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
       break;
-    case kExternalShortArray:
-    case kExternalUnsignedShortArray:
-      __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
       break;
-    case kExternalIntArray:
-    case kExternalUnsignedIntArray:
-      __ mov(Operand(edi, ebx, times_4, 0), ecx);
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      __ mov(Operand(edi, ecx, times_2, 0), ebx);
       break;
-    case kExternalFloatArray:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
       // Need to perform int-to-float conversion.
-      __ push(ecx);
+      __ push(ebx);
       __ fild_s(Operand(esp, 0));
-      __ pop(ecx);
-      __ fstp_s(Operand(edi, ebx, times_4, 0));
+      __ pop(ebx);
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        __ fstp_s(Operand(edi, ecx, times_2, 0));
+      } else {  // elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
+        __ fstp_d(Operand(edi, ecx, times_4, 0));
+      }
       break;
     default:
       UNREACHABLE();
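The open-coded test/setcc/dec_b sequence that used to clamp pixel-array stores is replaced above by the ClampUint8 macro instruction; both clamp the untagged value into [0, 255]. A minimal C++ sketch of that clamping rule (mirroring the removed sequence, and presumably what ClampUint8 emits):

// Values already in [0, 255] pass through unchanged; otherwise a negative
// value becomes 0 and an over-large value becomes 255, exactly like the
// removed test(0xFFFFFF00) / setcc(negative) / dec_b sequence.
#include <cstdint>

inline uint8_t ClampToUint8(int32_t value) {
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);
  return value < 0 ? 0 : 255;
}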
@@ -3582,26 +3615,28 @@
   __ ret(0);  // Return the original value.
 
   // TODO(danno): handle heap number -> pixel array conversion
-  if (array_type != kExternalPixelArray) {
+  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     __ bind(&check_heap_number);
     // eax: value
     // edx: receiver
     // ecx: key
     // edi: elements array
-    // ebx: untagged index
     __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-           Immediate(factory()->heap_number_map()));
+           Immediate(masm->isolate()->factory()->heap_number_map()));
     __ j(not_equal, &slow);
 
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
     // reproducible behavior, convert these to zero.
     __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
-    // ebx: untagged index
     // edi: base pointer of external storage
-    if (array_type == kExternalFloatArray) {
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-      __ fstp_s(Operand(edi, ebx, times_4, 0));
+      __ fstp_s(Operand(edi, ecx, times_2, 0));
+      __ ret(0);
+    } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+      __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+      __ fstp_d(Operand(edi, ecx, times_4, 0));
       __ ret(0);
     } else {
       // Perform float-to-int conversion with truncation (round-to-zero)
@@ -3612,31 +3647,24 @@
       // (code-stubs-ia32.cc) is roughly what is needed here though the
       // conversion failure case does not need to be handled.
       if (CpuFeatures::IsSupported(SSE2)) {
-        if (array_type != kExternalIntArray &&
-            array_type != kExternalUnsignedIntArray) {
+        if (elements_kind != EXTERNAL_INT_ELEMENTS &&
+            elements_kind != EXTERNAL_UNSIGNED_INT_ELEMENTS) {
           ASSERT(CpuFeatures::IsSupported(SSE2));
           CpuFeatures::Scope scope(SSE2);
-          __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
+          __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
-          // ecx: untagged integer value
+          // ebx: untagged integer value
-          switch (array_type) {
-            case kExternalPixelArray:
-              {  // Clamp the value to [0..255].
-                NearLabel done;
-                __ test(ecx, Immediate(0xFFFFFF00));
-                __ j(zero, &done);
-                __ setcc(negative, ecx);  // 1 if negative, 0 if positive.
-                __ dec_b(ecx);  // 0 if negative, 255 if positive.
-                __ bind(&done);
-              }
-              __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+          switch (elements_kind) {
+            case EXTERNAL_PIXEL_ELEMENTS:
+              __ ClampUint8(ebx);
+              // Fall through.
+            case EXTERNAL_BYTE_ELEMENTS:
+            case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+              __ SmiUntag(ecx);
+              __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
               break;
-            case kExternalByteArray:
-            case kExternalUnsignedByteArray:
-              __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
-              break;
-            case kExternalShortArray:
-            case kExternalUnsignedShortArray:
-              __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
+            case EXTERNAL_SHORT_ELEMENTS:
+            case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+              __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
               break;
             default:
               UNREACHABLE();
@@ -3653,7 +3681,7 @@
             __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
             __ sub(Operand(esp), Immediate(2 * kPointerSize));
             __ fisttp_d(Operand(esp, 0));
-            __ pop(ecx);
+            __ pop(ebx);
             __ add(Operand(esp), Immediate(kPointerSize));
           } else {
             ASSERT(CpuFeatures::IsSupported(SSE2));
@@ -3664,15 +3692,15 @@
             // Note: we could do better for signed int arrays.
             __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
             // We will need the key if we have to make the slow runtime call.
-            __ push(ecx);
-            __ LoadPowerOf2(xmm1, ecx, 31);
-            __ pop(ecx);
+            __ push(ebx);
+            __ LoadPowerOf2(xmm1, ebx, 31);
+            __ pop(ebx);
             __ ucomisd(xmm1, xmm0);
             __ j(above_equal, &slow);
-            __ cvttsd2si(ecx, Operand(xmm0));
+            __ cvttsd2si(ebx, Operand(xmm0));
           }
-          // ecx: untagged integer value
-          __ mov(Operand(edi, ebx, times_4, 0), ecx);
+          // ebx: untagged integer value
+          __ mov(Operand(edi, ecx, times_2, 0), ebx);
         }
         __ ret(0);  // Return original value.
       }
@@ -3681,6 +3709,9 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
+
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -3688,19 +3719,265 @@
   //  -- esp[0] : return address
   // -----------------------------------
 
-  __ pop(ebx);
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(eax, &miss_force_generic);
+
+  // Get the elements array.
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(ecx);
+
+  // Check that the key is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  __ mov(ebx, Operand(ecx, eax, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+  __ j(equal, &miss_force_generic);
+  __ mov(eax, ebx);
+  __ ret(0);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(eax, &miss_force_generic);
+
+  // Get the elements array.
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(ecx);
+
+  // Check that the key is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedDoubleArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Check for the hole
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmp(FieldOperand(ecx, eax, times_4, offset), Immediate(kHoleNanUpper32));
+  __ j(equal, &miss_force_generic);
+
+  // Always allocate a heap number for the result.
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(xmm0, FieldOperand(ecx, eax, times_4,
+                                 FixedDoubleArray::kHeaderSize));
+  } else {
+    __ fld_d(FieldOperand(ecx, eax, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  __ AllocateHeapNumber(ecx, ebx, edi, &slow_allocate_heapnumber);
+  // Set the value.
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+  }
+  __ mov(eax, ecx);
+  __ ret(0);
+
+  __ bind(&slow_allocate_heapnumber);
+  // A value was pushed on the floating-point stack before the allocation; if
+  // the allocation fails, it needs to be removed.
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    __ ffree();
+    __ fincstp();
+  }
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
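GenerateLoadFastDoubleElement reserves one specific NaN bit pattern (kHoleNanUpper32/kHoleNanLower32) as "the hole", so a load only needs to compare the upper 32 bits of the slot before boxing the value into a HeapNumber. A small sketch of that hole test, with V8's constant treated as opaque:

// The slot's upper 32 bits are compared against the reserved hole marker;
// hole_nan_upper32 stands in for V8's kHoleNanUpper32 and is passed in
// rather than assumed here.
#include <cstdint>

inline bool IsTheHole(uint64_t slot_bits, uint32_t hole_nan_upper32) {
  uint32_t upper = static_cast<uint32_t>(slot_bits >> 32);
  return upper == hole_nan_upper32;  // cmp(FieldOperand(...), kHoleNanUpper32)
}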
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(ecx, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_array_map()));
+  __ j(not_equal, &miss_force_generic);
+
+  if (is_js_array) {
+    // Check that the key is within bounds.
+    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
+    __ j(above_equal, &miss_force_generic);
+  } else {
+    // Check that the key is within bounds.
+    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
+    __ j(above_equal, &miss_force_generic);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register eax.
+  __ mov(edx, Operand(eax));
+  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+  __ RecordWrite(edi, 0, edx, ecx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss, replacing the ic with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic_force_generic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+    MacroAssembler* masm,
+    bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, smi_value, is_nan, maybe_nan;
+  Label have_double_value, not_nan;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(ecx, &miss_force_generic);
+
+  // Get the elements array.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(edi);
+
+  if (is_js_array) {
+    // Check that the key is within bounds.
+    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
+  } else {
+    // Check that the key is within bounds.
+    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
+  }
+  __ j(above_equal, &miss_force_generic);
+
+  __ JumpIfSmi(eax, &smi_value, Label::kNear);
+
+  __ CheckMap(eax,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
+  __ j(greater_equal, &maybe_nan, Label::kNear);
+
+  __ bind(&not_nan);
+  ExternalReference canonical_nan_reference =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ bind(&have_double_value);
+    __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
+              xmm0);
+    __ ret(0);
+  } else {
+    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ bind(&have_double_value);
+    __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+    __ ret(0);
+  }
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is not zero, it's NaN; otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  __ j(greater, &is_nan, Label::kNear);
+  __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
+  __ j(zero, &not_nan);
+  __ bind(&is_nan);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
+  } else {
+    __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+  }
+  __ jmp(&have_double_value, Label::kNear);
+
+  __ bind(&smi_value);
+  // Value is a smi. Convert to a double and store.
+  // Preserve original value.
+  __ mov(edx, eax);
+  __ SmiUntag(edx);
   __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(NONE)));   // PropertyAttributes
-  __ push(Immediate(Smi::FromInt(
-      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
-  __ push(ebx);   // return address
+  __ fild_s(Operand(esp, 0));
+  __ pop(edx);
+  __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+  __ ret(0);
 
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-
-  return GetCode(flags);
+  // Handle store cache miss, replacing the ic with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic_force_generic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
 }
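GenerateStoreFastDoubleElement canonicalizes NaNs before writing into a FixedDoubleArray so the hole marker above can never be stored by accident: it inspects the upper 32 bits of the IEEE double, treats anything at or above the all-ones-exponent boundary as a NaN/Infinity candidate, and lets only an all-zero fraction (an Infinity) skip canonicalization. The sketch below shows the underlying IEEE-754 classification the two 32-bit compares against kNaNOrInfinityLowerBoundUpper32 and the lower word approximate; constants here are standard IEEE-754 values, not taken from V8 headers:

// General NaN classification on the 32-bit halves of a double. Every NaN
// (quiet or signaling, either sign) would be rewritten to one canonical,
// non-hole pattern before being stored.
#include <cstdint>
#include <cstring>

inline bool NeedsCanonicalization(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  uint32_t lower = static_cast<uint32_t>(bits);
  const uint32_t kExponentAllOnes = 0x7FF00000;  // upper half of +Infinity
  if ((upper & 0x7FFFFFFF) < kExponentAllOnes) return false;  // finite double
  // Exponent is all ones: NaN if any fraction bit is set, else +/-Infinity.
  return ((upper & 0x000FFFFF) != 0) || (lower != 0);
}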
 
 
diff --git a/src/ic.cc b/src/ic.cc
index 99eb21f..0f76a9a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -61,16 +61,41 @@
 void IC::TraceIC(const char* type,
                  Handle<Object> name,
                  State old_state,
-                 Code* new_target,
-                 const char* extra_info) {
+                 Code* new_target) {
   if (FLAG_trace_ic) {
     State new_state = StateFrom(new_target,
                                 HEAP->undefined_value(),
                                 HEAP->undefined_value());
-    PrintF("[%s (%c->%c)%s", type,
+    PrintF("[%s in ", type);
+    StackFrameIterator it;
+    while (it.frame()->fp() != this->fp()) it.Advance();
+    StackFrame* raw_frame = it.frame();
+    if (raw_frame->is_internal()) {
+      Isolate* isolate = new_target->GetIsolate();
+      Code* apply_builtin = isolate->builtins()->builtin(
+          Builtins::kFunctionApply);
+      if (raw_frame->unchecked_code() == apply_builtin) {
+        PrintF("apply from ");
+        it.Advance();
+        raw_frame = it.frame();
+      }
+    }
+    if (raw_frame->is_java_script()) {
+      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+      Code* js_code = frame->unchecked_code();
+      // Find the function on the stack and both the active code for the
+      // function and the original code.
+      JSFunction* function = JSFunction::cast(frame->function());
+      function->PrintName();
+      int code_offset =
+          static_cast<int>(address() - js_code->instruction_start());
+      PrintF("+%d", code_offset);
+    } else {
+      PrintF("<unknown>");
+    }
+    PrintF(" (%c->%c)",
            TransitionMarkFromState(old_state),
-           TransitionMarkFromState(new_state),
-           extra_info);
+           TransitionMarkFromState(new_state));
     name->Print();
     PrintF("]\n");
   }
@@ -274,16 +299,16 @@
   switch (target->kind()) {
     case Code::LOAD_IC: return LoadIC::Clear(address, target);
     case Code::KEYED_LOAD_IC:
-    case Code::KEYED_EXTERNAL_ARRAY_LOAD_IC:
       return KeyedLoadIC::Clear(address, target);
     case Code::STORE_IC: return StoreIC::Clear(address, target);
     case Code::KEYED_STORE_IC:
-    case Code::KEYED_EXTERNAL_ARRAY_STORE_IC:
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
-    case Code::TYPE_RECORDING_BINARY_OP_IC:
+    case Code::UNARY_OP_IC:
+    case Code::BINARY_OP_IC:
     case Code::COMPARE_IC:
+    case Code::TO_BOOLEAN_IC:
       // Clearing these is tricky and does not
       // make any performance difference.
       return;
@@ -293,65 +318,35 @@
 
 
 void CallICBase::Clear(Address address, Code* target) {
+  bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
   State state = target->ic_state();
   if (state == UNINITIALIZED) return;
   Code* code =
       Isolate::Current()->stub_cache()->FindCallInitialize(
           target->arguments_count(),
-          target->ic_in_loop(),
+          contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
           target->kind());
   SetTargetAtAddress(address, code);
 }
 
 
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
-  // Insert null as the map to check for to make sure the map check fails
-  // sending control flow to the IC instead of the inlined version.
-  PatchInlinedLoad(address, HEAP->null_value());
-}
-
-
 void KeyedLoadIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   // Make sure to also clear the map used in inline fast cases.  If we
   // do not clear these maps, cached code can keep objects alive
   // through the embedded maps.
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }
 
 
-void LoadIC::ClearInlinedVersion(Address address) {
-  // Reset the map check of the inlined inobject property load (if
-  // present) to guarantee failure by holding an invalid map (the null
-  // value).  The offset can be patched to anything.
-  Heap* heap = HEAP;
-  PatchInlinedLoad(address, heap->null_value(), 0);
-  PatchInlinedContextualLoad(address,
-                             heap->null_value(),
-                             heap->null_value(),
-                             true);
-}
-
-
 void LoadIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }
 
 
-void StoreIC::ClearInlinedVersion(Address address) {
-  // Reset the map check of the inlined inobject property store (if
-  // present) to guarantee failure by holding an invalid map (the null
-  // value).  The offset can be patched to anything.
-  PatchInlinedStore(address, HEAP->null_value(), 0);
-}
-
-
 void StoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address,
       (target->extra_ic_state() == kStrictMode)
         ? initialize_stub_strict()
@@ -359,21 +354,6 @@
 }
 
 
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
-  // Insert null as the elements map to check for.  This will make
-  // sure that the elements fast-case map check fails so that control
-  // flows to the IC instead of the inlined version.
-  PatchInlinedStore(address, HEAP->null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
-  // Restore the fast-case elements map check so that the inlined
-  // version can be used again.
-  PatchInlinedStore(address, HEAP->fixed_array_map());
-}
-
-
 void KeyedStoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   SetTargetAtAddress(address,
@@ -595,17 +575,13 @@
         ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
         // If we're in the default (fastest) state and the index is
         // out of bounds, update the state to record this fact.
-        if (*extra_ic_state == DEFAULT_STRING_STUB &&
+        if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
             argc >= 1 && args[1]->IsNumber()) {
-          double index;
-          if (args[1]->IsSmi()) {
-            index = Smi::cast(args[1])->value();
-          } else {
-            ASSERT(args[1]->IsHeapNumber());
-            index = DoubleToInteger(HeapNumber::cast(args[1])->value());
-          }
+          double index = DoubleToInteger(args.number_at(1));
           if (index < 0 || index >= string->length()) {
-            *extra_ic_state = STRING_INDEX_OUT_OF_BOUNDS;
+            *extra_ic_state =
+                StringStubState::update(*extra_ic_state,
+                                        STRING_INDEX_OUT_OF_BOUNDS);
             return true;
           }
         }
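The extra_ic_state word is now treated as a packed bit field rather than a bare enum value, which is why the code above reads it with StringStubState::decode and writes it back with StringStubState::update. A minimal sketch of that decode/update pattern (a generic bit-field helper in the spirit of the usage above, not V8's actual BitField template; field layout and enum values are assumed for illustration):

// Generic decode/update helper over a 32-bit state word.
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;

  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  // Replace just this field, preserving every other bit in the word.
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};

// Mirrors the constants used above; the numeric values are assumptions.
enum StringStubFeedback {
  DEFAULT_STRING_STUB = 0,
  STRING_INDEX_OUT_OF_BOUNDS = 1
};
typedef BitFieldSketch<StringStubFeedback, 0, 1> StringStubStateSketch;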
@@ -625,14 +601,13 @@
     Handle<Object> object,
     Handle<String> name) {
   int argc = target()->arguments_count();
-  InLoopFlag in_loop = target()->ic_in_loop();
   MaybeObject* maybe_code = NULL;
   switch (lookup->type()) {
     case FIELD: {
       int index = lookup->GetFieldIndex();
       maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
-                                                             in_loop,
                                                              kind_,
+                                                             extra_ic_state,
                                                              *name,
                                                              *object,
                                                              lookup->holder(),
@@ -646,7 +621,6 @@
       JSFunction* function = lookup->GetConstantFunction();
       maybe_code =
           isolate()->stub_cache()->ComputeCallConstant(argc,
-                                                       in_loop,
                                                        kind_,
                                                        extra_ic_state,
                                                        *name,
@@ -666,8 +640,8 @@
         if (!cell->value()->IsJSFunction()) return NULL;
         JSFunction* function = JSFunction::cast(cell->value());
         maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
-                                                                in_loop,
                                                                 kind_,
+                                                                extra_ic_state,
                                                                 *name,
                                                                 *receiver,
                                                                 global,
@@ -680,8 +654,8 @@
         // applicable.
         if (lookup->holder() != *receiver) return NULL;
         maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
-                                                                in_loop,
                                                                 kind_,
+                                                                extra_ic_state,
                                                                 *name,
                                                                 *receiver);
       }
@@ -692,6 +666,7 @@
       maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
           argc,
           kind_,
+          extra_ic_state,
           *name,
           *object,
           lookup->holder());
@@ -723,16 +698,16 @@
 
   // Compute the number of arguments.
   int argc = target()->arguments_count();
-  InLoopFlag in_loop = target()->ic_in_loop();
   MaybeObject* maybe_code = NULL;
   bool had_proto_failure = false;
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    maybe_code = isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
-                                                                    in_loop,
-                                                                    kind_);
+    maybe_code =
+        isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
+                                                           kind_,
+                                                           extra_ic_state);
   } else if (state == MONOMORPHIC) {
     if (kind_ == Code::CALL_IC &&
         TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
@@ -752,9 +727,10 @@
                                           object,
                                           name);
     } else {
-      maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(argc,
-                                                                   in_loop,
-                                                                   kind_);
+      maybe_code =
+          isolate()->stub_cache()->ComputeCallMegamorphic(argc,
+                                                          kind_,
+                                                          extra_ic_state);
     }
   } else {
     maybe_code = ComputeMonomorphicStub(lookup,
@@ -789,7 +765,7 @@
 #ifdef DEBUG
   if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
   TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
-      name, state, target(), in_loop ? " (in-loop)" : "");
+          name, state, target());
 #endif
 }
 
@@ -808,18 +784,32 @@
     return TypeError("non_object_property_call", object, key);
   }
 
-  if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
+  if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
     int argc = target()->arguments_count();
-    InLoopFlag in_loop = target()->ic_in_loop();
-    MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
-        argc, in_loop, Code::KEYED_CALL_IC);
-    Object* code;
-    if (maybe_code->ToObject(&code)) {
-      set_target(Code::cast(code));
+    Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
+    Map* map = heap->non_strict_arguments_elements_map();
+    if (object->IsJSObject() &&
+        Handle<JSObject>::cast(object)->elements()->map() == map) {
+      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
+          argc, Code::KEYED_CALL_IC);
+      Object* code;
+      if (maybe_code->ToObject(&code)) {
+        set_target(Code::cast(code));
 #ifdef DEBUG
-      TraceIC(
-          "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
+        TraceIC("KeyedCallIC", key, state, target());
 #endif
+      }
+    } else if (FLAG_use_ic && state != MEGAMORPHIC &&
+               !object->IsAccessCheckNeeded()) {
+      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
+          argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
+      Object* code;
+      if (maybe_code->ToObject(&code)) {
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedCallIC", key, state, target());
+#endif
+      }
     }
   }
 
@@ -840,14 +830,6 @@
 }
 
 
-#ifdef DEBUG
-#define TRACE_IC_NAMED(msg, name) \
-  if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
-#else
-#define TRACE_IC_NAMED(msg, name)
-#endif
-
-
 MaybeObject* LoadIC::Load(State state,
                           Handle<Object> object,
                           Handle<String> name) {
@@ -858,40 +840,40 @@
   }
 
   if (FLAG_use_ic) {
-    Code* non_monomorphic_stub =
-        (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
-
     // Use specialized code for getting the length of strings and
     // string wrapper objects.  The length property of string wrapper
     // objects is read-only and therefore always returns the length of
     // the underlying string value.  See ECMA-262 15.5.5.1.
     if ((object->IsString() || object->IsStringWrapper()) &&
         name->Equals(isolate()->heap()->length_symbol())) {
-      HandleScope scope(isolate());
-#ifdef DEBUG
-      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
-      if (state == PREMONOMORPHIC) {
+      AssertNoAllocation no_allocation;
+      Code* stub = NULL;
+      if (state == UNINITIALIZED) {
+        stub = pre_monomorphic_stub();
+      } else if (state == PREMONOMORPHIC) {
         if (object->IsString()) {
-          Map* map = HeapObject::cast(*object)->map();
-          const int offset = String::kLengthOffset;
-          PatchInlinedLoad(address(), map, offset);
-          set_target(isolate()->builtins()->builtin(
-              Builtins::kLoadIC_StringLength));
+          stub = isolate()->builtins()->builtin(
+              Builtins::kLoadIC_StringLength);
         } else {
-          set_target(isolate()->builtins()->builtin(
-              Builtins::kLoadIC_StringWrapperLength));
+          stub = isolate()->builtins()->builtin(
+              Builtins::kLoadIC_StringWrapperLength);
         }
       } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
-        set_target(isolate()->builtins()->builtin(
-            Builtins::kLoadIC_StringWrapperLength));
-      } else {
-        set_target(non_monomorphic_stub);
+        stub = isolate()->builtins()->builtin(
+            Builtins::kLoadIC_StringWrapperLength);
+      } else if (state != MEGAMORPHIC) {
+        stub = megamorphic_stub();
+      }
+      if (stub != NULL) {
+        set_target(stub);
+#ifdef DEBUG
+        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
       }
       // Get the string if we have a string wrapper object.
       if (object->IsJSValue()) {
-        object = Handle<Object>(Handle<JSValue>::cast(object)->value(),
-                                isolate());
+        return Smi::FromInt(
+            String::cast(Handle<JSValue>::cast(object)->value())->length());
       }
       return Smi::FromInt(String::cast(*object)->length());
     }
@@ -899,17 +881,21 @@
     // Use specialized code for getting the length of arrays.
     if (object->IsJSArray() &&
         name->Equals(isolate()->heap()->length_symbol())) {
+      AssertNoAllocation no_allocation;
+      Code* stub = NULL;
+      if (state == UNINITIALIZED) {
+        stub = pre_monomorphic_stub();
+      } else if (state == PREMONOMORPHIC) {
+        stub = isolate()->builtins()->builtin(
+            Builtins::kLoadIC_ArrayLength);
+      } else if (state != MEGAMORPHIC) {
+        stub = megamorphic_stub();
+      }
+      if (stub != NULL) {
+        set_target(stub);
 #ifdef DEBUG
-      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
+        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
 #endif
-      if (state == PREMONOMORPHIC) {
-        Map* map = HeapObject::cast(*object)->map();
-        const int offset = JSArray::kLengthOffset;
-        PatchInlinedLoad(address(), map, offset);
-        set_target(isolate()->builtins()->builtin(
-            Builtins::kLoadIC_ArrayLength));
-      } else {
-        set_target(non_monomorphic_stub);
       }
       return JSArray::cast(*object)->length();
     }
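The string-length, array-length and function-prototype fast paths above now all share the same stub-selection ladder over the IC state: pick the pre-monomorphic stub when uninitialized, the specialized builtin when pre-monomorphic, the megamorphic stub otherwise, and leave the target untouched once it is already megamorphic. A self-contained sketch of that ladder (the State values and Code stand-in are local to this example, not V8's full enum):

#include <cstddef>

enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };
struct Code;   // opaque stand-in for a compiled stub

inline Code* SelectStub(State state, Code* pre_monomorphic,
                        Code* specialized, Code* megamorphic) {
  if (state == UNINITIALIZED)  return pre_monomorphic;
  if (state == PREMONOMORPHIC) return specialized;
  if (state != MEGAMORPHIC)    return megamorphic;
  return NULL;  // already megamorphic: keep the current target
}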
@@ -918,14 +904,22 @@
     if (object->IsJSFunction() &&
         name->Equals(isolate()->heap()->prototype_symbol()) &&
         JSFunction::cast(*object)->should_have_prototype()) {
+      { AssertNoAllocation no_allocation;
+        Code* stub = NULL;
+        if (state == UNINITIALIZED) {
+          stub = pre_monomorphic_stub();
+        } else if (state == PREMONOMORPHIC) {
+          stub = isolate()->builtins()->builtin(
+              Builtins::kLoadIC_FunctionPrototype);
+        } else if (state != MEGAMORPHIC) {
+          stub = megamorphic_stub();
+        }
+        if (stub != NULL) {
+          set_target(stub);
 #ifdef DEBUG
-      if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+          if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
 #endif
-      if (state == PREMONOMORPHIC) {
-        set_target(isolate()->builtins()->builtin(
-            Builtins::kLoadIC_FunctionPrototype));
-      } else {
-        set_target(non_monomorphic_stub);
+        }
       }
       return Accessors::FunctionGetPrototype(*object, 0);
     }
@@ -942,76 +936,20 @@
 
   // If we did not find a property, check if we need to throw an exception.
   if (!lookup.IsProperty()) {
-    if (FLAG_strict || IsContextual(object)) {
+    if (IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
     LOG(isolate(), SuspectReadEvent(*name, *object));
   }
 
-  bool can_be_inlined_precheck =
-      FLAG_use_ic &&
-      lookup.IsProperty() &&
-      lookup.IsCacheable() &&
-      lookup.holder() == *object &&
-      !object->IsAccessCheckNeeded();
-
-  bool can_be_inlined =
-      can_be_inlined_precheck &&
-      state == PREMONOMORPHIC &&
-      lookup.type() == FIELD;
-
-  bool can_be_inlined_contextual =
-      can_be_inlined_precheck &&
-      state == UNINITIALIZED &&
-      lookup.holder()->IsGlobalObject() &&
-      lookup.type() == NORMAL;
-
-  if (can_be_inlined) {
-    Map* map = lookup.holder()->map();
-    // Property's index in the properties array.  If negative we have
-    // an inobject property.
-    int index = lookup.GetFieldIndex() - map->inobject_properties();
-    if (index < 0) {
-      // Index is an offset from the end of the object.
-      int offset = map->instance_size() + (index * kPointerSize);
-      if (PatchInlinedLoad(address(), map, offset)) {
-        set_target(megamorphic_stub());
-        TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
-        return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-      } else {
-        TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
-                       name);
-      }
-    } else {
-      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
-    }
-  } else if (can_be_inlined_contextual) {
-    Map* map = lookup.holder()->map();
-    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
-        lookup.holder()->property_dictionary()->ValueAt(
-            lookup.GetDictionaryEntry()));
-    if (PatchInlinedContextualLoad(address(),
-                                   map,
-                                   cell,
-                                   lookup.IsDontDelete())) {
-      set_target(megamorphic_stub());
-      TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
-      ASSERT(cell->value() != isolate()->heap()->the_hole_value());
-      return cell->value();
-    }
-  } else {
-    if (FLAG_use_ic && state == PREMONOMORPHIC) {
-      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
-    }
-  }
-
   // Update inline cache and stub cache.
   if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }
 
   PropertyAttributes attr;
-  if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+  if (lookup.IsProperty() &&
+      (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
     // Get the property.
     Object* result;
     { MaybeObject* maybe_result =
@@ -1139,9 +1077,34 @@
 }
 
 
+MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
+    bool is_js_array,
+    ElementsKind elements_kind) {
+  return KeyedLoadElementStub(elements_kind).TryGetCode();
+}
+
+
+MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
+    MapList* receiver_maps,
+    CodeList* targets,
+    StrictModeFlag strict_mode) {
+  Object* object;
+  KeyedLoadStubCompiler compiler;
+  MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
+                                                            targets);
+  if (!maybe_code->ToObject(&object)) return maybe_code;
+  isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
+  PROFILE(isolate(), CodeCreateEvent(
+      Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
+      Code::cast(object), 0));
+  return object;
+}
+
+
 MaybeObject* KeyedLoadIC::Load(State state,
                                Handle<Object> object,
-                               Handle<Object> key) {
+                               Handle<Object> key,
+                               bool force_generic_stub) {
   // Check for values that can be converted into a symbol.
   // TODO(1295): Remove this code.
   HandleScope scope(isolate());
@@ -1232,10 +1195,8 @@
     LookupForRead(*object, *name, &lookup);
 
     // If we did not find a property, check if we need to throw an exception.
-    if (!lookup.IsProperty()) {
-      if (FLAG_strict || IsContextual(object)) {
-        return ReferenceError("not_defined", name);
-      }
+    if (!lookup.IsProperty() && IsContextual(object)) {
+      return ReferenceError("not_defined", name);
     }
 
     if (FLAG_use_ic) {
@@ -1267,47 +1228,36 @@
 
   if (use_ic) {
     Code* stub = generic_stub();
-    if (state == UNINITIALIZED) {
+    if (!force_generic_stub) {
       if (object->IsString() && key->IsNumber()) {
-        stub = string_stub();
+        if (state == UNINITIALIZED) {
+          stub = string_stub();
+        }
       } else if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        if (receiver->HasExternalArrayElements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
-                  *receiver, false, kNonStrictMode);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
+        JSObject* receiver = JSObject::cast(*object);
+        Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+        Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+        if (elements_map == heap->non_strict_arguments_elements_map()) {
+          stub = non_strict_arguments_stub();
         } else if (receiver->HasIndexedInterceptor()) {
           stub = indexed_interceptor_stub();
-        } else if (key->IsSmi() &&
-                   receiver->map()->has_fast_elements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedLoadSpecialized(*receiver);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
+        } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+          MaybeObject* maybe_stub = ComputeStub(receiver,
+                                                false,
+                                                kNonStrictMode,
+                                                stub);
+          stub = maybe_stub->IsFailure() ?
+              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
         }
       }
     }
     if (stub != NULL) set_target(stub);
+  }
 
 #ifdef DEBUG
-    TraceIC("KeyedLoadIC", key, state, target());
+  TraceIC("KeyedLoadIC", key, state, target());
 #endif  // DEBUG
 
-    // For JSObjects with fast elements that are not value wrappers
-    // and that do not have indexed interceptors, we initialize the
-    // inlined fast case (if present) by patching the inlined map
-    // check.
-    if (object->IsJSObject() &&
-        !object->IsJSValue() &&
-        !JSObject::cast(*object)->HasIndexedInterceptor() &&
-        JSObject::cast(*object)->HasFastElements()) {
-      Map* map = JSObject::cast(*object)->map();
-      PatchInlinedLoad(address(), map);
-    }
-  }
-
   // Get the property.
   return Runtime::GetObjectProperty(isolate(), object, key);
 }
@@ -1401,15 +1351,16 @@
 }
 
 
-static bool LookupForWrite(JSObject* object,
+static bool LookupForWrite(JSReceiver* receiver,
                            String* name,
                            LookupResult* lookup) {
-  object->LocalLookup(name, lookup);
+  receiver->LocalLookup(name, lookup);
   if (!StoreICableLookup(lookup)) {
     return false;
   }
 
   if (lookup->type() == INTERCEPTOR) {
+    JSObject* object = JSObject::cast(receiver);
     if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
       object->LocalLookupRealNamedProperty(name, lookup);
       return StoreICableLookup(lookup);
@@ -1431,7 +1382,7 @@
     return TypeError("non_object_property_store", object, name);
   }
 
-  if (!object->IsJSObject()) {
+  if (!object->IsJSReceiver()) {
     // The length property of string values is read-only. Throw in strict mode.
     if (strict_mode == kStrictMode && object->IsString() &&
         name->Equals(isolate()->heap()->length_symbol())) {
@@ -1441,6 +1392,12 @@
     return *value;
   }
 
+  // Handle proxies.
+  if (object->IsJSProxy()) {
+    return JSReceiver::cast(*object)->
+        SetProperty(*name, *value, NONE, strict_mode);
+  }
+
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
   // Check if the given name is an array index.
@@ -1455,7 +1412,7 @@
   // Use specialized code for setting the length of arrays.
   if (receiver->IsJSArray()
       && name->Equals(isolate()->heap()->length_symbol())
-      && receiver->AllowsSetElementsLength()) {
+      && JSArray::cast(*receiver)->AllowsSetElementsLength()) {
 #ifdef DEBUG
     if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
 #endif
@@ -1471,57 +1428,7 @@
     LookupResult lookup;
 
     if (LookupForWrite(*receiver, *name, &lookup)) {
-      bool can_be_inlined =
-          state == UNINITIALIZED &&
-          lookup.IsProperty() &&
-          lookup.holder() == *receiver &&
-          lookup.type() == FIELD &&
-          !receiver->IsAccessCheckNeeded();
-
-      if (can_be_inlined) {
-        Map* map = lookup.holder()->map();
-        // Property's index in the properties array.  If negative we have
-        // an inobject property.
-        int index = lookup.GetFieldIndex() - map->inobject_properties();
-        if (index < 0) {
-          // Index is an offset from the end of the object.
-          int offset = map->instance_size() + (index * kPointerSize);
-          if (PatchInlinedStore(address(), map, offset)) {
-            set_target((strict_mode == kStrictMode)
-                         ? megamorphic_stub_strict()
-                         : megamorphic_stub());
-#ifdef DEBUG
-            if (FLAG_trace_ic) {
-              PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
-            }
-#endif
-            return receiver->SetProperty(*name, *value, NONE, strict_mode);
-#ifdef DEBUG
-
-          } else {
-            if (FLAG_trace_ic) {
-              PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
-                     *name->ToCString());
-            }
-          }
-        } else {
-          if (FLAG_trace_ic) {
-            PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
-                   *name->ToCString());
-          }
-        }
-      } else {
-        if (state == PREMONOMORPHIC) {
-          if (FLAG_trace_ic) {
-            PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
-                   *name->ToCString());
-#endif
-          }
-        }
-      }
-
-      // If no inlined store ic was patched, generate a stub for this
-      // store.
+      // Generate a stub for this store.
       UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
     } else {
       // Strict mode doesn't allow setting non-existent global property
@@ -1653,11 +1560,180 @@
 }
 
 
+static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
+                                       Map* new_receiver_map) {
+  for (int current = 0; current < receiver_maps->length(); ++current) {
+    if (receiver_maps->at(current) == new_receiver_map) {
+      return false;
+    }
+  }
+  receiver_maps->Add(new_receiver_map);
+  return true;
+}
+
+
+void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
+  ASSERT(stub->is_inline_cache_stub());
+  if (stub == string_stub()) {
+    return result->Add(isolate()->heap()->string_map());
+  } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
+    if (stub->ic_state() == MONOMORPHIC) {
+      result->Add(Map::cast(stub->FindFirstMap()));
+    } else {
+      ASSERT(stub->ic_state() == MEGAMORPHIC);
+      AssertNoAllocation no_allocation;
+      int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+      for (RelocIterator it(stub, mask); !it.done(); it.next()) {
+        RelocInfo* info = it.rinfo();
+        Object* object = info->target_object();
+        ASSERT(object->IsMap());
+        result->Add(Map::cast(object));
+      }
+    }
+  }
+}
+
+
+MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
+                                  bool is_store,
+                                  StrictModeFlag strict_mode,
+                                  Code* generic_stub) {
+  State ic_state = target()->ic_state();
+  if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+    Code* monomorphic_stub;
+    MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
+                                                     is_store,
+                                                     strict_mode,
+                                                     generic_stub);
+    if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
+
+    return monomorphic_stub;
+  }
+  ASSERT(target() != generic_stub);
+
+  // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+  // via megamorphic stubs, since they don't have a map in their relocation info
+  // and so the stubs can't be harvested for the object needed for a map check.
+  if (target()->type() != NORMAL) {
+    return generic_stub;
+  }
+
+  // Determine the list of receiver maps that this call site has seen,
+  // adding the map that was just encountered.
+  MapList target_receiver_maps;
+  GetReceiverMapsForStub(target(), &target_receiver_maps);
+  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
+    // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
+    // won't help; use the generic stub.
+    return generic_stub;
+  }
+
+  // If the maximum number of receiver maps has been exceeded, use the generic
+  // version of the IC.
+  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+    return generic_stub;
+  }
+
+  PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
+  Code::Flags flags = Code::ComputeFlags(this->kind(),
+                                         MEGAMORPHIC,
+                                         strict_mode);
+  Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
+  // If there is a cached stub, use it.
+  if (!maybe_cached_stub->IsUndefined()) {
+    ASSERT(maybe_cached_stub->IsCode());
+    return Code::cast(maybe_cached_stub);
+  }
+  // Collect MONOMORPHIC stubs for all target_receiver_maps.
+  CodeList handler_ics(target_receiver_maps.length());
+  for (int i = 0; i < target_receiver_maps.length(); ++i) {
+    Map* receiver_map(target_receiver_maps.at(i));
+    MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+        receiver_map, strict_mode);
+    Code* cached_stub;
+    if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
+    handler_ics.Add(cached_stub);
+  }
+  // Build the MEGAMORPHIC stub.
+  Code* stub;
+  MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
+                                                     &handler_ics,
+                                                     strict_mode);
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
+  if (maybe_update->IsFailure()) return maybe_update;
+  return stub;
+}
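
The policy implemented by KeyedIC::ComputeStub above is: stay monomorphic while the IC is fresh, accumulate the receiver maps a call site has seen into a MEGAMORPHIC stub, and fall back to the generic stub once a map repeats (the miss was not caused by an unseen map) or the list grows past kMaxKeyedPolymorphism. A minimal standalone sketch of that decision, not part of the patch; the integer map ids and the value of kMaxKeyedPolymorphism below are assumptions for illustration:

#include <algorithm>
#include <vector>

enum class StubKind { kMonomorphic, kMegamorphic, kGeneric };

constexpr int kMaxKeyedPolymorphism = 4;  // assumed limit

// seen_maps models the maps already baked into the current stub.
StubKind SelectStub(std::vector<int>* seen_maps, int new_map,
                    bool ic_is_uninitialized) {
  if (ic_is_uninitialized) {
    seen_maps->assign(1, new_map);
    return StubKind::kMonomorphic;
  }
  // A miss on an already-seen map means another polymorphic stub won't help.
  if (std::find(seen_maps->begin(), seen_maps->end(), new_map) !=
      seen_maps->end()) {
    return StubKind::kGeneric;
  }
  seen_maps->push_back(new_map);
  if (static_cast<int>(seen_maps->size()) > kMaxKeyedPolymorphism) {
    return StubKind::kGeneric;
  }
  return StubKind::kMegamorphic;
}
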
+
+
+MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
+    Map* receiver_map,
+    StrictModeFlag strict_mode) {
+  if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+    ASSERT(string_stub() != NULL);
+    return string_stub();
+  } else {
+    ASSERT(receiver_map->has_dictionary_elements() ||
+           receiver_map->has_fast_elements() ||
+           receiver_map->has_fast_double_elements() ||
+           receiver_map->has_external_array_elements());
+    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+    return GetElementStubWithoutMapCheck(is_js_array,
+                                         receiver_map->elements_kind());
+  }
+}
+
+
+MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
+                                             bool is_store,
+                                             StrictModeFlag strict_mode,
+                                             Code* generic_stub) {
+  Code* result = NULL;
+  if (receiver->HasFastElements() ||
+      receiver->HasExternalArrayElements() ||
+      receiver->HasFastDoubleElements() ||
+      receiver->HasDictionaryElements()) {
+    MaybeObject* maybe_stub =
+        isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
+            receiver, is_store, strict_mode);
+    if (!maybe_stub->To(&result)) return maybe_stub;
+  } else {
+    result = generic_stub;
+  }
+  return result;
+}
+
+
+MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
+    bool is_js_array,
+    ElementsKind elements_kind) {
+  return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+}
+
+
+MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
+    MapList* receiver_maps,
+    CodeList* targets,
+    StrictModeFlag strict_mode) {
+  Object* object;
+  KeyedStoreStubCompiler compiler(strict_mode);
+  MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
+                                                             targets);
+  if (!maybe_code->ToObject(&object)) return maybe_code;
+  isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+  PROFILE(isolate(), CodeCreateEvent(
+      Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
+      Code::cast(object), 0));
+  return object;
+}
+
+
 MaybeObject* KeyedStoreIC::Store(State state,
                                  StrictModeFlag strict_mode,
                                  Handle<Object> object,
                                  Handle<Object> key,
-                                 Handle<Object> value) {
+                                 Handle<Object> value,
+                                 bool force_generic) {
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
@@ -1699,29 +1775,34 @@
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
   if (use_ic) {
-    Code* stub =
-        (strict_mode == kStrictMode) ? generic_stub_strict() : generic_stub();
-    if (state == UNINITIALIZED) {
-      if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        if (receiver->HasExternalArrayElements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
-                  *receiver, true, strict_mode);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
-        } else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedStoreSpecialized(
-                  *receiver, strict_mode);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
+    Code* stub = (strict_mode == kStrictMode)
+        ? generic_stub_strict()
+        : generic_stub();
+    if (object->IsJSObject()) {
+      JSObject* receiver = JSObject::cast(*object);
+      Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+      Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+      if (elements_map == heap->non_strict_arguments_elements_map()) {
+        stub = non_strict_arguments_stub();
+      } else if (!force_generic) {
+        if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+          HandleScope scope(isolate());
+          MaybeObject* maybe_stub = ComputeStub(receiver,
+                                                true,
+                                                strict_mode,
+                                                stub);
+          stub = maybe_stub->IsFailure() ?
+              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
         }
       }
     }
     if (stub != NULL) set_target(stub);
   }
 
+#ifdef DEBUG
+  TraceIC("KeyedStoreIC", key, state, target());
+#endif
+
   // Set the property.
   return Runtime::SetObjectProperty(
       isolate(), object, key, value, NONE, strict_mode);
@@ -1809,16 +1890,11 @@
 //
 
 static JSFunction* CompileFunction(Isolate* isolate,
-                                   JSFunction* function,
-                                   InLoopFlag in_loop) {
+                                   JSFunction* function) {
   // Compile now with optimization.
   HandleScope scope(isolate);
   Handle<JSFunction> function_handle(function, isolate);
-  if (in_loop == IN_LOOP) {
-    CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
-  } else {
-    CompileLazy(function_handle, CLEAR_EXCEPTION);
-  }
+  CompileLazy(function_handle, CLEAR_EXCEPTION);
   return *function_handle;
 }
 
@@ -1847,9 +1923,7 @@
   if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
     return result;
   }
-  return CompileFunction(isolate,
-                         JSFunction::cast(result),
-                         ic.target()->ic_in_loop());
+  return CompileFunction(isolate, JSFunction::cast(result));
 }
 
 
@@ -1868,9 +1942,7 @@
   if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
     return result;
   }
-  return CompileFunction(isolate,
-                         JSFunction::cast(result),
-                         ic.target()->ic_in_loop());
+  return CompileFunction(isolate, JSFunction::cast(result));
 }
 
 
@@ -1890,7 +1962,16 @@
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
-  return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
+  return ic.Load(state, args.at<Object>(0), args.at<Object>(1), false);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+  KeyedLoadIC ic(isolate);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+  return ic.Load(state, args.at<Object>(0), args.at<Object>(1), true);
 }
 
 
@@ -1974,22 +2055,123 @@
                   static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                   args.at<Object>(0),
                   args.at<Object>(1),
-                  args.at<Object>(2));
+                  args.at<Object>(2),
+                  false);
 }
 
 
-void TRBinaryOpIC::patch(Code* code) {
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  KeyedStoreIC ic(isolate);
+  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> value = args.at<Object>(2);
+  StrictModeFlag strict_mode =
+      static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
+  return Runtime::SetObjectProperty(isolate,
+                                    object,
+                                    key,
+                                    value,
+                                    NONE,
+                                    strict_mode);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  KeyedStoreIC ic(isolate);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+  return ic.Store(state,
+                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                  args.at<Object>(0),
+                  args.at<Object>(1),
+                  args.at<Object>(2),
+                  true);
+}
+
+
+void UnaryOpIC::patch(Code* code) {
   set_target(code);
 }
 
 
-const char* TRBinaryOpIC::GetName(TypeInfo type_info) {
+const char* UnaryOpIC::GetName(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED: return "Uninitialized";
+    case SMI: return "Smi";
+    case HEAP_NUMBER: return "HeapNumbers";
+    case GENERIC: return "Generic";
+    default: return "Invalid";
+  }
+}
+
+
+UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED:
+      return ::v8::internal::UNINITIALIZED;
+    case SMI:
+    case HEAP_NUMBER:
+      return MONOMORPHIC;
+    case GENERIC:
+      return MEGAMORPHIC;
+  }
+  UNREACHABLE();
+  return ::v8::internal::UNINITIALIZED;
+}
+
+UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
+  ::v8::internal::TypeInfo operand_type =
+      ::v8::internal::TypeInfo::TypeFromValue(operand);
+  if (operand_type.IsSmi()) {
+    return SMI;
+  } else if (operand_type.IsNumber()) {
+    return HEAP_NUMBER;
+  } else {
+    return GENERIC;
+  }
+}
+
+
+UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
+    UnaryOpIC::TypeInfo current_type,
+    UnaryOpIC::TypeInfo previous_type) {
+  switch (previous_type) {
+    case UnaryOpIC::UNINITIALIZED:
+      return current_type;
+    case UnaryOpIC::SMI:
+      return (current_type == UnaryOpIC::GENERIC)
+          ? UnaryOpIC::GENERIC
+          : UnaryOpIC::HEAP_NUMBER;
+    case UnaryOpIC::HEAP_NUMBER:
+      return UnaryOpIC::GENERIC;
+    case UnaryOpIC::GENERIC:
+      // We should never do patching if we are in GENERIC state.
+      UNREACHABLE();
+      return UnaryOpIC::GENERIC;
+  }
+  UNREACHABLE();
+  return UnaryOpIC::GENERIC;
+}
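
UnaryOpIC::ComputeNewType above only widens: UNINITIALIZED adopts whatever type the first operand had, SMI moves to HEAP_NUMBER unless the new operand already forces GENERIC, HEAP_NUMBER moves to GENERIC, and GENERIC is terminal (it is never patched again). A hedged sketch of that transition sequence; the local enum and the operand sequence are chosen here purely for illustration:

#include <cassert>

enum TypeInfo { UNINITIALIZED, SMI, HEAP_NUMBER, GENERIC };

TypeInfo Widen(TypeInfo current, TypeInfo previous) {
  switch (previous) {
    case UNINITIALIZED: return current;
    case SMI:           return current == GENERIC ? GENERIC : HEAP_NUMBER;
    case HEAP_NUMBER:   return GENERIC;
    case GENERIC:       return GENERIC;  // never repatched in practice
  }
  return GENERIC;
}

int main() {
  // A negation site first sees a Smi, then a double, then a non-number.
  TypeInfo state = UNINITIALIZED;
  state = Widen(SMI, state);          assert(state == SMI);
  state = Widen(HEAP_NUMBER, state);  assert(state == HEAP_NUMBER);
  state = Widen(GENERIC, state);      assert(state == GENERIC);
  return 0;
}
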
+
+
+void BinaryOpIC::patch(Code* code) {
+  set_target(code);
+}
+
+
+const char* BinaryOpIC::GetName(TypeInfo type_info) {
   switch (type_info) {
     case UNINITIALIZED: return "Uninitialized";
     case SMI: return "SMI";
     case INT32: return "Int32s";
     case HEAP_NUMBER: return "HeapNumbers";
     case ODDBALL: return "Oddball";
+    case BOTH_STRING: return "BothStrings";
     case STRING: return "Strings";
     case GENERIC: return "Generic";
     default: return "Invalid";
@@ -1997,7 +2179,7 @@
 }
 
 
-TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
+BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
   switch (type_info) {
     case UNINITIALIZED:
       return ::v8::internal::UNINITIALIZED;
@@ -2005,6 +2187,7 @@
     case INT32:
     case HEAP_NUMBER:
     case ODDBALL:
+    case BOTH_STRING:
     case STRING:
       return MONOMORPHIC;
     case GENERIC:
@@ -2015,18 +2198,23 @@
 }
 
 
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
-                                               TRBinaryOpIC::TypeInfo y) {
+BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
+                                           BinaryOpIC::TypeInfo y) {
   if (x == UNINITIALIZED) return y;
   if (y == UNINITIALIZED) return x;
-  if (x == STRING && y == STRING) return STRING;
-  if (x == STRING || y == STRING) return GENERIC;
-  if (x >= y) return x;
+  if (x == y) return x;
+  if (x == BOTH_STRING && y == STRING) return STRING;
+  if (x == STRING && y == BOTH_STRING) return STRING;
+  if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
+    return GENERIC;
+  }
+  if (x > y) return x;
   return y;
 }
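
The new BOTH_STRING state makes JoinTypes behave like a join on a small lattice: the two string states only combine with each other (BOTH_STRING weakening to STRING), any other mixture involving a string state collapses to GENERIC, and the remaining states simply take the less specific of the two. A standalone sketch, assuming an enum ordered like BinaryOpIC::TypeInfo in ic.h:

#include <cassert>

enum TypeInfo {
  UNINITIALIZED, SMI, INT32, HEAP_NUMBER, ODDBALL, BOTH_STRING, STRING, GENERIC
};

TypeInfo Join(TypeInfo x, TypeInfo y) {
  if (x == UNINITIALIZED) return y;
  if (y == UNINITIALIZED) return x;
  if (x == y) return x;
  if ((x == BOTH_STRING && y == STRING) || (x == STRING && y == BOTH_STRING)) {
    return STRING;
  }
  if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
    return GENERIC;
  }
  return x > y ? x : y;
}

int main() {
  assert(Join(SMI, INT32) == INT32);            // numeric states widen upward
  assert(Join(BOTH_STRING, STRING) == STRING);  // one operand stopped being a string
  assert(Join(STRING, SMI) == GENERIC);         // strings never mix with numbers
  return 0;
}
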
 
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
-                                                 Handle<Object> right) {
+
+BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
+                                             Handle<Object> right) {
   ::v8::internal::TypeInfo left_type =
       ::v8::internal::TypeInfo::TypeFromValue(left);
   ::v8::internal::TypeInfo right_type =
@@ -2046,9 +2234,11 @@
     return HEAP_NUMBER;
   }
 
-  if (left_type.IsString() || right_type.IsString()) {
-    // Patching for fast string ADD makes sense even if only one of the
-    // arguments is a string.
+  // Patching for fast string ADD makes sense even if only one of the
+  // arguments is a string.
+  if (left_type.IsString()) {
+    return right_type.IsString() ? BOTH_STRING : STRING;
+  } else if (right_type.IsString()) {
     return STRING;
   }
 
@@ -2060,63 +2250,110 @@
 }
 
 
-// defined in code-stubs-<arch>.cc
-// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-                                          TRBinaryOpIC::TypeInfo type_info,
-                                          TRBinaryOpIC::TypeInfo result_type);
+RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
+  ASSERT(args.length() == 4);
 
+  HandleScope scope(isolate);
+  Handle<Object> operand = args.at<Object>(0);
+  Token::Value op = static_cast<Token::Value>(args.smi_at(1));
+  UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
+  UnaryOpIC::TypeInfo previous_type =
+      static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
 
-RUNTIME_FUNCTION(MaybeObject*, TypeRecordingBinaryOp_Patch) {
+  UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
+  type = UnaryOpIC::ComputeNewType(type, previous_type);
+
+  UnaryOpStub stub(op, mode, type);
+  Handle<Code> code = stub.GetCode();
+  if (!code.is_null()) {
+    if (FLAG_trace_ic) {
+      PrintF("[UnaryOpIC (%s->%s)#%s]\n",
+             UnaryOpIC::GetName(previous_type),
+             UnaryOpIC::GetName(type),
+             Token::Name(op));
+    }
+    UnaryOpIC ic(isolate);
+    ic.patch(*code);
+  }
+
+  Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
+      isolate->thread_local_top()->context_->builtins(), isolate);
+  Object* builtin = NULL;  // Initialization calms down the compiler.
+  switch (op) {
+    case Token::SUB:
+      builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
+      break;
+    case Token::BIT_NOT:
+      builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
+
+  bool caught_exception;
+  Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
+                                          &caught_exception);
+  if (caught_exception) {
+    return Failure::Exception();
+  }
+  return *result;
+}
+
+RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
   ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
   Handle<Object> left = args.at<Object>(0);
   Handle<Object> right = args.at<Object>(1);
-  int key = Smi::cast(args[2])->value();
-  Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
-  TRBinaryOpIC::TypeInfo previous_type =
-      static_cast<TRBinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
+  int key = args.smi_at(2);
+  Token::Value op = static_cast<Token::Value>(args.smi_at(3));
+  BinaryOpIC::TypeInfo previous_type =
+      static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
 
-  TRBinaryOpIC::TypeInfo type = TRBinaryOpIC::GetTypeInfo(left, right);
-  type = TRBinaryOpIC::JoinTypes(type, previous_type);
-  TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED;
-  if (type == TRBinaryOpIC::STRING && op != Token::ADD) {
-    type = TRBinaryOpIC::GENERIC;
+  BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
+  type = BinaryOpIC::JoinTypes(type, previous_type);
+  BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
+  if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
+      op != Token::ADD) {
+    type = BinaryOpIC::GENERIC;
   }
-  if (type == TRBinaryOpIC::SMI &&
-      previous_type == TRBinaryOpIC::SMI) {
-    if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
+  if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
+    if (op == Token::DIV ||
+        op == Token::MUL ||
+        op == Token::SHR ||
+        kSmiValueSize == 32) {
       // Arithmetic on two Smi inputs has yielded a heap number.
       // That is the only way to get here from the Smi stub.
       // With 32-bit Smis, all overflows give heap numbers, but with
       // 31-bit Smis, most operations overflow to int32 results.
-      result_type = TRBinaryOpIC::HEAP_NUMBER;
+      result_type = BinaryOpIC::HEAP_NUMBER;
     } else {
       // Other operations on SMIs that overflow yield int32s.
-      result_type = TRBinaryOpIC::INT32;
+      result_type = BinaryOpIC::INT32;
     }
   }
-  if (type == TRBinaryOpIC::INT32 &&
-      previous_type == TRBinaryOpIC::INT32) {
+  if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
     // We must be here because an operation on two INT32 types overflowed.
-    result_type = TRBinaryOpIC::HEAP_NUMBER;
+    result_type = BinaryOpIC::HEAP_NUMBER;
   }
 
-  Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
+  BinaryOpStub stub(key, type, result_type);
+  Handle<Code> code = stub.GetCode();
   if (!code.is_null()) {
     if (FLAG_trace_ic) {
-      PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
-             TRBinaryOpIC::GetName(previous_type),
-             TRBinaryOpIC::GetName(type),
-             TRBinaryOpIC::GetName(result_type),
+      PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
+             BinaryOpIC::GetName(previous_type),
+             BinaryOpIC::GetName(type),
+             BinaryOpIC::GetName(result_type),
              Token::Name(op));
     }
-    TRBinaryOpIC ic(isolate);
+    BinaryOpIC ic(isolate);
     ic.patch(*code);
 
     // Activate inlined smi code.
-    if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
+    if (previous_type == BinaryOpIC::UNINITIALIZED) {
       PatchInlinedSmiCode(ic.address());
     }
   }
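
The SHR case added above follows from the overflow reasoning in the comments: with 31-bit Smis most overflowing operations still fit an int32, but DIV, MUL and the unsigned shift SHR can produce results that are not representable as an int32 at all (fractions, large products, or unsigned 32-bit values), so they go straight to HEAP_NUMBER. A small sketch of the ranges involved; the 31-bit Smi bounds used here are an assumption for illustration:

#include <cassert>
#include <cstdint>

bool FitsSmi31(int64_t v) { return v >= -(1LL << 30) && v < (1LL << 30); }
bool FitsInt32(int64_t v) { return v >= INT32_MIN && v <= INT32_MAX; }

int main() {
  int64_t shr_result = static_cast<uint32_t>(-1) >> 0;  // -1 >>> 0 in JS
  assert(!FitsSmi31(shr_result));
  assert(!FitsInt32(shr_result));   // 4294967295: needs a heap number

  int64_t add_overflow = (1LL << 30) - 1 + 1;  // Smi ADD overflow
  assert(!FitsSmi31(add_overflow));
  assert(FitsInt32(add_overflow));  // still representable as an int32
  return 0;
}
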
@@ -2198,6 +2435,8 @@
     case SMIS: return "SMIS";
     case HEAP_NUMBERS: return "HEAP_NUMBERS";
     case OBJECTS: return "OBJECTS";
+    case SYMBOLS: return "SYMBOLS";
+    case STRINGS: return "STRINGS";
     case GENERIC: return "GENERIC";
     default:
       UNREACHABLE();
@@ -2210,12 +2449,18 @@
                                         bool has_inlined_smi_code,
                                         Handle<Object> x,
                                         Handle<Object> y) {
-  if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
+  if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
+    return GENERIC;
+  }
   if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
   if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
       x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
   if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
   if (state == UNINITIALIZED &&
+      x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
+  if ((state == UNINITIALIZED || state == SYMBOLS) &&
+      x->IsString() && y->IsString()) return STRINGS;
+  if (state == UNINITIALIZED &&
       x->IsJSObject() && y->IsJSObject()) return OBJECTS;
   return GENERIC;
 }
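
The SYMBOLS and STRINGS states added to TargetState above give equality comparisons a staged fast path: two interned strings (symbols) can be compared by identity, and once a non-interned string shows up the site widens to STRINGS before eventually going GENERIC. A deliberately simplified model of just those string-related transitions (the numeric and object cases are omitted; "interned" stands in for V8's symbol check):

#include <cassert>

enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, SYMBOLS, STRINGS, OBJECTS, GENERIC };

struct Operand { bool is_interned; bool is_string; };

State Target(State state, const Operand& x, const Operand& y) {
  if (state == UNINITIALIZED && x.is_interned && y.is_interned) return SYMBOLS;
  if ((state == UNINITIALIZED || state == SYMBOLS) &&
      x.is_string && y.is_string) {
    return STRINGS;
  }
  return GENERIC;
}

int main() {
  Operand interned{true, true}, fresh{false, true};
  State s = Target(UNINITIALIZED, interned, interned);
  assert(s == SYMBOLS);   // both sides interned: identity comparison suffices
  s = Target(s, interned, fresh);
  assert(s == STRINGS);   // a non-interned string widens the site to STRINGS
  return 0;
}
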
@@ -2225,12 +2470,37 @@
 RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
-  CompareIC ic(isolate, static_cast<Token::Value>(Smi::cast(args[2])->value()));
+  CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
   ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
   return ic.target();
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) {
+  ASSERT(args.length() == 3);
+
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);
+  Register tos = Register::from_code(args.smi_at(1));
+  ToBooleanStub::Types old_types(args.smi_at(2));
+
+  ToBooleanStub::Types new_types(old_types);
+  bool to_boolean_value = new_types.Record(object);
+  old_types.TraceTransition(new_types);
+
+  ToBooleanStub stub(tos, new_types);
+  Handle<Code> code = stub.GetCode();
+  ToBooleanIC ic(isolate);
+  ic.patch(*code);
+  return Smi::FromInt(to_boolean_value ? 1 : 0);
+}
+
+
+void ToBooleanIC::patch(Code* code) {
+  set_target(code);
+}
+
+
 static const Address IC_utilities[] = {
 #define ADDR(name) FUNCTION_ADDR(name),
     IC_UTIL_LIST(ADDR)
diff --git a/src/ic.h b/src/ic.h
index 911cbd8..ece5be9 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_IC_H_
 
 #include "macro-assembler.h"
+#include "type-info.h"
 
 namespace v8 {
 namespace internal {
@@ -39,12 +40,15 @@
 #define IC_UTIL_LIST(ICU)                             \
   ICU(LoadIC_Miss)                                    \
   ICU(KeyedLoadIC_Miss)                               \
+  ICU(KeyedLoadIC_MissForceGeneric)                   \
   ICU(CallIC_Miss)                                    \
   ICU(KeyedCallIC_Miss)                               \
   ICU(StoreIC_Miss)                                   \
   ICU(StoreIC_ArrayLength)                            \
   ICU(SharedStoreIC_ExtendStorage)                    \
   ICU(KeyedStoreIC_Miss)                              \
+  ICU(KeyedStoreIC_MissForceGeneric)                  \
+  ICU(KeyedStoreIC_Slow)                              \
   /* Utilities for IC stubs. */                       \
   ICU(LoadCallbackProperty)                           \
   ICU(StoreCallbackProperty)                          \
@@ -53,15 +57,16 @@
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
   ICU(StoreInterceptorProperty)                       \
-  ICU(TypeRecordingBinaryOp_Patch)                    \
-  ICU(CompareIC_Miss)
+  ICU(UnaryOp_Patch)                                  \
+  ICU(BinaryOp_Patch)                                 \
+  ICU(CompareIC_Miss)                                 \
+  ICU(ToBoolean_Patch)
 //
 // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
 // and KeyedStoreIC.
 //
 class IC {
  public:
-
   // The ids for utility called from the generated code.
   enum UtilityId {
   #define CONST_NAME(name) k##name,
@@ -141,11 +146,10 @@
   void set_target(Code* code) { SetTargetAtAddress(address(), code); }
 
 #ifdef DEBUG
-  static void TraceIC(const char* type,
-                      Handle<Object> name,
-                      State old_state,
-                      Code* new_target,
-                      const char* extra_info = "");
+  void TraceIC(const char* type,
+               Handle<Object> name,
+               State old_state,
+               Code* new_target);
 #endif
 
   Failure* TypeError(const char* type,
@@ -190,6 +194,10 @@
 
 
 class CallICBase: public IC {
+ public:
+  class Contextual: public BitField<bool, 0, 1> {};
+  class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
+
  protected:
   CallICBase(Code::Kind kind, Isolate* isolate)
       : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
@@ -230,6 +238,7 @@
   void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
 
   static void Clear(Address address, Code* target);
+
   friend class IC;
 };
 
@@ -241,11 +250,17 @@
   }
 
   // Code generator routines.
-  static void GenerateInitialize(MacroAssembler* masm, int argc) {
-    GenerateMiss(masm, argc);
+  static void GenerateInitialize(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
+    GenerateMiss(masm, argc, extra_ic_state);
   }
-  static void GenerateMiss(MacroAssembler* masm, int argc);
-  static void GenerateMegamorphic(MacroAssembler* masm, int argc);
+  static void GenerateMiss(MacroAssembler* masm,
+                           int argc,
+                           Code::ExtraICState extra_ic_state);
+  static void GenerateMegamorphic(MacroAssembler* masm,
+                                  int argc,
+                                  Code::ExtraICState extra_ic_state);
   static void GenerateNormal(MacroAssembler* masm, int argc);
 };
 
@@ -268,6 +283,7 @@
   static void GenerateMiss(MacroAssembler* masm, int argc);
   static void GenerateMegamorphic(MacroAssembler* masm, int argc);
   static void GenerateNormal(MacroAssembler* masm, int argc);
+  static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
 };
 
 
@@ -296,14 +312,6 @@
                                    bool support_wrappers);
   static void GenerateFunctionPrototype(MacroAssembler* masm);
 
-  // Clear the use of the inlined version.
-  static void ClearInlinedVersion(Address address);
-
-  // The offset from the inlined patch site to the start of the
-  // inlined load instruction.  It is architecture-dependent, and not
-  // used on ARM.
-  static const int kOffsetToLoadInstruction;
-
  private:
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -328,41 +336,74 @@
 
   static void Clear(Address address, Code* target);
 
-  static bool PatchInlinedLoad(Address address, Object* map, int index);
-
-  static bool PatchInlinedContextualLoad(Address address,
-                                         Object* map,
-                                         Object* cell,
-                                         bool is_dont_delete);
-
   friend class IC;
 };
 
 
-class KeyedLoadIC: public IC {
+class KeyedIC: public IC {
  public:
-  explicit KeyedLoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+  explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
+  virtual ~KeyedIC() {}
+
+  virtual MaybeObject* GetElementStubWithoutMapCheck(
+      bool is_js_array,
+      ElementsKind elements_kind) = 0;
+
+ protected:
+  virtual Code* string_stub() {
+    return NULL;
+  }
+
+  virtual Code::Kind kind() const = 0;
+
+  MaybeObject* ComputeStub(JSObject* receiver,
+                           bool is_store,
+                           StrictModeFlag strict_mode,
+                           Code* default_stub);
+
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode) = 0;
+
+ private:
+  void GetReceiverMapsForStub(Code* stub, MapList* result);
+
+  MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
+      Map* receiver_map,
+      StrictModeFlag strict_mode);
+
+  MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
+                                      bool is_store,
+                                      StrictModeFlag strict_mode,
+                                      Code* default_stub);
+};
+
+
+class KeyedLoadIC: public KeyedIC {
+ public:
+  explicit KeyedLoadIC(Isolate* isolate) : KeyedIC(isolate) {
     ASSERT(target()->is_keyed_load_stub());
   }
 
   MUST_USE_RESULT MaybeObject* Load(State state,
                                     Handle<Object> object,
-                                    Handle<Object> key);
+                                    Handle<Object> key,
+                                    bool force_generic_stub);
 
   // Code generator routines.
-  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm, bool force_generic);
   static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GenerateInitialize(MacroAssembler* masm) {
+    GenerateMiss(masm, false);
+  }
   static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
+    GenerateMiss(masm, false);
   }
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateString(MacroAssembler* masm);
-
   static void GenerateIndexedInterceptor(MacroAssembler* masm);
-
-  // Clear the use of the inlined version.
-  static void ClearInlinedVersion(Address address);
+  static void GenerateNonStrictArguments(MacroAssembler* masm);
 
   // Bit mask to be tested against bit field for the cases when
   // generic stub should go into slow case.
@@ -371,6 +412,23 @@
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
 
+  virtual MaybeObject* GetElementStubWithoutMapCheck(
+      bool is_js_array,
+      ElementsKind elements_kind);
+
+ protected:
+  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
+
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode);
+
+  virtual Code* string_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_String);
+  }
+
  private:
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
@@ -395,22 +453,17 @@
     return isolate()->builtins()->builtin(
         Builtins::kKeyedLoadIC_PreMonomorphic);
   }
-  Code* string_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_String);
-  }
-
   Code* indexed_interceptor_stub() {
     return isolate()->builtins()->builtin(
         Builtins::kKeyedLoadIC_IndexedInterceptor);
   }
+  Code* non_strict_arguments_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_NonStrictArguments);
+  }
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the map that is checked in an inlined
-  // version of keyed load.
-  static bool PatchInlinedLoad(Address address, Object* map);
-
   friend class IC;
 };
 
@@ -437,13 +490,6 @@
   static void GenerateGlobalProxy(MacroAssembler* masm,
                                   StrictModeFlag strict_mode);
 
-  // Clear the use of an inlined version.
-  static void ClearInlinedVersion(Address address);
-
-  // The offset from the inlined patch site to the start of the
-  // inlined store instruction.
-  static const int kOffsetToStoreInstruction;
-
  private:
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -489,38 +535,47 @@
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the index and the map that is checked in an
-  // inlined version of the named store.
-  static bool PatchInlinedStore(Address address, Object* map, int index);
-
   friend class IC;
 };
 
 
-class KeyedStoreIC: public IC {
+class KeyedStoreIC: public KeyedIC {
  public:
-  explicit KeyedStoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+  explicit KeyedStoreIC(Isolate* isolate) : KeyedIC(isolate) {
+    ASSERT(target()->is_keyed_store_stub());
+  }
 
   MUST_USE_RESULT MaybeObject* Store(State state,
-                                     StrictModeFlag strict_mode,
+                                   StrictModeFlag strict_mode,
                                      Handle<Object> object,
                                      Handle<Object> name,
-                                     Handle<Object> value);
+                                     Handle<Object> value,
+                                     bool force_generic);
 
   // Code generators for stub routines.  Only called once at startup.
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) {
+    GenerateMiss(masm, false);
+  }
+  static void GenerateMiss(MacroAssembler* masm, bool force_generic);
+  static void GenerateSlow(MacroAssembler* masm);
   static void GenerateRuntimeSetProperty(MacroAssembler* masm,
                                          StrictModeFlag strict_mode);
   static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
+  static void GenerateNonStrictArguments(MacroAssembler* masm);
 
-  // Clear the inlined version so the IC is always hit.
-  static void ClearInlinedVersion(Address address);
+  virtual MaybeObject* GetElementStubWithoutMapCheck(
+      bool is_js_array,
+      ElementsKind elements_kind);
 
-  // Restore the inlined version so the fast case can get hit.
-  static void RestoreInlinedVersion(Address address);
+ protected:
+  virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
 
- private:
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode);
+
+ private:
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
                     State state,
@@ -561,36 +616,57 @@
     return isolate()->builtins()->builtin(
         Builtins::kKeyedStoreIC_Generic_Strict);
   }
+  Code* non_strict_arguments_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedStoreIC_NonStrictArguments);
+  }
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the map that is checked in an inlined
-  // version of keyed store.
-  // The address is the patch point for the IC call
-  // (Assembler::kCallTargetAddressOffset before the end of
-  // the call/return address).
-  // The map is the new map that the inlined code should check against.
-  static bool PatchInlinedStore(Address address, Object* map);
-
   friend class IC;
 };
 
 
-// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class TRBinaryOpIC: public IC {
+class UnaryOpIC: public IC {
  public:
+  // Sorted from most specific to least specific (ignoring UNINITIALIZED).
+  // TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
+  enum TypeInfo {
+    UNINITIALIZED,
+    SMI,
+    HEAP_NUMBER,
+    GENERIC
+  };
 
+  explicit UnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+
+  void patch(Code* code);
+
+  static const char* GetName(TypeInfo type_info);
+
+  static State ToState(TypeInfo type_info);
+
+  static TypeInfo GetTypeInfo(Handle<Object> operand);
+
+  static TypeInfo ComputeNewType(TypeInfo type, TypeInfo previous);
+};
+
+
+// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
+class BinaryOpIC: public IC {
+ public:
   enum TypeInfo {
     UNINITIALIZED,
     SMI,
     INT32,
     HEAP_NUMBER,
     ODDBALL,
+    BOTH_STRING,  // Only used for addition operation.
     STRING,  // Only used for addition operation.  At least one string operand.
     GENERIC
   };
 
-  explicit TRBinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+  explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
 
   void patch(Code* code);
 
@@ -610,6 +686,8 @@
     UNINITIALIZED,
     SMIS,
     HEAP_NUMBERS,
+    SYMBOLS,
+    STRINGS,
     OBJECTS,
     GENERIC
   };
@@ -642,7 +720,16 @@
   Token::Value op_;
 };
 
-// Helper for TRBinaryOpIC and CompareIC.
+
+class ToBooleanIC: public IC {
+ public:
+  explicit ToBooleanIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+
+  void patch(Code* code);
+};
+
+
+// Helper for BinaryOpIC and CompareIC.
 void PatchInlinedSmiCode(Address address);
 
 } }  // namespace v8::internal
diff --git a/src/inspector.h b/src/inspector.h
index f8b3042..e328bcd 100644
--- a/src/inspector.h
+++ b/src/inspector.h
@@ -41,7 +41,6 @@
 
 class Inspector {
  public:
-
   static void DumpObjectType(FILE* out, Object *obj, bool print_more);
   static void DumpObjectType(FILE* out, Object *obj) {
     DumpObjectType(out, obj, false);
@@ -59,4 +58,3 @@
 #endif  // INSPECTOR
 
 #endif  // V8_INSPECTOR_H_
-
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index 1c6c52c..796a447 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -635,8 +635,9 @@
   AssertNoAllocation a;
   const byte* code_base = code_array->GetDataStartAddress();
   uc16 previous_char = '\n';
-  if (subject->IsAsciiRepresentation()) {
-    Vector<const char> subject_vector = subject->ToAsciiVector();
+  String::FlatContent subject_content = subject->GetFlatContent();
+  if (subject_content.IsAscii()) {
+    Vector<const char> subject_vector = subject_content.ToAsciiVector();
     if (start_position != 0) previous_char = subject_vector[start_position - 1];
     return RawMatch(isolate,
                     code_base,
@@ -645,7 +646,8 @@
                     start_position,
                     previous_char);
   } else {
-    Vector<const uc16> subject_vector = subject->ToUC16Vector();
+    ASSERT(subject_content.IsTwoByte());
+    Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
     if (start_position != 0) previous_char = subject_vector[start_position - 1];
     return RawMatch(isolate,
                     code_base,
diff --git a/src/frame-element.cc b/src/isolate-inl.h
similarity index 82%
rename from src/frame-element.cc
rename to src/isolate-inl.h
index f629900..aa6b537 100644
--- a/src/frame-element.cc
+++ b/src/isolate-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_ISOLATE_INL_H_
+#define V8_ISOLATE_INL_H_
 
-#include "frame-element.h"
-#include "zone-inl.h"
+#include "isolate.h"
+
+#include "debug.h"
 
 namespace v8 {
 namespace internal {
 
 
+bool Isolate::DebuggerHasBreakPoints() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  return debug()->has_break_points();
+#else
+  return false;
+#endif
+}
+
+
 } }  // namespace v8::internal
+
+#endif  // V8_ISOLATE_INL_H_
diff --git a/src/isolate.cc b/src/isolate.cc
index 5b3438f..fd0f673 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -40,15 +40,16 @@
 #include "isolate.h"
 #include "lithium-allocator.h"
 #include "log.h"
+#include "messages.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
-#include "scanner.h"
 #include "scopeinfo.h"
 #include "serialize.h"
 #include "simulator.h"
 #include "spaces.h"
 #include "stub-cache.h"
 #include "version.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -61,6 +62,7 @@
   return new_id;
 }
 
+
 int ThreadId::GetCurrentThreadId() {
   int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
   if (thread_id == 0) {
@@ -71,6 +73,52 @@
 }
 
 
+ThreadLocalTop::ThreadLocalTop() {
+  InitializeInternal();
+  // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
+  // before an isolate is initialized. The initialize methods below do
+  // not touch it to preserve its value.
+  ignore_out_of_memory_ = false;
+}
+
+
+void ThreadLocalTop::InitializeInternal() {
+  c_entry_fp_ = 0;
+  handler_ = 0;
+#ifdef USE_SIMULATOR
+  simulator_ = NULL;
+#endif
+  js_entry_sp_ = NULL;
+  external_callback_ = NULL;
+  current_vm_state_ = EXTERNAL;
+  try_catch_handler_address_ = NULL;
+  context_ = NULL;
+  thread_id_ = ThreadId::Invalid();
+  external_caught_exception_ = false;
+  failed_access_check_callback_ = NULL;
+  save_context_ = NULL;
+  catcher_ = NULL;
+}
+
+
+void ThreadLocalTop::Initialize() {
+  InitializeInternal();
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+  simulator_ = Simulator::current(isolate_);
+#elif V8_TARGET_ARCH_MIPS
+  simulator_ = Simulator::current(isolate_);
+#endif
+#endif
+  thread_id_ = ThreadId::Current();
+}
+
+
+v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
+  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+}
+
+
 // Create a dummy thread that will wait forever on a semaphore. The only
 // purpose for this thread is to have some stack area to save essential data
 // into for use by a stacks only core dump (aka minidump).
@@ -141,8 +189,8 @@
 
 
  private:
-  explicit PreallocatedMemoryThread(Isolate* isolate)
-      : Thread(isolate, "v8:PreallocMem"),
+  PreallocatedMemoryThread()
+      : Thread("v8:PreallocMem"),
         keep_running_(true),
         wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
         data_ready_semaphore_(OS::CreateSemaphore(0)),
@@ -170,7 +218,7 @@
 
 void Isolate::PreallocatedMemoryThreadStart() {
   if (preallocated_memory_thread_ != NULL) return;
-  preallocated_memory_thread_ = new PreallocatedMemoryThread(this);
+  preallocated_memory_thread_ = new PreallocatedMemoryThread();
   preallocated_memory_thread_->Start();
 }
 
@@ -312,6 +360,17 @@
 }
 
 
+Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
+  ThreadId thread_id = ThreadId::Current();
+  PerIsolateThreadData* per_thread = NULL;
+  {
+    ScopedLock lock(process_wide_mutex_);
+    per_thread = thread_data_table_->Lookup(this, thread_id);
+  }
+  return per_thread;
+}
+
+
 void Isolate::EnsureDefaultIsolate() {
   ScopedLock lock(process_wide_mutex_);
   if (default_isolate_ == NULL) {
@@ -323,14 +382,18 @@
   }
   // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
   // because a non-null thread data may be already set.
-  Thread::SetThreadLocal(isolate_key_, default_isolate_);
+  if (Thread::GetThreadLocal(isolate_key_) == NULL) {
+    Thread::SetThreadLocal(isolate_key_, default_isolate_);
+  }
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
 Debugger* Isolate::GetDefaultIsolateDebugger() {
   EnsureDefaultIsolate();
   return default_isolate_->debugger();
 }
+#endif
 
 
 StackGuard* Isolate::GetDefaultIsolateStackGuard() {
@@ -357,6 +420,894 @@
 }
 
 
+Address Isolate::get_address_from_id(Isolate::AddressId id) {
+  return isolate_addresses_[id];
+}
+
+
+char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
+  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
+  Iterate(v, thread);
+  return thread_storage + sizeof(ThreadLocalTop);
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v) {
+  v->VisitThread(this, thread_local_top());
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v, char* t) {
+  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
+  v->VisitThread(this, thread);
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+  // Visit the roots from the top for a given thread.
+  Object* pending;
+  // The pending exception can sometimes be a failure.  We can't show
+  // that to the GC, which only understands objects.
+  if (thread->pending_exception_->ToObject(&pending)) {
+    v->VisitPointer(&pending);
+    thread->pending_exception_ = pending;  // In case GC updated it.
+  }
+  v->VisitPointer(&(thread->pending_message_obj_));
+  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
+  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
+  Object* scheduled;
+  if (thread->scheduled_exception_->ToObject(&scheduled)) {
+    v->VisitPointer(&scheduled);
+    thread->scheduled_exception_ = scheduled;
+  }
+
+  for (v8::TryCatch* block = thread->TryCatchHandler();
+       block != NULL;
+       block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
+    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
+    v->VisitPointer(BitCast<Object**>(&(block->message_)));
+  }
+
+  // Iterate over pointers on native execution stack.
+  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
+    it.frame()->Iterate(v);
+  }
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v) {
+  ThreadLocalTop* current_t = thread_local_top();
+  Iterate(v, current_t);
+}
+
+
+void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
+  // The ARM simulator has a separate JS stack.  We therefore register
+  // the C++ try catch handler with the simulator and get back an
+  // address that can be used for comparisons with addresses into the
+  // JS stack.  When running without the simulator, the address
+  // returned will be the address of the C++ try catch handler itself.
+  Address address = reinterpret_cast<Address>(
+      SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
+  thread_local_top()->set_try_catch_handler_address(address);
+}
+
+
+void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
+  ASSERT(thread_local_top()->TryCatchHandler() == that);
+  thread_local_top()->set_try_catch_handler_address(
+      reinterpret_cast<Address>(that->next_));
+  thread_local_top()->catcher_ = NULL;
+  SimulatorStack::UnregisterCTryCatch();
+}
+
+
+Handle<String> Isolate::StackTraceString() {
+  if (stack_trace_nesting_level_ == 0) {
+    stack_trace_nesting_level_++;
+    HeapStringAllocator allocator;
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(&allocator);
+    incomplete_message_ = &accumulator;
+    PrintStack(&accumulator);
+    Handle<String> stack_trace = accumulator.ToString();
+    incomplete_message_ = NULL;
+    stack_trace_nesting_level_ = 0;
+    return stack_trace;
+  } else if (stack_trace_nesting_level_ == 1) {
+    stack_trace_nesting_level_++;
+    OS::PrintError(
+      "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+      "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message_->OutputToStdOut();
+    return factory()->empty_symbol();
+  } else {
+    OS::Abort();
+    // Unreachable
+    return factory()->empty_symbol();
+  }
+}
+
+
+Handle<JSArray> Isolate::CaptureCurrentStackTrace(
+    int frame_limit, StackTrace::StackTraceOptions options) {
+  // Ensure no negative values.
+  int limit = Max(frame_limit, 0);
+  Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
+
+  Handle<String> column_key = factory()->LookupAsciiSymbol("column");
+  Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
+  Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
+  Handle<String> name_or_source_url_key =
+      factory()->LookupAsciiSymbol("nameOrSourceURL");
+  Handle<String> script_name_or_source_url_key =
+      factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
+  Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
+  Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+  Handle<String> constructor_key =
+      factory()->LookupAsciiSymbol("isConstructor");
+
+  StackTraceFrameIterator it(this);
+  int frames_seen = 0;
+  while (!it.done() && (frames_seen < limit)) {
+    JavaScriptFrame* frame = it.frame();
+    // Set initial size to the maximum inlining level + 1 for the outermost
+    // function.
+    List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+    frame->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+      // Create a JSObject to hold the information for the StackFrame.
+      Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
+
+      Handle<JSFunction> fun = frames[i].function();
+      Handle<Script> script(Script::cast(fun->shared()->script()));
+
+      if (options & StackTrace::kLineNumber) {
+        int script_line_offset = script->line_offset()->value();
+        int position = frames[i].code()->SourcePosition(frames[i].pc());
+        int line_number = GetScriptLineNumber(script, position);
+        // line_number is already shifted by the script_line_offset.
+        int relative_line_number = line_number - script_line_offset;
+        if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
+          Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+          int start = (relative_line_number == 0) ? 0 :
+              Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+          int column_offset = position - start;
+          if (relative_line_number == 0) {
+            // For the case where the code is on the same line as the script
+            // tag.
+            column_offset += script->column_offset()->value();
+          }
+          SetLocalPropertyNoThrow(stackFrame, column_key,
+                                  Handle<Smi>(Smi::FromInt(column_offset + 1)));
+        }
+        SetLocalPropertyNoThrow(stackFrame, line_key,
+                                Handle<Smi>(Smi::FromInt(line_number + 1)));
+      }
+
+      if (options & StackTrace::kScriptName) {
+        Handle<Object> script_name(script->name(), this);
+        SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
+      }
+
+      if (options & StackTrace::kScriptNameOrSourceURL) {
+        Handle<Object> script_name(script->name(), this);
+        Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+        Handle<Object> property = GetProperty(script_wrapper,
+                                              name_or_source_url_key);
+        ASSERT(property->IsJSFunction());
+        Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+        bool caught_exception;
+        Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+                                                   NULL, &caught_exception);
+        if (caught_exception) {
+          result = factory()->undefined_value();
+        }
+        SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
+                                result);
+      }
+
+      if (options & StackTrace::kFunctionName) {
+        Handle<Object> fun_name(fun->shared()->name(), this);
+        if (fun_name->ToBoolean()->IsFalse()) {
+          fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
+        }
+        SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
+      }
+
+      if (options & StackTrace::kIsEval) {
+        int type = Smi::cast(script->compilation_type())->value();
+        Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
+            factory()->true_value() : factory()->false_value();
+        SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
+      }
+
+      if (options & StackTrace::kIsConstructor) {
+        Handle<Object> is_constructor = (frames[i].is_constructor()) ?
+            factory()->true_value() : factory()->false_value();
+        SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
+      }
+
+      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+      frames_seen++;
+    }
+    it.Advance();
+  }
+
+  stack_trace->set_length(Smi::FromInt(frames_seen));
+  return stack_trace;
+}
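
Illustrative sketch (not part of this patch): the properties filled in above ("lineNumber", "column", "scriptName", "functionName", ...) are what the public v8::StackTrace / v8::StackFrame accessors read back, so an embedder might consume them roughly as follows, assuming a current HandleScope and an entered context:

#include <cstdio>
#include "v8.h"  // public API header

void DumpCurrentStack() {
  v8::HandleScope scope;
  v8::Local<v8::StackTrace> trace =
      v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
  for (int i = 0; i < trace->GetFrameCount(); i++) {
    v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
    v8::String::Utf8Value name(frame->GetFunctionName());
    // GetLineNumber()/GetColumn() correspond to the "lineNumber"/"column"
    // properties set by CaptureCurrentStackTrace.
    printf("%s:%d:%d\n", *name ? *name : "<anonymous>",
           frame->GetLineNumber(), frame->GetColumn());
  }
}
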
+
+
+void Isolate::PrintStack() {
+  if (stack_trace_nesting_level_ == 0) {
+    stack_trace_nesting_level_++;
+
+    StringAllocator* allocator;
+    if (preallocated_message_space_ == NULL) {
+      allocator = new HeapStringAllocator();
+    } else {
+      allocator = preallocated_message_space_;
+    }
+
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(allocator);
+    incomplete_message_ = &accumulator;
+    PrintStack(&accumulator);
+    accumulator.OutputToStdOut();
+    InitializeLoggingAndCounters();
+    accumulator.Log();
+    incomplete_message_ = NULL;
+    stack_trace_nesting_level_ = 0;
+    if (preallocated_message_space_ == NULL) {
+      // Remove the HeapStringAllocator created above.
+      delete allocator;
+    }
+  } else if (stack_trace_nesting_level_ == 1) {
+    stack_trace_nesting_level_++;
+    OS::PrintError(
+      "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+      "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message_->OutputToStdOut();
+  }
+}
+
+
+static void PrintFrames(StringStream* accumulator,
+                        StackFrame::PrintMode mode) {
+  StackFrameIterator it;
+  for (int i = 0; !it.done(); it.Advance()) {
+    it.frame()->Print(accumulator, mode, i++);
+  }
+}
+
+
+void Isolate::PrintStack(StringStream* accumulator) {
+  if (!IsInitialized()) {
+    accumulator->Add(
+        "\n==== Stack trace is not available ==========================\n\n");
+    accumulator->Add(
+        "\n==== Isolate for the thread is not initialized =============\n\n");
+    return;
+  }
+  // The MentionedObjectCache is not GC-proof at the moment.
+  AssertNoAllocation nogc;
+  ASSERT(StringStream::IsMentionedObjectCacheClear());
+
+  // Avoid printing anything if there are no frames.
+  if (c_entry_fp(thread_local_top()) == 0) return;
+
+  accumulator->Add(
+      "\n==== Stack trace ============================================\n\n");
+  PrintFrames(accumulator, StackFrame::OVERVIEW);
+
+  accumulator->Add(
+      "\n==== Details ================================================\n\n");
+  PrintFrames(accumulator, StackFrame::DETAILS);
+
+  accumulator->PrintMentionedObjectCache();
+  accumulator->Add("=====================\n\n");
+}
+
+
+void Isolate::SetFailedAccessCheckCallback(
+    v8::FailedAccessCheckCallback callback) {
+  thread_local_top()->failed_access_check_callback_ = callback;
+}
+
+
+void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+  if (!thread_local_top()->failed_access_check_callback_) return;
+
+  ASSERT(receiver->IsAccessCheckNeeded());
+  ASSERT(context());
+
+  // Get the data object from access check info.
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  if (!constructor->shared()->IsApiFunction()) return;
+  Object* data_obj =
+      constructor->shared()->get_api_func_data()->access_check_info();
+  if (data_obj == heap_.undefined_value()) return;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  thread_local_top()->failed_access_check_callback_(
+    v8::Utils::ToLocal(receiver_handle),
+    type,
+    v8::Utils::ToLocal(data));
+}
+
+
+enum MayAccessDecision {
+  YES, NO, UNKNOWN
+};
+
+
+static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
+                                           JSObject* receiver,
+                                           v8::AccessType type) {
+  // During bootstrapping, callback functions are not enabled yet.
+  if (isolate->bootstrapper()->IsActive()) return YES;
+
+  if (receiver->IsJSGlobalProxy()) {
+    Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
+    if (!receiver_context->IsContext()) return NO;
+
+    // Get the global context of the current top context.  Avoid using
+    // Isolate::global_context() because it uses a Handle.
+    Context* global_context = isolate->context()->global()->global_context();
+    if (receiver_context == global_context) return YES;
+
+    if (Context::cast(receiver_context)->security_token() ==
+        global_context->security_token())
+      return YES;
+  }
+
+  return UNKNOWN;
+}
+
+
+bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
+                             v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // Skip checks for hidden properties access.  Note that we do not
+  // require the existence of a context in this case.
+  if (key == heap_.hidden_symbol()) return true;
+
+  // Check for compatibility between the security tokens in the
+  // current lexical context and the accessed object.
+  ASSERT(context());
+
+  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+  if (decision != UNKNOWN) return decision == YES;
+
+  // Get named access check callback
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  if (!constructor->shared()->IsApiFunction()) return false;
+
+  Object* data_obj =
+     constructor->shared()->get_api_func_data()->access_check_info();
+  if (data_obj == heap_.undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
+  v8::NamedSecurityCallback callback =
+      v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+
+  if (!callback) return false;
+
+  HandleScope scope(this);
+  Handle<JSObject> receiver_handle(receiver, this);
+  Handle<Object> key_handle(key, this);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+  LOG(this, ApiNamedSecurityCheck(key));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(this, EXTERNAL);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      v8::Utils::ToLocal(key_handle),
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
+
+
+bool Isolate::MayIndexedAccess(JSObject* receiver,
+                               uint32_t index,
+                               v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+  // Check for compatibility between the security tokens in the
+  // current lexical context and the accessed object.
+  ASSERT(context());
+
+  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+  if (decision != UNKNOWN) return decision == YES;
+
+  // Get indexed access check callback
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  if (!constructor->shared()->IsApiFunction()) return false;
+
+  Object* data_obj =
+      constructor->shared()->get_api_func_data()->access_check_info();
+  if (data_obj == heap_.undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
+  v8::IndexedSecurityCallback callback =
+      v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
+
+  if (!callback) return false;
+
+  HandleScope scope(this);
+  Handle<JSObject> receiver_handle(receiver, this);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+  LOG(this, ApiIndexedSecurityCheck(index));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(this, EXTERNAL);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      index,
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
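
As a hedged illustration, not part of this patch: the named/indexed security callbacks looked up above are the ones an embedder installs on an ObjectTemplate. A permissive pair might be registered roughly as follows (NamedCheck, IndexedCheck and InstallAccessChecks are hypothetical names):

static bool NamedCheck(v8::Local<v8::Object> host, v8::Local<v8::Value> key,
                       v8::AccessType type, v8::Local<v8::Value> data) {
  return true;  // Grant every named access in this example.
}

static bool IndexedCheck(v8::Local<v8::Object> host, uint32_t index,
                         v8::AccessType type, v8::Local<v8::Value> data) {
  return true;  // Grant every indexed access in this example.
}

static void InstallAccessChecks(v8::Handle<v8::ObjectTemplate> templ) {
  // MayNamedAccess/MayIndexedAccess end up calling back into these functions.
  templ->SetAccessCheckCallbacks(NamedCheck, IndexedCheck);
}
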
+
+
+const char* const Isolate::kStackOverflowMessage =
+  "Uncaught RangeError: Maximum call stack size exceeded";
+
+
+Failure* Isolate::StackOverflow() {
+  HandleScope scope;
+  Handle<String> key = factory()->stack_overflow_symbol();
+  Handle<JSObject> boilerplate =
+      Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
+  Handle<Object> exception = Copy(boilerplate);
+  // TODO(1240995): To avoid having to call JavaScript code to compute
+  // the message for stack overflow exceptions, which is very likely to
+  // double fault with another stack overflow exception, we use a
+  // precomputed message.
+  DoThrow(*exception, NULL);
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::TerminateExecution() {
+  DoThrow(heap_.termination_exception(), NULL);
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
+  DoThrow(exception, location);
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
+  bool can_be_caught_externally = false;
+  bool catchable_by_javascript = is_catchable_by_javascript(exception);
+  ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
+
+  thread_local_top()->catcher_ = can_be_caught_externally ?
+      try_catch_handler() : NULL;
+
+  // Set the exception being re-thrown.
+  set_pending_exception(exception);
+  if (exception->IsFailure()) return exception->ToFailureUnchecked();
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::ThrowIllegalOperation() {
+  return Throw(heap_.illegal_access_symbol());
+}
+
+
+void Isolate::ScheduleThrow(Object* exception) {
+  // When scheduling a throw we first throw the exception to get the
+  // error reporting if it is uncaught before rescheduling it.
+  Throw(exception);
+  thread_local_top()->scheduled_exception_ = pending_exception();
+  thread_local_top()->external_caught_exception_ = false;
+  clear_pending_exception();
+}
+
+
+Failure* Isolate::PromoteScheduledException() {
+  MaybeObject* thrown = scheduled_exception();
+  clear_scheduled_exception();
+  // Re-throw the exception to avoid getting repeated error reporting.
+  return ReThrow(thrown);
+}
+
+
+void Isolate::PrintCurrentStackTrace(FILE* out) {
+  StackTraceFrameIterator it(this);
+  while (!it.done()) {
+    HandleScope scope;
+    // Find code position if recorded in relocation info.
+    JavaScriptFrame* frame = it.frame();
+    int pos = frame->LookupCode()->SourcePosition(frame->pc());
+    Handle<Object> pos_obj(Smi::FromInt(pos));
+    // Fetch function and receiver.
+    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+    Handle<Object> recv(frame->receiver());
+    // Advance to the next JavaScript frame and determine if the
+    // current frame is the top-level frame.
+    it.Advance();
+    Handle<Object> is_top_level = it.done()
+        ? factory()->true_value()
+        : factory()->false_value();
+    // Generate and print stack trace line.
+    Handle<String> line =
+        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+    if (line->length() > 0) {
+      line->PrintOn(out);
+      fprintf(out, "\n");
+    }
+  }
+}
+
+
+void Isolate::ComputeLocation(MessageLocation* target) {
+  *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
+  StackTraceFrameIterator it(this);
+  if (!it.done()) {
+    JavaScriptFrame* frame = it.frame();
+    JSFunction* fun = JSFunction::cast(frame->function());
+    Object* script = fun->shared()->script();
+    if (script->IsScript() &&
+        !(Script::cast(script)->source()->IsUndefined())) {
+      int pos = frame->LookupCode()->SourcePosition(frame->pc());
+      // Compute the location from the function and the reloc info.
+      Handle<Script> casted_script(Script::cast(script));
+      *target = MessageLocation(casted_script, pos, pos + 1);
+    }
+  }
+}
+
+
+bool Isolate::ShouldReportException(bool* can_be_caught_externally,
+                                    bool catchable_by_javascript) {
+  // Find the top-most try-catch handler.
+  StackHandler* handler =
+      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+  while (handler != NULL && !handler->is_try_catch()) {
+    handler = handler->next();
+  }
+
+  // Get the address of the external handler so we can compare the address to
+  // determine which one is closer to the top of the stack.
+  Address external_handler_address =
+      thread_local_top()->try_catch_handler_address();
+
+  // The exception has been externally caught if and only if there is
+  // an external handler which is on top of the top-most try-catch
+  // handler.
+  *can_be_caught_externally = external_handler_address != NULL &&
+      (handler == NULL || handler->address() > external_handler_address ||
+       !catchable_by_javascript);
+
+  if (*can_be_caught_externally) {
+    // Only report the exception if the external handler is verbose.
+    return try_catch_handler()->is_verbose_;
+  } else {
+    // Report the exception if it isn't caught by JavaScript code.
+    return handler == NULL;
+  }
+}
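
Worked example, not part of this patch, to make the comparison above concrete (the addresses are made up): the stack grows toward lower addresses, so a handler registered more recently sits at a smaller address.

//   external v8::TryCatch registered at 0x7fff1000  (newer, closer to the top)
//   top-most JS try-catch handler at    0x7fff2000  (older, deeper in the stack)
// Here handler->address() (0x7fff2000) > external_handler_address (0x7fff1000),
// so *can_be_caught_externally is set to true: the external handler is on top.
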
+
+
+void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
+  ASSERT(!has_pending_exception());
+
+  HandleScope scope;
+  Object* exception_object = Smi::FromInt(0);
+  bool is_object = exception->ToObject(&exception_object);
+  Handle<Object> exception_handle(exception_object);
+
+  // Determine reporting and whether the exception is caught externally.
+  bool catchable_by_javascript = is_catchable_by_javascript(exception);
+  // Only real objects can be caught by JS.
+  ASSERT(!catchable_by_javascript || is_object);
+  bool can_be_caught_externally = false;
+  bool should_report_exception =
+      ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
+  bool report_exception = catchable_by_javascript && should_report_exception;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger of exception.
+  if (catchable_by_javascript) {
+    debugger_->OnException(exception_handle, report_exception);
+  }
+#endif
+
+  // Generate the message.
+  Handle<Object> message_obj;
+  MessageLocation potential_computed_location;
+  bool try_catch_needs_message =
+      can_be_caught_externally &&
+      try_catch_handler()->capture_message_;
+  if (report_exception || try_catch_needs_message) {
+    if (location == NULL) {
+      // If no location was specified, we use a computed one instead.
+      ComputeLocation(&potential_computed_location);
+      location = &potential_computed_location;
+    }
+    if (!bootstrapper()->IsActive()) {
+      // It's not safe to try to make message objects or collect stack
+      // traces while the bootstrapper is active since the infrastructure
+      // may not have been properly initialized.
+      Handle<String> stack_trace;
+      if (FLAG_trace_exception) stack_trace = StackTraceString();
+      Handle<JSArray> stack_trace_object;
+      if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
+          stack_trace_object = CaptureCurrentStackTrace(
+              stack_trace_for_uncaught_exceptions_frame_limit_,
+              stack_trace_for_uncaught_exceptions_options_);
+      }
+      ASSERT(is_object);  // Can't use the handle unless there's a real object.
+      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+          location, HandleVector<Object>(&exception_handle, 1), stack_trace,
+          stack_trace_object);
+    }
+  }
+
+  // Save the message for reporting if the exception remains uncaught.
+  thread_local_top()->has_pending_message_ = report_exception;
+  if (!message_obj.is_null()) {
+    thread_local_top()->pending_message_obj_ = *message_obj;
+    if (location != NULL) {
+      thread_local_top()->pending_message_script_ = *location->script();
+      thread_local_top()->pending_message_start_pos_ = location->start_pos();
+      thread_local_top()->pending_message_end_pos_ = location->end_pos();
+    }
+  }
+
+  // Do not forget to clean catcher_ if the currently thrown exception cannot
+  // be caught.  If necessary, ReThrow will update the catcher.
+  thread_local_top()->catcher_ = can_be_caught_externally ?
+      try_catch_handler() : NULL;
+
+  // NOTE: Notifying the debugger or generating the message
+  // may have caused new exceptions. For now, we just ignore
+  // that and set the pending exception to the original one.
+  if (is_object) {
+    set_pending_exception(*exception_handle);
+  } else {
+    // Failures are not on the heap so they neither need nor work with handles.
+    ASSERT(exception_handle->IsFailure());
+    set_pending_exception(exception);
+  }
+}
+
+
+bool Isolate::IsExternallyCaught() {
+  ASSERT(has_pending_exception());
+
+  if ((thread_local_top()->catcher_ == NULL) ||
+      (try_catch_handler() != thread_local_top()->catcher_)) {
+    // When throwing the exception, we found no v8::TryCatch
+    // that should care about this exception.
+    return false;
+  }
+
+  if (!is_catchable_by_javascript(pending_exception())) {
+    return true;
+  }
+
+  // Get the address of the external handler so we can compare the address to
+  // determine which one is closer to the top of the stack.
+  Address external_handler_address =
+      thread_local_top()->try_catch_handler_address();
+  ASSERT(external_handler_address != NULL);
+
+  // The exception has been externally caught if and only if there is
+  // an external handler which is on top of the top-most try-finally
+  // handler.
+  // There should be no try-catch blocks, as they would prohibit us from
+  // finding the external catcher in the first place (see the catcher_ check
+  // above).
+  //
+  // Note that a finally clause would rethrow an exception unless it's aborted
+  // by jumps in control flow like return, break, etc., and we'll have another
+  // chance to set a proper v8::TryCatch.
+  StackHandler* handler =
+      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+  while (handler != NULL && handler->address() < external_handler_address) {
+    ASSERT(!handler->is_try_catch());
+    if (handler->is_try_finally()) return false;
+
+    handler = handler->next();
+  }
+
+  return true;
+}
+
+
+void Isolate::ReportPendingMessages() {
+  ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
+  // If the pending exception is an OutOfMemoryException, set out_of_memory in
+  // the global context.  Note: We have to mark the global context here
+  // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
+  // set it.
+  HandleScope scope;
+  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+    context()->mark_out_of_memory();
+  } else if (thread_local_top_.pending_exception_ ==
+             heap()->termination_exception()) {
+    // Do nothing: if needed, the exception has already been propagated to
+    // v8::TryCatch.
+  } else {
+    if (thread_local_top_.has_pending_message_) {
+      thread_local_top_.has_pending_message_ = false;
+      if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+        HandleScope scope;
+        Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
+        if (thread_local_top_.pending_message_script_ != NULL) {
+          Handle<Script> script(thread_local_top_.pending_message_script_);
+          int start_pos = thread_local_top_.pending_message_start_pos_;
+          int end_pos = thread_local_top_.pending_message_end_pos_;
+          MessageLocation location(script, start_pos, end_pos);
+          MessageHandler::ReportMessage(this, &location, message_obj);
+        } else {
+          MessageHandler::ReportMessage(this, NULL, message_obj);
+        }
+      }
+    }
+  }
+  clear_pending_message();
+}
+
+
+void Isolate::TraceException(bool flag) {
+  FLAG_trace_exception = flag;  // TODO(isolates): This is an unfortunate use.
+}
+
+
+bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
+  ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
+  // Always reschedule out-of-memory exceptions.
+  if (!is_out_of_memory()) {
+    bool is_termination_exception =
+        pending_exception() == heap_.termination_exception();
+
+    // Do not reschedule the exception if this is the bottom call.
+    bool clear_exception = is_bottom_call;
+
+    if (is_termination_exception) {
+      if (is_bottom_call) {
+        thread_local_top()->external_caught_exception_ = false;
+        clear_pending_exception();
+        return false;
+      }
+    } else if (thread_local_top()->external_caught_exception_) {
+      // If the exception is externally caught, clear it if there are no
+      // JavaScript frames on the way to the C++ frame that has the
+      // external handler.
+      ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+      Address external_handler_address =
+          thread_local_top()->try_catch_handler_address();
+      JavaScriptFrameIterator it;
+      if (it.done() || (it.frame()->sp() > external_handler_address)) {
+        clear_exception = true;
+      }
+    }
+
+    // Clear the exception if needed.
+    if (clear_exception) {
+      thread_local_top()->external_caught_exception_ = false;
+      clear_pending_exception();
+      return false;
+    }
+  }
+
+  // Reschedule the exception.
+  thread_local_top()->scheduled_exception_ = pending_exception();
+  clear_pending_exception();
+  return true;
+}
+
+
+void Isolate::SetCaptureStackTraceForUncaughtExceptions(
+      bool capture,
+      int frame_limit,
+      StackTrace::StackTraceOptions options) {
+  capture_stack_trace_for_uncaught_exceptions_ = capture;
+  stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
+  stack_trace_for_uncaught_exceptions_options_ = options;
+}
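
Sketch only, not part of this patch: this setter is what the public v8::V8::SetCaptureStackTraceForUncaughtExceptions entry point is expected to forward to, so an embedder would typically enable capture once at startup, e.g.:

// Capture up to 16 detailed frames for every uncaught exception.
v8::V8::SetCaptureStackTraceForUncaughtExceptions(
    true, 16, v8::StackTrace::kDetailed);
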
+
+
+bool Isolate::is_out_of_memory() {
+  if (has_pending_exception()) {
+    MaybeObject* e = pending_exception();
+    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  if (has_scheduled_exception()) {
+    MaybeObject* e = scheduled_exception();
+    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+Handle<Context> Isolate::global_context() {
+  GlobalObject* global = thread_local_top()->context_->global();
+  return Handle<Context>(global->global_context());
+}
+
+
+Handle<Context> Isolate::GetCallingGlobalContext() {
+  JavaScriptFrameIterator it;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (debug_->InDebugger()) {
+    while (!it.done()) {
+      JavaScriptFrame* frame = it.frame();
+      Context* context = Context::cast(frame->context());
+      if (context->global_context() == *debug_->debug_context()) {
+        it.Advance();
+      } else {
+        break;
+      }
+    }
+  }
+#endif  // ENABLE_DEBUGGER_SUPPORT
+  if (it.done()) return Handle<Context>::null();
+  JavaScriptFrame* frame = it.frame();
+  Context* context = Context::cast(frame->context());
+  return Handle<Context>(context->global_context());
+}
+
+
+char* Isolate::ArchiveThread(char* to) {
+  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+    RuntimeProfiler::IsolateExitedJS(this);
+  }
+  memcpy(to, reinterpret_cast<char*>(thread_local_top()),
+         sizeof(ThreadLocalTop));
+  InitializeThreadLocal();
+  return to + sizeof(ThreadLocalTop);
+}
+
+
+char* Isolate::RestoreThread(char* from) {
+  memcpy(reinterpret_cast<char*>(thread_local_top()), from,
+         sizeof(ThreadLocalTop));
+  // This might be just paranoia, but it seems to be needed in case a
+  // thread_local_top_ is restored on a separate OS thread.
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+  thread_local_top()->simulator_ = Simulator::current(this);
+#elif V8_TARGET_ARCH_MIPS
+  thread_local_top()->simulator_ = Simulator::current(this);
+#endif
+#endif
+  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+    RuntimeProfiler::IsolateEnteredJS(this);
+  }
+  ASSERT(context() == NULL || context()->IsContext());
+  return from + sizeof(ThreadLocalTop);
+}
+
+
 Isolate::ThreadDataTable::ThreadDataTable()
     : list_(NULL) {
 }
@@ -383,6 +1334,7 @@
   if (list_ == data) list_ = data->next_;
   if (data->next_ != NULL) data->next_->prev_ = data->prev_;
   if (data->prev_ != NULL) data->prev_->next_ = data->next_;
+  delete data;
 }
 
 
@@ -395,6 +1347,16 @@
 }
 
 
+void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
+  PerIsolateThreadData* data = list_;
+  while (data != NULL) {
+    PerIsolateThreadData* next = data->next_;
+    if (data->isolate() == isolate) Remove(data);
+    data = next;
+  }
+}
+
+
 #ifdef DEBUG
 #define TRACE_ISOLATE(tag)                                              \
   do {                                                                  \
@@ -446,20 +1408,23 @@
       global_handles_(NULL),
       context_switcher_(NULL),
       thread_manager_(NULL),
-      ast_sentinels_(NULL),
       string_tracker_(NULL),
       regexp_stack_(NULL),
-      frame_element_constant_list_(0),
-      result_constant_list_(0) {
+      embedder_data_(NULL) {
   TRACE_ISOLATE(constructor);
 
   memset(isolate_addresses_, 0,
-      sizeof(isolate_addresses_[0]) * (k_isolate_address_count + 1));
+      sizeof(isolate_addresses_[0]) * (kIsolateAddressCount + 1));
 
   heap_.isolate_ = this;
   zone_.isolate_ = this;
   stack_guard_.isolate_ = this;
 
+  // ThreadManager is initialized early to support locking an isolate
+  // before it is entered.
+  thread_manager_ = new ThreadManager();
+  thread_manager_->isolate_ = this;
+
 #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
     defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
   simulator_initialized_ = false;
@@ -467,9 +1432,6 @@
   simulator_redirection_ = NULL;
 #endif
 
-  thread_manager_ = new ThreadManager();
-  thread_manager_->isolate_ = this;
-
 #ifdef DEBUG
   // heap_histograms_ initializes itself.
   memset(&js_spill_information_, 0, sizeof(js_spill_information_));
@@ -482,10 +1444,6 @@
   debugger_ = NULL;
 #endif
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  producer_heap_profile_ = NULL;
-#endif
-
   handle_scope_data_.Initialize();
 
 #define ISOLATE_INIT_EXECUTE(type, name, initial_value)                        \
@@ -512,6 +1470,10 @@
 
   Deinit();
 
+  { ScopedLock lock(process_wide_mutex_);
+    thread_data_table_->RemoveAllThreads(this);
+  }
+
   if (!IsDefaultIsolate()) {
     delete this;
   }
@@ -570,10 +1532,11 @@
 Isolate::~Isolate() {
   TRACE_ISOLATE(destructor);
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  delete producer_heap_profile_;
-  producer_heap_profile_ = NULL;
-#endif
+  // Has to be called while counters_ are still alive.
+  zone_.DeleteKeptSegment();
+
+  delete[] assembler_spare_buffer_;
+  assembler_spare_buffer_ = NULL;
 
   delete unicode_cache_;
   unicode_cache_ = NULL;
@@ -581,9 +1544,6 @@
   delete regexp_stack_;
   regexp_stack_ = NULL;
 
-  delete ast_sentinels_;
-  ast_sentinels_ = NULL;
-
   delete descriptor_lookup_cache_;
   descriptor_lookup_cache_ = NULL;
   delete context_slot_cache_;
@@ -608,6 +1568,8 @@
   handle_scope_implementer_ = NULL;
   delete break_access_;
   break_access_ = NULL;
+  delete debugger_access_;
+  debugger_access_ = NULL;
 
   delete compilation_cache_;
   compilation_cache_ = NULL;
@@ -633,6 +1595,9 @@
   delete global_handles_;
   global_handles_ = NULL;
 
+  delete external_reference_table_;
+  external_reference_table_ = NULL;
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   delete debugger_;
   debugger_ = NULL;
@@ -643,6 +1608,7 @@
 
 
 void Isolate::InitializeThreadLocal() {
+  thread_local_top_.isolate_ = this;
   thread_local_top_.Initialize();
   clear_pending_exception();
   clear_pending_message();
@@ -720,10 +1686,10 @@
   // ensuring that Isolate::Current() == this.
   heap_.SetStackLimits();
 
-#define C(name) isolate_addresses_[Isolate::k_##name] =                        \
-    reinterpret_cast<Address>(name());
-  ISOLATE_ADDRESS_LIST(C)
-  ISOLATE_ADDRESS_LIST_PROF(C)
+#define ASSIGN_ELEMENT(CamelName, hacker_name)                  \
+  isolate_addresses_[Isolate::k##CamelName##Address] =          \
+      reinterpret_cast<Address>(hacker_name##_address());
+  FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
 #undef C
 
   string_tracker_ = new StringTracker();
@@ -738,17 +1704,11 @@
   write_input_buffer_ = new StringInputBuffer();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
-  handle_scope_implementer_ = new HandleScopeImplementer();
+  handle_scope_implementer_ = new HandleScopeImplementer(this);
   stub_cache_ = new StubCache(this);
-  ast_sentinels_ = new AstSentinels();
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  producer_heap_profile_ = new ProducerHeapProfile();
-  producer_heap_profile_->isolate_ = this;
-#endif
-
   // Enable logging before setting up the heap
   logger_->Setup();
 
@@ -758,7 +1718,7 @@
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
-  Simulator::Initialize();
+  Simulator::Initialize(this);
 #endif
 #endif
 
@@ -778,11 +1738,11 @@
     return false;
   }
 
+  InitializeThreadLocal();
+
   bootstrapper_->Initialize(create_heap_objects);
   builtins_.Setup(create_heap_objects);
 
-  InitializeThreadLocal();
-
   // Only preallocate on the first initialization.
   if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
     // Start the thread which will set aside some memory.
@@ -820,7 +1780,7 @@
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
-  if (des != NULL && FLAG_log_code) {
+  if (des != NULL && (FLAG_log_code || FLAG_ll_prof)) {
     HandleScope scope;
     LOG(this, LogCodeObjects());
     LOG(this, LogCompiledFunctions());
@@ -910,11 +1870,6 @@
 }
 
 
-void Isolate::ResetEagerOptimizingData() {
-  compilation_cache_->ResetEagerOptimizingData();
-}
-
-
 #ifdef DEBUG
 #define ISOLATE_FIELD_OFFSET(type, name, ignored)                       \
 const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/src/isolate.h b/src/isolate.h
index 167c8ef..2582da6 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -47,7 +47,6 @@
 namespace v8 {
 namespace internal {
 
-class AstSentinels;
 class Bootstrapper;
 class CodeGenerator;
 class CodeRange;
@@ -69,7 +68,6 @@
 class NoAllocationStringAllocator;
 class PcToCodeCache;
 class PreallocatedMemoryThread;
-class ProducerHeapProfile;
 class RegExpStack;
 class SaveContext;
 class UnicodeCache;
@@ -121,19 +119,13 @@
 #define RETURN_IF_EMPTY_HANDLE(isolate, call)                       \
   RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
 
-#define ISOLATE_ADDRESS_LIST(C)            \
-  C(handler_address)                       \
-  C(c_entry_fp_address)                    \
-  C(context_address)                       \
-  C(pending_exception_address)             \
-  C(external_caught_exception_address)
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define ISOLATE_ADDRESS_LIST_PROF(C)       \
-  C(js_entry_sp_address)
-#else
-#define ISOLATE_ADDRESS_LIST_PROF(C)
-#endif
+#define FOR_EACH_ISOLATE_ADDRESS_NAME(C)                \
+  C(Handler, handler)                                   \
+  C(CEntryFP, c_entry_fp)                               \
+  C(Context, context)                                   \
+  C(PendingException, pending_exception)                \
+  C(ExternalCaughtException, external_caught_exception) \
+  C(JSEntrySP, js_entry_sp)
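
Illustrative expansion, not part of this patch: FOR_EACH_ISOLATE_ADDRESS_NAME is an X-macro list that is expanded twice, once into the AddressId enum and once into the isolate_addresses_ assignments in Isolate::Init(), so the two can never drift apart. For example:

// #define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
// FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
// expands to the enumerators:
//   kHandlerAddress, kCEntryFPAddress, kContextAddress,
//   kPendingExceptionAddress, kExternalCaughtExceptionAddress,
//   kJSEntrySPAddress
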
 
 
 // Platform-independent, reliable thread identifier.
@@ -224,6 +216,7 @@
     ASSERT(try_catch_handler_address_ == NULL);
   }
 
+  Isolate* isolate_;
   // The context where the current execution method is created and for variable
   // lookups.
   Context* context_;
@@ -252,14 +245,9 @@
 #endif
 #endif  // USE_SIMULATOR
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
   Address external_callback_;  // the external callback we're currently in
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
   StateTag current_vm_state_;
-#endif
 
   // Generated code scratch locations.
   int32_t formal_count_;
@@ -297,7 +285,6 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 #define ISOLATE_DEBUGGER_INIT_LIST(V)                                          \
-  V(uint64_t, enabled_cpu_features, 0)                                         \
   V(v8::Debug::EventCallback, debug_event_callback, NULL)                      \
   V(DebuggerAgent*, debugger_agent_instance, NULL)
 #else
@@ -317,18 +304,6 @@
 
 #endif
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#define ISOLATE_LOGGING_INIT_LIST(V)                                           \
-  V(CpuProfiler*, cpu_profiler, NULL)                                          \
-  V(HeapProfiler*, heap_profiler, NULL)
-
-#else
-
-#define ISOLATE_LOGGING_INIT_LIST(V)
-
-#endif
-
 #define ISOLATE_INIT_ARRAY_LIST(V)                                             \
   /* SerializerDeserializer state. */                                          \
   V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity)  \
@@ -336,6 +311,8 @@
   V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
   V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
   V(int, suffix_table, (kBMMaxShift + 1))                                      \
+  V(uint32_t, random_seed, 2)                                                  \
+  V(uint32_t, private_random_seed, 2)                                          \
   ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
 
 typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
@@ -349,6 +326,7 @@
   /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */    \
   V(byte*, assembler_spare_buffer, NULL)                                       \
   V(FatalErrorCallback, exception_behavior, NULL)                              \
+  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL)     \
   V(v8::Debug::MessageHandler, message_handler, NULL)                          \
   /* To distinguish the function templates, so that we can find them in the */ \
   /* function cache of the global context. */                                  \
@@ -373,8 +351,10 @@
   V(unsigned, ast_node_count, 0)                                               \
   /* SafeStackFrameIterator activations count. */                              \
   V(int, safe_stack_iterator_counter, 0)                                       \
+  V(uint64_t, enabled_cpu_features, 0)                                         \
+  V(CpuProfiler*, cpu_profiler, NULL)                                          \
+  V(HeapProfiler*, heap_profiler, NULL)                                        \
   ISOLATE_PLATFORM_INIT_LIST(V)                                                \
-  ISOLATE_LOGGING_INIT_LIST(V)                                                 \
   ISOLATE_DEBUGGER_INIT_LIST(V)
 
 class Isolate {
@@ -443,11 +423,10 @@
 
 
   enum AddressId {
-#define C(name) k_##name,
-    ISOLATE_ADDRESS_LIST(C)
-    ISOLATE_ADDRESS_LIST_PROF(C)
+#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
+    FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
 #undef C
-    k_isolate_address_count
+    kIsolateAddressCount
   };
 
   // Returns the PerIsolateThreadData for the current thread (or NULL if one is
@@ -496,9 +475,15 @@
   // Safe to call multiple times.
   static void EnsureDefaultIsolate();
 
+  // Find the PerIsolateThreadData for this particular (isolate, thread)
+  // combination.  If one does not yet exist, return null.
+  PerIsolateThreadData* FindPerThreadDataForThisThread();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Get the debugger from the default isolate. Preinitializes the
   // default isolate if needed.
   static Debugger* GetDefaultIsolateDebugger();
+#endif
 
   // Get the stack guard from the default isolate. Preinitializes the
   // default isolate if needed.
@@ -533,6 +518,7 @@
   // Access to top context (where the current function object was created).
   Context* context() { return thread_local_top_.context_; }
   void set_context(Context* context) {
+    ASSERT(context == NULL || context->IsContext());
     thread_local_top_.context_ = context;
   }
   Context** context_address() { return &thread_local_top_.context_; }
@@ -598,7 +584,7 @@
     return thread_local_top_.scheduled_exception_;
   }
   bool has_scheduled_exception() {
-    return !thread_local_top_.scheduled_exception_->IsTheHole();
+    return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
   }
   void clear_scheduled_exception() {
     thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
@@ -622,7 +608,6 @@
   }
   inline Address* handler_address() { return &thread_local_top_.handler_; }
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // Bottom JS entry (see StackTracer::Trace in log.cc).
   static Address js_entry_sp(ThreadLocalTop* thread) {
     return thread->js_entry_sp_;
@@ -630,7 +615,6 @@
   inline Address* js_entry_sp_address() {
     return &thread_local_top_.js_entry_sp_;
   }
-#endif
 
   // Generated code scratch locations.
   void* formal_count_address() { return &thread_local_top_.formal_count_; }
@@ -893,18 +877,8 @@
     return &objects_string_input_buffer_;
   }
 
-  AstSentinels* ast_sentinels() { return ast_sentinels_; }
-
   RuntimeState* runtime_state() { return &runtime_state_; }
 
-  StringInputBuffer* liveedit_compare_substrings_buf1() {
-    return &liveedit_compare_substrings_buf1_;
-  }
-
-  StringInputBuffer* liveedit_compare_substrings_buf2() {
-    return &liveedit_compare_substrings_buf2_;
-  }
-
   StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
     return &compiler_safe_string_input_buffer_;
   }
@@ -923,14 +897,6 @@
     return &interp_canonicalize_mapping_;
   }
 
-  ZoneObjectList* frame_element_constant_list() {
-    return &frame_element_constant_list_;
-  }
-
-  ZoneObjectList* result_constant_list() {
-    return &result_constant_list_;
-  }
-
   void* PreallocatedStorageNew(size_t size);
   void PreallocatedStorageDelete(void* p);
   void PreallocatedStorageInit(size_t size);
@@ -946,11 +912,7 @@
   }
 #endif
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  ProducerHeapProfile* producer_heap_profile() {
-    return producer_heap_profile_;
-  }
-#endif
+  inline bool DebuggerHasBreakPoints();
 
 #ifdef DEBUG
   HistogramInfo* heap_histograms() { return heap_histograms_; }
@@ -989,22 +951,21 @@
 
   static const int kJSRegexpStaticOffsetsVectorSize = 50;
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Address external_callback() {
     return thread_local_top_.external_callback_;
   }
   void set_external_callback(Address callback) {
     thread_local_top_.external_callback_ = callback;
   }
-#endif
 
-#ifdef ENABLE_VMSTATE_TRACKING
   StateTag current_vm_state() {
     return thread_local_top_.current_vm_state_;
   }
 
   void SetCurrentVMState(StateTag state) {
     if (RuntimeProfiler::IsEnabled()) {
+      // Make sure thread local top is initialized.
+      ASSERT(thread_local_top_.isolate_ == this);
       StateTag current_state = thread_local_top_.current_vm_state_;
       if (current_state != JS && state == JS) {
         // Non-JS -> JS transition.
@@ -1022,9 +983,9 @@
     }
     thread_local_top_.current_vm_state_ = state;
   }
-#endif
 
-  void ResetEagerOptimizingData();
+  void SetData(void* data) { embedder_data_ = data; }
+  void* GetData() { return embedder_data_; }
 
  private:
   Isolate();
@@ -1040,6 +1001,7 @@
     void Insert(PerIsolateThreadData* data);
     void Remove(Isolate* isolate, ThreadId thread_id);
     void Remove(PerIsolateThreadData* data);
+    void RemoveAllThreads(Isolate* isolate);
 
    private:
     PerIsolateThreadData* list_;
@@ -1100,7 +1062,7 @@
   // If one does not yet exist, allocate a new one.
   PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
 
-  // PreInits and returns a default isolate. Needed when a new thread tries
+// PreInits and returns a default isolate. Needed when a new thread tries
   // to create a Locker for the first time (the lock itself is in the isolate).
   static Isolate* GetDefaultIsolateForLocking();
 
@@ -1135,7 +1097,7 @@
   StringStream* incomplete_message_;
   // The preallocated memory thread singleton.
   PreallocatedMemoryThread* preallocated_memory_thread_;
-  Address isolate_addresses_[k_isolate_address_count + 1];  // NOLINT
+  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
   NoAllocationStringAllocator* preallocated_message_space_;
 
   Bootstrapper* bootstrapper_;
@@ -1173,10 +1135,7 @@
   GlobalHandles* global_handles_;
   ContextSwitcher* context_switcher_;
   ThreadManager* thread_manager_;
-  AstSentinels* ast_sentinels_;
   RuntimeState runtime_state_;
-  StringInputBuffer liveedit_compare_substrings_buf1_;
-  StringInputBuffer liveedit_compare_substrings_buf2_;
   StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
   Builtins builtins_;
   StringTracker* string_tracker_;
@@ -1189,8 +1148,7 @@
       regexp_macro_assembler_canonicalize_;
   RegExpStack* regexp_stack_;
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
-  ZoneObjectList frame_element_constant_list_;
-  ZoneObjectList result_constant_list_;
+  void* embedder_data_;
 
 #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
     defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
@@ -1211,10 +1169,6 @@
   Debug* debug_;
 #endif
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  ProducerHeapProfile* producer_heap_profile_;
-#endif
-
 #define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
   type name##_;
   ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
@@ -1238,10 +1192,14 @@
 
   friend class ExecutionAccess;
   friend class IsolateInitializer;
+  friend class ThreadManager;
+  friend class Simulator;
+  friend class StackGuard;
   friend class ThreadId;
   friend class TestMemoryAllocatorScope;
   friend class v8::Isolate;
   friend class v8::Locker;
+  friend class v8::Unlocker;
 
   DISALLOW_COPY_AND_ASSIGN(Isolate);
 };
@@ -1399,26 +1357,6 @@
 }
 
 
-// Temporary macro to be used to flag definitions that are indeed static
-// and not per-isolate. (It would be great to be able to grep for [static]!)
-#define RLYSTC static
-
-
-// Temporary macro to be used to flag classes that should be static.
-#define STATIC_CLASS class
-
-
-// Temporary macro to be used to flag classes that are completely converted
-// to be isolate-friendly. Their mix of static/nonstatic methods/fields is
-// correct.
-#define ISOLATED_CLASS class
-
 } }  // namespace v8::internal
 
-// TODO(isolates): Get rid of these -inl.h includes and place them only where
-//                 they're needed.
-#include "allocation-inl.h"
-#include "zone-inl.h"
-#include "frames-inl.h"
-
 #endif  // V8_ISOLATE_H_
diff --git a/src/json-parser.h b/src/json-parser.h
new file mode 100644
index 0000000..68eab65
--- /dev/null
+++ b/src/json-parser.h
@@ -0,0 +1,599 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JSON_PARSER_H_
+#define V8_JSON_PARSER_H_
+
+#include "v8.h"
+
+#include "char-predicates-inl.h"
+#include "v8conversions.h"
+#include "messages.h"
+#include "spaces-inl.h"
+#include "token.h"
+
+namespace v8 {
+namespace internal {
+
+// A simple json parser.
+template <bool seq_ascii>
+class JsonParser BASE_EMBEDDED {
+ public:
+  static Handle<Object> Parse(Handle<String> source) {
+    return JsonParser().ParseJson(source);
+  }
+
+  static const int kEndOfString = -1;
+
+ private:
+  // Parse a string containing a single JSON value.
+  Handle<Object> ParseJson(Handle<String> source);
+
+  inline void Advance() {
+    position_++;
+    if (position_ >= source_length_) {
+      c0_ = kEndOfString;
+    } else if (seq_ascii) {
+      c0_ = seq_source_->SeqAsciiStringGet(position_);
+    } else {
+      c0_ = source_->Get(position_);
+    }
+  }
+
+  // The JSON lexical grammar is specified in the ECMAScript 5 standard,
+  // section 15.12.1.1. The only allowed whitespace characters between tokens
+  // are tab, carriage-return, newline and space.
+
+  inline void AdvanceSkipWhitespace() {
+    do {
+      Advance();
+    } while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ');
+  }
+
+  inline void SkipWhitespace() {
+    while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ') {
+      Advance();
+    }
+  }
+
+  inline uc32 AdvanceGetChar() {
+    Advance();
+    return c0_;
+  }
+
+  // Checks that the current character is c.
+  // If so, consumes c and skips whitespace.
+  inline bool MatchSkipWhiteSpace(uc32 c) {
+    if (c0_ == c) {
+      AdvanceSkipWhitespace();
+      return true;
+    }
+    return false;
+  }
+
+  // A JSON string (production JSONString) is a subset of valid JavaScript
+  // string literals. It must be double-quoted (not single-quoted), and
+  // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
+  // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
+  Handle<String> ParseJsonString() {
+    return ScanJsonString<false>();
+  }
+  Handle<String> ParseJsonSymbol() {
+    return ScanJsonString<true>();
+  }
+  template <bool is_symbol>
+  Handle<String> ScanJsonString();
+  // Creates a new string and copies prefix[start..end] into the beginning
+  // of it. Then scans the rest of the string, adding characters after the
+  // prefix. Called by ScanJsonString when reaching a '\' or non-ASCII char.
+  template <typename StringType, typename SinkChar>
+  Handle<String> SlowScanJsonString(Handle<String> prefix, int start, int end);
+
+  // A JSON number (production JSONNumber) is a subset of the valid JavaScript
+  // decimal number literals.
+  // It includes an optional minus sign, must have at least one
+  // digit before and after a decimal point, may not have prefixed zeros (unless
+  // the integer part is zero), and may include an exponent part (e.g., "e-10").
+  // Hexadecimal and octal numbers are not allowed.
+  Handle<Object> ParseJsonNumber();
+
+  // Parse a single JSON value from input (grammar production JSONValue).
+  // A JSON value is either a (double-quoted) string literal, a number literal,
+  // one of "true", "false", or "null", or an object or array literal.
+  Handle<Object> ParseJsonValue();
+
+  // Parse a JSON object literal (grammar production JSONObject).
+  // An object literal is a squiggly-braced and comma-separated sequence
+  // (possibly empty) of key/value pairs, where the key is a JSON string
+  // literal, the value is a JSON value, and the two are separated by a colon.
+  // Unlike a JavaScript object literal, a JSON object doesn't allow numbers
+  // or identifiers as keys.
+  Handle<Object> ParseJsonObject();
+
+  // Parses a JSON array literal (grammar production JSONArray). An array
+  // literal is a square-bracketed and comma-separated sequence (possibly
+  // empty) of JSON values.
+  // Unlike a JavaScript array literal, a JSON array doesn't allow leaving out
+  // values from the sequence, nor does it allow a trailing comma.
+  Handle<Object> ParseJsonArray();
+
+
+  // Mark that a parsing error has happened at the current token, and
+  // return a null handle. Primarily for readability.
+  inline Handle<Object> ReportUnexpectedCharacter() {
+    return Handle<Object>::null();
+  }
+
+  inline Isolate* isolate() { return isolate_; }
+
+  static const int kInitialSpecialStringLength = 1024;
+
+
+ private:
+  Handle<String> source_;
+  int source_length_;
+  Handle<SeqAsciiString> seq_source_;
+
+  Isolate* isolate_;
+  uc32 c0_;
+  int position_;
+};
+
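Illustrative sketch, not part of this patch: the seq_ascii template parameter is meant to be chosen by the caller from the string's representation, presumably along these lines (ParseJsonHelper is a hypothetical wrapper and assumes the source string has already been flattened):

static Handle<Object> ParseJsonHelper(Handle<String> source) {
  // Use the fast instantiation when the source is a sequential ASCII string;
  // otherwise fall back to the generic one, which reads via String::Get().
  if (source->IsSeqAsciiString()) {
    return JsonParser<true>::Parse(source);
  }
  return JsonParser<false>::Parse(source);
}
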
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
+  isolate_ = source->map()->isolate();
+  FlattenString(source);
+  source_ = source;
+  source_length_ = source_->length();
+
+  // Optimized fast case where we only have ASCII characters.
+  if (seq_ascii) {
+    seq_source_ = Handle<SeqAsciiString>::cast(source_);
+  }
+
+  // Set initial position right before the string.
+  position_ = -1;
+  // Advance to the first character (possibly EOS).
+  AdvanceSkipWhitespace();
+  Handle<Object> result = ParseJsonValue();
+  if (result.is_null() || c0_ != kEndOfString) {
+    // Parse failed. Current character is the unexpected token.
+
+    const char* message;
+    Factory* factory = isolate()->factory();
+    Handle<JSArray> array;
+
+    switch (c0_) {
+      case kEndOfString:
+        message = "unexpected_eos";
+        array = factory->NewJSArray(0);
+        break;
+      case '-':
+      case '0':
+      case '1':
+      case '2':
+      case '3':
+      case '4':
+      case '5':
+      case '6':
+      case '7':
+      case '8':
+      case '9':
+        message = "unexpected_token_number";
+        array = factory->NewJSArray(0);
+        break;
+      case '"':
+        message = "unexpected_token_string";
+        array = factory->NewJSArray(0);
+        break;
+      default:
+        message = "unexpected_token";
+        Handle<Object> name = LookupSingleCharacterStringFromCode(c0_);
+        Handle<FixedArray> element = factory->NewFixedArray(1);
+        element->set(0, *name);
+        array = factory->NewJSArrayWithElements(element);
+        break;
+    }
+
+    MessageLocation location(factory->NewScript(source),
+                             position_,
+                             position_ + 1);
+    Handle<Object> result = factory->NewSyntaxError(message, array);
+    isolate()->Throw(*result, &location);
+    return Handle<Object>::null();
+  }
+  return result;
+}
+
+
+// Parse any JSON value.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
+  switch (c0_) {
+    case '"':
+      return ParseJsonString();
+    case '-':
+    case '0':
+    case '1':
+    case '2':
+    case '3':
+    case '4':
+    case '5':
+    case '6':
+    case '7':
+    case '8':
+    case '9':
+      return ParseJsonNumber();
+    case 'f':
+      if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
+          AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
+        AdvanceSkipWhitespace();
+        return isolate()->factory()->false_value();
+      } else {
+        return ReportUnexpectedCharacter();
+      }
+    case 't':
+      if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
+          AdvanceGetChar() == 'e') {
+        AdvanceSkipWhitespace();
+        return isolate()->factory()->true_value();
+      } else {
+        return ReportUnexpectedCharacter();
+      }
+    case 'n':
+      if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
+          AdvanceGetChar() == 'l') {
+        AdvanceSkipWhitespace();
+        return isolate()->factory()->null_value();
+      } else {
+        return ReportUnexpectedCharacter();
+      }
+    case '{':
+      return ParseJsonObject();
+    case '[':
+      return ParseJsonArray();
+    default:
+      return ReportUnexpectedCharacter();
+  }
+}
+
+
+// Parse a JSON object. Position must be right at '{'.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
+  Handle<JSFunction> object_constructor(
+      isolate()->global_context()->object_function());
+  Handle<JSObject> json_object =
+      isolate()->factory()->NewJSObject(object_constructor);
+  ASSERT_EQ(c0_, '{');
+
+  AdvanceSkipWhitespace();
+  if (c0_ != '}') {
+    do {
+      if (c0_ != '"') return ReportUnexpectedCharacter();
+      Handle<String> key = ParseJsonSymbol();
+      if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+      AdvanceSkipWhitespace();
+      Handle<Object> value = ParseJsonValue();
+      if (value.is_null()) return ReportUnexpectedCharacter();
+
+      uint32_t index;
+      if (key->AsArrayIndex(&index)) {
+        SetOwnElement(json_object, index, value, kNonStrictMode);
+      } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
+        SetPrototype(json_object, value);
+      } else {
+        SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
+      }
+    } while (MatchSkipWhiteSpace(','));
+    if (c0_ != '}') {
+      return ReportUnexpectedCharacter();
+    }
+  }
+  AdvanceSkipWhitespace();
+  return json_object;
+}
+
+// Parse a JSON array. Position must be right at '['.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
+  ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
+  ZoneList<Handle<Object> > elements(4);
+  ASSERT_EQ(c0_, '[');
+
+  AdvanceSkipWhitespace();
+  if (c0_ != ']') {
+    do {
+      Handle<Object> element = ParseJsonValue();
+      if (element.is_null()) return ReportUnexpectedCharacter();
+      elements.Add(element);
+    } while (MatchSkipWhiteSpace(','));
+    if (c0_ != ']') {
+      return ReportUnexpectedCharacter();
+    }
+  }
+  AdvanceSkipWhitespace();
+  // Allocate a fixed array with all the elements.
+  Handle<FixedArray> fast_elements =
+      isolate()->factory()->NewFixedArray(elements.length());
+  for (int i = 0, n = elements.length(); i < n; i++) {
+    fast_elements->set(i, *elements[i]);
+  }
+  return isolate()->factory()->NewJSArrayWithElements(fast_elements);
+}
+
+
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
+  bool negative = false;
+  int beg_pos = position_;
+  if (c0_ == '-') {
+    Advance();
+    negative = true;
+  }
+  if (c0_ == '0') {
+    Advance();
+    // Prefix zero is only allowed if it's the only digit before
+    // a decimal point or exponent.
+    if ('0' <= c0_ && c0_ <= '9') return ReportUnexpectedCharacter();
+  } else {
+    int i = 0;
+    int digits = 0;
+    if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
+    do {
+      i = i * 10 + c0_ - '0';
+      digits++;
+      Advance();
+    } while (c0_ >= '0' && c0_ <= '9');
+    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+      SkipWhitespace();
+      return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
+    }
+  }
+  if (c0_ == '.') {
+    Advance();
+    if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+    do {
+      Advance();
+    } while (c0_ >= '0' && c0_ <= '9');
+  }
+  if (AsciiAlphaToLower(c0_) == 'e') {
+    Advance();
+    if (c0_ == '-' || c0_ == '+') Advance();
+    if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+    do {
+      Advance();
+    } while (c0_ >= '0' && c0_ <= '9');
+  }
+  int length = position_ - beg_pos;
+  double number;
+  if (seq_ascii) {
+    Vector<const char> chars(seq_source_->GetChars() +  beg_pos, length);
+    number = StringToDouble(isolate()->unicode_cache(),
+                             chars,
+                             NO_FLAGS,  // Hex, octal or trailing junk.
+                             OS::nan_value());
+  } else {
+    Vector<char> buffer = Vector<char>::New(length);
+    String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
+    Vector<const char> result =
+        Vector<const char>(reinterpret_cast<const char*>(buffer.start()),
+        length);
+    number = StringToDouble(isolate()->unicode_cache(),
+                             result,
+                             NO_FLAGS,  // Hex, octal or trailing junk.
+                             0.0);
+    buffer.Dispose();
+  }
+  SkipWhitespace();
+  return isolate()->factory()->NewNumber(number);
+}
+
+
+template <typename StringType>
+inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
+
+template <>
+inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
+  seq_str->SeqTwoByteStringSet(i, c);
+}
+
+template <>
+inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
+  seq_str->SeqAsciiStringSet(i, c);
+}
+
+template <typename StringType>
+inline Handle<StringType> NewRawString(Factory* factory, int length);
+
+template <>
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length) {
+  return factory->NewRawTwoByteString(length, NOT_TENURED);
+}
+
+template <>
+inline Handle<SeqAsciiString> NewRawString(Factory* factory, int length) {
+  return factory->NewRawAsciiString(length, NOT_TENURED);
+}
+
+
+// Scans the rest of a JSON string starting from position_ and writes
+// prefix[start..end] along with the scanned characters into a
+// sequential string of type StringType.
+template <bool seq_ascii>
+template <typename StringType, typename SinkChar>
+Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
+    Handle<String> prefix, int start, int end) {
+  int count = end - start;
+  int max_length = count + source_length_ - position_;
+  int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
+  Handle<StringType> seq_str = NewRawString<StringType>(isolate()->factory(),
+                                                        length);
+  // Copy prefix into seq_str.
+  SinkChar* dest = seq_str->GetChars();
+  String::WriteToFlat(*prefix, dest, start, end);
+
+  while (c0_ != '"') {
+    // Check for control character (0x00-0x1f) or unterminated string (<0).
+    if (c0_ < 0x20) return Handle<String>::null();
+    if (count >= length) {
+      // We need to create a longer sequential string for the result.
+      return SlowScanJsonString<StringType, SinkChar>(seq_str, 0, count);
+    }
+    if (c0_ != '\\') {
+      // If the sink can contain UC16 characters, or source_ contains only
+      // ASCII characters, there's no need to test whether we can store the
+      // character. Otherwise check whether the UC16 source character can fit
+      // in the ASCII sink.
+      if (sizeof(SinkChar) == kUC16Size ||
+          seq_ascii ||
+          c0_ <= kMaxAsciiCharCode) {
+        SeqStringSet(seq_str, count++, c0_);
+        Advance();
+      } else {
+        // StringType is SeqAsciiString and we just read a non-ASCII char.
+        return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
+      }
+    } else {
+      Advance();  // Advance past the \.
+      switch (c0_) {
+        case '"':
+        case '\\':
+        case '/':
+          SeqStringSet(seq_str, count++, c0_);
+          break;
+        case 'b':
+          SeqStringSet(seq_str, count++, '\x08');
+          break;
+        case 'f':
+          SeqStringSet(seq_str, count++, '\x0c');
+          break;
+        case 'n':
+          SeqStringSet(seq_str, count++, '\x0a');
+          break;
+        case 'r':
+          SeqStringSet(seq_str, count++, '\x0d');
+          break;
+        case 't':
+          SeqStringSet(seq_str, count++, '\x09');
+          break;
+        case 'u': {
+          uc32 value = 0;
+          for (int i = 0; i < 4; i++) {
+            Advance();
+            int digit = HexValue(c0_);
+            if (digit < 0) {
+              return Handle<String>::null();
+            }
+            value = value * 16 + digit;
+          }
+          if (sizeof(SinkChar) == kUC16Size || value <= kMaxAsciiCharCode) {
+            SeqStringSet(seq_str, count++, value);
+            break;
+          } else {
+            // StringType is SeqAsciiString and we just read a non-ASCII char.
+            position_ -= 6;  // Rewind position_ to \ in \uxxxx.
+            Advance();
+            return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
+                                                              0,
+                                                              count);
+          }
+        }
+        default:
+          return Handle<String>::null();
+      }
+      Advance();
+    }
+  }
+  // Shrink seq_string length to count.
+  if (isolate()->heap()->InNewSpace(*seq_str)) {
+    isolate()->heap()->new_space()->
+        template ShrinkStringAtAllocationBoundary<StringType>(
+            *seq_str, count);
+  } else {
+    int string_size = StringType::SizeFor(count);
+    int allocated_string_size = StringType::SizeFor(length);
+    int delta = allocated_string_size - string_size;
+    Address start_filler_object = seq_str->address() + string_size;
+    seq_str->set_length(count);
+    isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
+  }
+  ASSERT_EQ('"', c0_);
+  // Advance past the last '"'.
+  AdvanceSkipWhitespace();
+  return seq_str;
+}
+
+
+template <bool seq_ascii>
+template <bool is_symbol>
+Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
+  ASSERT_EQ('"', c0_);
+  Advance();
+  if (c0_ == '"') {
+    AdvanceSkipWhitespace();
+    return Handle<String>(isolate()->heap()->empty_string());
+  }
+  int beg_pos = position_;
+  // Fast case for ASCII only without escape characters.
+  do {
+    // Check for control character (0x00-0x1f) or unterminated string (<0).
+    if (c0_ < 0x20) return Handle<String>::null();
+    if (c0_ != '\\') {
+      if (seq_ascii || c0_ <= kMaxAsciiCharCode) {
+        Advance();
+      } else {
+        return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
+                                                          beg_pos,
+                                                          position_);
+      }
+    } else {
+      return SlowScanJsonString<SeqAsciiString, char>(source_,
+                                                      beg_pos,
+                                                      position_);
+    }
+  } while (c0_ != '"');
+  int length = position_ - beg_pos;
+  Handle<String> result;
+  if (seq_ascii && is_symbol) {
+    result = isolate()->factory()->LookupAsciiSymbol(seq_source_,
+                                                     beg_pos,
+                                                     length);
+  } else {
+    result = isolate()->factory()->NewRawAsciiString(length);
+    char* dest = SeqAsciiString::cast(*result)->GetChars();
+    String::WriteToFlat(*source_, dest, beg_pos, position_);
+  }
+  ASSERT_EQ('"', c0_);
+  // Advance past the last '"'.
+  AdvanceSkipWhitespace();
+  return result;
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_JSON_PARSER_H_
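
The Smi fast path in ParseJsonNumber above is easy to miss: up to nine digits with no fraction or exponent are accumulated directly into an int, so common small integers never go through the generic string-to-double conversion. The standalone sketch below mirrors that decision; ParseJsonNumberSketch is a hypothetical name, plain std::string stands in for V8 handles, and the slow path simply hands the text to strtod, which is more permissive than JSON but good enough for the illustration.

    #include <cctype>
    #include <cstdlib>
    #include <optional>
    #include <string>

    // Parse a JSON-style number starting at *pos; advances *pos past it.
    std::optional<double> ParseJsonNumberSketch(const std::string& text,
                                                size_t* pos) {
      size_t p = *pos;
      bool negative = false;
      if (p < text.size() && text[p] == '-') { negative = true; ++p; }
      if (p >= text.size() || !std::isdigit(static_cast<unsigned char>(text[p]))) {
        return std::nullopt;  // a digit is required after the optional sign
      }
      if (text[p] == '0') {
        ++p;
        // A leading zero may only be followed by '.', 'e'/'E' or the end.
        if (p < text.size() && std::isdigit(static_cast<unsigned char>(text[p]))) {
          return std::nullopt;
        }
      } else {
        int value = 0;
        int digits = 0;
        do {
          value = value * 10 + (text[p] - '0');
          ++digits;
          ++p;
        } while (p < text.size() &&
                 std::isdigit(static_cast<unsigned char>(text[p])));
        bool has_more = p < text.size() &&
                        (text[p] == '.' || text[p] == 'e' || text[p] == 'E');
        if (!has_more && digits < 10) {
          *pos = p;
          return negative ? -value : value;  // "small integer" fast path
        }
      }
      // Slow path: re-parse from the start of the literal with strtod
      // (more permissive than JSON, but fine for a sketch).
      const char* start = text.c_str() + *pos;
      char* end = nullptr;
      double d = std::strtod(start, &end);
      if (end == start) return std::nullopt;
      *pos += static_cast<size_t>(end - start);
      return d;
    }
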
diff --git a/src/json.js b/src/json.js
index 7a6189c..deba126 100644
--- a/src/json.js
+++ b/src/json.js
@@ -54,7 +54,7 @@
 
 function JSONParse(text, reviver) {
   var unfiltered = %ParseJson(TO_STRING_INLINE(text));
-  if (IS_FUNCTION(reviver)) {
+  if (IS_SPEC_FUNCTION(reviver)) {
     return Revive({'': unfiltered}, '', reviver);
   } else {
     return unfiltered;
@@ -143,17 +143,17 @@
   var value = holder[key];
   if (IS_SPEC_OBJECT(value)) {
     var toJSON = value.toJSON;
-    if (IS_FUNCTION(toJSON)) {
+    if (IS_SPEC_FUNCTION(toJSON)) {
       value = %_CallFunction(value, key, toJSON);
     }
   }
-  if (IS_FUNCTION(replacer)) {
+  if (IS_SPEC_FUNCTION(replacer)) {
     value = %_CallFunction(holder, key, value, replacer);
   }
   if (IS_STRING(value)) {
     return %QuoteJSONString(value);
   } else if (IS_NUMBER(value)) {
-    return NUMBER_IS_FINITE(value) ? $String(value) : "null";
+    return JSON_NUMBER_TO_STRING(value);
   } else if (IS_BOOLEAN(value)) {
     return value ? "true" : "false";
   } else if (IS_NULL(value)) {
@@ -164,7 +164,7 @@
       return SerializeArray(value, replacer, stack, indent, gap);
     } else if (IS_NUMBER_WRAPPER(value)) {
       value = ToNumber(value);
-      return NUMBER_IS_FINITE(value) ? ToString(value) : "null";
+      return JSON_NUMBER_TO_STRING(value);
     } else if (IS_STRING_WRAPPER(value)) {
       return %QuoteJSONString(ToString(value));
     } else if (IS_BOOLEAN_WRAPPER(value)) {
@@ -191,31 +191,37 @@
   var val = value[0];
   if (IS_STRING(val)) {
     // First entry is a string. Remaining entries are likely to be strings too.
-    builder.push(%QuoteJSONString(val));
-    for (var i = 1; i < len; i++) {
-      val = value[i];
-      if (IS_STRING(val)) {
-        builder.push(%QuoteJSONStringComma(val));
-      } else {
-        builder.push(",");
-        var before = builder.length;
-        BasicJSONSerialize(i, value[i], stack, builder);
-        if (before == builder.length) builder[before - 1] = ",null";
+    var array_string = %QuoteJSONStringArray(value);
+    if (!IS_UNDEFINED(array_string)) {
+      // array_string also includes bracket characters so we are done.
+      builder[builder.length - 1] = array_string;
+      stack.pop();
+      return;
+    } else {
+      builder.push(%QuoteJSONString(val));
+      for (var i = 1; i < len; i++) {
+        val = value[i];
+        if (IS_STRING(val)) {
+          builder.push(%QuoteJSONStringComma(val));
+        } else {
+          builder.push(",");
+          var before = builder.length;
+          BasicJSONSerialize(i, val, stack, builder);
+          if (before == builder.length) builder[before - 1] = ",null";
+        }
       }
     }
   } else if (IS_NUMBER(val)) {
     // First entry is a number. Remaining entries are likely to be numbers too.
-    builder.push(NUMBER_IS_FINITE(val) ? %_NumberToString(val) : "null");
+    builder.push(JSON_NUMBER_TO_STRING(val));
     for (var i = 1; i < len; i++) {
       builder.push(",");
       val = value[i];
       if (IS_NUMBER(val)) {
-        builder.push(NUMBER_IS_FINITE(val) 
-                     ? %_NumberToString(val) 
-                     : "null");
+        builder.push(JSON_NUMBER_TO_STRING(val));
       } else {
         var before = builder.length;
-        BasicJSONSerialize(i, value[i], stack, builder);
+        BasicJSONSerialize(i, val, stack, builder);
         if (before == builder.length) builder[before - 1] = ",null";
       }
     }
@@ -226,13 +232,12 @@
     for (var i = 1; i < len; i++) {
       builder.push(",");
       before = builder.length;
-      val = value[i];
-      BasicJSONSerialize(i, val, stack, builder);
+      BasicJSONSerialize(i, value[i], stack, builder);
       if (before == builder.length) builder[before - 1] = ",null";
     }
   }
   stack.pop();
-  builder.push("]"); 
+  builder.push("]");
 }
 
 
@@ -268,14 +273,14 @@
 function BasicJSONSerialize(key, value, stack, builder) {
   if (IS_SPEC_OBJECT(value)) {
     var toJSON = value.toJSON;
-    if (IS_FUNCTION(toJSON)) {
+    if (IS_SPEC_FUNCTION(toJSON)) {
       value = %_CallFunction(value, ToString(key), toJSON);
     }
   }
   if (IS_STRING(value)) {
-    builder.push(%QuoteJSONString(value));
+    builder.push(value !== "" ? %QuoteJSONString(value) : '""');
   } else if (IS_NUMBER(value)) {
-    builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
+    builder.push(JSON_NUMBER_TO_STRING(value));
   } else if (IS_BOOLEAN(value)) {
     builder.push(value ? "true" : "false");
   } else if (IS_NULL(value)) {
@@ -285,7 +290,7 @@
     // Unwrap value if necessary
     if (IS_NUMBER_WRAPPER(value)) {
       value = ToNumber(value);
-      builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
+      builder.push(JSON_NUMBER_TO_STRING(value));
     } else if (IS_STRING_WRAPPER(value)) {
       builder.push(%QuoteJSONString(ToString(value)));
     } else if (IS_BOOLEAN_WRAPPER(value)) {
@@ -332,11 +337,12 @@
   return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
 }
 
-function SetupJSON() {
+function SetUpJSON() {
+  %CheckIsBootstrapping();
   InstallFunctions($JSON, DONT_ENUM, $Array(
     "parse", JSONParse,
     "stringify", JSONStringify
   ));
 }
 
-SetupJSON();
+SetUpJSON()
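
The BasicSerializeArray change above adds a fast path: when the first element is a string, %QuoteJSONStringArray is asked to quote the whole array, brackets included, in one runtime call, and the element-by-element loop only runs when that call bails out. A hedged sketch of the same shape in C++ (QuoteStringArrayFast and the JsonValue variant are invented for the example; the escaping is deliberately minimal):

    #include <cstdio>
    #include <optional>
    #include <string>
    #include <variant>
    #include <vector>

    using JsonValue = std::variant<std::string, double>;

    // Quote an all-string array in one pass; bail out as soon as a
    // non-string element is seen so the caller can take the slow path.
    std::optional<std::string> QuoteStringArrayFast(
        const std::vector<JsonValue>& values) {
      std::string out = "[";
      for (size_t i = 0; i < values.size(); ++i) {
        const std::string* s = std::get_if<std::string>(&values[i]);
        if (s == nullptr) return std::nullopt;  // mixed array
        if (i > 0) out += ',';
        out += '"';
        for (char c : *s) {  // minimal escaping, enough for the sketch
          if (c == '"' || c == '\\') out += '\\';
          out += c;
        }
        out += '"';
      }
      out += ']';
      return out;
    }

    int main() {
      std::vector<JsonValue> all_strings = {std::string("a"), std::string("b")};
      std::vector<JsonValue> mixed = {std::string("a"), 1.0};
      std::puts(QuoteStringArrayFast(all_strings).value_or("slow path").c_str());
      std::puts(QuoteStringArrayFast(mixed).value_or("slow path").c_str());
    }

A caller falls back to per-element serialization whenever this returns std::nullopt, just as the JavaScript above falls back when array_string is undefined.
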
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 66b6332..3ebfbdf 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -127,7 +127,7 @@
     return re;
   }
   pattern = FlattenGetString(pattern);
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
   RegExpCompileData parse_result;
   FlatStringReader reader(isolate, pattern);
@@ -212,19 +212,7 @@
   RegExpImpl::SetCapture(array, 1, to);
 }
 
-  /* template <typename SubjectChar>, typename PatternChar>
-static int ReStringMatch(Vector<const SubjectChar> sub_vector,
-                         Vector<const PatternChar> pat_vector,
-                         int start_index) {
 
-  int pattern_length = pat_vector.length();
-  if (pattern_length == 0) return start_index;
-
-  int subject_length = sub_vector.length();
-  if (start_index + pattern_length > subject_length) return -1;
-  return SearchString(sub_vector, pat_vector, start_index);
-}
-  */
 Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
                                     Handle<String> subject,
                                     int index,
@@ -236,38 +224,41 @@
 
   if (!subject->IsFlat()) FlattenString(subject);
   AssertNoAllocation no_heap_allocation;  // ensure vectors stay valid
-  // Extract flattened substrings of cons strings before determining asciiness.
-  String* seq_sub = *subject;
-  if (seq_sub->IsConsString()) seq_sub = ConsString::cast(seq_sub)->first();
 
   String* needle = String::cast(re->DataAt(JSRegExp::kAtomPatternIndex));
   int needle_len = needle->length();
+  ASSERT(needle->IsFlat());
 
   if (needle_len != 0) {
-    if (index + needle_len > subject->length())
-        return isolate->factory()->null_value();
+    if (index + needle_len > subject->length()) {
+      return isolate->factory()->null_value();
+    }
 
+    String::FlatContent needle_content = needle->GetFlatContent();
+    String::FlatContent subject_content = subject->GetFlatContent();
+    ASSERT(needle_content.IsFlat());
+    ASSERT(subject_content.IsFlat());
     // dispatch on type of strings
-    index = (needle->IsAsciiRepresentation()
-             ? (seq_sub->IsAsciiRepresentation()
+    index = (needle_content.IsAscii()
+             ? (subject_content.IsAscii()
                 ? SearchString(isolate,
-                               seq_sub->ToAsciiVector(),
-                               needle->ToAsciiVector(),
+                               subject_content.ToAsciiVector(),
+                               needle_content.ToAsciiVector(),
                                index)
                 : SearchString(isolate,
-                               seq_sub->ToUC16Vector(),
-                               needle->ToAsciiVector(),
+                               subject_content.ToUC16Vector(),
+                               needle_content.ToAsciiVector(),
                                index))
-             : (seq_sub->IsAsciiRepresentation()
+             : (subject_content.IsAscii()
                 ? SearchString(isolate,
-                               seq_sub->ToAsciiVector(),
-                               needle->ToUC16Vector(),
+                               subject_content.ToAsciiVector(),
+                               needle_content.ToUC16Vector(),
                                index)
                 : SearchString(isolate,
-                               seq_sub->ToUC16Vector(),
-                               needle->ToUC16Vector(),
+                               subject_content.ToUC16Vector(),
+                               needle_content.ToUC16Vector(),
                                index)));
-    if (index == -1) return FACTORY->null_value();
+    if (index == -1) return isolate->factory()->null_value();
   }
   ASSERT(last_match_info->HasFastElements());
 
@@ -295,31 +286,67 @@
 #else  // V8_INTERPRETED_REGEXP (RegExp native code)
   if (compiled_code->IsCode()) return true;
 #endif
+  // This regexp may have been marked for code flushing; if the code has not
+  // been flushed yet, a saved copy is still kept at the saved code index.
+  Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_ascii));
+  if (saved_code->IsCode()) {
+    // Reinstate the code in the original place.
+    re->SetDataAt(JSRegExp::code_index(is_ascii), saved_code);
+    ASSERT(compiled_code->IsSmi());
+    return true;
+  }
   return CompileIrregexp(re, is_ascii);
 }
 
 
+static bool CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
+                                            bool is_ascii,
+                                            Handle<String> error_message,
+                                            Isolate* isolate) {
+  Factory* factory = isolate->factory();
+  Handle<FixedArray> elements = factory->NewFixedArray(2);
+  elements->set(0, re->Pattern());
+  elements->set(1, *error_message);
+  Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+  Handle<Object> regexp_err =
+      factory->NewSyntaxError("malformed_regexp", array);
+  isolate->Throw(*regexp_err);
+  return false;
+}
+
+
 bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
   // Compile the RegExp.
   Isolate* isolate = re->GetIsolate();
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
+  // If the previous compilation attempt failed, the error is saved at the
+  // saved code index.
   Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
-  if (entry->IsJSObject()) {
-    // If it's a JSObject, a previous compilation failed and threw this object.
-    // Re-throw the object without trying again.
-    isolate->Throw(entry);
+  // When arriving here entry can only be a smi, either representing an
+  // uncompiled regexp, a previous compilation error, or code that has
+  // been flushed.
+  ASSERT(entry->IsSmi());
+  int entry_value = Smi::cast(entry)->value();
+  ASSERT(entry_value == JSRegExp::kUninitializedValue ||
+         entry_value == JSRegExp::kCompilationErrorValue ||
+         (entry_value < JSRegExp::kCodeAgeMask && entry_value >= 0));
+
+  if (entry_value == JSRegExp::kCompilationErrorValue) {
+    // A previous compilation failed and threw an error which we store in
+    // the saved code index (we store the error message, not the actual
+    // error). Recreate the error object and throw it.
+    Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_ascii));
+    ASSERT(error_string->IsString());
+    Handle<String> error_message(String::cast(error_string));
+    CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
     return false;
   }
-  ASSERT(entry->IsTheHole());
 
   JSRegExp::Flags flags = re->GetFlags();
 
   Handle<String> pattern(re->Pattern());
-  if (!pattern->IsFlat()) {
-    FlattenString(pattern);
-  }
-
+  if (!pattern->IsFlat()) FlattenString(pattern);
   RegExpCompileData compile_data;
   FlatStringReader reader(isolate, pattern);
   if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
@@ -340,17 +367,9 @@
                             is_ascii);
   if (result.error_message != NULL) {
     // Unable to compile regexp.
-    Factory* factory = isolate->factory();
-    Handle<FixedArray> elements = factory->NewFixedArray(2);
-    elements->set(0, *pattern);
     Handle<String> error_message =
-        factory->NewStringFromUtf8(CStrVector(result.error_message));
-    elements->set(1, *error_message);
-    Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
-    Handle<Object> regexp_err =
-        factory->NewSyntaxError("malformed_regexp", array);
-    isolate->Throw(*regexp_err);
-    re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err);
+        isolate->factory()->NewStringFromUtf8(CStrVector(result.error_message));
+    CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
     return false;
   }
 
@@ -411,22 +430,12 @@
 
 int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
                                 Handle<String> subject) {
-  if (!subject->IsFlat()) {
-    FlattenString(subject);
-  }
+  if (!subject->IsFlat()) FlattenString(subject);
+
   // Check the asciiness of the underlying storage.
-  bool is_ascii;
-  {
-    AssertNoAllocation no_gc;
-    String* sequential_string = *subject;
-    if (subject->IsConsString()) {
-      sequential_string = ConsString::cast(*subject)->first();
-    }
-    is_ascii = sequential_string->IsAsciiRepresentation();
-  }
-  if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
-    return -1;
-  }
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+  if (!EnsureCompiledIrregexp(regexp, is_ascii)) return -1;
+
 #ifdef V8_INTERPRETED_REGEXP
   // Byte-code regexp needs space allocated for all its registers.
   return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
@@ -451,15 +460,12 @@
   ASSERT(index <= subject->length());
   ASSERT(subject->IsFlat());
 
-  // A flat ASCII string might have a two-byte first part.
-  if (subject->IsConsString()) {
-    subject = Handle<String>(ConsString::cast(*subject)->first(), isolate);
-  }
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
 #ifndef V8_INTERPRETED_REGEXP
   ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
   do {
-    bool is_ascii = subject->IsAsciiRepresentation();
+    EnsureCompiledIrregexp(regexp, is_ascii);
     Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
     NativeRegExpMacroAssembler::Result res =
         NativeRegExpMacroAssembler::Match(code,
@@ -486,13 +492,13 @@
     // being internal and external, and even between being ASCII and UC16,
     // but the characters are always the same).
     IrregexpPrepare(regexp, subject);
+    is_ascii = subject->IsAsciiRepresentationUnderneath();
   } while (true);
   UNREACHABLE();
   return RE_EXCEPTION;
 #else  // V8_INTERPRETED_REGEXP
 
   ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
-  bool is_ascii = subject->IsAsciiRepresentation();
   // We must have done EnsureCompiledIrregexp, so we can get the number of
   // registers.
   int* register_vector = output.start();
@@ -810,7 +816,13 @@
   inline bool ignore_case() { return ignore_case_; }
   inline bool ascii() { return ascii_; }
 
+  int current_expansion_factor() { return current_expansion_factor_; }
+  void set_current_expansion_factor(int value) {
+    current_expansion_factor_ = value;
+  }
+
   static const int kNoRegister = -1;
+
  private:
   EndNode* accept_;
   int next_register_;
@@ -820,6 +832,7 @@
   bool ignore_case_;
   bool ascii_;
   bool reg_exp_too_big_;
+  int current_expansion_factor_;
 };
 
 
@@ -847,7 +860,8 @@
       recursion_depth_(0),
       ignore_case_(ignore_case),
       ascii_(ascii),
-      reg_exp_too_big_(false) {
+      reg_exp_too_big_(false),
+      current_expansion_factor_(1) {
   accept_ = new EndNode(EndNode::ACCEPT);
   ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
 }
@@ -1918,13 +1932,10 @@
   ASSERT(characters_filled_in < details->characters());
   int characters = details->characters();
   int char_mask;
-  int char_shift;
   if (compiler->ascii()) {
     char_mask = String::kMaxAsciiCharCode;
-    char_shift = 8;
   } else {
     char_mask = String::kMaxUC16CharCode;
-    char_shift = 16;
   }
   for (int k = 0; k < elms_->length(); k++) {
     TextElement elm = elms_->at(k);
@@ -2650,7 +2661,8 @@
 // this alternative and back to this choice node.  If there are variable
 // length nodes or other complications in the way then return a sentinel
 // value indicating that a greedy loop cannot be constructed.
-int ChoiceNode::GreedyLoopTextLength(GuardedAlternative* alternative) {
+int ChoiceNode::GreedyLoopTextLengthForAlternative(
+    GuardedAlternative* alternative) {
   int length = 0;
   RegExpNode* node = alternative->node();
   // Later we will generate code for all these text nodes using recursion
@@ -2689,7 +2701,8 @@
 void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
   RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
   if (trace->stop_node() == this) {
-    int text_length = GreedyLoopTextLength(&(alternatives_->at(0)));
+    int text_length =
+        GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
     ASSERT(text_length != kNodeIsTooComplexForGreedyLoops);
     // Update the counter-based backtracking info on the stack.  This is an
     // optimization for greedy loops (see below).
@@ -2767,6 +2780,7 @@
   AlternativeGeneration* at(int i) {
     return alt_gens_[i];
   }
+
  private:
   static const int kAFew = 10;
   ZoneList<AlternativeGeneration*> alt_gens_;
@@ -2881,7 +2895,7 @@
 
   Trace* current_trace = trace;
 
-  int text_length = GreedyLoopTextLength(&(alternatives_->at(0)));
+  int text_length = GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
   bool greedy_loop = false;
   Label greedy_loop_label;
   Trace counter_backtrack_trace;
@@ -3326,6 +3340,7 @@
     }
     stream()->Add("}}");
   }
+
  private:
   bool first_;
   StringStream* stream() { return stream_; }
@@ -3727,6 +3742,44 @@
 }
 
 
+// Scoped object to keep track of how much we unroll quantifier loops in the
+// regexp graph generator.
+class RegExpExpansionLimiter {
+ public:
+  static const int kMaxExpansionFactor = 6;
+  RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
+      : compiler_(compiler),
+        saved_expansion_factor_(compiler->current_expansion_factor()),
+        ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
+    ASSERT(factor > 0);
+    if (ok_to_expand_) {
+      if (factor > kMaxExpansionFactor) {
+        // Avoid integer overflow of the current expansion factor.
+        ok_to_expand_ = false;
+        compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
+      } else {
+        int new_factor = saved_expansion_factor_ * factor;
+        ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
+        compiler->set_current_expansion_factor(new_factor);
+      }
+    }
+  }
+
+  ~RegExpExpansionLimiter() {
+    compiler_->set_current_expansion_factor(saved_expansion_factor_);
+  }
+
+  bool ok_to_expand() { return ok_to_expand_; }
+
+ private:
+  RegExpCompiler* compiler_;
+  int saved_expansion_factor_;
+  bool ok_to_expand_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
+};
+
+
 RegExpNode* RegExpQuantifier::ToNode(int min,
                                      int max,
                                      bool is_greedy,
@@ -3766,38 +3819,46 @@
   } else if (FLAG_regexp_optimization && !needs_capture_clearing) {
     // Only unroll if there are no captures and the body can't be
     // empty.
-    if (min > 0 && min <= kMaxUnrolledMinMatches) {
-      int new_max = (max == kInfinity) ? max : max - min;
-      // Recurse once to get the loop or optional matches after the fixed ones.
-      RegExpNode* answer = ToNode(
-          0, new_max, is_greedy, body, compiler, on_success, true);
-      // Unroll the forced matches from 0 to min.  This can cause chains of
-      // TextNodes (which the parser does not generate).  These should be
-      // combined if it turns out they hinder good code generation.
-      for (int i = 0; i < min; i++) {
-        answer = body->ToNode(compiler, answer);
-      }
-      return answer;
-    }
-    if (max <= kMaxUnrolledMaxMatches) {
-      ASSERT(min == 0);
-      // Unroll the optional matches up to max.
-      RegExpNode* answer = on_success;
-      for (int i = 0; i < max; i++) {
-        ChoiceNode* alternation = new ChoiceNode(2);
-        if (is_greedy) {
-          alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
-                                                                      answer)));
-          alternation->AddAlternative(GuardedAlternative(on_success));
-        } else {
-          alternation->AddAlternative(GuardedAlternative(on_success));
-          alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
-                                                                      answer)));
+    {
+      RegExpExpansionLimiter limiter(
+          compiler, min + ((max != min) ? 1 : 0));
+      if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
+        int new_max = (max == kInfinity) ? max : max - min;
+        // Recurse once to get the loop or optional matches after the fixed
+        // ones.
+        RegExpNode* answer = ToNode(
+            0, new_max, is_greedy, body, compiler, on_success, true);
+        // Unroll the forced matches from 0 to min.  This can cause chains of
+        // TextNodes (which the parser does not generate).  These should be
+        // combined if it turns out they hinder good code generation.
+        for (int i = 0; i < min; i++) {
+          answer = body->ToNode(compiler, answer);
         }
-        answer = alternation;
-        if (not_at_start) alternation->set_not_at_start();
+        return answer;
       }
-      return answer;
+    }
+    if (max <= kMaxUnrolledMaxMatches && min == 0) {
+      ASSERT(max > 0);  // Due to the 'if' above.
+      RegExpExpansionLimiter limiter(compiler, max);
+      if (limiter.ok_to_expand()) {
+        // Unroll the optional matches up to max.
+        RegExpNode* answer = on_success;
+        for (int i = 0; i < max; i++) {
+          ChoiceNode* alternation = new ChoiceNode(2);
+          if (is_greedy) {
+            alternation->AddAlternative(
+                GuardedAlternative(body->ToNode(compiler, answer)));
+            alternation->AddAlternative(GuardedAlternative(on_success));
+          } else {
+            alternation->AddAlternative(GuardedAlternative(on_success));
+            alternation->AddAlternative(
+                GuardedAlternative(body->ToNode(compiler, answer)));
+          }
+          answer = alternation;
+          if (not_at_start) alternation->set_not_at_start();
+        }
+        return answer;
+      }
     }
   }
   bool has_min = min > 0;
@@ -4123,12 +4184,6 @@
 }
 
 
-static void AddUncanonicals(Isolate* isolate,
-                            ZoneList<CharacterRange>* ranges,
-                            int bottom,
-                            int top);
-
-
 void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
                                         bool is_ascii) {
   Isolate* isolate = Isolate::Current();
@@ -4289,101 +4344,6 @@
 }
 
 
-static void AddUncanonicals(Isolate* isolate,
-                            ZoneList<CharacterRange>* ranges,
-                            int bottom,
-                            int top) {
-  unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
-  // Zones with no case mappings.  There is a DEBUG-mode loop to assert that
-  // this table is correct.
-  // 0x0600 - 0x0fff
-  // 0x1100 - 0x1cff
-  // 0x2000 - 0x20ff
-  // 0x2200 - 0x23ff
-  // 0x2500 - 0x2bff
-  // 0x2e00 - 0xa5ff
-  // 0xa800 - 0xfaff
-  // 0xfc00 - 0xfeff
-  const int boundary_count = 18;
-  int boundaries[] = {
-      0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500,
-      0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00};
-
-  // Special ASCII rule from spec can save us some work here.
-  if (bottom == 0x80 && top == 0xffff) return;
-
-  if (top <= boundaries[0]) {
-    CharacterRange range(bottom, top);
-    range.AddCaseEquivalents(ranges, false);
-    return;
-  }
-
-  // Split up very large ranges.  This helps remove ranges where there are no
-  // case mappings.
-  for (int i = 0; i < boundary_count; i++) {
-    if (bottom < boundaries[i] && top >= boundaries[i]) {
-      AddUncanonicals(isolate, ranges, bottom, boundaries[i] - 1);
-      AddUncanonicals(isolate, ranges, boundaries[i], top);
-      return;
-    }
-  }
-
-  // If we are completely in a zone with no case mappings then we are done.
-  for (int i = 0; i < boundary_count; i += 2) {
-    if (bottom >= boundaries[i] && top < boundaries[i + 1]) {
-#ifdef DEBUG
-      for (int j = bottom; j <= top; j++) {
-        unsigned current_char = j;
-        int length = isolate->jsregexp_uncanonicalize()->get(current_char,
-                                                             '\0', chars);
-        for (int k = 0; k < length; k++) {
-          ASSERT(chars[k] == current_char);
-        }
-      }
-#endif
-      return;
-    }
-  }
-
-  // Step through the range finding equivalent characters.
-  ZoneList<unibrow::uchar> *characters = new ZoneList<unibrow::uchar>(100);
-  for (int i = bottom; i <= top; i++) {
-    int length = isolate->jsregexp_uncanonicalize()->get(i, '\0', chars);
-    for (int j = 0; j < length; j++) {
-      uc32 chr = chars[j];
-      if (chr != i && (chr < bottom || chr > top)) {
-        characters->Add(chr);
-      }
-    }
-  }
-
-  // Step through the equivalent characters finding simple ranges and
-  // adding ranges to the character class.
-  if (characters->length() > 0) {
-    int new_from = characters->at(0);
-    int new_to = new_from;
-    for (int i = 1; i < characters->length(); i++) {
-      int chr = characters->at(i);
-      if (chr == new_to + 1) {
-        new_to++;
-      } else {
-        if (new_to == new_from) {
-          ranges->Add(CharacterRange::Singleton(new_from));
-        } else {
-          ranges->Add(CharacterRange(new_from, new_to));
-        }
-        new_from = new_to = chr;
-      }
-    }
-    if (new_to == new_from) {
-      ranges->Add(CharacterRange::Singleton(new_from));
-    } else {
-      ranges->Add(CharacterRange(new_from, new_to));
-    }
-  }
-}
-
-
 ZoneList<CharacterRange>* CharacterSet::ranges() {
   if (ranges_ == NULL) {
     ranges_ = new ZoneList<CharacterRange>(2);
@@ -4901,7 +4861,6 @@
       cp_offset += elm.data.u_atom->data().length();
     } else {
       cp_offset++;
-      Vector<const uc16> quarks = elm.data.u_atom->data();
     }
   }
 }
@@ -5340,8 +5299,6 @@
     return CompilationResult(error_message);
   }
 
-  NodeInfo info = *node->info();
-
   // Create the correct assembler for the architecture.
 #ifndef V8_INTERPRETED_REGEXP
   // Native regexp implementation.
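
RegExpExpansionLimiter above is an RAII guard: it multiplies the compiler's current expansion factor on construction, reports whether further quantifier unrolling is still within budget, and restores the previous factor when it goes out of scope, so nested quantifiers compose multiplicatively. The following standalone toy (ToyCompiler and the main function are invented; the real class operates on RegExpCompiler) shows that usage pattern:

    #include <cassert>
    #include <cstdio>

    struct ToyCompiler { int current_expansion_factor = 1; };

    class ExpansionLimiter {
     public:
      static constexpr int kMaxExpansionFactor = 6;
      ExpansionLimiter(ToyCompiler* compiler, int factor)
          : compiler_(compiler),
            saved_(compiler->current_expansion_factor),
            ok_to_expand_(saved_ <= kMaxExpansionFactor) {
        assert(factor > 0);
        if (ok_to_expand_) {
          int new_factor = (factor > kMaxExpansionFactor)
                               ? kMaxExpansionFactor + 1  // saturate, no overflow
                               : saved_ * factor;
          ok_to_expand_ = new_factor <= kMaxExpansionFactor;
          compiler->current_expansion_factor = new_factor;
        }
      }
      ~ExpansionLimiter() { compiler_->current_expansion_factor = saved_; }
      bool ok_to_expand() const { return ok_to_expand_; }

     private:
      ToyCompiler* compiler_;
      int saved_;
      bool ok_to_expand_;
    };

    int main() {
      ToyCompiler c;
      ExpansionLimiter outer(&c, 3);                        // unrolling factor 3
      std::printf("%d\n", outer.ok_to_expand() ? 1 : 0);    // 1: 1 * 3 <= 6
      {
        ExpansionLimiter inner(&c, 4);                      // nested: 3 * 4 = 12
        std::printf("%d\n", inner.ok_to_expand() ? 1 : 0);  // 0: over budget
      }
      // inner's destructor restored the factor to 3; outer's will restore 1.
    }
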
diff --git a/src/jsregexp.h b/src/jsregexp.h
index d56b650..54297a4 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -28,7 +28,7 @@
 #ifndef V8_JSREGEXP_H_
 #define V8_JSREGEXP_H_
 
-#include "macro-assembler.h"
+#include "allocation.h"
 #include "zone-inl.h"
 
 namespace v8 {
@@ -255,6 +255,7 @@
     return (bits_ == (kInFirst | kInSecond | kInBoth));
   }
   int value() { return bits_; }
+
  private:
   int bits_;
 };
@@ -404,6 +405,7 @@
 
   template <typename Callback>
   void ForEach(Callback* callback) { return tree()->ForEach(callback); }
+
  private:
   // There can't be a static empty set since it allocates its
   // successors in a zone and caches them.
@@ -793,6 +795,7 @@
   virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
   virtual ActionNode* Clone() { return new ActionNode(*this); }
   virtual int ComputeFirstCharacterSet(int budget);
+
  private:
   union {
     struct {
@@ -861,6 +864,7 @@
   }
   void CalculateOffsets();
   virtual int ComputeFirstCharacterSet(int budget);
+
  private:
   enum TextEmitPassType {
     NON_ASCII_MATCH,             // Check for characters that can't match.
@@ -925,6 +929,7 @@
   virtual AssertionNode* Clone() { return new AssertionNode(*this); }
   AssertionNodeType type() { return type_; }
   void set_type(AssertionNodeType type) { type_ = type; }
+
  private:
   AssertionNode(AssertionNodeType t, RegExpNode* on_success)
       : SeqRegExpNode(on_success), type_(t) { }
@@ -955,6 +960,7 @@
   }
   virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
   virtual int ComputeFirstCharacterSet(int budget);
+
  private:
   int start_reg_;
   int end_reg_;
@@ -1071,7 +1077,7 @@
   virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
 
  protected:
-  int GreedyLoopTextLength(GuardedAlternative* alternative);
+  int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
   ZoneList<GuardedAlternative>* alternatives_;
 
  private:
@@ -1301,6 +1307,7 @@
   }
   void InvalidateCurrentCharacter();
   void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
+
  private:
   int FindAffectedRegisters(OutSet* affected_registers);
   void PerformDeferredActions(RegExpMacroAssembler* macro,
@@ -1402,6 +1409,7 @@
   void fail(const char* error_message) {
     error_message_ = error_message;
   }
+
  private:
   bool ignore_case_;
   bool is_ascii_;
diff --git a/src/list-inl.h b/src/list-inl.h
index eeaea65..80bccc9 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -46,10 +46,16 @@
 
 template<typename T, class P>
 void List<T, P>::AddAll(const List<T, P>& other) {
-  int result_length = length_ + other.length_;
+  AddAll(other.ToVector());
+}
+
+
+template<typename T, class P>
+void List<T, P>::AddAll(const Vector<T>& other) {
+  int result_length = length_ + other.length();
   if (capacity_ < result_length) Resize(result_length);
-  for (int i = 0; i < other.length_; i++) {
-    data_[length_ + i] = other.data_[i];
+  for (int i = 0; i < other.length(); i++) {
+    data_[length_ + i] = other.at(i);
   }
   length_ = result_length;
 }
@@ -201,6 +207,35 @@
 }
 
 
+template <typename T>
+int SortedListBSearch(
+    const List<T>& list, T elem, int (*cmp)(const T* x, const T* y)) {
+  int low = 0;
+  int high = list.length() - 1;
+  while (low <= high) {
+    int mid = (low + high) / 2;
+    T mid_elem = list[mid];
+
+    int order = cmp(&mid_elem, &elem);
+    if (order > 0) {
+      high = mid - 1;
+      continue;
+    }
+    if (order < 0) {
+      low = mid + 1;
+      continue;
+    }
+    // Found the element.
+    return mid;
+  }
+  return -1;
+}
+
+
+template <typename T>
+int SortedListBSearch(const List<T>& list, T elem) {
+  return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_LIST_INL_H_
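
For reference, the contract of SortedListBSearch above in plain standard C++ (std::vector instead of v8::internal::List, and a hypothetical function name): the input must already be sorted, and the result is either the element's index or -1.

    #include <cstdio>
    #include <vector>

    template <typename T>
    int SortedVectorBSearch(const std::vector<T>& v, const T& elem) {
      int low = 0;
      int high = static_cast<int>(v.size()) - 1;
      while (low <= high) {
        int mid = low + (high - low) / 2;  // avoids low + high overflow
        if (v[mid] > elem) {
          high = mid - 1;
        } else if (v[mid] < elem) {
          low = mid + 1;
        } else {
          return mid;  // found the element
        }
      }
      return -1;  // not present
    }

    int main() {
      std::vector<int> sorted = {2, 3, 5, 8, 13};
      std::printf("%d %d\n", SortedVectorBSearch(sorted, 8),   // 3
                             SortedVectorBSearch(sorted, 4));  // -1
    }
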
diff --git a/src/list.h b/src/list.h
index 9a2e698..0558709 100644
--- a/src/list.h
+++ b/src/list.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_LIST_H_
 #define V8_LIST_H_
 
+#include "utils.h"
+
 namespace v8 {
 namespace internal {
 
@@ -47,7 +49,6 @@
 template <typename T, class P>
 class List {
  public:
-
   List() { Initialize(0); }
   INLINE(explicit List(int capacity)) { Initialize(capacity); }
   INLINE(~List()) { DeleteData(data_); }
@@ -80,7 +81,7 @@
   INLINE(int length() const) { return length_; }
   INLINE(int capacity() const) { return capacity_; }
 
-  Vector<T> ToVector() { return Vector<T>(data_, length_); }
+  Vector<T> ToVector() const { return Vector<T>(data_, length_); }
 
   Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
 
@@ -91,6 +92,9 @@
   // Add all the elements from the argument list to this list.
   void AddAll(const List<T, P>& other);
 
+  // Add all the elements from the vector to this list.
+  void AddAll(const Vector<T>& other);
+
   // Inserts the element at the specific index.
   void InsertAt(int index, const T& element);
 
@@ -159,6 +163,20 @@
   DISALLOW_COPY_AND_ASSIGN(List);
 };
 
+class Map;
+class Code;
+typedef List<Map*> MapList;
+typedef List<Code*> CodeList;
+
+// Perform binary search for an element in an already sorted
+// list. Returns the index of the element, or -1 if it was not found.
+template <typename T>
+int SortedListBSearch(
+    const List<T>& list, T elem, int (*cmp)(const T* x, const T* y));
+template <typename T>
+int SortedListBSearch(const List<T>& list, T elem);
+
 } }  // namespace v8::internal
 
+
 #endif  // V8_LIST_H_
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
index c0beaaf..8f660ce 100644
--- a/src/lithium-allocator-inl.h
+++ b/src/lithium-allocator-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,27 +62,27 @@
     : instr_(instr),
       limit_(instr->TempCount()),
       current_(0) {
-  current_ = AdvanceToNext(0);
+  SkipUninteresting();
 }
 
 
-bool TempIterator::HasNext() { return current_ < limit_; }
+bool TempIterator::Done() { return current_ >= limit_; }
 
 
-LOperand* TempIterator::Next() {
-  ASSERT(HasNext());
+LOperand* TempIterator::Current() {
+  ASSERT(!Done());
   return instr_->TempAt(current_);
 }
 
 
-int TempIterator::AdvanceToNext(int start) {
-  while (start < limit_ && instr_->TempAt(start) == NULL) start++;
-  return start;
+void TempIterator::SkipUninteresting() {
+  while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
 }
 
 
 void TempIterator::Advance() {
-  current_ = AdvanceToNext(current_ + 1);
+  ++current_;
+  SkipUninteresting();
 }
 
 
@@ -90,27 +90,29 @@
     : instr_(instr),
       limit_(instr->InputCount()),
       current_(0) {
-  current_ = AdvanceToNext(0);
+  SkipUninteresting();
 }
 
 
-bool InputIterator::HasNext() { return current_ < limit_; }
+bool InputIterator::Done() { return current_ >= limit_; }
 
 
-LOperand* InputIterator::Next() {
-  ASSERT(HasNext());
+LOperand* InputIterator::Current() {
+  ASSERT(!Done());
   return instr_->InputAt(current_);
 }
 
 
 void InputIterator::Advance() {
-  current_ = AdvanceToNext(current_ + 1);
+  ++current_;
+  SkipUninteresting();
 }
 
 
-int InputIterator::AdvanceToNext(int start) {
-  while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
-  return start;
+void InputIterator::SkipUninteresting() {
+  while (current_ < limit_ && instr_->InputAt(current_)->IsConstantOperand()) {
+    ++current_;
+  }
 }
 
 
@@ -118,23 +120,23 @@
     : input_iterator_(instr), env_iterator_(instr->environment()) { }
 
 
-bool UseIterator::HasNext() {
-  return input_iterator_.HasNext() || env_iterator_.HasNext();
+bool UseIterator::Done() {
+  return input_iterator_.Done() && env_iterator_.Done();
 }
 
 
-LOperand* UseIterator::Next() {
-  ASSERT(HasNext());
-  return input_iterator_.HasNext()
-      ? input_iterator_.Next()
-      : env_iterator_.Next();
+LOperand* UseIterator::Current() {
+  ASSERT(!Done());
+  return input_iterator_.Done()
+      ? env_iterator_.Current()
+      : input_iterator_.Current();
 }
 
 
 void UseIterator::Advance() {
-  input_iterator_.HasNext()
-      ? input_iterator_.Advance()
-      : env_iterator_.Advance();
+  input_iterator_.Done()
+      ? env_iterator_.Advance()
+      : input_iterator_.Advance();
 }
 
 } }  // namespace v8::internal
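
The iterators above were converted from a HasNext()/Next() protocol to Done()/Current()/Advance(): both the constructor and Advance() end with SkipUninteresting(), so Current() is always valid while Done() is false, and reading the current element no longer advances the iterator. A small self-contained sketch of that shape (NonNullIterator is invented for the example):

    #include <cstdio>

    class NonNullIterator {
     public:
      NonNullIterator(const int* const* items, int limit)
          : items_(items), limit_(limit), current_(0) {
        SkipUninteresting();
      }
      bool Done() const { return current_ >= limit_; }
      const int* Current() const { return items_[current_]; }
      void Advance() {
        ++current_;
        SkipUninteresting();
      }

     private:
      void SkipUninteresting() {
        while (current_ < limit_ && items_[current_] == nullptr) ++current_;
      }
      const int* const* items_;
      int limit_;
      int current_;
    };

    int main() {
      int a = 1, b = 2;
      const int* items[] = {nullptr, &a, nullptr, &b, nullptr};
      for (NonNullIterator it(items, 5); !it.Done(); it.Advance()) {
        std::printf("%d ", *it.Current());  // prints: 1 2
      }
      std::printf("\n");
    }
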
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index f62a7db..4661106 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -303,6 +303,11 @@
   // we need to split use positions in a special way.
   bool split_at_start = false;
 
+  if (current->start().Value() == position.Value()) {
+    // When splitting at start we need to locate the previous use interval.
+    current = first_interval_;
+  }
+
   while (current != NULL) {
     if (current->Contains(position)) {
       current->SplitAt(position);
@@ -352,6 +357,11 @@
   }
   result->first_pos_ = use_after;
 
+  // Discard cached iteration state. It might be pointing
+  // to a use that no longer belongs to this live range.
+  last_processed_use_ = NULL;
+  current_interval_ = NULL;
+
   // Link the new live range in the chain before any of the other
   // ranges linked from the range before the split.
   result->parent_ = (parent_ == NULL) ? this : parent_;
@@ -565,10 +575,10 @@
   BitVector* live_out = new BitVector(next_virtual_register_);
 
   // Process all successor blocks.
-  HBasicBlock* successor = block->end()->FirstSuccessor();
-  while (successor != NULL) {
+  for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
     // Add values live on entry to the successor. Note the successor's
     // live_in will not be computed yet for backwards edges.
+    HBasicBlock* successor = it.Current();
     BitVector* live_in = live_in_sets_[successor->block_id()];
     if (live_in != NULL) live_out->Union(*live_in);
 
@@ -582,11 +592,6 @@
         live_out->Add(phi->OperandAt(index)->id());
       }
     }
-
-    // Check if we are done with second successor.
-    if (successor == block->end()->SecondSuccessor()) break;
-
-    successor = block->end()->SecondSuccessor();
   }
 
   return live_out;
@@ -790,8 +795,8 @@
                                         int gap_index) {
   // Handle fixed temporaries.
   if (first != NULL) {
-    for (TempIterator it(first); it.HasNext(); it.Advance()) {
-      LUnallocated* temp = LUnallocated::cast(it.Next());
+    for (TempIterator it(first); !it.Done(); it.Advance()) {
+      LUnallocated* temp = LUnallocated::cast(it.Current());
       if (temp->HasFixedPolicy()) {
         AllocateFixed(temp, gap_index - 1, false);
       }
@@ -832,8 +837,8 @@
 
   // Handle fixed input operands of second instruction.
   if (second != NULL) {
-    for (UseIterator it(second); it.HasNext(); it.Advance()) {
-      LUnallocated* cur_input = LUnallocated::cast(it.Next());
+    for (UseIterator it(second); !it.Done(); it.Advance()) {
+      LUnallocated* cur_input = LUnallocated::cast(it.Current());
       if (cur_input->HasFixedPolicy()) {
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
         bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
@@ -968,8 +973,8 @@
           }
         }
 
-        for (UseIterator it(instr); it.HasNext(); it.Advance()) {
-          LOperand* input = it.Next();
+        for (UseIterator it(instr); !it.Done(); it.Advance()) {
+          LOperand* input = it.Current();
 
           LifetimePosition use_pos;
           if (input->IsUnallocated() &&
@@ -983,8 +988,8 @@
           if (input->IsUnallocated()) live->Add(input->VirtualRegister());
         }
 
-        for (TempIterator it(instr); it.HasNext(); it.Advance()) {
-          LOperand* temp = it.Next();
+        for (TempIterator it(instr); !it.Done(); it.Advance()) {
+          LOperand* temp = it.Current();
           if (instr->IsMarkedAsCall()) {
             if (temp->IsRegister()) continue;
             if (temp->IsUnallocated()) {
@@ -1019,7 +1024,7 @@
         operand = chunk_->DefineConstantOperand(constant);
       } else {
         ASSERT(!op->EmitAtUses());
-        LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+        LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY);
         unalloc->set_virtual_register(op->id());
         operand = unalloc;
       }
@@ -1029,6 +1034,22 @@
       chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
                          operand,
                          phi_operand);
+
+      // We are going to insert a move before the branch instruction.
+      // Some branch instructions (e.g. loops' back edges)
+      // can potentially cause a GC so they have a pointer map.
+      // By inserting a move we essentially create a copy of a
+      // value which is invisible to PopulatePointerMaps(), because we store
+      // it into a location different from the operand of a live range
+      // covering a branch instruction.
+      // Thus we need to manually record a pointer.
+      if (phi->representation().IsTagged()) {
+        LInstruction* branch =
+            InstructionAt(cur_block->last_instruction_index());
+        if (branch->HasPointerMap()) {
+          branch->pointer_map()->RecordPointer(phi_operand);
+        }
+      }
     }
 
     LiveRange* live_range = LiveRangeFor(phi->id());
@@ -1116,7 +1137,7 @@
         // We are going to insert a move before the branch instruction.
         // Some branch instructions (e.g. loops' back edges)
         // can potentially cause a GC so they have a pointer map.
-        // By insterting a move we essentially create a copy of a
+        // By inserting a move we essentially create a copy of a
         // value which is invisible to PopulatePointerMaps(), because we store
         // it into a location different from the operand of a live range
         // covering a branch instruction.
@@ -2013,12 +2034,12 @@
   // We have no choice
   if (start_instr == end_instr) return end;
 
-  HBasicBlock* end_block = GetBlock(start);
-  HBasicBlock* start_block = GetBlock(end);
+  HBasicBlock* start_block = GetBlock(start);
+  HBasicBlock* end_block = GetBlock(end);
 
   if (end_block == start_block) {
-    // The interval is split in the same basic block. Split at latest possible
-    // position.
+    // The interval is split in the same basic block. Split at the latest
+    // possible position.
     return end;
   }
 
@@ -2029,7 +2050,9 @@
     block = block->parent_loop_header();
   }
 
-  if (block == end_block) return end;
+  // We did not find any suitable outer loop. Split at the latest possible
+  // position unless end_block is a loop header itself.
+  if (block == end_block && !end_block->IsLoopHeader()) return end;
 
   return LifetimePosition::FromInstructionIndex(
       block->first_instruction_index());
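
The change to FindOptimalSplitPos at the end of the hunk above reads as: if start and end are in the same block, split as late as possible; otherwise walk up end_block's loop nesting and split at the first instruction of the outermost loop header that begins after start, falling back to the latest position only when no such header exists and end_block is not itself a loop header. A greatly simplified, self-contained model of that decision (the Block struct and the block-id ordering assumption are invented for the sketch):

    #include <cstdio>

    struct Block {
      int block_id;               // assumed to increase with control-flow order
      int first_instruction_index;
      bool is_loop_header;
      Block* parent_loop_header;  // nullptr when not inside a loop
    };

    // Returns the instruction index at which to split a live range that
    // starts in start_block and ends at end_index inside end_block.
    int FindOptimalSplitPosSketch(Block* start_block, Block* end_block,
                                  int end_index) {
      if (start_block == end_block) return end_index;  // same block: split late
      Block* block = end_block;
      // Hoist to the outermost loop header that still lies after the start.
      while (block->parent_loop_header != nullptr &&
             block->parent_loop_header->block_id > start_block->block_id) {
        block = block->parent_loop_header;
      }
      if (block == end_block && !end_block->is_loop_header) return end_index;
      return block->first_instruction_index;
    }

    int main() {
      Block start{0, 0, false, nullptr};
      Block outer{1, 10, true, nullptr};
      Block inner{3, 20, true, &outer};
      Block body {4, 30, false, &inner};
      // A range that starts before the loops and ends inside the body gets
      // split right before the outer loop header (instruction 10).
      std::printf("%d\n", FindOptimalSplitPosSketch(&start, &body, 35));
    }
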
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index f109c45..e4e6497 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 
 #include "v8.h"
 
-#include "data-flow.h"
+#include "allocation.h"
 #include "lithium.h"
 #include "zone.h"
 
@@ -162,12 +162,12 @@
 class TempIterator BASE_EMBEDDED {
  public:
   inline explicit TempIterator(LInstruction* instr);
-  inline bool HasNext();
-  inline LOperand* Next();
+  inline bool Done();
+  inline LOperand* Current();
   inline void Advance();
 
  private:
-  inline int AdvanceToNext(int start);
+  inline void SkipUninteresting();
   LInstruction* instr_;
   int limit_;
   int current_;
@@ -178,12 +178,12 @@
 class InputIterator BASE_EMBEDDED {
  public:
   inline explicit InputIterator(LInstruction* instr);
-  inline bool HasNext();
-  inline LOperand* Next();
+  inline bool Done();
+  inline LOperand* Current();
   inline void Advance();
 
  private:
-  inline int AdvanceToNext(int start);
+  inline void SkipUninteresting();
   LInstruction* instr_;
   int limit_;
   int current_;
@@ -193,8 +193,8 @@
 class UseIterator BASE_EMBEDDED {
  public:
   inline explicit UseIterator(LInstruction* instr);
-  inline bool HasNext();
-  inline LOperand* Next();
+  inline bool Done();
+  inline LOperand* Current();
   inline void Advance();
 
  private:
diff --git a/src/lithium.cc b/src/lithium.cc
index aeac2db..5410f6f 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -166,4 +166,30 @@
 }
 
 
+int ElementsKindToShiftSize(ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      return 0;
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      return 1;
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+      return 2;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+      return 3;
+    case FAST_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      return kPointerSizeLog2;
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
 } }  // namespace v8::internal
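The new ElementsKindToShiftSize() helper returns log2 of the element width so element addresses can be computed with a shift instead of a multiply. A hedged standalone sketch of that use, with an illustrative enum and an assumed AddressOf helper (not the V8 definitions):

#include <cstdint>

enum ElementsKind {
  EXTERNAL_BYTE,    // 1-byte elements  -> shift 0
  EXTERNAL_SHORT,   // 2-byte elements  -> shift 1
  EXTERNAL_INT,     // 4-byte elements  -> shift 2
  EXTERNAL_DOUBLE   // 8-byte elements  -> shift 3
};

int ElementsKindToShiftSize(ElementsKind kind) {
  switch (kind) {
    case EXTERNAL_BYTE:   return 0;
    case EXTERNAL_SHORT:  return 1;
    case EXTERNAL_INT:    return 2;
    case EXTERNAL_DOUBLE: return 3;
  }
  return 0;  // Unreachable for valid kinds.
}

// The shift is log2(element width), so an element address is
// base + (index << shift) rather than base + index * width.
std::uintptr_t AddressOf(std::uintptr_t base, int index, ElementsKind kind) {
  return base +
         (static_cast<std::uintptr_t>(index) << ElementsKindToShiftSize(kind));
}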
diff --git a/src/lithium.h b/src/lithium.h
index 280da47..a933f72 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -28,6 +28,7 @@
 #ifndef V8_LITHIUM_H_
 #define V8_LITHIUM_H_
 
+#include "allocation.h"
 #include "hydrogen.h"
 #include "safepoint-table.h"
 
@@ -164,8 +165,7 @@
   }
   Policy policy() const { return PolicyField::decode(value_); }
   void set_policy(Policy policy) {
-    value_ &= ~PolicyField::mask();
-    value_ |= PolicyField::encode(policy);
+    value_ = PolicyField::update(value_, policy);
   }
   int fixed_index() const {
     return static_cast<int>(value_) >> kFixedIndexShift;
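The set_policy/set_virtual_register changes above fold the clear-then-encode pair into a single BitField-style update() call. A minimal sketch of what such an update() helper amounts to, assuming a simplified 32-bit BitField template (not the V8 one):

#include <cstdint>

// Illustrative BitField helper: update() is just "clear the field's bits,
// then OR in the newly encoded value".
template <class T, int shift, int size>
struct BitField {
  static_assert(size < 32 && shift + size <= 32, "field must fit in 32 bits");
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;

  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
  static constexpr uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};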
@@ -176,8 +176,7 @@
   }
 
   void set_virtual_register(unsigned id) {
-    value_ &= ~VirtualRegisterField::mask();
-    value_ |= VirtualRegisterField::encode(id);
+    value_ = VirtualRegisterField::update(value_, id);
   }
 
   LUnallocated* CopyUnconstrained() {
@@ -443,6 +442,7 @@
         translation_index_(-1),
         ast_id_(ast_id),
         parameter_count_(parameter_count),
+        pc_offset_(-1),
         values_(value_count),
         representations_(value_count),
         spilled_registers_(NULL),
@@ -456,6 +456,7 @@
   int translation_index() const { return translation_index_; }
   int ast_id() const { return ast_id_; }
   int parameter_count() const { return parameter_count_; }
+  int pc_offset() const { return pc_offset_; }
   LOperand** spilled_registers() const { return spilled_registers_; }
   LOperand** spilled_double_registers() const {
     return spilled_double_registers_;
@@ -472,10 +473,13 @@
     return representations_[index].IsTagged();
   }
 
-  void Register(int deoptimization_index, int translation_index) {
+  void Register(int deoptimization_index,
+                int translation_index,
+                int pc_offset) {
     ASSERT(!HasBeenRegistered());
     deoptimization_index_ = deoptimization_index;
     translation_index_ = translation_index;
+    pc_offset_ = pc_offset;
   }
   bool HasBeenRegistered() const {
     return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
@@ -496,6 +500,7 @@
   int translation_index_;
   int ast_id_;
   int parameter_count_;
+  int pc_offset_;
   ZoneList<LOperand*> values_;
   ZoneList<Representation> representations_;
 
@@ -518,34 +523,34 @@
       : env_(env),
         limit_(env != NULL ? env->values()->length() : 0),
         current_(0) {
-    current_ = AdvanceToNext(0);
+    SkipUninteresting();
   }
 
-  inline bool HasNext() {
-    return env_ != NULL && current_ < limit_;
-  }
+  bool Done() { return current_ >= limit_; }
 
-  inline LOperand* Next() {
-    ASSERT(HasNext());
+  LOperand* Current() {
+    ASSERT(!Done());
     return env_->values()->at(current_);
   }
 
-  inline void Advance() {
-    current_ = AdvanceToNext(current_ + 1);
+  void Advance() {
+    ASSERT(!Done());
+    ++current_;
+    SkipUninteresting();
   }
 
-  inline LEnvironment* env() { return env_; }
+  LEnvironment* env() { return env_; }
 
  private:
-  inline bool ShouldSkip(LOperand* op) {
+  bool ShouldSkip(LOperand* op) {
     return op == NULL || op->IsConstantOperand() || op->IsArgument();
   }
 
-  inline int AdvanceToNext(int start) {
-    while (start < limit_ && ShouldSkip(env_->values()->at(start))) {
-      start++;
+  // Skip ahead to the next interesting operand, starting at current_ itself.
+  void SkipUninteresting() {
+    while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
+      ++current_;
     }
-    return start;
   }
 
   LEnvironment* env_;
@@ -558,36 +563,36 @@
 class DeepIterator BASE_EMBEDDED {
  public:
   explicit DeepIterator(LEnvironment* env)
-      : current_iterator_(env) { }
-
-  inline bool HasNext() {
-    if (current_iterator_.HasNext()) return true;
-    if (current_iterator_.env() == NULL) return false;
-    AdvanceToOuter();
-    return current_iterator_.HasNext();
+      : current_iterator_(env) {
+    SkipUninteresting();
   }
 
-  inline LOperand* Next() {
-    ASSERT(current_iterator_.HasNext());
-    return current_iterator_.Next();
+  bool Done() { return current_iterator_.Done(); }
+
+  LOperand* Current() {
+    ASSERT(!current_iterator_.Done());
+    return current_iterator_.Current();
   }
 
-  inline void Advance() {
-    if (current_iterator_.HasNext()) {
-      current_iterator_.Advance();
-    } else {
-      AdvanceToOuter();
-    }
+  void Advance() {
+    current_iterator_.Advance();
+    SkipUninteresting();
   }
 
  private:
-  inline void AdvanceToOuter() {
-    current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+  void SkipUninteresting() {
+    while (current_iterator_.env() != NULL && current_iterator_.Done()) {
+      current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+    }
   }
 
   ShallowIterator current_iterator_;
 };
 
+
+int ElementsKindToShiftSize(ElementsKind elements_kind);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_H_
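The iterator rewrite in lithium.h and lithium-allocator.h replaces HasNext()/Next() with the Done()/Current()/Advance() protocol backed by a SkipUninteresting() helper that re-establishes the "current element is interesting" invariant after construction and after every step. A self-contained sketch of that pattern over a plain vector, with a hypothetical ShouldSkip predicate:

#include <cassert>
#include <vector>

class SkippingIterator {
 public:
  explicit SkippingIterator(const std::vector<int>* values)
      : values_(values),
        limit_(values ? static_cast<int>(values->size()) : 0),
        current_(0) {
    SkipUninteresting();  // Establish the invariant before first use.
  }

  bool Done() const { return current_ >= limit_; }

  int Current() const {
    assert(!Done());
    return (*values_)[current_];
  }

  void Advance() {
    assert(!Done());
    ++current_;
    SkipUninteresting();
  }

 private:
  // Hypothetical filter; the real iterators skip constants and arguments.
  bool ShouldSkip(int v) const { return v == 0; }

  // Skip forward until Current() is interesting, starting at current_.
  void SkipUninteresting() {
    while (current_ < limit_ && ShouldSkip((*values_)[current_])) ++current_;
  }

  const std::vector<int>* values_;
  int limit_;
  int current_;
};

// Usage: for (SkippingIterator it(&v); !it.Done(); it.Advance()) use(it.Current());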
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 1466766..d44c2fc 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,8 +30,8 @@
 
 #include "liveedit.h"
 
-#include "compiler.h"
 #include "compilation-cache.h"
+#include "compiler.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "global-handles.h"
@@ -66,7 +66,7 @@
 class Differencer {
  public:
   explicit Differencer(Comparator::Input* input)
-      : input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
+      : input_(input), len1_(input->GetLength1()), len2_(input->GetLength2()) {
     buffer_ = NewArray<int>(len1_ * len2_);
   }
   ~Differencer() {
@@ -151,7 +151,7 @@
         if (cached_res == kEmptyCellValue) {
           Direction dir;
           int res;
-          if (input_->equals(pos1, pos2)) {
+          if (input_->Equals(pos1, pos2)) {
             res = CompareUpToTail(pos1 + 1, pos2 + 1);
             dir = EQ;
           } else {
@@ -268,17 +268,10 @@
 }
 
 
-static bool CompareSubstrings(Isolate* isolate, Handle<String> s1, int pos1,
+static bool CompareSubstrings(Handle<String> s1, int pos1,
                               Handle<String> s2, int pos2, int len) {
-  StringInputBuffer& buf1 = *isolate->liveedit_compare_substrings_buf1();
-  StringInputBuffer& buf2 = *isolate->liveedit_compare_substrings_buf2();
-  buf1.Reset(*s1);
-  buf1.Seek(pos1);
-  buf2.Reset(*s2);
-  buf2.Seek(pos2);
   for (int i = 0; i < len; i++) {
-    ASSERT(buf1.has_more() && buf2.has_more());
-    if (buf1.GetNext() != buf2.GetNext()) {
+    if (s1->Get(i + pos1) != s2->Get(i + pos2)) {
       return false;
     }
   }
@@ -286,6 +279,70 @@
 }
 
 
+// An extension of the Input interface that lets us restrict the Input to a
+// subrange. A more elegant way would be to wrap one Input in another Input
+// object and translate positions there, but that would cost an additional
+// virtual call per comparison.
+class SubrangableInput : public Comparator::Input {
+ public:
+  virtual void SetSubrange1(int offset, int len) = 0;
+  virtual void SetSubrange2(int offset, int len) = 0;
+};
+
+
+class SubrangableOutput : public Comparator::Output {
+ public:
+  virtual void SetSubrange1(int offset, int len) = 0;
+  virtual void SetSubrange2(int offset, int len) = 0;
+};
+
+
+static int min(int a, int b) {
+  return a < b ? a : b;
+}
+
+
+// Finds the common prefix and suffix of the input. These parts should not
+// take space in the dynamic programming table. Enables subranging in both
+// input and output.
+static void NarrowDownInput(SubrangableInput* input,
+    SubrangableOutput* output) {
+  const int len1 = input->GetLength1();
+  const int len2 = input->GetLength2();
+
+  int common_prefix_len;
+  int common_suffix_len;
+
+  {
+    common_prefix_len = 0;
+    int prefix_limit = min(len1, len2);
+    while (common_prefix_len < prefix_limit &&
+        input->Equals(common_prefix_len, common_prefix_len)) {
+      common_prefix_len++;
+    }
+
+    common_suffix_len = 0;
+    int suffix_limit = min(len1 - common_prefix_len, len2 - common_prefix_len);
+
+    while (common_suffix_len < suffix_limit &&
+        input->Equals(len1 - common_suffix_len - 1,
+        len2 - common_suffix_len - 1)) {
+      common_suffix_len++;
+    }
+  }
+
+  if (common_prefix_len > 0 || common_suffix_len > 0) {
+    int new_len1 = len1 - common_suffix_len - common_prefix_len;
+    int new_len2 = len2 - common_suffix_len - common_prefix_len;
+
+    input->SetSubrange1(common_prefix_len, new_len1);
+    input->SetSubrange2(common_prefix_len, new_len2);
+
+    output->SetSubrange1(common_prefix_len, new_len1);
+    output->SetSubrange2(common_prefix_len, new_len2);
+  }
+}
+
+
 // A helper class that writes chunk numbers into JSArray.
 // Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
 class CompareOutputArrayWriter {
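NarrowDownInput() above trims the common prefix and suffix of the two line arrays so that only the differing middle has to pay for the quadratic diff table. A hedged sketch of that trimming step over plain vectors (the names are illustrative):

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Returns (prefix_len, suffix_len): how many leading and trailing elements the
// two sequences share. Only the remaining middles need the O(n*m) diff table.
std::pair<std::size_t, std::size_t> CommonAffixes(const std::vector<int>& a,
                                                  const std::vector<int>& b) {
  std::size_t prefix = 0;
  const std::size_t prefix_limit = std::min(a.size(), b.size());
  while (prefix < prefix_limit && a[prefix] == b[prefix]) ++prefix;

  // Do not let the suffix overlap the prefix just found.
  std::size_t suffix = 0;
  const std::size_t suffix_limit =
      std::min(a.size() - prefix, b.size() - prefix);
  while (suffix < suffix_limit &&
         a[a.size() - suffix - 1] == b[b.size() - suffix - 1]) {
    ++suffix;
  }
  return std::make_pair(prefix, suffix);
}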
@@ -326,13 +383,13 @@
       : s1_(s1), offset1_(offset1), len1_(len1),
         s2_(s2), offset2_(offset2), len2_(len2) {
   }
-  virtual int getLength1() {
+  virtual int GetLength1() {
     return len1_;
   }
-  virtual int getLength2() {
+  virtual int GetLength2() {
     return len2_;
   }
-  bool equals(int index1, int index2) {
+  bool Equals(int index1, int index2) {
     return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
   }
 
@@ -408,20 +465,26 @@
 
 
 // Represents 2 strings as 2 arrays of lines.
-class LineArrayCompareInput : public Comparator::Input {
+class LineArrayCompareInput : public SubrangableInput {
  public:
-  LineArrayCompareInput(Isolate* isolate, Handle<String> s1, Handle<String> s2,
+  LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
                         LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
-      : isolate_(isolate), s1_(s1), s2_(s2), line_ends1_(line_ends1),
-        line_ends2_(line_ends2) {
+      : s1_(s1), s2_(s2), line_ends1_(line_ends1),
+        line_ends2_(line_ends2),
+        subrange_offset1_(0), subrange_offset2_(0),
+        subrange_len1_(line_ends1_.length()),
+        subrange_len2_(line_ends2_.length()) {
   }
-  int getLength1() {
-    return line_ends1_.length();
+  int GetLength1() {
+    return subrange_len1_;
   }
-  int getLength2() {
-    return line_ends2_.length();
+  int GetLength2() {
+    return subrange_len2_;
   }
-  bool equals(int index1, int index2) {
+  bool Equals(int index1, int index2) {
+    index1 += subrange_offset1_;
+    index2 += subrange_offset2_;
+
     int line_start1 = line_ends1_.GetLineStart(index1);
     int line_start2 = line_ends2_.GetLineStart(index2);
     int line_end1 = line_ends1_.GetLineEnd(index1);
@@ -431,30 +494,45 @@
     if (len1 != len2) {
       return false;
     }
-    return CompareSubstrings(isolate_, s1_, line_start1, s2_, line_start2,
+    return CompareSubstrings(s1_, line_start1, s2_, line_start2,
                              len1);
   }
+  void SetSubrange1(int offset, int len) {
+    subrange_offset1_ = offset;
+    subrange_len1_ = len;
+  }
+  void SetSubrange2(int offset, int len) {
+    subrange_offset2_ = offset;
+    subrange_len2_ = len;
+  }
 
  private:
-  Isolate* isolate_;
   Handle<String> s1_;
   Handle<String> s2_;
   LineEndsWrapper line_ends1_;
   LineEndsWrapper line_ends2_;
+  int subrange_offset1_;
+  int subrange_offset2_;
+  int subrange_len1_;
+  int subrange_len2_;
 };
 
 
 // Stores compare result in JSArray. For each chunk tries to conduct
 // a fine-grained nested diff token-wise.
-class TokenizingLineArrayCompareOutput : public Comparator::Output {
+class TokenizingLineArrayCompareOutput : public SubrangableOutput {
  public:
   TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
                                    LineEndsWrapper line_ends2,
                                    Handle<String> s1, Handle<String> s2)
-      : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2) {
+      : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
+        subrange_offset1_(0), subrange_offset2_(0) {
   }
 
   void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
+    line_pos1 += subrange_offset1_;
+    line_pos2 += subrange_offset2_;
+
     int char_pos1 = line_ends1_.GetLineStart(line_pos1);
     int char_pos2 = line_ends2_.GetLineStart(line_pos2);
     int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
@@ -474,6 +552,12 @@
       array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
     }
   }
+  void SetSubrange1(int offset, int len) {
+    subrange_offset1_ = offset;
+  }
+  void SetSubrange2(int offset, int len) {
+    subrange_offset2_ = offset;
+  }
 
   Handle<JSArray> GetResult() {
     return array_writer_.GetResult();
@@ -487,18 +571,24 @@
   LineEndsWrapper line_ends2_;
   Handle<String> s1_;
   Handle<String> s2_;
+  int subrange_offset1_;
+  int subrange_offset2_;
 };
 
 
 Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
                                          Handle<String> s2) {
+  s1 = FlattenGetString(s1);
+  s2 = FlattenGetString(s2);
+
   LineEndsWrapper line_ends1(s1);
   LineEndsWrapper line_ends2(s2);
 
-  LineArrayCompareInput
-      input(Isolate::Current(), s1, s2, line_ends1, line_ends2);
+  LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
   TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
 
+  NarrowDownInput(&input, &output);
+
   Comparator::CalculateDifference(&input, &output);
 
   return output.GetResult();
@@ -533,12 +623,12 @@
 
 // Wraps any object into a OpaqueReference, that will hide the object
 // from JavaScript.
-static Handle<JSValue> WrapInJSValue(Object* object) {
+static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
   Handle<JSFunction> constructor =
       Isolate::Current()->opaque_reference_function();
   Handle<JSValue> result =
       Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
-  result->set_value(object);
+  result->set_value(*object);
   return result;
 }
 
@@ -605,17 +695,17 @@
   }
   void SetFunctionCode(Handle<Code> function_code,
       Handle<Object> code_scope_info) {
-    Handle<JSValue> code_wrapper = WrapInJSValue(*function_code);
+    Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
     this->SetField(kCodeOffset_, code_wrapper);
 
-    Handle<JSValue> scope_wrapper = WrapInJSValue(*code_scope_info);
+    Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
     this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
   }
   void SetOuterScopeInfo(Handle<Object> scope_info_array) {
     this->SetField(kOuterScopeInfoOffset_, scope_info_array);
   }
   void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
-    Handle<JSValue> info_holder = WrapInJSValue(*info);
+    Handle<JSValue> info_holder = WrapInJSValue(info);
     this->SetField(kSharedFunctionInfoOffset_, info_holder);
   }
   int GetParentIndex() {
@@ -672,7 +762,7 @@
                      Handle<SharedFunctionInfo> info) {
     HandleScope scope;
     this->SetField(kFunctionNameOffset_, name);
-    Handle<JSValue> info_holder = WrapInJSValue(*info);
+    Handle<JSValue> info_holder = WrapInJSValue(info);
     this->SetField(kSharedInfoOffset_, info_holder);
     this->SetSmiValueField(kStartPositionOffset_, start_position);
     this->SetSmiValueField(kEndPositionOffset_, end_position);
@@ -770,8 +860,7 @@
       int j = 0;
       for (int i = 0; i < list.length(); i++) {
         Variable* var1 = list[i];
-        Slot* slot = var1->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
+        if (var1->IsContextSlot()) {
           if (j != i) {
             list[j] = var1;
           }
@@ -783,7 +872,7 @@
       for (int k = 1; k < j; k++) {
         int l = k;
         for (int m = k + 1; m < j; m++) {
-          if (list[l]->AsSlot()->index() > list[m]->AsSlot()->index()) {
+          if (list[l]->index() > list[m]->index()) {
             l = m;
           }
         }
@@ -797,7 +886,7 @@
         SetElementNonStrict(
             scope_info_list,
             scope_info_length,
-            Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())));
+            Handle<Smi>(Smi::FromInt(list[i]->index())));
         scope_info_length++;
       }
       SetElementNonStrict(scope_info_list,
@@ -820,7 +909,7 @@
 JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
                                      Handle<String> source) {
   Isolate* isolate = Isolate::Current();
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
 
   FunctionInfoListener listener;
   Handle<Object> original_source = Handle<Object>(script->source());
@@ -914,7 +1003,7 @@
   AssertNoAllocation no_allocations_please;
 
   // A zone scope for ReferenceCollectorVisitor.
-  ZoneScope scope(DELETE_ON_EXIT);
+  ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
 
   ReferenceCollectorVisitor visitor(original);
 
@@ -1361,7 +1450,7 @@
                                StackFrame* bottom_frame) {
   Address* pointer_address =
       &Memory::Address_at(Isolate::Current()->get_address_from_id(
-          Isolate::k_handler_address));
+          Isolate::kHandlerAddress));
 
   while (*pointer_address < top_frame->sp()) {
     pointer_address = &Memory::Address_at(*pointer_address);
@@ -1411,6 +1500,9 @@
           Builtins::kFrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+  } else if (pre_top_frame_code ==
+      isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
+    *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
   } else if (pre_top_frame_code->kind() == Code::STUB &&
       pre_top_frame_code->major_key()) {
     // Entry from our unit tests, it's fine, we support this case.
@@ -1461,8 +1553,9 @@
 // removing all listed function if possible and if do_drop is true.
 static const char* DropActivationsInActiveThread(
     Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
-  Debug* debug = Isolate::Current()->debug();
-  ZoneScope scope(DELETE_ON_EXIT);
+  Isolate* isolate = Isolate::Current();
+  Debug* debug = isolate->debug();
+  ZoneScope scope(isolate, DELETE_ON_EXIT);
   Vector<StackFrame*> frames = CreateStackMap();
 
   int array_len = Smi::cast(shared_info_array->length())->value();
@@ -1682,7 +1775,7 @@
 }
 
 
-bool LiveEditFunctionTracker::IsActive() {
+bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
   return false;
 }
 
diff --git a/src/liveedit.h b/src/liveedit.h
index 36c2c76..4ee4466 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -49,6 +49,7 @@
 // instantiate newly compiled functions.
 
 
+#include "allocation.h"
 #include "compiler.h"
 
 namespace v8 {
@@ -142,14 +143,13 @@
 // A general-purpose comparator between 2 arrays.
 class Comparator {
  public:
-
   // Holds 2 arrays of some elements allowing to compare any pair of
   // element from the first array and element from the second array.
   class Input {
    public:
-    virtual int getLength1() = 0;
-    virtual int getLength2() = 0;
-    virtual bool equals(int index1, int index2) = 0;
+    virtual int GetLength1() = 0;
+    virtual int GetLength2() = 0;
+    virtual bool Equals(int index1, int index2) = 0;
 
    protected:
     virtual ~Input() {}
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 5795a6b..957c051 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -36,11 +36,12 @@
 #include "global-handles.h"
 #include "heap.h"
 #include "inspector.h"
+#include "isolate.h"
 #include "list-inl.h"
 #include "liveobjectlist-inl.h"
 #include "string-stream.h"
-#include "top.h"
 #include "v8utils.h"
+#include "v8conversions.h"
 
 namespace v8 {
 namespace internal {
@@ -109,7 +110,7 @@
   \
   v(Context, "meta: Context") \
   v(ByteArray, "meta: ByteArray") \
-  v(PixelArray, "meta: PixelArray") \
+  v(ExternalPixelArray, "meta: PixelArray") \
   v(ExternalArray, "meta: ExternalArray") \
   v(FixedArray, "meta: FixedArray") \
   v(String, "String") \
@@ -118,7 +119,7 @@
   v(Code, "meta: Code") \
   v(Map, "meta: Map") \
   v(Oddball, "Oddball") \
-  v(Proxy, "meta: Proxy") \
+  v(Foreign, "meta: Foreign") \
   v(SharedFunctionInfo, "meta: SharedFunctionInfo") \
   v(Struct, "meta: Struct") \
   \
@@ -183,7 +184,7 @@
 const AllocationSpace kInvalidSpace = static_cast<AllocationSpace>(-1);
 
 static AllocationSpace FindSpaceFor(String* space_str) {
-  SmartPointer<char> s =
+  SmartArrayPointer<char> s =
       space_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
 
   const char* key_str = *s;
@@ -211,8 +212,9 @@
 
 
 static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
+  Heap* heap = ISOLATE->heap();
   if (space != LO_SPACE) {
-    return Heap::InSpace(heap_obj, space);
+    return heap->InSpace(heap_obj, space);
   }
 
   // This is an optimization to speed up the check for an object in the LO
@@ -224,17 +226,17 @@
   int first_space = static_cast<int>(FIRST_SPACE);
   int last_space = static_cast<int>(LO_SPACE);
   for (int sp = first_space; sp < last_space; sp++) {
-    if (Heap::InSpace(heap_obj, static_cast<AllocationSpace>(sp))) {
+    if (heap->InSpace(heap_obj, static_cast<AllocationSpace>(sp))) {
       return false;
     }
   }
-  SLOW_ASSERT(Heap::InSpace(heap_obj, LO_SPACE));
+  SLOW_ASSERT(heap->InSpace(heap_obj, LO_SPACE));
   return true;
 }
 
 
 static LiveObjectType FindTypeFor(String* type_str) {
-  SmartPointer<char> s =
+  SmartArrayPointer<char> s =
       type_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
 
 #define CHECK_OBJECT_TYPE(type_, name) { \
@@ -285,7 +287,7 @@
 
 
 void LolFilter::InitTypeFilter(Handle<JSObject> filter_obj) {
-  Handle<String> type_sym = Factory::LookupAsciiSymbol("type");
+  Handle<String> type_sym = FACTORY->LookupAsciiSymbol("type");
   MaybeObject* maybe_result = filter_obj->GetProperty(*type_sym);
   Object* type_obj;
   if (maybe_result->ToObject(&type_obj)) {
@@ -301,7 +303,7 @@
 
 
 void LolFilter::InitSpaceFilter(Handle<JSObject> filter_obj) {
-  Handle<String> space_sym = Factory::LookupAsciiSymbol("space");
+  Handle<String> space_sym = FACTORY->LookupAsciiSymbol("space");
   MaybeObject* maybe_result = filter_obj->GetProperty(*space_sym);
   Object* space_obj;
   if (maybe_result->ToObject(&space_obj)) {
@@ -317,7 +319,7 @@
 
 
 void LolFilter::InitPropertyFilter(Handle<JSObject> filter_obj) {
-  Handle<String> prop_sym = Factory::LookupAsciiSymbol("prop");
+  Handle<String> prop_sym = FACTORY->LookupAsciiSymbol("prop");
   MaybeObject* maybe_result = filter_obj->GetProperty(*prop_sym);
   Object* prop_obj;
   if (maybe_result->ToObject(&prop_obj)) {
@@ -501,10 +503,10 @@
     // We'll only dump 80 of them after we compact them.
     const int kMaxCharToDump = 80;
     const int kMaxBufferSize = kMaxCharToDump * 2;
-    SmartPointer<char> str_sp = str->ToCString(DISALLOW_NULLS,
-                                               ROBUST_STRING_TRAVERSAL,
-                                               0,
-                                               kMaxBufferSize);
+    SmartArrayPointer<char> str_sp = str->ToCString(DISALLOW_NULLS,
+                                                    ROBUST_STRING_TRAVERSAL,
+                                                    0,
+                                                    kMaxBufferSize);
     char* str_cstr = *str_sp;
     int length = CompactString(str_cstr);
     OS::SNPrintF(buffer_v,
@@ -524,14 +526,14 @@
     }
 
     String* name = sinfo->DebugName();
-    SmartPointer<char> name_sp =
+    SmartArrayPointer<char> name_sp =
         name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     char* name_cstr = *name_sp;
 
     HeapStringAllocator string_allocator;
     StringStream stream(&string_allocator);
     sinfo->SourceCodePrint(&stream, 50);
-    SmartPointer<const char> source_sp = stream.ToCString();
+    SmartArrayPointer<const char> source_sp = stream.ToCString();
     const char* source_cstr = *source_sp;
 
     OS::SNPrintF(buffer_v,
@@ -571,7 +573,9 @@
                          Handle<JSObject> detail,
                          Handle<String> desc,
                          Handle<Object> error) {
-  detail = Factory::NewJSObject(Top::object_function());
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+  detail = factory->NewJSObject(isolate->object_function());
   if (detail->IsFailure()) {
     error = detail;
     return false;
@@ -586,7 +590,7 @@
     desc_str = buffer;
     size = obj->Size();
   }
-  desc = Factory::NewStringFromAscii(CStrVector(desc_str));
+  desc = factory->NewStringFromAscii(CStrVector(desc_str));
   if (desc->IsFailure()) {
     error = desc;
     return false;
@@ -663,10 +667,13 @@
     int index = 0;
     int count = 0;
 
+    Isolate* isolate = Isolate::Current();
+    Factory* factory = isolate->factory();
+
     // Prefetch some needed symbols.
-    Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
-    Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc");
-    Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
+    Handle<String> id_sym = factory->LookupAsciiSymbol("id");
+    Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
+    Handle<String> size_sym = factory->LookupAsciiSymbol("size");
 
     // Fill the array with the lol object details.
     Handle<JSObject> detail;
@@ -1089,7 +1096,9 @@
 
 // Captures a current snapshot of all objects in the heap.
 MaybeObject* LiveObjectList::Capture() {
-  HandleScope scope;
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+  HandleScope scope(isolate);
 
   // Count the number of objects in the heap.
   int total_count = CountHeapObjects();
@@ -1139,11 +1148,11 @@
 #endif
   }
 
-  Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
-  Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
-  Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
+  Handle<String> id_sym = factory->LookupAsciiSymbol("id");
+  Handle<String> count_sym = factory->LookupAsciiSymbol("count");
+  Handle<String> size_sym = factory->LookupAsciiSymbol("size");
 
-  Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
+  Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
   if (result->IsFailure()) return Object::cast(*result);
 
   { MaybeObject* maybe_result = result->SetProperty(*id_sym,
@@ -1259,7 +1268,10 @@
                                          int start,
                                          int dump_limit,
                                          LolFilter* filter) {
-  HandleScope scope;
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+
+  HandleScope scope(isolate);
 
   // Calculate the number of entries of the dump.
   int count = -1;
@@ -1277,7 +1289,7 @@
   }
 
   // Allocate an array to hold the result.
-  Handle<FixedArray> elements_arr = Factory::NewFixedArray(dump_limit);
+  Handle<FixedArray> elements_arr = factory->NewFixedArray(dump_limit);
   if (elements_arr->IsFailure()) return Object::cast(*elements_arr);
 
   // Fill in the dump.
@@ -1292,11 +1304,11 @@
   MaybeObject* maybe_result;
 
   // Allocate the result body.
-  Handle<JSObject> body = Factory::NewJSObject(Top::object_function());
+  Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
   if (body->IsFailure()) return Object::cast(*body);
 
   // Set the updated body.count.
-  Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
+  Handle<String> count_sym = factory->LookupAsciiSymbol("count");
   maybe_result = body->SetProperty(*count_sym,
                                    Smi::FromInt(count),
                                    NONE,
@@ -1305,7 +1317,7 @@
 
   // Set the updated body.size if appropriate.
   if (size >= 0) {
-    Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
+    Handle<String> size_sym = factory->LookupAsciiSymbol("size");
     maybe_result = body->SetProperty(*size_sym,
                                      Smi::FromInt(size),
                                      NONE,
@@ -1314,7 +1326,7 @@
   }
 
   // Set body.first_index.
-  Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index");
+  Handle<String> first_sym = factory->LookupAsciiSymbol("first_index");
   maybe_result = body->SetProperty(*first_sym,
                                    Smi::FromInt(start),
                                    NONE,
@@ -1322,12 +1334,12 @@
   if (maybe_result->IsFailure()) return maybe_result;
 
   // Allocate the JSArray of the elements.
-  Handle<JSObject> elements = Factory::NewJSObject(Top::array_function());
+  Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
   if (elements->IsFailure()) return Object::cast(*elements);
   Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
 
   // Set body.elements.
-  Handle<String> elements_sym = Factory::LookupAsciiSymbol("elements");
+  Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
   maybe_result = body->SetProperty(*elements_sym,
                                    *elements,
                                    NONE,
@@ -1381,6 +1393,9 @@
   LiveObjectSummary summary(filter);
   writer->Write(&summary);
 
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+
   // The result body will look like this:
   // body: {
   //   count: <total_count>,
@@ -1398,21 +1413,21 @@
   // }
 
   // Prefetch some needed symbols.
-  Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc");
-  Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
-  Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
-  Handle<String> summary_sym = Factory::LookupAsciiSymbol("summary");
+  Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
+  Handle<String> count_sym = factory->LookupAsciiSymbol("count");
+  Handle<String> size_sym = factory->LookupAsciiSymbol("size");
+  Handle<String> summary_sym = factory->LookupAsciiSymbol("summary");
 
   // Allocate the summary array.
   int entries_count = summary.GetNumberOfEntries();
   Handle<FixedArray> summary_arr =
-      Factory::NewFixedArray(entries_count);
+      factory->NewFixedArray(entries_count);
   if (summary_arr->IsFailure()) return Object::cast(*summary_arr);
 
   int idx = 0;
   for (int i = 0; i < LiveObjectSummary::kNumberOfEntries; i++) {
     // Allocate the summary record.
-    Handle<JSObject> detail = Factory::NewJSObject(Top::object_function());
+    Handle<JSObject> detail = factory->NewJSObject(isolate->object_function());
     if (detail->IsFailure()) return Object::cast(*detail);
 
     // Fill in the summary record.
@@ -1420,7 +1435,7 @@
     int count = summary.Count(type);
     if (count) {
       const char* desc_cstr = GetObjectTypeDesc(type);
-      Handle<String> desc = Factory::LookupAsciiSymbol(desc_cstr);
+      Handle<String> desc = factory->LookupAsciiSymbol(desc_cstr);
       int size = summary.Size(type);
 
       maybe_result = detail->SetProperty(*desc_sym,
@@ -1444,12 +1459,13 @@
   }
 
   // Wrap the summary fixed array in a JS array.
-  Handle<JSObject> summary_obj = Factory::NewJSObject(Top::array_function());
+  Handle<JSObject> summary_obj =
+    factory->NewJSObject(isolate->array_function());
   if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
   Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
 
   // Create the body object.
-  Handle<JSObject> body = Factory::NewJSObject(Top::object_function());
+  Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
   if (body->IsFailure()) return Object::cast(*body);
 
   // Fill out the body object.
@@ -1470,9 +1486,9 @@
   if (is_tracking_roots) {
     int found_root = summary.found_root();
     int found_weak_root = summary.found_weak_root();
-    Handle<String> root_sym = Factory::LookupAsciiSymbol("found_root");
+    Handle<String> root_sym = factory->LookupAsciiSymbol("found_root");
     Handle<String> weak_root_sym =
-        Factory::LookupAsciiSymbol("found_weak_root");
+        factory->LookupAsciiSymbol("found_weak_root");
     maybe_result = body->SetProperty(*root_sym,
                                      Smi::FromInt(found_root),
                                      NONE,
@@ -1499,7 +1515,10 @@
 // Note: only dumps the section starting at start_idx and only up to
 // dump_limit entries.
 MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
-  HandleScope scope;
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+
+  HandleScope scope(isolate);
   MaybeObject* maybe_result;
 
   int total_count = LiveObjectList::list_count();
@@ -1519,13 +1538,13 @@
   }
 
   // Allocate an array to hold the result.
-  Handle<FixedArray> list = Factory::NewFixedArray(dump_count);
+  Handle<FixedArray> list = factory->NewFixedArray(dump_count);
   if (list->IsFailure()) return Object::cast(*list);
 
   // Prefetch some needed symbols.
-  Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
-  Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
-  Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
+  Handle<String> id_sym = factory->LookupAsciiSymbol("id");
+  Handle<String> count_sym = factory->LookupAsciiSymbol("count");
+  Handle<String> size_sym = factory->LookupAsciiSymbol("size");
 
   // Fill the array with the lol details.
   int idx = 0;
@@ -1543,7 +1562,8 @@
       int size;
       count = lol->GetTotalObjCountAndSize(&size);
 
-      Handle<JSObject> detail = Factory::NewJSObject(Top::object_function());
+      Handle<JSObject> detail =
+          factory->NewJSObject(isolate->object_function());
       if (detail->IsFailure()) return Object::cast(*detail);
 
       maybe_result = detail->SetProperty(*id_sym,
@@ -1568,10 +1588,10 @@
   }
 
   // Return the result as a JS array.
-  Handle<JSObject> lols = Factory::NewJSObject(Top::array_function());
+  Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
   Handle<JSArray>::cast(lols)->SetContent(*list);
 
-  Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
+  Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
   if (result->IsFailure()) return Object::cast(*result);
 
   maybe_result = result->SetProperty(*count_sym,
@@ -1580,14 +1600,14 @@
                                      kNonStrictMode);
   if (maybe_result->IsFailure()) return maybe_result;
 
-  Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index");
+  Handle<String> first_sym = factory->LookupAsciiSymbol("first_index");
   maybe_result = result->SetProperty(*first_sym,
                                      Smi::FromInt(start_idx),
                                      NONE,
                                      kNonStrictMode);
   if (maybe_result->IsFailure()) return maybe_result;
 
-  Handle<String> lists_sym = Factory::LookupAsciiSymbol("lists");
+  Handle<String> lists_sym = factory->LookupAsciiSymbol("lists");
   maybe_result = result->SetProperty(*lists_sym,
                                      *lols,
                                      NONE,
@@ -1618,7 +1638,7 @@
   if (element != NULL) {
     return Object::cast(element->obj_);
   }
-  return Heap::undefined_value();
+  return HEAP->undefined_value();
 }
 
 
@@ -1636,11 +1656,14 @@
 
 // Gets the obj id for the specified address if valid.
 Object* LiveObjectList::GetObjId(Handle<String> address) {
-  SmartPointer<char> addr_str =
+  SmartArrayPointer<char> addr_str =
       address->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
 
+  Isolate* isolate = Isolate::Current();
+
   // Extract the address value from the string.
-  int value = static_cast<int>(StringToInt(*address, 16));
+  int value =
+      static_cast<int>(StringToInt(isolate->unicode_cache(), *address, 16));
   Object* obj = reinterpret_cast<Object*>(value);
   return Smi::FromInt(GetObjId(obj));
 }
@@ -1649,7 +1672,6 @@
 // Helper class for copying HeapObjects.
 class LolVisitor: public ObjectVisitor {
  public:
-
   LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip)
       : target_(target), handle_to_skip_(handle_to_skip), found_(false) {}
 
@@ -1761,10 +1783,13 @@
   Handle<String> desc;
   Handle<HeapObject> retainer;
 
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+
   // Prefetch some needed symbols.
-  Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
-  Handle<String> desc_sym = Factory::LookupAsciiSymbol("desc");
-  Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
+  Handle<String> id_sym = factory->LookupAsciiSymbol("id");
+  Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
+  Handle<String> size_sym = factory->LookupAsciiSymbol("size");
 
   NoHandleAllocation ha;
   int count = 0;
@@ -1775,7 +1800,7 @@
 
   // Iterate roots.
   LolVisitor lol_visitor(*target, target);
-  Heap::IterateStrongRoots(&lol_visitor, VISIT_ALL);
+  isolate->heap()->IterateStrongRoots(&lol_visitor, VISIT_ALL);
   if (!AddRootRetainerIfFound(lol_visitor,
                               filter,
                               summary,
@@ -1795,7 +1820,7 @@
   }
 
   lol_visitor.reset();
-  Heap::IterateWeakRoots(&lol_visitor, VISIT_ALL);
+  isolate->heap()->IterateWeakRoots(&lol_visitor, VISIT_ALL);
   if (!AddRootRetainerIfFound(lol_visitor,
                               filter,
                               summary,
@@ -1904,11 +1929,15 @@
                                              int start,
                                              int dump_limit,
                                              Handle<JSObject> filter_obj) {
-  HandleScope scope;
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+  Heap* heap = isolate->heap();
+
+  HandleScope scope(isolate);
 
   // Get the target object.
   HeapObject* heap_obj = HeapObject::cast(GetObj(obj_id));
-  if (heap_obj == Heap::undefined_value()) {
+  if (heap_obj == heap->undefined_value()) {
     return heap_obj;
   }
 
@@ -1916,7 +1945,7 @@
 
   // Get the constructor function for context extension and arguments array.
   JSObject* arguments_boilerplate =
-      Top::context()->global_context()->arguments_boilerplate();
+      isolate->context()->global_context()->arguments_boilerplate();
   JSFunction* arguments_function =
       JSFunction::cast(arguments_boilerplate->map()->constructor());
 
@@ -1938,7 +1967,7 @@
 
     // Set body.id.
     Handle<JSObject> body = Handle<JSObject>(JSObject::cast(body_obj));
-    Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
+    Handle<String> id_sym = factory->LookupAsciiSymbol("id");
     maybe_result = body->SetProperty(*id_sym,
                                      Smi::FromInt(obj_id),
                                      NONE,
@@ -1953,13 +1982,17 @@
 Object* LiveObjectList::PrintObj(int obj_id) {
   Object* obj = GetObj(obj_id);
   if (!obj) {
-    return Heap::undefined_value();
+    return HEAP->undefined_value();
   }
 
   EmbeddedVector<char, 128> temp_filename;
   static int temp_count = 0;
   const char* path_prefix = ".";
 
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+  Heap* heap = isolate->heap();
+
   if (FLAG_lol_workdir) {
     path_prefix = FLAG_lol_workdir;
   }
@@ -1988,13 +2021,13 @@
   if (resource->exists() && !resource->is_empty()) {
     ASSERT(resource->IsAscii());
     Handle<String> dump_string =
-        Factory::NewExternalStringFromAscii(resource);
-    ExternalStringTable::AddString(*dump_string);
+        factory->NewExternalStringFromAscii(resource);
+    heap->external_string_table()->AddString(*dump_string);
     return *dump_string;
   } else {
     delete resource;
   }
-  return Heap::undefined_value();
+  return HEAP->undefined_value();
 }
 
 
@@ -2082,6 +2115,10 @@
 
   FILE* f = OS::FOpen(temp_filename.start(), "w+");
 
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+  Heap* heap = isolate->heap();
+
   // Save the previous verbosity.
   bool prev_verbosity = FLAG_use_verbose_printer;
   FLAG_use_verbose_printer = false;
@@ -2097,15 +2134,14 @@
       // Check for ObjectGroups that references this object.
       // TODO(mlam): refactor this to be more modular.
       {
-        List<ObjectGroup*>* groups = GlobalHandles::ObjectGroups();
+        List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
         for (int i = 0; i < groups->length(); i++) {
           ObjectGroup* group = groups->at(i);
           if (group == NULL) continue;
 
           bool found_group = false;
-          List<Object**>& objects = group->objects_;
-          for (int j = 0; j < objects.length(); j++) {
-            Object* object = *objects[j];
+          for (size_t j = 0; j < group->length_; j++) {
+            Object* object = *(group->objects_[j]);
             HeapObject* hobj = HeapObject::cast(object);
             if (obj2 == hobj) {
               found_group = true;
@@ -2118,8 +2154,8 @@
                    "obj %p is a member of object group %p {\n",
                    reinterpret_cast<void*>(obj2),
                    reinterpret_cast<void*>(group));
-            for (int j = 0; j < objects.length(); j++) {
-              Object* object = *objects[j];
+            for (size_t j = 0; j < group->length_; j++) {
+              Object* object = *(group->objects_[j]);
               if (!object->IsHeapObject()) continue;
 
               HeapObject* hobj = HeapObject::cast(object);
@@ -2144,12 +2180,12 @@
       }
 
       PrintF(f, "path from roots to obj %p\n", reinterpret_cast<void*>(obj2));
-      Heap::IterateRoots(&tracer, VISIT_ONLY_STRONG);
+      heap->IterateRoots(&tracer, VISIT_ONLY_STRONG);
       found = tracer.found();
 
       if (!found) {
         PrintF(f, "  No paths found. Checking symbol tables ...\n");
-        SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
+        SymbolTable* symbol_table = HEAP->raw_unchecked_symbol_table();
         tracer.VisitPointers(reinterpret_cast<Object**>(&symbol_table),
                              reinterpret_cast<Object**>(&symbol_table)+1);
         found = tracer.found();
@@ -2162,7 +2198,7 @@
       if (!found) {
         PrintF(f, "  No paths found. Checking weak roots ...\n");
         // Check weak refs next.
-        GlobalHandles::IterateWeakRoots(&tracer);
+        isolate->global_handles()->IterateWeakRoots(&tracer);
         found = tracer.found();
       }
 
@@ -2192,13 +2228,13 @@
   if (resource->exists() && !resource->is_empty()) {
     ASSERT(resource->IsAscii());
     Handle<String> path_string =
-        Factory::NewExternalStringFromAscii(resource);
-    ExternalStringTable::AddString(*path_string);
+        factory->NewExternalStringFromAscii(resource);
+    heap->external_string_table()->AddString(*path_string);
     return *path_string;
   } else {
     delete resource;
   }
-  return Heap::undefined_value();
+  return heap->undefined_value();
 }
 
 
@@ -2211,13 +2247,13 @@
   HeapObject* obj1 = NULL;
   if (obj_id1 != 0) {
     obj1 = HeapObject::cast(GetObj(obj_id1));
-    if (obj1 == Heap::undefined_value()) {
+    if (obj1 == HEAP->undefined_value()) {
       return obj1;
     }
   }
 
   HeapObject* obj2 = HeapObject::cast(GetObj(obj_id2));
-  if (obj2 == Heap::undefined_value()) {
+  if (obj2 == HEAP->undefined_value()) {
     return obj2;
   }
 
@@ -2571,12 +2607,13 @@
 void LiveObjectList::VerifyNotInFromSpace() {
   OS::Print("VerifyNotInFromSpace() ...\n");
   LolIterator it(NULL, last());
+  Heap* heap = ISOLATE->heap();
   int i = 0;
   for (it.Init(); !it.Done(); it.Next()) {
     HeapObject* heap_obj = it.Obj();
-    if (Heap::InFromSpace(heap_obj)) {
+    if (heap->InFromSpace(heap_obj)) {
       OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
-                i++, heap_obj, Heap::new_space()->FromSpaceLow());
+                i++, heap_obj, heap->new_space()->FromSpaceLow());
     }
   }
 }
@@ -2586,4 +2623,3 @@
 } }  // namespace v8::internal
 
 #endif  // LIVE_OBJECT_LIST
-
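Most of the liveobjectlist.cc churn above is the same mechanical refactor: process-global Heap::, Factory::, Top::, and GlobalHandles:: calls become accessors on the current isolate. A toy, shape-only sketch of that pattern, assuming simplified Isolate/Factory/Heap classes (not the V8 API):

#include <string>

class Factory {
 public:
  std::string LookupAsciiSymbol(const char* s) { return std::string(s); }
};

class Heap {
 public:
  bool InNewSpace(const void*) const { return false; }
};

class Isolate {
 public:
  static Isolate* Current() {
    static Isolate instance;  // One per VM instance in the real design.
    return &instance;
  }
  Factory* factory() { return &factory_; }
  Heap* heap() { return &heap_; }

 private:
  Factory factory_;
  Heap heap_;
};

void Example() {
  // Before (removed):  Factory::LookupAsciiSymbol("id"); Heap::InNewSpace(p);
  // After: every access goes through the isolate that owns the state.
  Isolate* isolate = Isolate::Current();
  Factory* factory = isolate->factory();
  Heap* heap = isolate->heap();
  factory->LookupAsciiSymbol("id");
  heap->InNewSpace(nullptr);
}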
diff --git a/src/liveobjectlist.h b/src/liveobjectlist.h
index 23e418d..65470d7 100644
--- a/src/liveobjectlist.h
+++ b/src/liveobjectlist.h
@@ -114,7 +114,6 @@
   static Object* PrintObj(int obj_id);
 
  private:
-
   struct Element {
     int id_;
     HeapObject* obj_;
@@ -224,7 +223,6 @@
 // Helper class for updating the LiveObjectList HeapObject pointers.
 class UpdateLiveObjectListVisitor: public ObjectVisitor {
  public:
-
   void VisitPointer(Object** p) { UpdatePointer(p); }
 
   void VisitPointers(Object** start, Object** end) {
@@ -237,10 +235,10 @@
   // to live new space objects, and not actually keep them alive.
   void UpdatePointer(Object** p) {
     Object* object = *p;
-    if (!Heap::InNewSpace(object)) return;
+    if (!HEAP->InNewSpace(object)) return;
 
     HeapObject* heap_obj = HeapObject::cast(object);
-    ASSERT(Heap::InFromSpace(heap_obj));
+    ASSERT(HEAP->InFromSpace(heap_obj));
 
     // We use the first word (where the map pointer usually is) of a heap
     // object to record the forwarding pointer.  A forwarding pointer can
@@ -319,4 +317,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_LIVEOBJECTLIST_H_
-
diff --git a/src/log-inl.h b/src/log-inl.h
index 02238fe..8aebbc7 100644
--- a/src/log-inl.h
+++ b/src/log-inl.h
@@ -34,8 +34,6 @@
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
                                                   Script* script) {
   if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
@@ -51,8 +49,6 @@
   }
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 
 } }  // namespace v8::internal
 
diff --git a/src/log-utils.cc b/src/log-utils.cc
index a854ade..7bd7baa 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -33,101 +33,14 @@
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 
-LogDynamicBuffer::LogDynamicBuffer(
-    int block_size, int max_size, const char* seal, int seal_size)
-    : block_size_(block_size),
-      max_size_(max_size - (max_size % block_size_)),
-      seal_(seal),
-      seal_size_(seal_size),
-      blocks_(max_size_ / block_size_ + 1),
-      write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) {
-  ASSERT(BlocksCount() > 0);
-  AllocateBlock(0);
-  for (int i = 1; i < BlocksCount(); ++i) {
-    blocks_[i] = NULL;
-  }
-}
+const char* const Log::kLogToTemporaryFile = "&";
 
 
-LogDynamicBuffer::~LogDynamicBuffer() {
-  for (int i = 0; i < BlocksCount(); ++i) {
-    DeleteArray(blocks_[i]);
-  }
-}
-
-
-int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) {
-  if (buf_size == 0) return 0;
-  int read_pos = from_pos;
-  int block_read_index = BlockIndex(from_pos);
-  int block_read_pos = PosInBlock(from_pos);
-  int dest_buf_pos = 0;
-  // Read until dest_buf is filled, or write_pos_ encountered.
-  while (read_pos < write_pos_ && dest_buf_pos < buf_size) {
-    const int read_size = Min(write_pos_ - read_pos,
-        Min(buf_size - dest_buf_pos, block_size_ - block_read_pos));
-    memcpy(dest_buf + dest_buf_pos,
-           blocks_[block_read_index] + block_read_pos, read_size);
-    block_read_pos += read_size;
-    dest_buf_pos += read_size;
-    read_pos += read_size;
-    if (block_read_pos == block_size_) {
-      block_read_pos = 0;
-      ++block_read_index;
-    }
-  }
-  return dest_buf_pos;
-}
-
-
-int LogDynamicBuffer::Seal() {
-  WriteInternal(seal_, seal_size_);
-  is_sealed_ = true;
-  return 0;
-}
-
-
-int LogDynamicBuffer::Write(const char* data, int data_size) {
-  if (is_sealed_) {
-    return 0;
-  }
-  if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) {
-    return WriteInternal(data, data_size);
-  } else {
-    return Seal();
-  }
-}
-
-
-int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
-  int data_pos = 0;
-  while (data_pos < data_size) {
-    const int write_size =
-        Min(data_size - data_pos, block_size_ - block_write_pos_);
-    memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos,
-           write_size);
-    block_write_pos_ += write_size;
-    data_pos += write_size;
-    if (block_write_pos_ == block_size_) {
-      block_write_pos_ = 0;
-      AllocateBlock(++block_index_);
-    }
-  }
-  write_pos_ += data_size;
-  return data_size;
-}
-
-// Must be the same message as in Logger::PauseProfiler.
-const char* const Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
-
 Log::Log(Logger* logger)
-  : write_to_file_(false),
-    is_stopped_(false),
+  : is_stopped_(false),
     output_handle_(NULL),
-    output_code_handle_(NULL),
-    output_buffer_(NULL),
+    ll_output_handle_(NULL),
     mutex_(NULL),
     message_buffer_(NULL),
     logger_(logger) {
@@ -142,7 +55,6 @@
 
 
 void Log::Initialize() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   mutex_ = OS::CreateMutex();
   message_buffer_ = NewArray<char>(kMessageBufferSize);
 
@@ -166,19 +78,17 @@
     FLAG_prof_auto = false;
   }
 
-  bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
+  bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
       || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
-      || FLAG_log_regexp || FLAG_log_state_changes;
-
-  bool open_log_file = start_logging || FLAG_prof_lazy;
+      || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
 
   // If we're logging anything, we need to open the log file.
   if (open_log_file) {
     if (strcmp(FLAG_logfile, "-") == 0) {
       OpenStdout();
-    } else if (strcmp(FLAG_logfile, "*") == 0) {
-      OpenMemoryBuffer();
-    } else  {
+    } else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) {
+      OpenTemporaryFile();
+    } else {
       if (strchr(FLAG_logfile, '%') != NULL ||
           !Isolate::Current()->IsDefaultIsolate()) {
         // If there's a '%' in the log file name we have to expand
@@ -215,63 +125,63 @@
             stream.Put(*p);
           }
         }
-        SmartPointer<const char> expanded = stream.ToCString();
+        SmartArrayPointer<const char> expanded = stream.ToCString();
         OpenFile(*expanded);
       } else {
         OpenFile(FLAG_logfile);
       }
     }
   }
-#endif
 }
 
 
 void Log::OpenStdout() {
   ASSERT(!IsEnabled());
   output_handle_ = stdout;
-  write_to_file_ = true;
 }
 
 
-static const char kCodeLogExt[] = ".code";
+void Log::OpenTemporaryFile() {
+  ASSERT(!IsEnabled());
+  output_handle_ = i::OS::OpenTemporaryFile();
+}
+
+
+// Extension added to V8 log file name to get the low-level log name.
+static const char kLowLevelLogExt[] = ".ll";
+
+// File buffer size of the low-level log. We use a custom size rather than
+// the default to minimize the associated overhead.
+static const int kLowLevelLogBufferSize = 2 * MB;
 
 
 void Log::OpenFile(const char* name) {
   ASSERT(!IsEnabled());
   output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
-  write_to_file_ = true;
   if (FLAG_ll_prof) {
-    // Open a file for logging the contents of code objects so that
-    // they can be disassembled later.
-    size_t name_len = strlen(name);
-    ScopedVector<char> code_name(
-        static_cast<int>(name_len + sizeof(kCodeLogExt)));
-    memcpy(code_name.start(), name, name_len);
-    memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
-    output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
+    // Open the low-level log file.
+    size_t len = strlen(name);
+    ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
+    memcpy(ll_name.start(), name, len);
+    memcpy(ll_name.start() + len, kLowLevelLogExt, sizeof(kLowLevelLogExt));
+    ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
+    setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
   }
 }
 
 
-void Log::OpenMemoryBuffer() {
-  ASSERT(!IsEnabled());
-  output_buffer_ = new LogDynamicBuffer(
-      kDynamicBufferBlockSize, kMaxDynamicBufferSize,
-      kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
-  write_to_file_ = false;
-}
-
-
-void Log::Close() {
-  if (write_to_file_) {
-    if (output_handle_ != NULL) fclose(output_handle_);
-    output_handle_ = NULL;
-    if (output_code_handle_ != NULL) fclose(output_code_handle_);
-    output_code_handle_ = NULL;
-  } else {
-    delete output_buffer_;
-    output_buffer_ = NULL;
+FILE* Log::Close() {
+  FILE* result = NULL;
+  if (output_handle_ != NULL) {
+    if (strcmp(FLAG_logfile, kLogToTemporaryFile) != 0) {
+      fclose(output_handle_);
+    } else {
+      result = output_handle_;
+    }
   }
+  output_handle_ = NULL;
+  if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
+  ll_output_handle_ = NULL;
 
   DeleteArray(message_buffer_);
   message_buffer_ = NULL;
@@ -280,27 +190,7 @@
   mutex_ = NULL;
 
   is_stopped_ = false;
-}
-
-
-int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
-  if (write_to_file_) return 0;
-  ASSERT(output_buffer_ != NULL);
-  ASSERT(from_pos >= 0);
-  ASSERT(max_size >= 0);
-  int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size);
-  ASSERT(actual_size <= max_size);
-  if (actual_size == 0) return 0;
-
-  // Find previous log line boundary.
-  char* end_pos = dest_buf + actual_size - 1;
-  while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
-  actual_size = static_cast<int>(end_pos - dest_buf + 1);
-  // If the assertion below is hit, it means that there was no line end
-  // found --- something wrong has happened.
-  ASSERT(actual_size > 0);
-  ASSERT(actual_size <= max_size);
-  return actual_size;
+  return result;
 }
 
 
@@ -361,6 +251,7 @@
 
 
 void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+  if (str == NULL) return;
   AssertNoAllocation no_heap_allocation;  // Ensure string stay valid.
   int len = str->length();
   if (len > 0x1000)
@@ -408,9 +299,7 @@
 
 void LogMessageBuilder::WriteToLogFile() {
   ASSERT(pos_ <= Log::kMessageBufferSize);
-  const int written = log_->write_to_file_ ?
-      log_->WriteToFile(log_->message_buffer_, pos_) :
-      log_->WriteToMemory(log_->message_buffer_, pos_);
+  const int written = log_->WriteToFile(log_->message_buffer_, pos_);
   if (written != pos_) {
     log_->stop();
     log_->logger_->LogFailure();
@@ -418,6 +307,4 @@
 }
 
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 } }  // namespace v8::internal
diff --git a/src/log-utils.h b/src/log-utils.h
index 255c73c..d0cb828 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -28,72 +28,16 @@
 #ifndef V8_LOG_UTILS_H_
 #define V8_LOG_UTILS_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 class Logger;
 
-// A memory buffer that increments its size as you write in it.  Size
-// is incremented with 'block_size' steps, never exceeding 'max_size'.
-// During growth, memory contents are never copied.  At the end of the
-// buffer an amount of memory specified in 'seal_size' is reserved.
-// When writing position reaches max_size - seal_size, buffer auto-seals
-// itself with 'seal' and allows no further writes. Data pointed by
-// 'seal' must be available during entire LogDynamicBuffer lifetime.
-//
-// An instance of this class is created dynamically by Log.
-class LogDynamicBuffer {
- public:
-  LogDynamicBuffer(
-      int block_size, int max_size, const char* seal, int seal_size);
-
-  ~LogDynamicBuffer();
-
-  // Reads contents of the buffer starting from 'from_pos'.  Upon
-  // return, 'dest_buf' is filled with the data. Actual amount of data
-  // filled is returned, it is <= 'buf_size'.
-  int Read(int from_pos, char* dest_buf, int buf_size);
-
-  // Writes 'data' to the buffer, making it larger if necessary.  If
-  // data is too big to fit in the buffer, it doesn't get written at
-  // all. In that case, buffer auto-seals itself and stops to accept
-  // any incoming writes. Returns amount of data written (it is either
-  // 'data_size', or 0, if 'data' is too big).
-  int Write(const char* data, int data_size);
-
- private:
-  void AllocateBlock(int index) {
-    blocks_[index] = NewArray<char>(block_size_);
-  }
-
-  int BlockIndex(int pos) const { return pos / block_size_; }
-
-  int BlocksCount() const { return BlockIndex(max_size_) + 1; }
-
-  int PosInBlock(int pos) const { return pos % block_size_; }
-
-  int Seal();
-
-  int WriteInternal(const char* data, int data_size);
-
-  const int block_size_;
-  const int max_size_;
-  const char* seal_;
-  const int seal_size_;
-  ScopedVector<char*> blocks_;
-  int write_pos_;
-  int block_index_;
-  int block_write_pos_;
-  bool is_sealed_;
-};
-
-
 // Functions and data for performing output of log messages.
 class Log {
  public:
-
   // Performs process-wide initialization.
   void Initialize();
 
@@ -101,18 +45,21 @@
   void stop() { is_stopped_ = true; }
 
   // Frees all resources acquired in Initialize and Open... functions.
-  void Close();
-
-  // See description in include/v8.h.
-  int GetLogLines(int from_pos, char* dest_buf, int max_size);
+  // When a temporary file is used for the log, returns its stream descriptor,
+  // leaving the file open.
+  FILE* Close();
 
   // Returns whether logging is enabled.
   bool IsEnabled() {
-    return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
+    return !is_stopped_ && output_handle_ != NULL;
   }
 
   // Size of buffer used for formatting log messages.
-  static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
+  static const int kMessageBufferSize = 2048;
+
+  // This mode is only used in tests, as temporary files are automatically
+  // deleted on close and thus can't be accessed afterwards.
+  static const char* const kLogToTemporaryFile;
 
  private:
   explicit Log(Logger* logger);
@@ -123,8 +70,8 @@
   // Opens file for logging.
   void OpenFile(const char* name);
 
-  // Opens memory buffer for logging.
-  void OpenMemoryBuffer();
+  // Opens a temporary file for logging.
+  void OpenTemporaryFile();
 
   // Implementation of writing to a log file.
   int WriteToFile(const char* msg, int length) {
@@ -136,37 +83,15 @@
     return length;
   }
 
-  // Implementation of writing to a memory buffer.
-  int WriteToMemory(const char* msg, int length) {
-    ASSERT(output_buffer_ != NULL);
-    return output_buffer_->Write(msg, length);
-  }
-
-  bool write_to_file_;
-
   // Whether logging is stopped (e.g. due to insufficient resources).
   bool is_stopped_;
 
-  // When logging is active, either output_handle_ or output_buffer_ is used
-  // to store a pointer to log destination. If logging was opened via OpenStdout
-  // or OpenFile, then output_handle_ is used. If logging was opened
-  // via OpenMemoryBuffer, then output_buffer_ is used.
-  // mutex_ should be acquired before using output_handle_ or output_buffer_.
+  // When logging is active, output_handle_ is used to store a pointer to the
+  // log destination.  mutex_ should be acquired before using output_handle_.
   FILE* output_handle_;
 
-  // Used when low-level profiling is active to save code object contents.
-  FILE* output_code_handle_;
-
-  LogDynamicBuffer* output_buffer_;
-
-  // Size of dynamic buffer block (and dynamic buffer initial size).
-  static const int kDynamicBufferBlockSize = 65536;
-
-  // Maximum size of dynamic buffer.
-  static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
-
-  // Message to "seal" dynamic buffer with.
-  static const char* const kDynamicBufferSeal;
+  // Used when low-level profiling is active.
+  FILE* ll_output_handle_;
 
   // mutex_ is a Mutex used for enforcing exclusive
   // access to the formatting buffer and the log file.
@@ -216,14 +141,11 @@
   void WriteToLogFile();
 
  private:
-
   Log* log_;
   ScopedLock sl;
   int pos_;
 };
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 } }  // namespace v8::internal
 
 #endif  // V8_LOG_UTILS_H_
diff --git a/src/log.cc b/src/log.cc
index 3ce2072..3d66b5f 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,8 +43,6 @@
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 //
 // Sliding state window.  Updates counters to keep track of the last
 // window of kBufferSize states.  This is useful to track where we
@@ -122,6 +120,7 @@
   // Returns the next index in the cyclic buffer.
   int Succ(int index) { return (index + 1) % kBufferSize; }
 
+  Isolate* isolate_;
   // Cyclic buffer for communicating profiling samples
   // between the signal handler and the worker thread.
   static const int kBufferSize = 128;
@@ -148,10 +147,6 @@
 void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
   ASSERT(isolate->IsInitialized());
 
-  sample->tos = NULL;
-  sample->frames_count = 0;
-  sample->has_external_callback = false;
-
   // Avoid collecting traces while doing GC.
   if (sample->state == GC) return;
 
@@ -271,7 +266,8 @@
 // Profiler implementation.
 //
 Profiler::Profiler(Isolate* isolate)
-    : Thread(isolate, "v8:Profiler"),
+    : Thread("v8:Profiler"),
+      isolate_(isolate),
       head_(0),
       tail_(0),
       overflow_(false),
@@ -326,18 +322,196 @@
 void Profiler::Run() {
   TickSample sample;
   bool overflow = Remove(&sample);
-  i::Isolate* isolate = ISOLATE;
   while (running_) {
-    LOG(isolate, TickEvent(&sample, overflow));
+    LOG(isolate_, TickEvent(&sample, overflow));
     overflow = Remove(&sample);
   }
 }
 
 
+// Low-level profiling event structures.
+
+struct LowLevelCodeCreateStruct {
+  static const char kTag = 'C';
+
+  int32_t name_size;
+  Address code_address;
+  int32_t code_size;
+};
+
+
+struct LowLevelCodeMoveStruct {
+  static const char kTag = 'M';
+
+  Address from_address;
+  Address to_address;
+};
+
+
+struct LowLevelCodeDeleteStruct {
+  static const char kTag = 'D';
+
+  Address address;
+};
+
+
+struct LowLevelSnapshotPositionStruct {
+  static const char kTag = 'P';
+
+  Address address;
+  int32_t position;
+};
+
+
+static const char kCodeMovingGCTag = 'G';
+
+
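// Illustrative sketch (not part of this change): a minimal reader for the
// binary records defined above. It assumes the framing implied by the call
// sites later in this diff -- LowLevelLogWriteStruct emits the struct's
// one-byte kTag followed by the raw struct, and a code-create record is
// followed by 'name_size' name bytes plus 'code_size' bytes of copied
// machine code. Records are raw in-memory structs, so a real reader must
// match the writer's architecture and struct layout, and must first skip
// the NUL-terminated architecture string written by LogCodeInfo().
static bool ReadLowLevelRecord(FILE* in) {
  int tag = fgetc(in);
  if (tag == EOF) return false;
  switch (tag) {
    case LowLevelCodeCreateStruct::kTag: {
      LowLevelCodeCreateStruct event;
      if (fread(&event, sizeof(event), 1, in) != 1) return false;
      // Skip the name and the copied instruction stream that follow.
      return fseek(in, event.name_size + event.code_size, SEEK_CUR) == 0;
    }
    case LowLevelCodeMoveStruct::kTag: {
      LowLevelCodeMoveStruct event;
      return fread(&event, sizeof(event), 1, in) == 1;
    }
    case LowLevelCodeDeleteStruct::kTag: {
      LowLevelCodeDeleteStruct event;
      return fread(&event, sizeof(event), 1, in) == 1;
    }
    case LowLevelSnapshotPositionStruct::kTag: {
      LowLevelSnapshotPositionStruct event;
      return fread(&event, sizeof(event), 1, in) == 1;
    }
    case kCodeMovingGCTag:
      return true;  // No payload.
    default:
      return false;  // Unknown tag or corrupted stream.
  }
}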
 //
 // Logger class implementation.
 //
 
+class Logger::NameMap {
+ public:
+  NameMap() : impl_(&PointerEquals) {}
+
+  ~NameMap() {
+    for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+      DeleteArray(static_cast<const char*>(p->value));
+    }
+  }
+
+  void Insert(Address code_address, const char* name, int name_size) {
+    HashMap::Entry* entry = FindOrCreateEntry(code_address);
+    if (entry->value == NULL) {
+      entry->value = CopyName(name, name_size);
+    }
+  }
+
+  const char* Lookup(Address code_address) {
+    HashMap::Entry* entry = FindEntry(code_address);
+    return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+  }
+
+  void Remove(Address code_address) {
+    HashMap::Entry* entry = FindEntry(code_address);
+    if (entry != NULL) {
+      DeleteArray(static_cast<char*>(entry->value));
+      RemoveEntry(entry);
+    }
+  }
+
+  void Move(Address from, Address to) {
+    if (from == to) return;
+    HashMap::Entry* from_entry = FindEntry(from);
+    ASSERT(from_entry != NULL);
+    void* value = from_entry->value;
+    RemoveEntry(from_entry);
+    HashMap::Entry* to_entry = FindOrCreateEntry(to);
+    ASSERT(to_entry->value == NULL);
+    to_entry->value = value;
+  }
+
+ private:
+  static bool PointerEquals(void* lhs, void* rhs) {
+    return lhs == rhs;
+  }
+
+  static char* CopyName(const char* name, int name_size) {
+    char* result = NewArray<char>(name_size + 1);
+    for (int i = 0; i < name_size; ++i) {
+      char c = name[i];
+      if (c == '\0') c = ' ';
+      result[i] = c;
+    }
+    result[name_size] = '\0';
+    return result;
+  }
+
+  HashMap::Entry* FindOrCreateEntry(Address code_address) {
+    return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
+  }
+
+  HashMap::Entry* FindEntry(Address code_address) {
+    return impl_.Lookup(code_address, ComputePointerHash(code_address), false);
+  }
+
+  void RemoveEntry(HashMap::Entry* entry) {
+    impl_.Remove(entry->key, entry->hash);
+  }
+
+  HashMap impl_;
+
+  DISALLOW_COPY_AND_ASSIGN(NameMap);
+};
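// Illustrative sketch (not part of this change): how the NameMap above is
// driven by the code events later in this file when the serializer is
// enabled. NameMap is private to Logger, so this is conceptual only; the
// name and addresses are made up.
void TrackCodeObjectForSnapshot(Logger::NameMap* map) {
  Address created = reinterpret_cast<Address>(0x1000);
  Address moved_to = reinterpret_cast<Address>(0x2000);
  const char* name = "LazyCompile:foo";
  map->Insert(created, name, StrLength(name));  // CodeCreateEvent
  map->Move(created, moved_to);                 // CodeMoveEvent
  const char* stored = map->Lookup(moved_to);   // SnapshotPositionEvent
  USE(stored);  // Would be emitted as a "snapshot-code-name" line.
  map->Remove(moved_to);                        // CodeDeleteEvent
}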
+
+
+class Logger::NameBuffer {
+ public:
+  NameBuffer() { Reset(); }
+
+  void Reset() {
+    utf8_pos_ = 0;
+  }
+
+  void AppendString(String* str) {
+    if (str == NULL) return;
+    if (str->HasOnlyAsciiChars()) {
+      int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_);
+      String::WriteToFlat(str, utf8_buffer_ + utf8_pos_, 0, utf8_length);
+      utf8_pos_ += utf8_length;
+      return;
+    }
+    int uc16_length = Min(str->length(), kUc16BufferSize);
+    String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
+    for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
+      uc16 c = uc16_buffer_[i];
+      if (c <= String::kMaxAsciiCharCodeU) {
+        utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
+      } else {
+        int char_length = unibrow::Utf8::Length(c);
+        if (utf8_pos_ + char_length > kUtf8BufferSize) break;
+        unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
+        utf8_pos_ += char_length;
+      }
+    }
+  }
+
+  void AppendBytes(const char* bytes, int size) {
+    size = Min(size, kUtf8BufferSize - utf8_pos_);
+    memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
+    utf8_pos_ += size;
+  }
+
+  void AppendBytes(const char* bytes) {
+    AppendBytes(bytes, StrLength(bytes));
+  }
+
+  void AppendByte(char c) {
+    if (utf8_pos_ >= kUtf8BufferSize) return;
+    utf8_buffer_[utf8_pos_++] = c;
+  }
+
+  void AppendInt(int n) {
+    Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
+    int size = OS::SNPrintF(buffer, "%d", n);
+    if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
+      utf8_pos_ += size;
+    }
+  }
+
+  const char* get() { return utf8_buffer_; }
+  int size() const { return utf8_pos_; }
+
+ private:
+  static const int kUtf8BufferSize = 512;
+  static const int kUc16BufferSize = 128;
+
+  int utf8_pos_;
+  char utf8_buffer_[kUtf8BufferSize];
+  uc16 uc16_buffer_[kUc16BufferSize];
+};
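// Illustrative sketch (not part of this change): the truncation policy of
// NameBuffer::AppendString() above, restated as a standalone helper over a
// plain two-byte buffer. The real code also has a fast path for ASCII-only
// strings (limited only by the 512-byte buffer); this mirrors the general
// path: at most 128 two-byte units are flattened, output is capped at 512
// UTF-8 bytes, and a multi-byte character that does not fit is dropped
// whole rather than split.
#include <stdint.h>
#include <string>

static std::string FlattenName(const uint16_t* units, int length) {
  const int kUtf8Limit = 512;  // Mirrors kUtf8BufferSize.
  const int kUc16Limit = 128;  // Mirrors kUc16BufferSize.
  std::string out;
  if (length > kUc16Limit) length = kUc16Limit;
  for (int i = 0; i < length; ++i) {
    uint16_t c = units[i];
    int encoded_length = (c < 0x80) ? 1 : (c < 0x800) ? 2 : 3;
    if (static_cast<int>(out.size()) + encoded_length > kUtf8Limit) break;
    if (encoded_length == 1) {
      out += static_cast<char>(c);
    } else if (encoded_length == 2) {
      out += static_cast<char>(0xC0 | (c >> 6));
      out += static_cast<char>(0x80 | (c & 0x3F));
    } else {
      out += static_cast<char>(0xE0 | (c >> 12));
      out += static_cast<char>(0x80 | ((c >> 6) & 0x3F));
      out += static_cast<char>(0x80 | (c & 0x3F));
    }
  }
  return out;
}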
+
+
 Logger::Logger()
   : ticker_(NULL),
     profiler_(NULL),
@@ -345,8 +519,9 @@
     log_events_(NULL),
     logging_nesting_(0),
     cpu_profiler_nesting_(0),
-    heap_profiler_nesting_(0),
     log_(new Log(this)),
+    name_buffer_(new NameBuffer),
+    address_to_name_map_(NULL),
     is_initialized_(false),
     last_address_(NULL),
     prev_sp_(NULL),
@@ -355,10 +530,14 @@
     prev_code_(NULL) {
 }
 
+
 Logger::~Logger() {
+  delete address_to_name_map_;
+  delete name_buffer_;
   delete log_;
 }
 
+
 #define DECLARE_EVENT(ignore1, name) name,
 static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
   LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
@@ -373,71 +552,54 @@
   msg.WriteToLogFile();
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 
 void Logger::StringEvent(const char* name, const char* value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log) UncheckedStringEvent(name, value);
-#endif
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::UncheckedStringEvent(const char* name, const char* value) {
   if (!log_->IsEnabled()) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,\"%s\"\n", name, value);
   msg.WriteToLogFile();
 }
-#endif
 
 
 void Logger::IntEvent(const char* name, int value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log) UncheckedIntEvent(name, value);
-#endif
 }
 
 
 void Logger::IntPtrTEvent(const char* name, intptr_t value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log) UncheckedIntPtrTEvent(name, value);
-#endif
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::UncheckedIntEvent(const char* name, int value) {
   if (!log_->IsEnabled()) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%d\n", name, value);
   msg.WriteToLogFile();
 }
-#endif
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
   if (!log_->IsEnabled()) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
   msg.WriteToLogFile();
 }
-#endif
 
 
 void Logger::HandleEvent(const char* name, Object** location) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_handles) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
   msg.WriteToLogFile();
-#endif
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 // ApiEvent is private so all the calls come from the Logger class.  It is the
 // caller's responsibility to ensure that log is enabled and that
 // FLAG_log_api is true.
@@ -450,14 +612,12 @@
   va_end(ap);
   msg.WriteToLogFile();
 }
-#endif
 
 
 void Logger::ApiNamedSecurityCheck(Object* key) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_api) return;
   if (key->IsString()) {
-    SmartPointer<char> str =
+    SmartArrayPointer<char> str =
         String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     ApiEvent("api,check-security,\"%s\"\n", *str);
   } else if (key->IsUndefined()) {
@@ -465,14 +625,12 @@
   } else {
     ApiEvent("api,check-security,['no-name']\n");
   }
-#endif
 }
 
 
 void Logger::SharedLibraryEvent(const char* library_path,
                                 uintptr_t start,
                                 uintptr_t end) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_prof) return;
   LogMessageBuilder msg(this);
   msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
@@ -480,14 +638,12 @@
              start,
              end);
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::SharedLibraryEvent(const wchar_t* library_path,
                                 uintptr_t start,
                                 uintptr_t end) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_prof) return;
   LogMessageBuilder msg(this);
   msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
@@ -495,11 +651,9 @@
              start,
              end);
   msg.WriteToLogFile();
-#endif
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
   // Prints "/" + re.source + "/" +
   //      (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
@@ -540,23 +694,19 @@
 
   msg.WriteToLogFile();
 }
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 
 void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_regexp) return;
   LogMessageBuilder msg(this);
   msg.Append("regexp-compile,");
   LogRegExpSource(regexp);
   msg.Append(in_cache ? ",hit\n" : ",miss\n");
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_runtime) return;
   HandleScope scope;
   LogMessageBuilder msg(this);
@@ -597,82 +747,67 @@
   }
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::ApiIndexedSecurityCheck(uint32_t index) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_api) return;
   ApiEvent("api,check-security,%u\n", index);
-#endif
 }
 
 
 void Logger::ApiNamedPropertyAccess(const char* tag,
                                     JSObject* holder,
                                     Object* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   ASSERT(name->IsString());
   if (!log_->IsEnabled() || !FLAG_log_api) return;
   String* class_name_obj = holder->class_name();
-  SmartPointer<char> class_name =
+  SmartArrayPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  SmartPointer<char> property_name =
+  SmartArrayPointer<char> property_name =
       String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
-#endif
 }
 
 void Logger::ApiIndexedPropertyAccess(const char* tag,
                                       JSObject* holder,
                                       uint32_t index) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_api) return;
   String* class_name_obj = holder->class_name();
-  SmartPointer<char> class_name =
+  SmartArrayPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
-#endif
 }
 
 void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_api) return;
   String* class_name_obj = object->class_name();
-  SmartPointer<char> class_name =
+  SmartArrayPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
-#endif
 }
 
 
 void Logger::ApiEntryCall(const char* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_api) return;
   ApiEvent("api,%s\n", name);
-#endif
 }
 
 
 void Logger::NewEvent(const char* name, void* object, size_t size) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg(this);
   msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
              static_cast<unsigned int>(size));
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::DeleteEvent(const char* name, void* object) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg(this);
   msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
   msg.WriteToLogFile();
-#endif
 }
 
 
@@ -685,7 +820,6 @@
   LOGGER->DeleteEvent(name, object);
 }
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::CallbackEventInternal(const char* prefix, const char* name,
                                    Address entry_point) {
   if (!log_->IsEnabled() || !FLAG_log_code) return;
@@ -698,44 +832,49 @@
   msg.Append('\n');
   msg.WriteToLogFile();
 }
-#endif
 
 
 void Logger::CallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_code) return;
-  SmartPointer<char> str =
+  SmartArrayPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   CallbackEventInternal("", *str, entry_point);
-#endif
 }
 
 
 void Logger::GetterCallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_code) return;
-  SmartPointer<char> str =
+  SmartArrayPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   CallbackEventInternal("get ", *str, entry_point);
-#endif
 }
 
 
 void Logger::SetterCallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_code) return;
-  SmartPointer<char> str =
+  SmartArrayPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   CallbackEventInternal("set ", *str, entry_point);
-#endif
 }
 
 
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              const char* comment) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendBytes(comment);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
@@ -749,29 +888,41 @@
     msg.Append(*p);
   }
   msg.Append('"');
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
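// Illustrative sketch (not part of this change): every CodeCreateEvent
// overload in this diff -- the one above and the ones that follow -- now
// opens with the same fan-out, written here as a hypothetical helper
// (SharedCodeCreatePrologue does not exist in the real code, which repeats
// the pattern inline): build the flat name once, feed it to the low-level
// log under --ll-prof, register it for the snapshot under the serializer,
// and only then decide whether to emit the textual "code-creation" line.
void Logger::SharedCodeCreatePrologue(LogEventsAndTags tag,
                                      Code* code,
                                      const char* flat_name_part) {
  if (FLAG_ll_prof || Serializer::enabled()) {
    name_buffer_->Reset();
    name_buffer_->AppendBytes(kLogEventsNames[tag]);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendBytes(flat_name_part);
  }
  if (FLAG_ll_prof) {
    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
  }
  if (Serializer::enabled()) {
    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
  }
}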
 
 
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              String* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (name != NULL) {
-    SmartPointer<char> str =
-        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    CodeCreateEvent(tag, code, *str);
-  } else {
-    CodeCreateEvent(tag, code, "");
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendString(name);
   }
-#endif
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
+  LogMessageBuilder msg(this);
+  msg.Append("%s,%s,",
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"", code->ExecutableSize());
+  msg.AppendDetailed(name, false);
+  msg.Append('"');
+  msg.Append('\n');
+  msg.WriteToLogFile();
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 // ComputeMarker must only be used when SharedFunctionInfo is known.
 static const char* ComputeMarker(Code* code) {
   switch (code->kind()) {
@@ -780,21 +931,33 @@
     default: return "";
   }
 }
-#endif
 
 
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendBytes(ComputeMarker(code));
+    name_buffer_->AppendString(name);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   if (code == Isolate::Current()->builtins()->builtin(
       Builtins::kLazyCompile))
     return;
 
   LogMessageBuilder msg(this);
-  SmartPointer<char> str =
+  SmartArrayPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
@@ -803,10 +966,8 @@
   msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
   msg.AppendAddress(shared->address());
   msg.Append(",%s", ComputeMarker(code));
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
 
 
@@ -817,12 +978,29 @@
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* source, int line) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendBytes(ComputeMarker(code));
+    name_buffer_->AppendString(shared->DebugName());
+    name_buffer_->AppendByte(' ');
+    name_buffer_->AppendString(source);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendInt(line);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
-  SmartPointer<char> name =
+  SmartArrayPointer<char> name =
       shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  SmartPointer<char> sourcestr =
+  SmartArrayPointer<char> sourcestr =
       source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
@@ -835,43 +1013,59 @@
              line);
   msg.AppendAddress(shared->address());
   msg.Append(",%s", ComputeMarker(code));
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendInt(args_count);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
              kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::CodeMovingGCEvent() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
-  LogMessageBuilder msg(this);
-  msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
-  msg.WriteToLogFile();
+  if (!log_->IsEnabled() || !FLAG_ll_prof) return;
+  LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
   OS::SignalCodeMovingGC();
-#endif
 }
 
 
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendString(source);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
@@ -880,48 +1074,61 @@
   msg.Append(",%d,\"", code->ExecutableSize());
   msg.AppendDetailed(source, false);
   msg.Append('\"');
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::CodeMoveEvent(Address from, Address to) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
+  if (Serializer::enabled() && address_to_name_map_ != NULL) {
+    address_to_name_map_->Move(from, to);
+  }
   MoveEventInternal(CODE_MOVE_EVENT, from, to);
-#endif
 }
 
 
 void Logger::CodeDeleteEvent(Address from) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
+  if (Serializer::enabled() && address_to_name_map_ != NULL) {
+    address_to_name_map_->Remove(from);
+  }
   DeleteEventInternal(CODE_DELETE_EVENT, from);
-#endif
 }
 
 
 void Logger::SnapshotPositionEvent(Address addr, int pos) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_snapshot_positions) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
+  if (Serializer::enabled() && address_to_name_map_ != NULL) {
+    const char* code_name = address_to_name_map_->Lookup(addr);
+    if (code_name == NULL) return;  // Not a code object.
+    LogMessageBuilder msg(this);
+    msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+    for (const char* p = code_name; *p != '\0'; ++p) {
+      if (*p == '"') msg.Append('\\');
+      msg.Append(*p);
+    }
+    msg.Append("\"\n");
+    msg.WriteToLogFile();
+  }
+  if (!FLAG_log_snapshot_positions) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
   msg.AppendAddress(addr);
   msg.Append(",%d", pos);
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
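// Illustrative example (not part of this change): with the serializer
// enabled, the function above can now emit two kinds of lines for a code
// object -- the new "snapshot-code-name" record keyed by serialization
// position, and the pre-existing "snapshot-pos" record keyed by address
// (only when --log-snapshot-positions is set). The values below are made up:
//
//   snapshot-code-name,42,"Builtin:ArgumentsAdaptorTrampoline"
//   snapshot-pos,0x2ad94a80,42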
 
 
 void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
-#endif
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::MoveEventInternal(LogEventsAndTags event,
                                Address from,
                                Address to) {
@@ -934,10 +1141,8 @@
   msg.Append('\n');
   msg.WriteToLogFile();
 }
-#endif
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
   if (!log_->IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg(this);
@@ -946,11 +1151,9 @@
   msg.Append('\n');
   msg.WriteToLogFile();
 }
-#endif
 
 
 void Logger::ResourceEvent(const char* name, const char* tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%s,", name, tag);
@@ -963,12 +1166,10 @@
 
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::SuspectReadEvent(String* name, Object* obj) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_suspect) return;
   LogMessageBuilder msg(this);
   String* class_name = obj->IsJSObject()
@@ -982,12 +1183,10 @@
   msg.Append('"');
   msg.Append('\n');
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg(this);
   // Using non-relative system time in order to be able to synchronize with
@@ -995,121 +1194,34 @@
   msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
              space, kind, OS::TimeCurrentMillis());
   msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleStats(const char* space, const char* kind,
-                             intptr_t capacity, intptr_t used) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_gc) return;
-  LogMessageBuilder msg(this);
-  msg.Append("heap-sample-stats,\"%s\",\"%s\","
-                 "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
-             space, kind, capacity, used);
-  msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg(this);
   msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg(this);
   msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
   msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleJSConstructorEvent(const char* constructor,
-                                          int number, int bytes) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_gc) return;
-  LogMessageBuilder msg(this);
-  msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
-  msg.WriteToLogFile();
-#endif
-}
-
-// Event starts with comma, so we don't have it in the format string.
-static const char kEventText[] = "heap-js-ret-item,%s";
-// We take placeholder strings into account, but it's OK to be conservative.
-static const int kEventTextLen = sizeof(kEventText)/sizeof(kEventText[0]);
-
-void Logger::HeapSampleJSRetainersEvent(
-    const char* constructor, const char* event) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_gc) return;
-  const int cons_len = StrLength(constructor);
-  const int event_len = StrLength(event);
-  int pos = 0;
-  // Retainer lists can be long. We may need to split them into multiple events.
-  do {
-    LogMessageBuilder msg(this);
-    msg.Append(kEventText, constructor);
-    int to_write = event_len - pos;
-    if (to_write > Log::kMessageBufferSize - (cons_len + kEventTextLen)) {
-      int cut_pos = pos + Log::kMessageBufferSize - (cons_len + kEventTextLen);
-      ASSERT(cut_pos < event_len);
-      while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
-      if (event[cut_pos] != ',') {
-        // Crash in debug mode, skip in release mode.
-        ASSERT(false);
-        return;
-      }
-      // Append a piece of event that fits, without trailing comma.
-      msg.AppendStringPart(event + pos, cut_pos - pos);
-      // Start next piece with comma.
-      pos = cut_pos;
-    } else {
-      msg.Append("%s", event + pos);
-      pos += event_len;
-    }
-    msg.Append('\n');
-    msg.WriteToLogFile();
-  } while (pos < event_len);
-#endif
-}
-
-
-void Logger::HeapSampleJSProducerEvent(const char* constructor,
-                                       Address* stack) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_gc) return;
-  LogMessageBuilder msg(this);
-  msg.Append("heap-js-prod-item,%s", constructor);
-  while (*stack != NULL) {
-    msg.Append(",0x%" V8PRIxPTR, *stack++);
-  }
-  msg.Append("\n");
-  msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::DebugTag(const char* call_site_tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg(this);
   msg.Append("debug-tag,%s\n", call_site_tag);
   msg.WriteToLogFile();
-#endif
 }
 
 
 void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (!log_->IsEnabled() || !FLAG_log) return;
   StringBuilder s(parameter.length() + 1);
   for (int i = 0; i < parameter.length(); ++i) {
@@ -1123,11 +1235,9 @@
              parameter_string);
   DeleteArray(parameter_string);
   msg.WriteToLogFile();
-#endif
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::TickEvent(TickSample* sample, bool overflow) {
   if (!log_->IsEnabled() || !FLAG_prof) return;
   LogMessageBuilder msg(this);
@@ -1155,21 +1265,14 @@
 }
 
 
-int Logger::GetActiveProfilerModules() {
-  int result = PROFILER_MODULE_NONE;
-  if (profiler_ != NULL && !profiler_->paused()) {
-    result |= PROFILER_MODULE_CPU;
-  }
-  if (FLAG_log_gc) {
-    result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
-  }
-  return result;
+bool Logger::IsProfilerPaused() {
+  return profiler_ == NULL || profiler_->paused();
 }
 
 
-void Logger::PauseProfiler(int flags, int tag) {
+void Logger::PauseProfiler() {
   if (!log_->IsEnabled()) return;
-  if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
+  if (profiler_ != NULL) {
     // It is OK to have negative nesting.
     if (--cpu_profiler_nesting_ == 0) {
       profiler_->pause();
@@ -1178,31 +1281,17 @@
           ticker_->Stop();
         }
         FLAG_log_code = false;
-        // Must be the same message as Log::kDynamicBufferSeal.
         LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
       }
       --logging_nesting_;
     }
   }
-  if (flags &
-      (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
-    if (--heap_profiler_nesting_ == 0) {
-      FLAG_log_gc = false;
-      --logging_nesting_;
-    }
-  }
-  if (tag != 0) {
-    UncheckedIntEvent("close-tag", tag);
-  }
 }
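// Illustrative note (not part of this change): PauseProfiler above and
// ResumeProfiler below remain reference counted through
// cpu_profiler_nesting_, so only the outermost transition actually pauses or
// resumes the sampler. Hypothetical call sequence:
//
//   LOGGER->ResumeProfiler();  // nesting 0 -> 1: profiling (re)starts.
//   LOGGER->ResumeProfiler();  // nesting 1 -> 2: no effect.
//   LOGGER->PauseProfiler();   // nesting 2 -> 1: no effect.
//   LOGGER->PauseProfiler();   // nesting 1 -> 0: profiler paused again.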
 
 
-void Logger::ResumeProfiler(int flags, int tag) {
+void Logger::ResumeProfiler() {
   if (!log_->IsEnabled()) return;
-  if (tag != 0) {
-    UncheckedIntEvent("open-tag", tag);
-  }
-  if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
+  if (profiler_ != NULL) {
     if (cpu_profiler_nesting_++ == 0) {
       ++logging_nesting_;
       if (FLAG_prof_lazy) {
@@ -1218,20 +1307,13 @@
       profiler_->resume();
     }
   }
-  if (flags &
-      (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
-    if (heap_profiler_nesting_++ == 0) {
-      ++logging_nesting_;
-      FLAG_log_gc = true;
-    }
-  }
 }
 
 
 // This function can be called when Log's mutex is acquired,
 // either from main or Profiler's thread.
 void Logger::LogFailure() {
-  PauseProfiler(PROFILER_MODULE_CPU, 0);
+  PauseProfiler();
 }
 
 
@@ -1240,11 +1322,6 @@
 }
 
 
-int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
-  return log_->GetLogLines(from_pos, dest_buf, max_size);
-}
-
-
 class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
  public:
   EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
@@ -1256,8 +1333,12 @@
   virtual void LeaveContext(Context* context) {}
 
   virtual void VisitFunction(JSFunction* function) {
+    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(function->shared());
+    Object* maybe_script = sfi->script();
+    if (maybe_script->IsScript()
+        && !Script::cast(maybe_script)->HasValidSource()) return;
     if (sfis_ != NULL) {
-      sfis_[*count_] = Handle<SharedFunctionInfo>(function->shared());
+      sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
     }
     if (code_objects_ != NULL) {
       ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
@@ -1308,7 +1389,7 @@
 
 
 void Logger::LogCodeObject(Object* object) {
-  if (FLAG_log_code) {
+  if (FLAG_log_code || FLAG_ll_prof) {
     Code* code_object = Code::cast(object);
     LogEventsAndTags tag = Logger::STUB_TAG;
     const char* description = "Unknown code from the snapshot";
@@ -1316,8 +1397,10 @@
       case Code::FUNCTION:
       case Code::OPTIMIZED_FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
-      case Code::TYPE_RECORDING_BINARY_OP_IC:   // fall through
+      case Code::UNARY_OP_IC:   // fall through
+      case Code::BINARY_OP_IC:   // fall through
       case Code::COMPARE_IC:  // fall through
+      case Code::TO_BOOLEAN_IC:  // fall through
       case Code::STUB:
         description =
             CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
@@ -1333,10 +1416,6 @@
         description = "A keyed load IC from the snapshot";
         tag = Logger::KEYED_LOAD_IC_TAG;
         break;
-      case Code::KEYED_EXTERNAL_ARRAY_LOAD_IC:
-        description = "A keyed external array load IC from the snapshot";
-        tag = Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG;
-        break;
       case Code::LOAD_IC:
         description = "A load IC from the snapshot";
         tag = Logger::LOAD_IC_TAG;
@@ -1349,10 +1428,6 @@
         description = "A keyed store IC from the snapshot";
         tag = Logger::KEYED_STORE_IC_TAG;
         break;
-      case Code::KEYED_EXTERNAL_ARRAY_STORE_IC:
-        description = "A keyed external array store IC from the snapshot";
-        tag = Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG;
-        break;
       case Code::CALL_IC:
         description = "A call IC from the snapshot";
         tag = Logger::CALL_IC_TAG;
@@ -1368,8 +1443,7 @@
 
 
 void Logger::LogCodeInfo() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+  if (!log_->IsEnabled() || !FLAG_ll_prof) return;
 #if V8_TARGET_ARCH_IA32
   const char arch[] = "ia32";
 #elif V8_TARGET_ARCH_X64
@@ -1379,21 +1453,68 @@
 #else
   const char arch[] = "unknown";
 #endif
-  LogMessageBuilder msg(this);
-  msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
-  msg.WriteToLogFile();
-#endif  // ENABLE_LOGGING_AND_PROFILING
+  LowLevelLogWriteBytes(arch, sizeof(arch));
 }
 
 
-void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
-  if (!FLAG_ll_prof || log_->output_code_handle_ == NULL) return;
-  int pos = static_cast<int>(ftell(log_->output_code_handle_));
-  size_t rv = fwrite(code->instruction_start(), 1, code->instruction_size(),
-                     log_->output_code_handle_);
-  ASSERT(static_cast<size_t>(code->instruction_size()) == rv);
+void Logger::RegisterSnapshotCodeName(Code* code,
+                                      const char* name,
+                                      int name_size) {
+  ASSERT(Serializer::enabled());
+  if (address_to_name_map_ == NULL) {
+    address_to_name_map_ = new NameMap;
+  }
+  address_to_name_map_->Insert(code->address(), name, name_size);
+}
+
+
+void Logger::LowLevelCodeCreateEvent(Code* code,
+                                     const char* name,
+                                     int name_size) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelCodeCreateStruct event;
+  event.name_size = name_size;
+  event.code_address = code->instruction_start();
+  ASSERT(event.code_address == code->address() + Code::kHeaderSize);
+  event.code_size = code->instruction_size();
+  LowLevelLogWriteStruct(event);
+  LowLevelLogWriteBytes(name, name_size);
+  LowLevelLogWriteBytes(
+      reinterpret_cast<const char*>(code->instruction_start()),
+      code->instruction_size());
+}
+
+
+void Logger::LowLevelCodeMoveEvent(Address from, Address to) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelCodeMoveStruct event;
+  event.from_address = from + Code::kHeaderSize;
+  event.to_address = to + Code::kHeaderSize;
+  LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelCodeDeleteEvent(Address from) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelCodeDeleteStruct event;
+  event.address = from + Code::kHeaderSize;
+  LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelSnapshotPositionEvent(Address addr, int pos) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelSnapshotPositionStruct event;
+  event.address = addr + Code::kHeaderSize;
+  event.position = pos;
+  LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
+  size_t rv = fwrite(bytes, 1, size, log_->ll_output_handle_);
+  ASSERT(static_cast<size_t>(size) == rv);
   USE(rv);
-  msg->Append(",%d", pos);
 }
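// Illustrative sketch (not part of this change): LowLevelLogWriteStruct is
// not defined in this hunk; from the call sites above it is assumed to frame
// each record as the struct's one-byte kTag followed by the raw struct
// bytes, roughly:
template <typename T>
void Logger::LowLevelLogWriteStruct(const T& s) {
  char tag = T::kTag;
  LowLevelLogWriteBytes(&tag, sizeof(tag));
  LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
}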
 
 
@@ -1406,6 +1527,51 @@
 }
 
 
+void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
+                                 Handle<Code> code) {
+  Handle<String> func_name(shared->DebugName());
+  if (shared->script()->IsScript()) {
+    Handle<Script> script(Script::cast(shared->script()));
+    if (script->name()->IsString()) {
+      Handle<String> script_name(String::cast(script->name()));
+      int line_num = GetScriptLineNumber(script, shared->start_position());
+      if (line_num > 0) {
+        PROFILE(ISOLATE,
+                CodeCreateEvent(
+                    Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+                    *code, *shared,
+                    *script_name, line_num + 1));
+      } else {
+        // Can't distinguish eval and script here, so always use Script.
+        PROFILE(ISOLATE,
+                CodeCreateEvent(
+                    Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+                    *code, *shared, *script_name));
+      }
+    } else {
+      PROFILE(ISOLATE,
+              CodeCreateEvent(
+                  Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+                  *code, *shared, *func_name));
+    }
+  } else if (shared->IsApiFunction()) {
+    // API function.
+    FunctionTemplateInfo* fun_data = shared->get_api_func_data();
+    Object* raw_call_data = fun_data->call_code();
+    if (!raw_call_data->IsUndefined()) {
+      CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
+      Object* callback_obj = call_data->callback();
+      Address entry_point = v8::ToCData<Address>(callback_obj);
+      PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
+    }
+  } else {
+    PROFILE(ISOLATE,
+            CodeCreateEvent(
+                Logger::LAZY_COMPILE_TAG, *code, *shared, *func_name));
+  }
+}
+
+
 void Logger::LogCompiledFunctions() {
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
@@ -1419,48 +1585,7 @@
     if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
         Builtins::kLazyCompile))
       continue;
-    Handle<SharedFunctionInfo> shared = sfis[i];
-    Handle<String> func_name(shared->DebugName());
-    if (shared->script()->IsScript()) {
-      Handle<Script> script(Script::cast(shared->script()));
-      if (script->name()->IsString()) {
-        Handle<String> script_name(String::cast(script->name()));
-        int line_num = GetScriptLineNumber(script, shared->start_position());
-        if (line_num > 0) {
-          PROFILE(ISOLATE,
-                  CodeCreateEvent(
-                    Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-                    *code_objects[i], *shared,
-                    *script_name, line_num + 1));
-        } else {
-          // Can't distinguish eval and script here, so always use Script.
-          PROFILE(ISOLATE,
-                  CodeCreateEvent(
-                      Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-                      *code_objects[i], *shared, *script_name));
-        }
-      } else {
-        PROFILE(ISOLATE,
-                CodeCreateEvent(
-                    Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-                    *code_objects[i], *shared, *func_name));
-      }
-    } else if (shared->IsApiFunction()) {
-      // API function.
-      FunctionTemplateInfo* fun_data = shared->get_api_func_data();
-      Object* raw_call_data = fun_data->call_code();
-      if (!raw_call_data->IsUndefined()) {
-        CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
-        Object* callback_obj = call_data->callback();
-        Address entry_point = v8::ToCData<Address>(callback_obj);
-        PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
-      }
-    } else {
-      PROFILE(ISOLATE,
-              CodeCreateEvent(
-                  Logger::LAZY_COMPILE_TAG, *code_objects[i],
-                  *shared, *func_name));
-    }
+    LogExistingFunction(sfis[i], code_objects[i]);
   }
 }
 
@@ -1485,18 +1610,14 @@
   }
 }
 
-#endif
-
 
 bool Logger::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
   if (is_initialized_) return true;
   is_initialized_ = true;
 
   // --ll-prof implies --log-snapshot-positions.
   if (FLAG_ll_prof) {
-    FLAG_log_code = true;
     FLAG_log_snapshot_positions = true;
   }
 
@@ -1514,16 +1635,16 @@
 
   if (FLAG_ll_prof) LogCodeInfo();
 
-  ticker_ = new Ticker(Isolate::Current(), kSamplingIntervalMs);
-
   Isolate* isolate = Isolate::Current();
+  ticker_ = new Ticker(isolate, kSamplingIntervalMs);
+
   if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
     sliding_state_window_ = new SlidingStateWindow(isolate);
   }
 
   bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
     || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
-    || FLAG_log_regexp || FLAG_log_state_changes;
+    || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
 
   if (start_logging) {
     logging_nesting_ = 1;
@@ -1542,10 +1663,6 @@
   }
 
   return true;
-
-#else
-  return false;
-#endif
 }
 
 
@@ -1555,23 +1672,18 @@
 
 
 void Logger::EnsureTickerStarted() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   ASSERT(ticker_ != NULL);
   if (!ticker_->IsActive()) ticker_->Start();
-#endif
 }
 
 
 void Logger::EnsureTickerStopped() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
-#endif
 }
 
 
-void Logger::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!is_initialized_) return;
+FILE* Logger::TearDown() {
+  if (!is_initialized_) return NULL;
   is_initialized_ = false;
 
   // Stop the profiler before closing the file.
@@ -1587,13 +1699,11 @@
   delete ticker_;
   ticker_ = NULL;
 
-  log_->Close();
-#endif
+  return log_->Close();
 }
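// Illustrative sketch (not part of this change): how a test might use the
// new temporary-file mode end to end. The flag assignments and the LOGGER
// macro usage here are hypothetical test scaffolding; the contract shown --
// TearDown() hands back the still-open FILE* only when FLAG_logfile equals
// Log::kLogToTemporaryFile -- comes from Log::Close() earlier in this diff.
static void ReadBackTemporaryLog() {
  FLAG_logfile = Log::kLogToTemporaryFile;
  FLAG_log = true;
  LOGGER->Setup();
  // ... exercise the code under test so that it emits log events ...
  FILE* log_file = LOGGER->TearDown();
  ASSERT(log_file != NULL);
  rewind(log_file);  // The temporary file is returned still open.
  char line[Log::kMessageBufferSize];
  while (fgets(line, sizeof(line), log_file) != NULL) {
    // Inspect individual log lines here.
  }
  fclose(log_file);  // Closing deletes the temporary file.
}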
 
 
 void Logger::EnableSlidingStateWindow() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // If the ticker is NULL, Logger::Setup has not been called yet.  In
   // that case, we set the sliding_state_window flag so that the
   // sliding window computation will be started when Logger::Setup is
@@ -1607,7 +1717,6 @@
   if (sliding_state_window_ == NULL) {
     sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
   }
-#endif
 }
 
 
diff --git a/src/log.h b/src/log.h
index 1fa86d2..50358ce 100644
--- a/src/log.h
+++ b/src/log.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_LOG_H_
 #define V8_LOG_H_
 
+#include "allocation.h"
 #include "platform.h"
 #include "log-utils.h"
 
@@ -69,14 +70,14 @@
 // tick profiler requires code events, so --prof implies --log-code.
 
 // Forward declarations.
-class Ticker;
+class HashMap;
+class LogMessageBuilder;
 class Profiler;
 class Semaphore;
 class SlidingStateWindow;
-class LogMessageBuilder;
+class Ticker;
 
 #undef LOG
-#ifdef ENABLE_LOGGING_AND_PROFILING
 #define LOG(isolate, Call)                          \
   do {                                              \
     v8::internal::Logger* logger =                  \
@@ -84,52 +85,52 @@
     if (logger->is_logging())                       \
       logger->Call;                                 \
   } while (false)
-#else
-#define LOG(isolate, Call) ((void) 0)
-#endif
 
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
-  V(CODE_CREATION_EVENT,            "code-creation")            \
-  V(CODE_MOVE_EVENT,                "code-move")                \
-  V(CODE_DELETE_EVENT,              "code-delete")              \
-  V(CODE_MOVING_GC,                 "code-moving-gc")           \
-  V(SHARED_FUNC_MOVE_EVENT,         "sfi-move")                 \
-  V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos")             \
-  V(TICK_EVENT,                     "tick")                     \
-  V(REPEAT_META_EVENT,              "repeat")                   \
-  V(BUILTIN_TAG,                    "Builtin")                  \
-  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak")           \
-  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")   \
-  V(CALL_IC_TAG,                    "CallIC")                   \
-  V(CALL_INITIALIZE_TAG,            "CallInitialize")           \
-  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic")          \
-  V(CALL_MISS_TAG,                  "CallMiss")                 \
-  V(CALL_NORMAL_TAG,                "CallNormal")               \
-  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic")       \
-  V(KEYED_CALL_DEBUG_BREAK_TAG,     "KeyedCallDebugBreak")      \
-  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG,                       \
-    "KeyedCallDebugPrepareStepIn")                              \
-  V(KEYED_CALL_IC_TAG,              "KeyedCallIC")              \
-  V(KEYED_CALL_INITIALIZE_TAG,      "KeyedCallInitialize")      \
-  V(KEYED_CALL_MEGAMORPHIC_TAG,     "KeyedCallMegamorphic")     \
-  V(KEYED_CALL_MISS_TAG,            "KeyedCallMiss")            \
-  V(KEYED_CALL_NORMAL_TAG,          "KeyedCallNormal")          \
-  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")  \
-  V(CALLBACK_TAG,                   "Callback")                 \
-  V(EVAL_TAG,                       "Eval")                     \
-  V(FUNCTION_TAG,                   "Function")                 \
-  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC")              \
-  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
-  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC")             \
-  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")\
-  V(LAZY_COMPILE_TAG,               "LazyCompile")              \
-  V(LOAD_IC_TAG,                    "LoadIC")                   \
-  V(REG_EXP_TAG,                    "RegExp")                   \
-  V(SCRIPT_TAG,                     "Script")                   \
-  V(STORE_IC_TAG,                   "StoreIC")                  \
-  V(STUB_TAG,                       "Stub")                     \
-  V(NATIVE_FUNCTION_TAG,            "Function")                 \
-  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile")              \
+#define LOG_EVENTS_AND_TAGS_LIST(V)                                     \
+  V(CODE_CREATION_EVENT,            "code-creation")                    \
+  V(CODE_MOVE_EVENT,                "code-move")                        \
+  V(CODE_DELETE_EVENT,              "code-delete")                      \
+  V(CODE_MOVING_GC,                 "code-moving-gc")                   \
+  V(SHARED_FUNC_MOVE_EVENT,         "sfi-move")                         \
+  V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos")                     \
+  V(SNAPSHOT_CODE_NAME_EVENT,       "snapshot-code-name")               \
+  V(TICK_EVENT,                     "tick")                             \
+  V(REPEAT_META_EVENT,              "repeat")                           \
+  V(BUILTIN_TAG,                    "Builtin")                          \
+  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak")                   \
+  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")           \
+  V(CALL_IC_TAG,                    "CallIC")                           \
+  V(CALL_INITIALIZE_TAG,            "CallInitialize")                   \
+  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic")                  \
+  V(CALL_MISS_TAG,                  "CallMiss")                         \
+  V(CALL_NORMAL_TAG,                "CallNormal")                       \
+  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic")               \
+  V(KEYED_CALL_DEBUG_BREAK_TAG,     "KeyedCallDebugBreak")              \
+  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG,                               \
+    "KeyedCallDebugPrepareStepIn")                                      \
+  V(KEYED_CALL_IC_TAG,              "KeyedCallIC")                      \
+  V(KEYED_CALL_INITIALIZE_TAG,      "KeyedCallInitialize")              \
+  V(KEYED_CALL_MEGAMORPHIC_TAG,     "KeyedCallMegamorphic")             \
+  V(KEYED_CALL_MISS_TAG,            "KeyedCallMiss")                    \
+  V(KEYED_CALL_NORMAL_TAG,          "KeyedCallNormal")                  \
+  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")          \
+  V(CALLBACK_TAG,                   "Callback")                         \
+  V(EVAL_TAG,                       "Eval")                             \
+  V(FUNCTION_TAG,                   "Function")                         \
+  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC")                      \
+  V(KEYED_LOAD_MEGAMORPHIC_IC_TAG,  "KeyedLoadMegamorphicIC")           \
+  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC")       \
+  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC")                     \
+  V(KEYED_STORE_MEGAMORPHIC_IC_TAG, "KeyedStoreMegamorphicIC")          \
+  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")     \
+  V(LAZY_COMPILE_TAG,               "LazyCompile")                      \
+  V(LOAD_IC_TAG,                    "LoadIC")                           \
+  V(REG_EXP_TAG,                    "RegExp")                           \
+  V(SCRIPT_TAG,                     "Script")                           \
+  V(STORE_IC_TAG,                   "StoreIC")                          \
+  V(STUB_TAG,                       "Stub")                             \
+  V(NATIVE_FUNCTION_TAG,            "Function")                         \
+  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile")                      \
   V(NATIVE_SCRIPT_TAG,              "Script")
 // Note that 'NATIVE_' cases for functions and scripts are mapped onto
 // original tags when writing to the log.
@@ -156,7 +157,9 @@
   Sampler* sampler();
 
   // Frees resources acquired in Setup.
-  void TearDown();
+  // When a temporary file is used for the log, returns its stream descriptor,
+  // leaving the file open.
+  FILE* TearDown();
 
   // Enable the computation of a sliding window of states.
   void EnableSlidingStateWindow();
@@ -267,7 +270,6 @@
   // Log an event reported from generated code
   void LogRuntime(Vector<const char> format, JSArray* args);
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   bool is_logging() {
     return logging_nesting_ > 0;
   }
@@ -275,14 +277,12 @@
   // Pause/Resume collection of profiling data.
   // When data collection is paused, CPU Tick events are discarded until
   // data collection is Resumed.
-  void PauseProfiler(int flags, int tag);
-  void ResumeProfiler(int flags, int tag);
-  int GetActiveProfilerModules();
+  void PauseProfiler();
+  void ResumeProfiler();
+  bool IsProfilerPaused();
 
-  // If logging is performed into a memory buffer, allows to
-  // retrieve previously written messages. See v8.h.
-  int GetLogLines(int from_pos, char* dest_buf, int max_size);
-
+  void LogExistingFunction(Handle<SharedFunctionInfo> shared,
+                           Handle<Code> code);
   // Logs all compiled functions found in the heap.
   void LogCompiledFunctions();
   // Logs all accessor callbacks found in the heap.
@@ -306,6 +306,9 @@
   void LogFailure();
 
  private:
+  class NameBuffer;
+  class NameMap;
+
   Logger();
   ~Logger();
 
@@ -332,8 +335,26 @@
   // Emits general information about generated code.
   void LogCodeInfo();
 
-  // Handles code creation when low-level profiling is active.
-  void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
+  void RegisterSnapshotCodeName(Code* code, const char* name, int name_size);
+
+  // Low-level logging support.
+
+  void LowLevelCodeCreateEvent(Code* code, const char* name, int name_size);
+
+  void LowLevelCodeMoveEvent(Address from, Address to);
+
+  void LowLevelCodeDeleteEvent(Address from);
+
+  void LowLevelSnapshotPositionEvent(Address addr, int pos);
+
+  void LowLevelLogWriteBytes(const char* bytes, int size);
+
+  template <typename T>
+  void LowLevelLogWriteStruct(const T& s) {
+    char tag = T::kTag;
+    LowLevelLogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
+    LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
+  }
 
   // Emits a profiler tick event. Used by the profiler thread.
   void TickEvent(TickSample* sample, bool overflow);
@@ -381,10 +402,13 @@
 
   int logging_nesting_;
   int cpu_profiler_nesting_;
-  int heap_profiler_nesting_;
 
   Log* log_;
 
+  NameBuffer* name_buffer_;
+
+  NameMap* address_to_name_map_;
+
   // Guards against multiple calls to TearDown() that can happen in some tests.
   // 'true' between Setup() and TearDown().
   bool is_initialized_;
@@ -401,9 +425,6 @@
   Address prev_code_;
 
   friend class CpuProfiler;
-#else
-  bool is_logging() { return false; }
-#endif
 };
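
The LowLevelLogWriteStruct template added above frames each low-level log record as a one-byte tag followed by the raw bytes of a fixed-layout struct. A minimal standalone sketch of that framing, with hypothetical CodeCreateRecord/CodeMoveRecord types and a plain FILE* in place of V8's Log machinery (none of these names are from the patch):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical plain-old-data records; only the kTag convention mirrors the
// T::kTag pattern used by LowLevelLogWriteStruct above.
struct CodeCreateRecord {
  static const char kTag = 'C';
  uint64_t address;
  int32_t size;
};

struct CodeMoveRecord {
  static const char kTag = 'M';
  uint64_t from;
  uint64_t to;
};

static void WriteBytes(FILE* out, const char* bytes, size_t size) {
  fwrite(bytes, 1, size, out);
}

// Write the tag byte first, then the struct's raw bytes.
template <typename T>
static void WriteStruct(FILE* out, const T& record) {
  char tag = T::kTag;
  WriteBytes(out, &tag, sizeof(tag));
  WriteBytes(out, reinterpret_cast<const char*>(&record), sizeof(record));
}

int main() {
  FILE* out = fopen("lowlevel.log", "wb");
  if (out == NULL) return 1;
  CodeCreateRecord create = { 0x1000, 256 };
  CodeMoveRecord move = { 0x1000, 0x2000 };
  WriteStruct(out, create);  // 'C' followed by the record's raw bytes
  WriteStruct(out, move);    // 'M' followed by the record's raw bytes
  fclose(out);
  return 0;
}

A reader of such a log recovers record boundaries from the tag byte plus the size of the corresponding struct, which is why only fixed-layout structs are written this way.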
 
 
diff --git a/src/macros.py b/src/macros.py
index 69f36c0..7a493ca 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -38,12 +38,13 @@
 const SETTER = 1;
 
 # These definitions must match the index of the properties in objects.h.
-const kApiTagOffset               = 0;
-const kApiPropertyListOffset      = 1;
-const kApiSerialNumberOffset      = 2;
-const kApiConstructorOffset       = 2;
-const kApiPrototypeTemplateOffset = 5;
-const kApiParentTemplateOffset    = 6;
+const kApiTagOffset                 = 0;
+const kApiPropertyListOffset        = 1;
+const kApiSerialNumberOffset        = 2;
+const kApiConstructorOffset         = 2;
+const kApiPrototypeTemplateOffset   = 5;
+const kApiParentTemplateOffset      = 6;
+const kApiFlagOffset                = 14;
 
 const NO_HINT     = 0;
 const NUMBER_HINT = 1;
@@ -64,6 +65,7 @@
 
 # For apinatives.js
 const kUninitialized = -1;
+const kReadOnlyPrototypeBit = 3;  # For FunctionTemplateInfo, matches objects.h
 
 # Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
 const kInvalidDate        = 'Invalid Date';
@@ -114,20 +116,29 @@
 
 # Macro for ECMAScript 5 queries of the type:
 # "Type(O) is object."
-# This is the same as being either a function or an object in V8 terminology.
+# This is the same as being either a function or an object in V8 terminology
+# (including proxies).
 # In addition, an undetectable object is also included by this.
-macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
+macro IS_SPEC_OBJECT(arg)   = (%_IsSpecObject(arg));
+
+# Macro for ECMAScript 5 queries of the type:
+# "IsCallable(O)"
+# We assume here that this is the same as being either a function or a function
+# proxy. That ignores host objects with [[Call]] methods, but in most situations
+# we cannot handle those anyway.
+macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
 
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
-macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || arg - arg == 0);
+macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
 macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
 macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
 macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
 macro TO_UINT32(arg) = (arg >>> 0);
 macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
 macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
-
+macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
+macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
 
 # Macros implemented in Python.
 python macro CHAR_CODE(str) = ord(str[1]);
@@ -167,7 +178,7 @@
 const CAPTURE0 = 3;
 const CAPTURE1 = 4;
 
-# PropertyDescriptor return value indices - must match 
+# PropertyDescriptor return value indices - must match
 # PropertyDescriptorIndices in runtime.cc.
 const IS_ACCESSOR_INDEX = 0;
 const VALUE_INDEX = 1;
@@ -176,3 +187,17 @@
 const WRITABLE_INDEX = 4;
 const ENUMERABLE_INDEX = 5;
 const CONFIGURABLE_INDEX = 6;
+
+# For messages.js
+# Matches Script::Type from objects.h
+const TYPE_NATIVE = 0;
+const TYPE_EXTENSION = 1;
+const TYPE_NORMAL = 2;
+
+# Matches Script::CompilationType from objects.h
+const COMPILATION_TYPE_HOST = 0;
+const COMPILATION_TYPE_EVAL = 1;
+const COMPILATION_TYPE_JSON = 2;
+
+# Matches Messages::kNoLineNumberInfo from v8.h
+const kNoLineNumberInfo = 0;
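
The updated NUMBER_IS_FINITE macro above replaces the arg - arg == 0 trick with explicit NaN and infinity comparisons, while JSON_NUMBER_TO_STRING still uses the subtraction form. A standalone C++ sketch, not V8 code, of why the two formulations classify doubles identically under strict IEEE-754 semantics; IsFiniteViaSubtraction and IsFiniteExplicit are hypothetical names:

#include <cassert>
#include <cmath>
#include <limits>

// Old-style check: a finite x satisfies x - x == 0; for NaN and +/-Inf the
// subtraction yields NaN, which compares unequal to everything.
// (Assumes strict IEEE-754 semantics, i.e. no -ffast-math.)
static bool IsFiniteViaSubtraction(double x) { return x - x == 0; }

// New-style check mirroring the macro: not NaN and not +/- infinity.
static bool IsFiniteExplicit(double x) {
  return (x == x) && (x != std::numeric_limits<double>::infinity()) &&
         (x != -std::numeric_limits<double>::infinity());
}

int main() {
  const double samples[] = {0.0, -0.0, 1.5, -1e308,
                            std::numeric_limits<double>::infinity(),
                            -std::numeric_limits<double>::infinity(),
                            std::numeric_limits<double>::quiet_NaN()};
  for (double x : samples) {
    assert(IsFiniteViaSubtraction(x) == IsFiniteExplicit(x));
    assert(IsFiniteExplicit(x) == (std::isfinite(x) != 0));
  }
  return 0;
}

Both checks rely on NaN comparing unequal to itself, which is also what NUMBER_IS_NAN above exploits.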
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 68a5062..9b0d5fc 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -64,13 +64,15 @@
       live_bytes_(0),
 #endif
       heap_(NULL),
-      code_flusher_(NULL) { }
+      code_flusher_(NULL),
+      encountered_weak_maps_(NULL) { }
 
 
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
   ASSERT(state_ == PREPARE_GC);
+  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
 
   // Prepare has selected whether to compact the old generation or not.
   // Tell the tracer.
@@ -80,6 +82,8 @@
 
   if (FLAG_collect_maps) ClearNonLiveTransitions();
 
+  ClearWeakMaps();
+
   SweepLargeObjectSpace();
 
   if (IsCompacting()) {
@@ -305,13 +309,11 @@
     *GetNextCandidateField(candidate) = next_candidate;
   }
 
-  STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
-
   static SharedFunctionInfo** GetNextCandidateField(
       SharedFunctionInfo* candidate) {
     Code* code = candidate->unchecked_code();
     return reinterpret_cast<SharedFunctionInfo**>(
-        code->address() + Code::kHeaderPaddingStart);
+        code->address() + Code::kNextCodeFlushingCandidateOffset);
   }
 
   static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
@@ -392,12 +394,18 @@
                                       ConsString::BodyDescriptor,
                                       void>::Visit);
 
+    table_.Register(kVisitSlicedString,
+                    &FixedBodyVisitor<StaticMarkingVisitor,
+                                      SlicedString::BodyDescriptor,
+                                      void>::Visit);
 
     table_.Register(kVisitFixedArray,
                     &FlexibleBodyVisitor<StaticMarkingVisitor,
                                          FixedArray::BodyDescriptor,
                                          void>::Visit);
 
+    table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
+
     table_.Register(kVisitGlobalContext,
                     &FixedBodyVisitor<StaticMarkingVisitor,
                                       Context::MarkCompactBodyDescriptor,
@@ -407,6 +415,8 @@
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
 
+    table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);
+
     table_.Register(kVisitOddball,
                     &FixedBodyVisitor<StaticMarkingVisitor,
                                       Oddball::BodyDescriptor,
@@ -424,6 +434,9 @@
     table_.Register(kVisitJSFunction,
                     &VisitJSFunctionAndFlushCode);
 
+    table_.Register(kVisitJSRegExp,
+                    &VisitRegExpAndFlushCode);
+
     table_.Register(kVisitPropertyCell,
                     &FixedBodyVisitor<StaticMarkingVisitor,
                                       JSGlobalPropertyCell::BodyDescriptor,
@@ -459,7 +472,7 @@
   static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
+    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
       IC::Clear(rinfo->pc());
       // Please note targets for cleared inline caches do not have to be
       // marked since they are contained in HEAP->non_monomorphic_cache().
@@ -553,6 +566,34 @@
                               StructBodyDescriptor,
                               void> StructObjectVisitor;
 
+  static void VisitJSWeakMap(Map* map, HeapObject* object) {
+    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
+
+    // Enqueue weak map in linked list of encountered weak maps.
+    ASSERT(weak_map->next() == Smi::FromInt(0));
+    weak_map->set_next(collector->encountered_weak_maps());
+    collector->set_encountered_weak_maps(weak_map);
+
+    // Skip visiting the backing hash table containing the mappings.
+    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
+    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
+        map->heap(),
+        object,
+        JSWeakMap::BodyDescriptor::kStartOffset,
+        JSWeakMap::kTableOffset);
+    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
+        map->heap(),
+        object,
+        JSWeakMap::kTableOffset + kPointerSize,
+        object_size);
+
+    // Mark the backing hash table without pushing it on the marking stack.
+    ASSERT(!weak_map->unchecked_table()->IsMarked());
+    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
+    collector->SetMark(weak_map->unchecked_table());
+  }
+
   static void VisitCode(Map* map, HeapObject* object) {
     reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
         map->heap());
@@ -564,6 +605,8 @@
   // flushed.
   static const int kCodeAgeThreshold = 5;
 
+  static const int kRegExpCodeThreshold = 5;
+
   inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
     Object* undefined = heap->raw_unchecked_undefined_value();
     return (info->script() != undefined) &&
@@ -622,13 +665,19 @@
     }
 
     // Only flush code for functions.
-    if (shared_info->code()->kind() != Code::FUNCTION) return false;
+    if (shared_info->code()->kind() != Code::FUNCTION) {
+      return false;
+    }
 
     // Function must be lazy compilable.
-    if (!shared_info->allows_lazy_compilation()) return false;
+    if (!shared_info->allows_lazy_compilation()) {
+      return false;
+    }
 
     // If this is a full script wrapped in a function we do not flush the code.
-    if (shared_info->is_toplevel()) return false;
+    if (shared_info->is_toplevel()) {
+      return false;
+    }
 
     // Age this shared function info.
     if (shared_info->code_age() < kCodeAgeThreshold) {
@@ -672,8 +721,9 @@
 
     Map* map = SafeMap(ctx);
     Heap* heap = map->heap();
-    if (!(map == heap->raw_unchecked_context_map() ||
+    if (!(map == heap->raw_unchecked_function_context_map() ||
           map == heap->raw_unchecked_catch_context_map() ||
+          map == heap->raw_unchecked_with_context_map() ||
           map == heap->raw_unchecked_global_context_map())) {
       return false;
     }
@@ -699,6 +749,68 @@
   }
 
 
+  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
+                                          JSRegExp* re,
+                                          bool is_ascii) {
+    // Make sure that the fixed array is in fact initialized on the RegExp.
+    // We could potentially trigger a GC when initializing the RegExp.
+    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
+
+    // Make sure this is a RegExp that actually contains code.
+    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
+
+    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
+    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
+      // Save a copy that can be reinstated if we need the code again.
+      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
+                             code,
+                             heap);
+      // Set a number in the 0-255 range to guarantee no smi overflow.
+      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
+                             Smi::FromInt(heap->sweep_generation() & 0xff),
+                             heap);
+    } else if (code->IsSmi()) {
+      int value = Smi::cast(code)->value();
+      // The regexp has not been compiled yet or there was a compilation error.
+      if (value == JSRegExp::kUninitializedValue ||
+          value == JSRegExp::kCompilationErrorValue) {
+        return;
+      }
+
+      // Check if we should flush now.
+      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
+        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
+                               Smi::FromInt(JSRegExp::kUninitializedValue),
+                               heap);
+        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
+                               Smi::FromInt(JSRegExp::kUninitializedValue),
+                               heap);
+      }
+    }
+  }
+
+
+  // Works by setting the current sweep_generation (as a smi) in the
+  // code object's place in the data array of the RegExp, keeping a copy
+  // around that can be reinstated if we reuse the RegExp before flushing.
+  // If the code has not been used for kRegExpCodeThreshold mark-sweep GCs,
+  // we flush it.
+  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
+    Heap* heap = map->heap();
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    if (!collector->is_code_flushing_enabled()) {
+      VisitJSRegExpFields(map, object);
+      return;
+    }
+    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
+    // Flush code or set age on both ascii and two byte code.
+    UpdateRegExpCodeAgeAndFlush(heap, re, true);
+    UpdateRegExpCodeAgeAndFlush(heap, re, false);
+    // Visit the fields of the RegExp, including the updated FixedArray.
+    VisitJSRegExpFields(map, object);
+  }
+
+
   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                   HeapObject* object) {
     MarkCompactCollector* collector = map->heap()->mark_compact_collector();
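
UpdateRegExpCodeAgeAndFlush above ages compiled RegExp code by overwriting the code slot with the current sweep generation (mod 256), stashing the real code in a saved slot, and flushing both once the stamp is exactly kRegExpCodeThreshold collections old. A standalone sketch of that aging scheme; CompiledEntry, AgeOrFlush and kFlushThreshold are hypothetical stand-ins for the JSRegExp data-array slots:

#include <cstdint>

// Hypothetical stand-in for a cached compiled artifact: either real code
// (code != nullptr) or an age stamp left behind by a previous GC cycle.
struct CompiledEntry {
  const void* code;        // compiled code, or nullptr if only a stamp is set
  const void* saved_code;  // copy kept so reuse can reinstate the code
  uint8_t stamp;           // sweep generation (mod 256) when last seen unused
  bool has_stamp;
};

static const int kFlushThreshold = 5;  // collections of disuse before flushing

// Called once per entry during each mark-sweep collection.
static void AgeOrFlush(CompiledEntry* entry, uint32_t sweep_generation) {
  if (entry->code != nullptr) {
    // First collection where live code is seen: stash the code and record the
    // current generation in its place.
    entry->saved_code = entry->code;
    entry->code = nullptr;
    entry->stamp = static_cast<uint8_t>(sweep_generation & 0xff);
    entry->has_stamp = true;
  } else if (entry->has_stamp &&
             entry->stamp == static_cast<uint8_t>(
                 (sweep_generation - kFlushThreshold) & 0xff)) {
    // The stamp is exactly kFlushThreshold generations old: the code was not
    // reused in between, so drop the saved copy as well.
    entry->saved_code = nullptr;
    entry->has_stamp = false;
  }
}

int main() {
  static char dummy_code;
  CompiledEntry e = { &dummy_code, nullptr, 0, false };
  // Stamped at generation 10; never reused, so flushed at generation 15.
  for (uint32_t gen = 10; gen <= 15; ++gen) AgeOrFlush(&e, gen);
  return e.saved_code == nullptr ? 0 : 1;
}

Reuse outside of GC would copy saved_code back into code, so the next collection re-stamps the entry and the countdown starts over, matching the saved_code_index/code_index pair in the hunk above.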
@@ -758,21 +870,7 @@
       collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
 
       if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        // For optimized functions we should retain both non-optimized version
-        // of it's code and non-optimized version of all inlined functions.
-        // This is required to support bailing out from inlined code.
-        DeoptimizationInputData* data =
-            reinterpret_cast<DeoptimizationInputData*>(
-                jsfunction->unchecked_code()->unchecked_deoptimization_data());
-
-        FixedArray* literals = data->UncheckedLiteralArray();
-
-        for (int i = 0, count = data->InlinedFunctionCount()->value();
-             i < count;
-             i++) {
-          JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
-          collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
-        }
+        collector->MarkInlinedFunctionsCode(jsfunction->unchecked_code());
       }
     }
 
@@ -829,6 +927,15 @@
     // Don't visit the next function list field as it is a weak reference.
   }
 
+  static inline void VisitJSRegExpFields(Map* map,
+                                         HeapObject* object) {
+    int last_property_offset =
+        JSRegExp::kSize + kPointerSize * map->inobject_properties();
+    VisitPointers(map->heap(),
+                  SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
+                  SLOT_ADDR(object, last_property_offset));
+  }
+
 
   static void VisitSharedFunctionInfoFields(Heap* heap,
                                             HeapObject* object,
@@ -868,18 +975,6 @@
     StaticMarkingVisitor::VisitPointers(heap_, start, end);
   }
 
-  void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitCodeTarget(heap, rinfo);
-  }
-
-  void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitGlobalPropertyCell(heap, rinfo);
-  }
-
-  void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitDebugTarget(heap, rinfo);
-  }
-
  private:
   Heap* heap_;
 };
@@ -891,9 +986,7 @@
       : collector_(collector) {}
 
   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
-    for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
-      collector_->MarkObject(it.frame()->unchecked_code());
-    }
+    collector_->PrepareThreadForCodeFlushing(isolate, top);
   }
 
  private:
@@ -924,6 +1017,42 @@
 };
 
 
+void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
+  // For an optimized function we should retain both the non-optimized version
+  // of its code and the non-optimized versions of all inlined functions.
+  // This is required to support bailing out from inlined code.
+  DeoptimizationInputData* data =
+      reinterpret_cast<DeoptimizationInputData*>(
+          code->unchecked_deoptimization_data());
+
+  FixedArray* literals = data->UncheckedLiteralArray();
+
+  for (int i = 0, count = data->InlinedFunctionCount()->value();
+       i < count;
+       i++) {
+    JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
+    MarkObject(inlined->unchecked_shared()->unchecked_code());
+  }
+}
+
+
+void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
+                                                        ThreadLocalTop* top) {
+  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    // Note: for a frame that has a pending lazy deoptimization,
+    // StackFrame::unchecked_code will return the non-optimized code object
+    // for the outermost function, while StackFrame::LookupCode will return
+    // the actual optimized code object.
+    StackFrame* frame = it.frame();
+    Code* code = frame->unchecked_code();
+    MarkObject(code);
+    if (frame->is_optimized()) {
+      MarkInlinedFunctionsCode(frame->LookupCode());
+    }
+  }
+}
+
+
 void MarkCompactCollector::PrepareForCodeFlushing() {
   ASSERT(heap() == Isolate::Current()->heap());
 
@@ -947,9 +1076,8 @@
 
   // Make sure we are not referencing the code from the stack.
   ASSERT(this == heap()->mark_compact_collector());
-  for (StackFrameIterator it; !it.done(); it.Advance()) {
-    MarkObject(it.frame()->unchecked_code());
-  }
+  PrepareThreadForCodeFlushing(heap()->isolate(),
+                               heap()->isolate()->thread_local_top());
 
   // Iterate the archived stacks in all threads to check if
   // the code is referenced.
@@ -1032,6 +1160,7 @@
   int PointersRemoved() {
     return pointers_removed_;
   }
+
  private:
   Heap* heap_;
   int pointers_removed_;
@@ -1058,13 +1187,16 @@
   ASSERT(HEAP->Contains(object));
   if (object->IsMap()) {
     Map* map = Map::cast(object);
-    if (FLAG_cleanup_caches_in_maps_at_gc) {
+    if (FLAG_cleanup_code_caches_at_gc) {
       map->ClearCodeCache(heap());
     }
     SetMark(map);
-    if (FLAG_collect_maps &&
-        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
-        map->instance_type() <= JS_FUNCTION_TYPE) {
+
+    // When map collection is enabled we have to mark through the map's
+    // transitions in a special way to make transition links weak.
+    // Only maps for subclasses of JSReceiver can have transitions.
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
       MarkMapContents(map);
     } else {
       marking_stack_.Push(map);
@@ -1083,8 +1215,13 @@
   FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
   if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
 
-  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
-      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
+  Object* raw_descriptor_array =
+      *HeapObject::RawField(map,
+                            Map::kInstanceDescriptorsOrBitField3Offset);
+  if (!raw_descriptor_array->IsSmi()) {
+    MarkDescriptorArray(
+        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
+  }
 
   // Mark the Object* fields of the Map.
   // Since the descriptor array has been marked already, it is fine
@@ -1141,8 +1278,8 @@
        next_object != NULL; next_object = iterator.next()) {
     if (next_object->IsMap()) {  // Could also be ByteArray on free list.
       Map* map = Map::cast(next_object);
-      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
-          map->instance_type() <= JS_FUNCTION_TYPE) {
+      STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+      if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
         map->CreateBackPointers();
       } else {
         ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
@@ -1295,20 +1432,26 @@
 // marking stack have been marked, or are overflowed in the heap.
 void MarkCompactCollector::EmptyMarkingStack() {
   while (!marking_stack_.is_empty()) {
-    HeapObject* object = marking_stack_.Pop();
-    ASSERT(object->IsHeapObject());
-    ASSERT(heap()->Contains(object));
-    ASSERT(object->IsMarked());
-    ASSERT(!object->IsOverflowed());
+    while (!marking_stack_.is_empty()) {
+      HeapObject* object = marking_stack_.Pop();
+      ASSERT(object->IsHeapObject());
+      ASSERT(heap()->Contains(object));
+      ASSERT(object->IsMarked());
+      ASSERT(!object->IsOverflowed());
 
-    // Because the object is marked, we have to recover the original map
-    // pointer and use it to mark the object's body.
-    MapWord map_word = object->map_word();
-    map_word.ClearMark();
-    Map* map = map_word.ToMap();
-    MarkObject(map);
+      // Because the object is marked, we have to recover the original map
+      // pointer and use it to mark the object's body.
+      MapWord map_word = object->map_word();
+      map_word.ClearMark();
+      Map* map = map_word.ToMap();
+      MarkObject(map);
 
-    StaticMarkingVisitor::IterateBody(map, object);
+      StaticMarkingVisitor::IterateBody(map, object);
+    }
+
+    // Process encountered weak maps, mark objects that are only reachable
+    // through those weak maps, and repeat until a fixed point is reached.
+    ProcessWeakMaps();
   }
 }
 
@@ -1426,6 +1569,12 @@
   // reachable from the weak roots.
   ProcessExternalMarking();
 
+  // Object literal map caches reference symbols (cache keys) and maps
+  // (cache values). At this point, maps that are still useful have already
+  // been marked. Mark the keys of the live values before we process the
+  // symbol table.
+  ProcessMapCaches();
+
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
   // table is marked.
@@ -1454,6 +1603,57 @@
 }
 
 
+void MarkCompactCollector::ProcessMapCaches() {
+  Object* raw_context = heap()->global_contexts_list_;
+  while (raw_context != heap()->undefined_value()) {
+    Context* context = reinterpret_cast<Context*>(raw_context);
+    if (context->IsMarked()) {
+      HeapObject* raw_map_cache =
+          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
+      // A map cache may be reachable from the stack. In this case
+      // it's already transitively marked and it's too late to clean
+      // up its parts.
+      if (!raw_map_cache->IsMarked() &&
+          raw_map_cache != heap()->undefined_value()) {
+        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
+        int existing_elements = map_cache->NumberOfElements();
+        int used_elements = 0;
+        for (int i = MapCache::kElementsStartIndex;
+             i < map_cache->length();
+             i += MapCache::kEntrySize) {
+          Object* raw_key = map_cache->get(i);
+          if (raw_key == heap()->undefined_value() ||
+              raw_key == heap()->null_value()) continue;
+          STATIC_ASSERT(MapCache::kEntrySize == 2);
+          Object* raw_map = map_cache->get(i + 1);
+          if (raw_map->IsHeapObject() &&
+              HeapObject::cast(raw_map)->IsMarked()) {
+            ++used_elements;
+          } else {
+            // Delete useless entries with unmarked maps.
+            ASSERT(raw_map->IsMap());
+            map_cache->set_null_unchecked(heap(), i);
+            map_cache->set_null_unchecked(heap(), i + 1);
+          }
+        }
+        if (used_elements == 0) {
+          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
+        } else {
+          // Note: we don't actually shrink the cache here to avoid
+          // extra complexity during GC. We rely on subsequent cache
+          // usages (EnsureCapacity) to do this.
+          map_cache->ElementsRemoved(existing_elements - used_elements);
+          MarkObject(map_cache);
+        }
+      }
+    }
+    // Move to next element in the list.
+    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
+  }
+  ProcessMarkingStack();
+}
+
+
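
ProcessMapCaches above walks the global context list and, for each not-yet-marked object-literal map cache, clears entries whose cached map did not survive marking; the cache itself is only marked (kept) when at least one entry remains. A standalone sketch of that pruning step over a hypothetical flat key/value table (FlatCache and PruneCache are not from the patch):

#include <cstddef>
#include <vector>

// Hypothetical flat cache: entries stored as (key, value) pairs, kEntrySize
// slots per entry, with nullptr standing in for V8's null/undefined holes.
struct FlatCache {
  static const size_t kEntrySize = 2;
  std::vector<const void*> slots;  // key at i, value at i + 1
};

// Drops entries whose value is not marked; returns the number of live entries
// so the caller can decide whether to keep (mark) the cache at all.
template <typename IsMarked>
static size_t PruneCache(FlatCache* cache, IsMarked is_marked) {
  size_t used = 0;
  for (size_t i = 0; i + 1 < cache->slots.size(); i += FlatCache::kEntrySize) {
    const void* key = cache->slots[i];
    if (key == nullptr) continue;            // empty or deleted entry
    const void* value = cache->slots[i + 1];
    if (value != nullptr && is_marked(value)) {
      ++used;                                // entry survives this GC
    } else {
      cache->slots[i] = nullptr;             // clear key and value in place
      cache->slots[i + 1] = nullptr;
    }
  }
  return used;  // caller drops the whole cache when this is 0
}

int main() {
  FlatCache cache;
  const char* keys[2] = { "key-a", "key-b" };
  const char* maps[2] = { "map-a", "map-b" };
  cache.slots = { keys[0], maps[0], keys[1], maps[1] };
  // Pretend only map-a survived marking.
  size_t used = PruneCache(&cache, [&](const void* v) { return v == maps[0]; });
  return used == 1 ? 0 : 1;
}

When the returned count is zero, the caller would drop the cache altogether, which is what the hunk does by storing undefined back into Context::MAP_CACHE_INDEX.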
 #ifdef DEBUG
 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
   live_bytes_ += obj->Size();
@@ -1518,8 +1718,8 @@
 
     ASSERT(SafeIsMap(map));
     // Only JSObject and subtypes have map transitions and back pointers.
-    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
-    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
+    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
 
     if (map->IsMarked() && map->attached_to_shared_function_info()) {
       // This map is used for inobject slack tracking and has been detached
@@ -1529,38 +1729,48 @@
     }
 
     // Clear dead prototype transitions.
-    FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
-    if (prototype_transitions->length() > 0) {
-      int finger = Smi::cast(prototype_transitions->get(0))->value();
-      int new_finger = 1;
-      for (int i = 1; i < finger; i += 2) {
-        Object* prototype = prototype_transitions->get(i);
-        Object* cached_map = prototype_transitions->get(i + 1);
+    int number_of_transitions = map->NumberOfProtoTransitions();
+    if (number_of_transitions > 0) {
+      FixedArray* prototype_transitions =
+          map->unchecked_prototype_transitions();
+      int new_number_of_transitions = 0;
+      const int header = Map::kProtoTransitionHeaderSize;
+      const int proto_offset =
+          header + Map::kProtoTransitionPrototypeOffset;
+      const int map_offset = header + Map::kProtoTransitionMapOffset;
+      const int step = Map::kProtoTransitionElementsPerEntry;
+      for (int i = 0; i < number_of_transitions; i++) {
+        Object* prototype = prototype_transitions->get(proto_offset + i * step);
+        Object* cached_map = prototype_transitions->get(map_offset + i * step);
         if (HeapObject::cast(prototype)->IsMarked() &&
             HeapObject::cast(cached_map)->IsMarked()) {
-          if (new_finger != i) {
-            prototype_transitions->set_unchecked(heap_,
-                                                 new_finger,
-                                                 prototype,
-                                                 UPDATE_WRITE_BARRIER);
-            prototype_transitions->set_unchecked(heap_,
-                                                 new_finger + 1,
-                                                 cached_map,
-                                                 SKIP_WRITE_BARRIER);
+          if (new_number_of_transitions != i) {
+            prototype_transitions->set_unchecked(
+                heap_,
+                proto_offset + new_number_of_transitions * step,
+                prototype,
+                UPDATE_WRITE_BARRIER);
+            prototype_transitions->set_unchecked(
+                heap_,
+                map_offset + new_number_of_transitions * step,
+                cached_map,
+                SKIP_WRITE_BARRIER);
           }
-          new_finger += 2;
+          new_number_of_transitions++;
         }
       }
 
       // Fill slots that became free with undefined value.
       Object* undefined = heap()->raw_unchecked_undefined_value();
-      for (int i = new_finger; i < finger; i++) {
+      for (int i = new_number_of_transitions * step;
+           i < number_of_transitions * step;
+           i++) {
         prototype_transitions->set_unchecked(heap_,
-                                             i,
+                                             header + i,
                                              undefined,
                                              SKIP_WRITE_BARRIER);
       }
-      prototype_transitions->set_unchecked(0, Smi::FromInt(new_finger));
+      map->SetNumberOfProtoTransitions(new_number_of_transitions);
     }
 
     // Follow the chain of back pointers to find the prototype.
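
The prototype-transition hunk above replaces the old 'finger' encoding with an explicit entry count: surviving (prototype, cached map) pairs are compacted toward the front of the transition array and the freed tail is overwritten with undefined. A standalone sketch of that in-place pair compaction; the int elements and the is_live callback are stand-ins for heap objects and their mark bits:

#include <cstddef>
#include <vector>

// Hypothetical transition table: header slots first, then one
// (prototype, cached_map) pair per entry, as in the hunk above.
static const size_t kHeaderSize = 1;
static const size_t kSlotsPerEntry = 2;

template <typename IsLive>
static size_t CompactPairs(std::vector<int>* table,
                           size_t number_of_entries,
                           IsLive is_live,
                           int undefined_marker) {
  size_t new_count = 0;
  for (size_t i = 0; i < number_of_entries; ++i) {
    size_t src = kHeaderSize + i * kSlotsPerEntry;
    int prototype = (*table)[src];
    int cached_map = (*table)[src + 1];
    if (!is_live(prototype) || !is_live(cached_map)) continue;  // drop dead pair
    size_t dst = kHeaderSize + new_count * kSlotsPerEntry;
    (*table)[dst] = prototype;      // shift surviving pair toward the front
    (*table)[dst + 1] = cached_map;
    ++new_count;
  }
  // Overwrite the now-unused tail so no stale pointers survive the GC.
  for (size_t i = kHeaderSize + new_count * kSlotsPerEntry;
       i < kHeaderSize + number_of_entries * kSlotsPerEntry; ++i) {
    (*table)[i] = undefined_marker;
  }
  return new_count;  // caller stores this as the new transition count
}

int main() {
  // Header slot, then three (prototype, cached_map) pairs; 0 means "dead".
  std::vector<int> table = { 3,  10, 11,  0, 21,  30, 31 };
  size_t live = CompactPairs(&table, 3,
                             [](int x) { return x != 0; },
                             /*undefined_marker=*/ -1);
  // live == 2; table is now { 3, 10, 11, 30, 31, -1, -1 }.
  return live == 2 ? 0 : 1;
}

Returning the new count plays the role of map->SetNumberOfProtoTransitions(new_number_of_transitions) in the hunk above.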
@@ -1594,6 +1804,45 @@
   }
 }
 
+
+void MarkCompactCollector::ProcessWeakMaps() {
+  Object* weak_map_obj = encountered_weak_maps();
+  while (weak_map_obj != Smi::FromInt(0)) {
+    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
+    ObjectHashTable* table = weak_map->unchecked_table();
+    for (int i = 0; i < table->Capacity(); i++) {
+      if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+        Object* value = table->get(table->EntryToValueIndex(i));
+        StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
+        table->set_unchecked(heap(),
+                             table->EntryToValueIndex(i),
+                             value,
+                             UPDATE_WRITE_BARRIER);
+      }
+    }
+    weak_map_obj = weak_map->next();
+  }
+}
+
+
+void MarkCompactCollector::ClearWeakMaps() {
+  Object* weak_map_obj = encountered_weak_maps();
+  while (weak_map_obj != Smi::FromInt(0)) {
+    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
+    ObjectHashTable* table = weak_map->unchecked_table();
+    for (int i = 0; i < table->Capacity(); i++) {
+      if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+        table->RemoveEntry(i, heap());
+      }
+    }
+    weak_map_obj = weak_map->next();
+    weak_map->set_next(Smi::FromInt(0));
+  }
+  set_encountered_weak_maps(Smi::FromInt(0));
+}
+
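
ProcessWeakMaps and ClearWeakMaps above give weak maps ephemeron-like behaviour: a value stays alive only while its key is reachable, so marking alternates between draining the ordinary marking stack and re-scanning the encountered weak maps until a fixed point is reached, after which entries with dead keys are removed. A standalone sketch of that loop; Obj, WeakMap and the helpers below are hypothetical and stand in for HeapObject, JSWeakMap and the marking stack:

#include <utility>
#include <vector>

// Hypothetical heap object and weak map. "children" models the strong
// references a marked object scans during the regular body visit.
struct Obj {
  bool marked = false;
  std::vector<Obj*> children;
};

struct WeakMap {
  std::vector<std::pair<Obj*, Obj*> > entries;  // (key, value) pairs
};

static void MarkAndPush(Obj* obj, std::vector<Obj*>* stack) {
  if (obj == nullptr || obj->marked) return;
  obj->marked = true;
  stack->push_back(obj);
}

static void DrainStack(std::vector<Obj*>* stack) {
  while (!stack->empty()) {
    Obj* obj = stack->back();
    stack->pop_back();
    for (Obj* child : obj->children) MarkAndPush(child, stack);
  }
}

// Alternate draining the stack with weak-map processing until no new value
// becomes marked, the fixed point described in EmptyMarkingStack above.
static void MarkWithWeakMaps(std::vector<Obj*>* stack,
                             std::vector<WeakMap*>* weak_maps) {
  bool progress = true;
  while (progress) {
    DrainStack(stack);
    progress = false;
    for (WeakMap* wm : *weak_maps) {
      for (std::pair<Obj*, Obj*>& entry : wm->entries) {
        if (entry.first->marked && !entry.second->marked) {
          MarkAndPush(entry.second, stack);  // value kept alive by its key
          progress = true;
        }
      }
    }
  }
}

// After marking, drop entries whose key is dead (the ClearWeakMaps step).
static void ClearDeadEntries(std::vector<WeakMap*>* weak_maps) {
  for (WeakMap* wm : *weak_maps) {
    std::vector<std::pair<Obj*, Obj*> > live;
    for (const std::pair<Obj*, Obj*>& entry : wm->entries) {
      if (entry.first->marked) live.push_back(entry);
    }
    wm->entries.swap(live);
  }
}

int main() {
  Obj key, value, root;
  root.children.push_back(&key);  // key reachable from the root
  WeakMap wm;
  wm.entries.push_back(std::make_pair(&key, &value));
  std::vector<WeakMap*> weak_maps(1, &wm);
  std::vector<Obj*> stack;
  MarkAndPush(&root, &stack);
  MarkWithWeakMaps(&stack, &weak_maps);
  ClearDeadEntries(&weak_maps);
  return (value.marked && wm.entries.size() == 1) ? 0 : 1;
}

Note that marking a weak map here never marks its values directly; values are pulled in only via marked keys, which is the effect VisitJSWeakMap achieves above by skipping the backing table's entries during the regular body visit.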
 // -------------------------------------------------------------------------
 // Phase 2: Encode forwarding addresses.
 // When compacting, forwarding addresses for objects in old space and map
@@ -1922,6 +2171,7 @@
     VisitPointer(&target);
     rinfo->set_call_address(Code::cast(target)->instruction_start());
   }
+
  private:
   Heap* heap_;
 };
@@ -2055,7 +2305,7 @@
   }
 
   // Update roots.
-  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
   LiveObjectList::IterateElements(&updating_visitor);
 
   // Update pointers in old spaces.
@@ -3116,11 +3366,9 @@
     GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
   }
 #endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (obj->IsCode()) {
     PROFILE(isolate, CodeDeleteEvent(obj->address()));
   }
-#endif
 }
 
 
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 04d0ff6..f72c813 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -96,6 +96,10 @@
 };
 
 
+// Defined in isolate.h.
+class ThreadLocalTop;
+
+
 // -------------------------------------------------------------------------
 // Mark-Compact collector
 
@@ -193,6 +197,11 @@
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
   void EnableCodeFlushing(bool enable);
 
+  inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
+  inline void set_encountered_weak_maps(Object* weak_map) {
+    encountered_weak_maps_ = weak_map;
+  }
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
@@ -248,6 +257,14 @@
   friend class CodeMarkingVisitor;
   friend class SharedFunctionInfoMarkingVisitor;
 
+  // Mark non-optimized code for functions inlined into the given optimized
+  // code. This will prevent it from being flushed.
+  void MarkInlinedFunctionsCode(Code* code);
+
+  // Mark code objects that are active on the stack to prevent them
+  // from being flushed.
+  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
+
   void PrepareForCodeFlushing();
 
   // Marking operations for objects reachable from roots.
@@ -306,6 +323,10 @@
   // flag on the marking stack.
   void RefillMarkingStack();
 
+  // After reachable maps have been marked, process per-context object
+  // literal map caches, removing unmarked entries.
+  void ProcessMapCaches();
+
   // Callback function for telling whether the object *p is an unmarked
   // heap object.
   static bool IsUnmarkedHeapObject(Object** p);
@@ -325,6 +346,16 @@
   // We replace them with a null descriptor, with the same key.
   void ClearNonLiveTransitions();
 
+  // Mark all values associated with reachable keys in weak maps encountered
+  // so far.  This might push new objects or even new weak maps onto the
+  // marking stack.
+  void ProcessWeakMaps();
+
+  // After all reachable objects have been marked, those weak map entries
+  // with an unreachable key are removed from all encountered weak maps.
+  // The linked list of all encountered weak maps is destroyed.
+  void ClearWeakMaps();
+
   // -----------------------------------------------------------------------
   // Phase 2: Sweeping to clear mark bits and free non-live objects for
   // a non-compacting collection, or else computing and encoding
@@ -495,6 +526,7 @@
   Heap* heap_;
   MarkingStack marking_stack_;
   CodeFlusher* code_flusher_;
+  Object* encountered_weak_maps_;
 
   friend class Heap;
   friend class OverflowedObjectsScanner;
diff --git a/src/math.js b/src/math.js
index 70b8c57..b5a6d18 100644
--- a/src/math.js
+++ b/src/math.js
@@ -38,7 +38,7 @@
 function MathConstructor() {}
 %FunctionSetInstanceClassName(MathConstructor, 'Math');
 const $Math = new MathConstructor();
-$Math.__proto__ = global.Object.prototype;
+$Math.__proto__ = $Object.prototype;
 %SetProperty(global, "Math", $Math, DONT_ENUM);
 
 // ECMA 262 - 15.8.2.1
@@ -195,8 +195,9 @@
 
 // -------------------------------------------------------------------
 
-function SetupMath() {
-  // Setup math constants.
+function SetUpMath() {
+  %CheckIsBootstrapping();
+  // Set up math constants.
   // ECMA-262, section 15.8.1.1.
   %OptimizeObjectForAddingMultipleProperties($Math, 8);
   %SetProperty($Math,
@@ -236,7 +237,7 @@
                DONT_ENUM |  DONT_DELETE | READ_ONLY);
   %ToFastProperties($Math);
 
-  // Setup non-enumerable functions of the Math object and
+  // Set up non-enumerable functions of the Math object and
   // set their names.
   InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
     "random", MathRandom,
@@ -258,7 +259,6 @@
     "max", MathMax,
     "min", MathMin
   ));
-};
+}
 
-
-SetupMath();
+SetUpMath();
diff --git a/src/messages.cc b/src/messages.cc
index abc2537..b6ad5ac 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -41,13 +41,13 @@
 // by default.
 void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
                                           Handle<Object> message_obj) {
-  SmartPointer<char> str = GetLocalizedMessage(message_obj);
+  SmartArrayPointer<char> str = GetLocalizedMessage(message_obj);
   if (loc == NULL) {
     PrintF("%s\n", *str);
   } else {
     HandleScope scope;
     Handle<Object> data(loc->script()->name());
-    SmartPointer<char> data_str;
+    SmartArrayPointer<char> data_str;
     if (data->IsString())
       data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
     PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
@@ -125,13 +125,13 @@
       HandleScope scope;
       if (global_listeners.get(i)->IsUndefined()) continue;
       v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
-      Handle<Proxy> callback_obj(Proxy::cast(listener.get(0)));
+      Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
       v8::MessageCallback callback =
-          FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
+          FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
       Handle<Object> callback_data(listener.get(1));
       {
         // Do not allow exceptions to propagate.
-        v8::TryCatch tryCatch;
+        v8::TryCatch try_catch;
         callback(api_message_obj, v8::Utils::ToLocal(callback_data));
       }
       if (isolate->has_scheduled_exception()) {
@@ -170,7 +170,8 @@
 }
 
 
-SmartPointer<char> MessageHandler::GetLocalizedMessage(Handle<Object> data) {
+SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
+    Handle<Object> data) {
   HandleScope scope;
   return GetMessage(data)->ToCString(DISALLOW_NULLS);
 }
diff --git a/src/messages.h b/src/messages.h
index fc2162d..358509e 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -105,7 +105,7 @@
   static void DefaultMessageReport(const MessageLocation* loc,
                                    Handle<Object> message_obj);
   static Handle<String> GetMessage(Handle<Object> data);
-  static SmartPointer<char> GetLocalizedMessage(Handle<Object> data);
+  static SmartArrayPointer<char> GetLocalizedMessage(Handle<Object> data);
 };
 
 } }  // namespace v8::internal
diff --git a/src/messages.js b/src/messages.js
index d8810dc..a9993af 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,27 +28,14 @@
 
 // -------------------------------------------------------------------
 //
-// Matches Script::Type from objects.h
-var TYPE_NATIVE = 0;
-var TYPE_EXTENSION = 1;
-var TYPE_NORMAL = 2;
-
-// Matches Script::CompilationType from objects.h
-var COMPILATION_TYPE_HOST = 0;
-var COMPILATION_TYPE_EVAL = 1;
-var COMPILATION_TYPE_JSON = 2;
-
-// Matches Messages::kNoLineNumberInfo from v8.h
-var kNoLineNumberInfo = 0;
-
 // If this object gets passed to an error constructor the error will
 // get an accessor for .message that constructs a descriptive error
 // message on access.
-var kAddMessageAccessorsMarker = { };
+const kAddMessageAccessorsMarker = { };
 
-var kMessages = 0;
-
-var kReplacementMarkers = [ "%0", "%1", "%2", "%3" ];
+// This will be lazily initialized when first needed (and forcibly
+// overwritten even though it's const).
+const kMessages = 0;
 
 function FormatString(format, message) {
   var args = %MessageGetArguments(message);
@@ -56,12 +43,16 @@
   var arg_num = 0;
   for (var i = 0; i < format.length; i++) {
     var str = format[i];
-    for (arg_num = 0; arg_num < kReplacementMarkers.length; arg_num++) {
-      if (format[i] !== kReplacementMarkers[arg_num]) continue;
-      try {
-        str = ToDetailString(args[arg_num]);
-      } catch (e) {
-        str = "#<error>";
+    if (str.length == 2 && %_StringCharCodeAt(str, 0) == 0x25) {
+      // Two-char string starts with "%".
+      var arg_num = (%_StringCharCodeAt(str, 1) - 0x30) >>> 0;
+      if (arg_num < 4) {
+        // str is one of %0, %1, %2 or %3.
+        try {
+          str = ToDetailString(args[arg_num]);
+        } catch (e) {
+          str = "#<error>";
+        }
       }
     }
     result += str;
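
The FormatString rewrite above detects a placeholder by checking for a two-character piece '%0'..'%3' via char codes instead of scanning a marker array. A standalone C++ sketch of the same substitution over a hypothetical pre-split template; FormatPieces is not from the patch, and the "#<error>" fallback stands in for the try/catch around ToDetailString in the JavaScript version:

#include <string>
#include <vector>

// Hypothetical message template, pre-split into pieces the way kMessages
// entries are (e.g. {"Cannot read property '", "%0", "' of ", "%1"}).
static std::string FormatPieces(const std::vector<std::string>& pieces,
                                const std::vector<std::string>& args) {
  std::string result;
  for (const std::string& piece : pieces) {
    // A placeholder is exactly two characters: '%' followed by '0'..'3'.
    if (piece.size() == 2 && piece[0] == '%' &&
        piece[1] >= '0' && piece[1] <= '3') {
      size_t arg_num = static_cast<size_t>(piece[1] - '0');
      result += arg_num < args.size() ? args[arg_num] : "#<error>";
      continue;
    }
    result += piece;  // ordinary literal text
  }
  return result;
}

int main() {
  std::vector<std::string> pieces = {"Cannot read property '", "%0",
                                     "' of ", "%1"};
  std::vector<std::string> args = {"length", "undefined"};
  return FormatPieces(pieces, args) ==
                 "Cannot read property 'length' of undefined"
             ? 0
             : 1;
}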
@@ -100,17 +91,16 @@
 
 
 function ToDetailString(obj) {
-  if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
+  if (obj != null && IS_OBJECT(obj) && obj.toString === ObjectToString) {
     var constructor = obj.constructor;
-    if (!constructor) return ToStringCheckErrorObject(obj);
-    var constructorName = constructor.name;
-    if (!constructorName || !IS_STRING(constructorName)) {
-      return ToStringCheckErrorObject(obj);
+    if (typeof constructor == "function") {
+      var constructorName = constructor.name;
+      if (IS_STRING(constructorName) && constructorName !== "") {
+        return "#<" + constructorName + ">";
+      }
     }
-    return "#<" + constructorName + ">";
-  } else {
-    return ToStringCheckErrorObject(obj);
   }
+  return ToStringCheckErrorObject(obj);
 }
 
 
@@ -126,10 +116,11 @@
 
 
 /**
- * Setup the Script function and constructor.
+ * Set up the Script function and constructor.
  */
 %FunctionSetInstanceClassName(Script, 'Script');
-%SetProperty(Script.prototype, 'constructor', Script, DONT_ENUM);
+%SetProperty(Script.prototype, 'constructor', Script,
+             DONT_ENUM | DONT_DELETE | READ_ONLY);
 %SetCode(Script, function(x) {
   // Script objects can only be created by the VM.
   throw new $Error("Not supported");
@@ -139,106 +130,135 @@
 // Helper functions; called from the runtime system.
 function FormatMessage(message) {
   if (kMessages === 0) {
-    kMessages = {
+    var messagesDictionary = [
       // Error
-      cyclic_proto:                 ["Cyclic __proto__ value"],
+      "cyclic_proto",                 ["Cyclic __proto__ value"],
+      "code_gen_from_strings",        ["Code generation from strings disallowed for this context"],
       // TypeError
-      unexpected_token:             ["Unexpected token ", "%0"],
-      unexpected_token_number:      ["Unexpected number"],
-      unexpected_token_string:      ["Unexpected string"],
-      unexpected_token_identifier:  ["Unexpected identifier"],
-      unexpected_strict_reserved:   ["Unexpected strict mode reserved word"],
-      unexpected_eos:               ["Unexpected end of input"],
-      malformed_regexp:             ["Invalid regular expression: /", "%0", "/: ", "%1"],
-      unterminated_regexp:          ["Invalid regular expression: missing /"],
-      regexp_flags:                 ["Cannot supply flags when constructing one RegExp from another"],
-      incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
-      invalid_lhs_in_assignment:    ["Invalid left-hand side in assignment"],
-      invalid_lhs_in_for_in:        ["Invalid left-hand side in for-in"],
-      invalid_lhs_in_postfix_op:    ["Invalid left-hand side expression in postfix operation"],
-      invalid_lhs_in_prefix_op:     ["Invalid left-hand side expression in prefix operation"],
-      multiple_defaults_in_switch:  ["More than one default clause in switch statement"],
-      newline_after_throw:          ["Illegal newline after throw"],
-      redeclaration:                ["%0", " '", "%1", "' has already been declared"],
-      no_catch_or_finally:          ["Missing catch or finally after try"],
-      unknown_label:                ["Undefined label '", "%0", "'"],
-      uncaught_exception:           ["Uncaught ", "%0"],
-      stack_trace:                  ["Stack Trace:\n", "%0"],
-      called_non_callable:          ["%0", " is not a function"],
-      undefined_method:             ["Object ", "%1", " has no method '", "%0", "'"],
-      property_not_function:        ["Property '", "%0", "' of object ", "%1", " is not a function"],
-      cannot_convert_to_primitive:  ["Cannot convert object to primitive value"],
-      not_constructor:              ["%0", " is not a constructor"],
-      not_defined:                  ["%0", " is not defined"],
-      non_object_property_load:     ["Cannot read property '", "%0", "' of ", "%1"],
-      non_object_property_store:    ["Cannot set property '", "%0", "' of ", "%1"],
-      non_object_property_call:     ["Cannot call method '", "%0", "' of ", "%1"],
-      with_expression:              ["%0", " has no properties"],
-      illegal_invocation:           ["Illegal invocation"],
-      no_setter_in_callback:        ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
-      apply_non_function:           ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
-      apply_wrong_args:             ["Function.prototype.apply: Arguments list has wrong type"],
-      invalid_in_operator_use:      ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
-      instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
-      instanceof_nonobject_proto:   ["Function has non-object prototype '", "%0", "' in instanceof check"],
-      null_to_object:               ["Cannot convert null to object"],
-      reduce_no_initial:            ["Reduce of empty array with no initial value"],
-      getter_must_be_callable:      ["Getter must be a function: ", "%0"],
-      setter_must_be_callable:      ["Setter must be a function: ", "%0"],
-      value_and_accessor:           ["Invalid property.  A property cannot both have accessors and be writable or have a value: ", "%0"],
-      proto_object_or_null:         ["Object prototype may only be an Object or null"],
-      property_desc_object:         ["Property description must be an object: ", "%0"],
-      redefine_disallowed:          ["Cannot redefine property: ", "%0"],
-      define_disallowed:            ["Cannot define property, object is not extensible: ", "%0"],
-      non_extensible_proto:         ["%0", " is not extensible"],
+      "unexpected_token",             ["Unexpected token ", "%0"],
+      "unexpected_token_number",      ["Unexpected number"],
+      "unexpected_token_string",      ["Unexpected string"],
+      "unexpected_token_identifier",  ["Unexpected identifier"],
+      "unexpected_reserved",          ["Unexpected reserved word"],
+      "unexpected_strict_reserved",   ["Unexpected strict mode reserved word"],
+      "unexpected_eos",               ["Unexpected end of input"],
+      "malformed_regexp",             ["Invalid regular expression: /", "%0", "/: ", "%1"],
+      "unterminated_regexp",          ["Invalid regular expression: missing /"],
+      "regexp_flags",                 ["Cannot supply flags when constructing one RegExp from another"],
+      "incompatible_method_receiver", ["Method ", "%0", " called on incompatible receiver ", "%1"],
+      "invalid_lhs_in_assignment",    ["Invalid left-hand side in assignment"],
+      "invalid_lhs_in_for_in",        ["Invalid left-hand side in for-in"],
+      "invalid_lhs_in_postfix_op",    ["Invalid left-hand side expression in postfix operation"],
+      "invalid_lhs_in_prefix_op",     ["Invalid left-hand side expression in prefix operation"],
+      "multiple_defaults_in_switch",  ["More than one default clause in switch statement"],
+      "newline_after_throw",          ["Illegal newline after throw"],
+      "redeclaration",                ["%0", " '", "%1", "' has already been declared"],
+      "no_catch_or_finally",          ["Missing catch or finally after try"],
+      "unknown_label",                ["Undefined label '", "%0", "'"],
+      "uncaught_exception",           ["Uncaught ", "%0"],
+      "stack_trace",                  ["Stack Trace:\n", "%0"],
+      "called_non_callable",          ["%0", " is not a function"],
+      "undefined_method",             ["Object ", "%1", " has no method '", "%0", "'"],
+      "property_not_function",        ["Property '", "%0", "' of object ", "%1", " is not a function"],
+      "cannot_convert_to_primitive",  ["Cannot convert object to primitive value"],
+      "not_constructor",              ["%0", " is not a constructor"],
+      "not_defined",                  ["%0", " is not defined"],
+      "non_object_property_load",     ["Cannot read property '", "%0", "' of ", "%1"],
+      "non_object_property_store",    ["Cannot set property '", "%0", "' of ", "%1"],
+      "non_object_property_call",     ["Cannot call method '", "%0", "' of ", "%1"],
+      "with_expression",              ["%0", " has no properties"],
+      "illegal_invocation",           ["Illegal invocation"],
+      "no_setter_in_callback",        ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
+      "apply_non_function",           ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
+      "apply_wrong_args",             ["Function.prototype.apply: Arguments list has wrong type"],
+      "invalid_in_operator_use",      ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
+      "instanceof_function_expected", ["Expecting a function in instanceof check, but got ", "%0"],
+      "instanceof_nonobject_proto",   ["Function has non-object prototype '", "%0", "' in instanceof check"],
+      "null_to_object",               ["Cannot convert null to object"],
+      "reduce_no_initial",            ["Reduce of empty array with no initial value"],
+      "getter_must_be_callable",      ["Getter must be a function: ", "%0"],
+      "setter_must_be_callable",      ["Setter must be a function: ", "%0"],
+      "value_and_accessor",           ["Invalid property.  A property cannot both have accessors and be writable or have a value, ", "%0"],
+      "proto_object_or_null",         ["Object prototype may only be an Object or null"],
+      "property_desc_object",         ["Property description must be an object: ", "%0"],
+      "redefine_disallowed",          ["Cannot redefine property: ", "%0"],
+      "define_disallowed",            ["Cannot define property:", "%0", ", object is not extensible."],
+      "non_extensible_proto",         ["%0", " is not extensible"],
+      "handler_non_object",           ["Proxy.", "%0", " called with non-object as handler"],
+      "trap_function_expected",       ["Proxy.", "%0", " called with non-function for ", "%1", " trap"],
+      "handler_trap_missing",         ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
+      "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
+      "handler_returned_false",       ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
+      "handler_returned_undefined",   ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
+      "proxy_prop_not_configurable",  ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
+      "proxy_non_object_prop_names",  ["Trap ", "%1", " returned non-object ", "%0"],
+      "proxy_repeated_prop_name",     ["Trap ", "%1", " returned repeated property name ", "%2"],
+      "invalid_weakmap_key",          ["Invalid value used as weak map key"],
       // RangeError
-      invalid_array_length:         ["Invalid array length"],
-      stack_overflow:               ["Maximum call stack size exceeded"],
+      "invalid_array_length",         ["Invalid array length"],
+      "stack_overflow",               ["Maximum call stack size exceeded"],
       // SyntaxError
-      unable_to_parse:              ["Parse error"],
-      duplicate_regexp_flag:        ["Duplicate RegExp flag ", "%0"],
-      invalid_regexp:               ["Invalid RegExp pattern /", "%0", "/"],
-      illegal_break:                ["Illegal break statement"],
-      illegal_continue:             ["Illegal continue statement"],
-      illegal_return:               ["Illegal return statement"],
-      error_loading_debugger:       ["Error loading debugger"],
-      no_input_to_regexp:           ["No input to ", "%0"],
-      invalid_json:                 ["String '", "%0", "' is not valid JSON"],
-      circular_structure:           ["Converting circular structure to JSON"],
-      obj_ctor_property_non_object: ["Object.", "%0", " called on non-object"],
-      array_indexof_not_defined:    ["Array.getIndexOf: Argument undefined"],
-      object_not_extensible:        ["Can't add property ", "%0", ", object is not extensible"],
-      illegal_access:               ["Illegal access"],
-      invalid_preparser_data:       ["Invalid preparser data for function ", "%0"],
-      strict_mode_with:             ["Strict mode code may not include a with statement"],
-      strict_catch_variable:        ["Catch variable may not be eval or arguments in strict mode"],
-      too_many_arguments:           ["Too many arguments in function call (only 32766 allowed)"],
-      too_many_parameters:          ["Too many parameters in function definition (only 32766 allowed)"],
-      too_many_variables:           ["Too many variables declared (only 32767 allowed)"],
-      strict_param_name:            ["Parameter name eval or arguments is not allowed in strict mode"],
-      strict_param_dupe:            ["Strict mode function may not have duplicate parameter names"],
-      strict_var_name:              ["Variable name may not be eval or arguments in strict mode"],
-      strict_function_name:         ["Function name may not be eval or arguments in strict mode"],
-      strict_octal_literal:         ["Octal literals are not allowed in strict mode."],
-      strict_duplicate_property:    ["Duplicate data property in object literal not allowed in strict mode"],
-      accessor_data_property:       ["Object literal may not have data and accessor property with the same name"],
-      accessor_get_set:             ["Object literal may not have multiple get/set accessors with the same name"],
-      strict_lhs_assignment:        ["Assignment to eval or arguments is not allowed in strict mode"],
-      strict_lhs_postfix:           ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
-      strict_lhs_prefix:            ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
-      strict_reserved_word:         ["Use of future reserved word in strict mode"],
-      strict_delete:                ["Delete of an unqualified identifier in strict mode."],
-      strict_delete_property:       ["Cannot delete property '", "%0", "' of ", "%1"],
-      strict_const:                 ["Use of const in strict mode."],
-      strict_function:              ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
-      strict_read_only_property:    ["Cannot assign to read only property '", "%0", "' of ", "%1"],
-      strict_cannot_assign:         ["Cannot assign to read only '", "%0", "' in strict mode"],
-      strict_arguments_callee:      ["Cannot access property 'callee' of strict mode arguments"],
-      strict_arguments_caller:      ["Cannot access property 'caller' of strict mode arguments"],
-      strict_function_caller:       ["Cannot access property 'caller' of a strict mode function"],
-      strict_function_arguments:    ["Cannot access property 'arguments' of a strict mode function"],
-      strict_caller:                ["Illegal access to a strict mode caller function."],
-    };
+      "unable_to_parse",              ["Parse error"],
+      "invalid_regexp_flags",         ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
+      "invalid_regexp",               ["Invalid RegExp pattern /", "%0", "/"],
+      "illegal_break",                ["Illegal break statement"],
+      "illegal_continue",             ["Illegal continue statement"],
+      "illegal_return",               ["Illegal return statement"],
+      "error_loading_debugger",       ["Error loading debugger"],
+      "no_input_to_regexp",           ["No input to ", "%0"],
+      "invalid_json",                 ["String '", "%0", "' is not valid JSON"],
+      "circular_structure",           ["Converting circular structure to JSON"],
+      "obj_ctor_property_non_object", ["Object.", "%0", " called on non-object"],
+      "called_on_null_or_undefined",  ["%0", " called on null or undefined"],
+      "array_indexof_not_defined",    ["Array.getIndexOf: Argument undefined"],
+      "object_not_extensible",        ["Can't add property ", "%0", ", object is not extensible"],
+      "illegal_access",               ["Illegal access"],
+      "invalid_preparser_data",       ["Invalid preparser data for function ", "%0"],
+      "strict_mode_with",             ["Strict mode code may not include a with statement"],
+      "strict_catch_variable",        ["Catch variable may not be eval or arguments in strict mode"],
+      "too_many_arguments",           ["Too many arguments in function call (only 32766 allowed)"],
+      "too_many_parameters",          ["Too many parameters in function definition (only 32766 allowed)"],
+      "too_many_variables",           ["Too many variables declared (only 32767 allowed)"],
+      "strict_param_name",            ["Parameter name eval or arguments is not allowed in strict mode"],
+      "strict_param_dupe",            ["Strict mode function may not have duplicate parameter names"],
+      "strict_var_name",              ["Variable name may not be eval or arguments in strict mode"],
+      "strict_function_name",         ["Function name may not be eval or arguments in strict mode"],
+      "strict_octal_literal",         ["Octal literals are not allowed in strict mode."],
+      "strict_duplicate_property",    ["Duplicate data property in object literal not allowed in strict mode"],
+      "accessor_data_property",       ["Object literal may not have data and accessor property with the same name"],
+      "accessor_get_set",             ["Object literal may not have multiple get/set accessors with the same name"],
+      "strict_lhs_assignment",        ["Assignment to eval or arguments is not allowed in strict mode"],
+      "strict_lhs_postfix",           ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
+      "strict_lhs_prefix",            ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+      "strict_reserved_word",         ["Use of future reserved word in strict mode"],
+      "strict_delete",                ["Delete of an unqualified identifier in strict mode."],
+      "strict_delete_property",       ["Cannot delete property '", "%0", "' of ", "%1"],
+      "strict_const",                 ["Use of const in strict mode."],
+      "strict_function",              ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
+      "strict_read_only_property",    ["Cannot assign to read only property '", "%0", "' of ", "%1"],
+      "strict_cannot_assign",         ["Cannot assign to read only '", "%0", "' in strict mode"],
+      "strict_poison_pill",           ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
+      "strict_caller",                ["Illegal access to a strict mode caller function."],
+      "unprotected_let",              ["Illegal let declaration in unprotected statement context."],
+      "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
+      "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
+    ];
+    var messages = { __proto__ : null };
+    var desc = new PropertyDescriptor();
+    desc.setConfigurable(false);
+    desc.setEnumerable(false);
+    desc.setWritable(false);
+    for (var i = 0; i < messagesDictionary.length; i += 2) {
+      var key = messagesDictionary[i];
+      var format = messagesDictionary[i + 1];
+      ObjectFreeze(format);
+      desc.setValue(format);
+      DefineOwnProperty(messages, key, desc);
+    }
+    %PreventExtensions(messages);
+    %IgnoreAttributesAndSetProperty(builtins, "kMessages",
+                                    messages,
+                                    DONT_DELETE | DONT_ENUM | READ_ONLY);
   }
   var message_type = %MessageGetType(message);
   var format = kMessages[message_type];
@@ -304,7 +324,7 @@
  * @return {number} 0 if input too small, -1 if input too large,
  *     else the line number.
  */
-Script.prototype.lineFromPosition = function(position) {
+function ScriptLineFromPosition(position) {
   var lower = 0;
   var upper = this.lineCount() - 1;
   var line_ends = this.line_ends;
@@ -343,8 +363,8 @@
  * @return {SourceLocation}
  *     If line is negative or not in the source null is returned.
  */
-Script.prototype.locationFromPosition = function (position,
-                                                  include_resource_offset) {
+function ScriptLocationFromPosition(position,
+                                    include_resource_offset) {
   var line = this.lineFromPosition(position);
   if (line == -1) return null;
 
@@ -352,7 +372,9 @@
   var line_ends = this.line_ends;
   var start = line == 0 ? 0 : line_ends[line - 1] + 1;
   var end = line_ends[line];
-  if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') end--;
+  if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') {
+    end--;
+  }
   var column = position - start;
 
   // Adjust according to the offset within the resource.
@@ -377,11 +399,12 @@
  * @param {number} opt_line The line within the source. Default value is 0
  * @param {number} opt_column The column in within the line. Default value is 0
  * @param {number} opt_offset_position The offset from the beginning of the
- *     source from where the line and column calculation starts. Default value is 0
+ *     source from where the line and column calculation starts.
+ *     Default value is 0
  * @return {SourceLocation}
  *     If line is negative or not in the source null is returned.
  */
-Script.prototype.locationFromLine = function (opt_line, opt_column, opt_offset_position) {
+function ScriptLocationFromLine(opt_line, opt_column, opt_offset_position) {
   // Default is the first line in the script. Lines in the script is relative
   // to the offset within the resource.
   var line = 0;
@@ -423,7 +446,7 @@
  * @return {SourceSlice} The source slice, or null if the parameters were
  *     invalid
  */
-Script.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
+function ScriptSourceSlice(opt_from_line, opt_to_line) {
   var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset : opt_from_line;
   var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount() : opt_to_line;
 
@@ -450,7 +473,7 @@
 }
 
 
-Script.prototype.sourceLine = function (opt_line) {
+function ScriptSourceLine(opt_line) {
   // Default is the first line in the script. Lines in the script are relative
   // to the offset within the resource.
   var line = 0;
@@ -476,7 +499,7 @@
  * @return {number}
  *     Number of source lines.
  */
-Script.prototype.lineCount = function() {
+function ScriptLineCount() {
   // Return number of source lines.
   return this.line_ends.length;
 };
@@ -492,9 +515,10 @@
  * @return {?string} script name if present, value for //@ sourceURL comment
  * otherwise.
  */
-Script.prototype.nameOrSourceURL = function() {
-  if (this.name)
+function ScriptNameOrSourceURL() {
+  if (this.name) {
     return this.name;
+  }
   // TODO(608): the spaces in a regexp below had to be escaped as \040
   // because this file is being processed by js2c whose handling of spaces
   // in regexps is broken. Also, ['"] are excluded from allowed URLs to
@@ -520,6 +544,20 @@
 }
 
 
+SetUpLockedPrototype(Script,
+  $Array("source", "name", "line_ends", "line_offset", "column_offset"),
+  $Array(
+    "lineFromPosition", ScriptLineFromPosition,
+    "locationFromPosition", ScriptLocationFromPosition,
+    "locationFromLine", ScriptLocationFromLine,
+    "sourceSlice", ScriptSourceSlice,
+    "sourceLine", ScriptSourceLine,
+    "lineCount", ScriptLineCount,
+    "nameOrSourceURL", ScriptNameOrSourceURL
+  )
+);
+
+
 /**
  * Class for source location. A source location is a position within some
  * source with the following properties:
@@ -550,7 +588,6 @@
   this.end = end;
 }
 
-
 const kLineLengthLimit = 78;
 
 /**
@@ -561,7 +598,7 @@
  * @param {number} opt_before The number of characters to prefer before the
  *     position with a default value of 10 less that the limit
  */
-SourceLocation.prototype.restrict = function (opt_limit, opt_before) {
+function SourceLocationRestrict(opt_limit, opt_before) {
   // Find the actual limit to use.
   var limit;
   var before;
@@ -608,11 +645,20 @@
  * @return {String}
  *     Source text for this location.
  */
-SourceLocation.prototype.sourceText = function () {
+function SourceLocationSourceText() {
   return %_CallFunction(this.script.source, this.start, this.end, StringSubstring);
 };
 
 
+SetUpLockedPrototype(SourceLocation,
+  $Array("script", "position", "line", "column", "start", "end"),
+  $Array(
+    "restrict", SourceLocationRestrict,
+    "sourceText", SourceLocationSourceText
+  )
+);
+
+
 /**
  * Class for a source slice. A source slice is a part of a script source with
  * the following properties:
@@ -639,19 +685,23 @@
   this.to_position = to_position;
 }
 
-
 /**
  * Get the source text for a SourceSlice
  * @return {String} Source text for this slice. The last line will include
  *     the line terminating characters (if any)
  */
-SourceSlice.prototype.sourceText = function () {
+function SourceSliceSourceText() {
   return %_CallFunction(this.script.source,
                         this.from_position,
                         this.to_position,
                         StringSubstring);
 };
 
+SetUpLockedPrototype(SourceSlice,
+  $Array("script", "from_line", "to_line", "from_position", "to_position"),
+  $Array("sourceText", SourceSliceSourceText)
+);
+
 
 // Returns the offset of the given position within the containing
 // line.
@@ -680,18 +730,24 @@
   // can't rely on 'this' being the same as 'obj'.
   var hasBeenSet = false;
   var value;
-  obj.__defineGetter__(name, function () {
+  function getter() {
     if (hasBeenSet) {
       return value;
     }
     hasBeenSet = true;
     value = fun(obj);
     return value;
-  });
-  obj.__defineSetter__(name, function (v) {
+  }
+  function setter(v) {
     hasBeenSet = true;
     value = v;
-  });
+  }
+  var desc = { get: getter,
+               set: setter,
+               enumerable: false,
+               configurable: true };
+  desc = ToPropertyDescriptor(desc);
+  DefineOwnProperty(obj, name, desc, true);
 }
 
 function CallSite(receiver, fun, pos) {
@@ -700,46 +756,49 @@
   this.pos = pos;
 }
 
-CallSite.prototype.getThis = function () {
+function CallSiteGetThis() {
   return this.receiver;
 };
 
-CallSite.prototype.getTypeName = function () {
+function CallSiteGetTypeName() {
   var constructor = this.receiver.constructor;
-  if (!constructor)
+  if (!constructor) {
     return %_CallFunction(this.receiver, ObjectToString);
+  }
   var constructorName = constructor.name;
-  if (!constructorName)
+  if (!constructorName) {
     return %_CallFunction(this.receiver, ObjectToString);
+  }
   return constructorName;
 };
 
-CallSite.prototype.isToplevel = function () {
-  if (this.receiver == null)
+function CallSiteIsToplevel() {
+  if (this.receiver == null) {
     return true;
+  }
   return IS_GLOBAL(this.receiver);
 };
 
-CallSite.prototype.isEval = function () {
+function CallSiteIsEval() {
   var script = %FunctionGetScript(this.fun);
   return script && script.compilation_type == COMPILATION_TYPE_EVAL;
 };
 
-CallSite.prototype.getEvalOrigin = function () {
+function CallSiteGetEvalOrigin() {
   var script = %FunctionGetScript(this.fun);
   return FormatEvalOrigin(script);
 };
 
-CallSite.prototype.getScriptNameOrSourceURL = function () {
+function CallSiteGetScriptNameOrSourceURL() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.nameOrSourceURL() : null;
 };
 
-CallSite.prototype.getFunction = function () {
+function CallSiteGetFunction() {
   return this.fun;
 };
 
-CallSite.prototype.getFunctionName = function () {
+function CallSiteGetFunctionName() {
   // See if the function knows its own name
   var name = this.fun.name;
   if (name) {
@@ -749,12 +808,13 @@
   }
   // Maybe this is an evaluation?
   var script = %FunctionGetScript(this.fun);
-  if (script && script.compilation_type == COMPILATION_TYPE_EVAL)
+  if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
     return "eval";
+  }
   return null;
 };
 
-CallSite.prototype.getMethodName = function () {
+function CallSiteGetMethodName() {
   // See if we can find a unique property on the receiver that holds
   // this function.
   var ownName = this.fun.name;
@@ -772,24 +832,27 @@
         this.receiver.__lookupSetter__(prop) === this.fun ||
         (!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
       // If we find more than one match bail out to avoid confusion.
-      if (name)
+      if (name) {
         return null;
+      }
       name = prop;
     }
   }
-  if (name)
+  if (name) {
     return name;
+  }
   return null;
 };
 
-CallSite.prototype.getFileName = function () {
+function CallSiteGetFileName() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.name : null;
 };
 
-CallSite.prototype.getLineNumber = function () {
-  if (this.pos == -1)
+function CallSiteGetLineNumber() {
+  if (this.pos == -1) {
     return null;
+  }
   var script = %FunctionGetScript(this.fun);
   var location = null;
   if (script) {
@@ -798,9 +861,10 @@
   return location ? location.line + 1 : null;
 };
 
-CallSite.prototype.getColumnNumber = function () {
-  if (this.pos == -1)
+function CallSiteGetColumnNumber() {
+  if (this.pos == -1) {
     return null;
+  }
   var script = %FunctionGetScript(this.fun);
   var location = null;
   if (script) {
@@ -809,26 +873,47 @@
   return location ? location.column + 1: null;
 };
 
-CallSite.prototype.isNative = function () {
+function CallSiteIsNative() {
   var script = %FunctionGetScript(this.fun);
   return script ? (script.type == TYPE_NATIVE) : false;
 };
 
-CallSite.prototype.getPosition = function () {
+function CallSiteGetPosition() {
   return this.pos;
 };
 
-CallSite.prototype.isConstructor = function () {
+function CallSiteIsConstructor() {
   var constructor = this.receiver ? this.receiver.constructor : null;
-  if (!constructor)
+  if (!constructor) {
     return false;
+  }
   return this.fun === constructor;
 };
 
+SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
+  "getThis", CallSiteGetThis,
+  "getTypeName", CallSiteGetTypeName,
+  "isToplevel", CallSiteIsToplevel,
+  "isEval", CallSiteIsEval,
+  "getEvalOrigin", CallSiteGetEvalOrigin,
+  "getScriptNameOrSourceURL", CallSiteGetScriptNameOrSourceURL,
+  "getFunction", CallSiteGetFunction,
+  "getFunctionName", CallSiteGetFunctionName,
+  "getMethodName", CallSiteGetMethodName,
+  "getFileName", CallSiteGetFileName,
+  "getLineNumber", CallSiteGetLineNumber,
+  "getColumnNumber", CallSiteGetColumnNumber,
+  "isNative", CallSiteIsNative,
+  "getPosition", CallSiteGetPosition,
+  "isConstructor", CallSiteIsConstructor
+));
+
+
 function FormatEvalOrigin(script) {
   var sourceURL = script.nameOrSourceURL();
-  if (sourceURL)
+  if (sourceURL) {
     return sourceURL;
+  }
 
   var eval_origin = "eval at ";
   if (script.eval_from_function_name) {
@@ -965,66 +1050,13 @@
   }
 }
 
-function DefineError(f) {
-  // Store the error function in both the global object
-  // and the runtime object. The function is fetched
-  // from the runtime object when throwing errors from
-  // within the runtime system to avoid strange side
-  // effects when overwriting the error functions from
-  // user code.
-  var name = f.name;
-  %SetProperty(global, name, f, DONT_ENUM);
-  this['$' + name] = f;
-  // Configure the error function.
-  if (name == 'Error') {
-    // The prototype of the Error object must itself be an error.
-    // However, it can't be an instance of the Error object because
-    // it hasn't been properly configured yet.  Instead we create a
-    // special not-a-true-error-but-close-enough object.
-    function ErrorPrototype() {}
-    %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
-    %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
-    %FunctionSetPrototype(f, new ErrorPrototype());
-  } else {
-    %FunctionSetPrototype(f, new $Error());
-  }
-  %FunctionSetInstanceClassName(f, 'Error');
-  %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
-  // The name property on the prototype of error objects is not
-  // specified as being read-one and dont-delete. However, allowing
-  // overwriting allows leaks of error objects between script blocks
-  // in the same context in a browser setting. Therefore we fix the
-  // name.
-  %SetProperty(f.prototype, "name", name, READ_ONLY | DONT_DELETE);
-  %SetCode(f, function(m) {
-    if (%_IsConstructCall()) {
-      // Define all the expected properties directly on the error
-      // object. This avoids going through getters and setters defined
-      // on prototype objects.
-      %IgnoreAttributesAndSetProperty(this, 'stack', void 0);
-      %IgnoreAttributesAndSetProperty(this, 'arguments', void 0);
-      %IgnoreAttributesAndSetProperty(this, 'type', void 0);
-      if (m === kAddMessageAccessorsMarker) {
-        // DefineOneShotAccessor always inserts a message property and
-        // ignores setters.
-        DefineOneShotAccessor(this, 'message', function (obj) {
-            return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
-        });
-      } else if (!IS_UNDEFINED(m)) {
-        %IgnoreAttributesAndSetProperty(this, 'message', ToString(m));
-      }
-      captureStackTrace(this, f);
-    } else {
-      return new f(m);
-    }
-  });
-}
 
 function captureStackTrace(obj, cons_opt) {
   var stackTraceLimit = $Error.stackTraceLimit;
   if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
-  if (stackTraceLimit < 0 || stackTraceLimit > 10000)
+  if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
     stackTraceLimit = 10000;
+  }
   var raw_stack = %CollectStackTrace(cons_opt
                                      ? cons_opt
                                      : captureStackTrace, stackTraceLimit);
@@ -1033,55 +1065,123 @@
   });
 };
 
-$Math.__proto__ = global.Object.prototype;
 
-DefineError(function Error() { });
-DefineError(function TypeError() { });
-DefineError(function RangeError() { });
-DefineError(function SyntaxError() { });
-DefineError(function ReferenceError() { });
-DefineError(function EvalError() { });
-DefineError(function URIError() { });
+function SetUpError() {
+  // Define special error type constructors.
+
+  function DefineError(f) {
+    // Store the error function in both the global object
+    // and the runtime object. The function is fetched
+    // from the runtime object when throwing errors from
+    // within the runtime system to avoid strange side
+    // effects when overwriting the error functions from
+    // user code.
+    var name = f.name;
+    %SetProperty(global, name, f, DONT_ENUM);
+    %SetProperty(builtins, '$' + name, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
+    // Configure the error function.
+    if (name == 'Error') {
+      // The prototype of the Error object must itself be an error.
+      // However, it can't be an instance of the Error object because
+      // it hasn't been properly configured yet.  Instead we create a
+      // special not-a-true-error-but-close-enough object.
+      function ErrorPrototype() {}
+      %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
+      %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
+      %FunctionSetPrototype(f, new ErrorPrototype());
+    } else {
+      %FunctionSetPrototype(f, new $Error());
+    }
+    %FunctionSetInstanceClassName(f, 'Error');
+    %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
+    // The name property on the prototype of error objects is not
+    // specified as being read-only and dont-delete. However, allowing
+    // overwriting allows leaks of error objects between script blocks
+    // in the same context in a browser setting. Therefore we fix the
+    // name.
+    %SetProperty(f.prototype, "name", name,
+                 DONT_ENUM | DONT_DELETE | READ_ONLY);
+    %SetCode(f, function(m) {
+      if (%_IsConstructCall()) {
+        // Define all the expected properties directly on the error
+        // object. This avoids going through getters and setters defined
+        // on prototype objects.
+        %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
+        %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
+        %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
+        if (m === kAddMessageAccessorsMarker) {
+          // DefineOneShotAccessor always inserts a message property and
+          // ignores setters.
+          DefineOneShotAccessor(this, 'message', function (obj) {
+              return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
+          });
+        } else if (!IS_UNDEFINED(m)) {
+          %IgnoreAttributesAndSetProperty(this,
+                                          'message',
+                                          ToString(m),
+                                          DONT_ENUM);
+        }
+        captureStackTrace(this, f);
+      } else {
+        return new f(m);
+      }
+    });
+  }
+
+  DefineError(function Error() { });
+  DefineError(function TypeError() { });
+  DefineError(function RangeError() { });
+  DefineError(function SyntaxError() { });
+  DefineError(function ReferenceError() { });
+  DefineError(function EvalError() { });
+  DefineError(function URIError() { });
+}
+
+SetUpError();
 
 $Error.captureStackTrace = captureStackTrace;
 
-// Setup extra properties of the Error.prototype object.
-$Error.prototype.message = '';
+%SetProperty($Error.prototype, 'message', '', DONT_ENUM);
 
 // Global list of error objects visited during errorToString. This is
 // used to detect cycles in error toString formatting.
-var visited_errors = new $Array();
-var cyclic_error_marker = new $Object();
+const visited_errors = new InternalArray();
+const cyclic_error_marker = new $Object();
 
-function errorToStringDetectCycle() {
-  if (!%PushIfAbsent(visited_errors, this)) throw cyclic_error_marker;
+function errorToStringDetectCycle(error) {
+  if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
   try {
-    var type = this.type;
-    if (type && !%_CallFunction(this, "message", ObjectHasOwnProperty)) {
-      var formatted = FormatMessage(%NewMessageObject(type, this.arguments));
-      return this.name + ": " + formatted;
+    var type = error.type;
+    var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
+    if (type && !hasMessage) {
+      var formatted = FormatMessage(%NewMessageObject(type, error.arguments));
+      return error.name + ": " + formatted;
     }
-    var message = %_CallFunction(this, "message", ObjectHasOwnProperty)
-        ? (": " + this.message)
-        : "";
-    return this.name + message;
+    var message = hasMessage ? (": " + error.message) : "";
+    return error.name + message;
   } finally {
     visited_errors.length = visited_errors.length - 1;
   }
 }
 
 function errorToString() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Error.prototype.toString"]);
+  }
   // This helper function is needed because access to properties on
   // the builtins object do not work inside of a catch clause.
   function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
 
   try {
-    return %_CallFunction(this, errorToStringDetectCycle);
+    return errorToStringDetectCycle(this);
   } catch(e) {
     // If this error message was encountered already return the empty
     // string for it instead of recursively formatting it.
-    if (isCyclicErrorMarker(e)) return '';
-    else throw e;
+    if (isCyclicErrorMarker(e)) {
+      return '';
+    }
+    throw e;
   }
 }
 
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index f7453d1..c4c4fd2 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
@@ -45,7 +45,7 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Operand and MemOperand
+// Operand and MemOperand.
 
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
   rm_ = no_reg;
@@ -80,11 +80,23 @@
 
 
 // -----------------------------------------------------------------------------
-// RelocInfo
+// RelocInfo.
 
 void RelocInfo::apply(intptr_t delta) {
-  // On MIPS we do not use pc relative addressing, so we don't need to patch the
-  // code here.
+  if (IsCodeTarget(rmode_)) {
+    uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
+    uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;
+
+    if (scope1 != scope2) {
+      Assembler::JumpLabelToJumpRegister(pc_);
+    }
+  }
+  if (IsInternalReference(rmode_)) {
+    // Absolute code pointer inside code object moves with the code object.
+    byte* p = reinterpret_cast<byte*>(pc_);
+    int count = Assembler::RelocateInternalReference(p, delta);
+    CPU::FlushICache(p, count * sizeof(uint32_t));
+  }
 }
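
A note on the region check above: a MIPS J/JAL instruction encodes only a 26-bit word index (28 bits of byte address), and the top four address bits come from the PC of the delay slot, so a direct jump can only reach targets inside the same 256 MB region. A minimal standalone sketch of that test, with the mask restated locally (the port keeps it in kImm28Mask):

#include <cstdint>

// Restated locally for illustration; mirrors kImm28Mask in the MIPS port.
static const uint32_t kImm28MaskLocal = (1u << 28) - 1;

// A direct J/JAL from 'pc' can reach 'target' only if the bits above the
// 28-bit jump field agree, i.e. both addresses lie in the same 256 MB region.
static bool SameJumpRegion(uint32_t pc, uint32_t target) {
  return (pc & ~kImm28MaskLocal) == (target & ~kImm28MaskLocal);
}

When the regions differ, apply() hands the site to Assembler::JumpLabelToJumpRegister, which, as the name suggests, rewrites it into a register-based jump that is not range limited.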
 
 
@@ -95,24 +107,8 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
-                              || rmode_ == EMBEDDED_OBJECT
-                              || rmode_ == EXTERNAL_REFERENCE);
-  // Read the address of the word containing the target_address in an
-  // instruction stream.
-  // The only architecture-independent user of this function is the serializer.
-  // The serializer uses it to find out how many raw bytes of instruction to
-  // output before the next target.
-  // For an instructions like LUI/ORI where the target bits are mixed into the
-  // instruction bits, the size of the target will be zero, indicating that the
-  // serializer should not step forward in memory after a target is resolved
-  // and written.  In this case the target_address_address function should
-  // return the end of the instructions to be patched, allowing the
-  // deserializer to deserialize the instructions as raw bytes and put them in
-  // place, ready to be patched with the target. In our case, that is the
-  // address of the instruction that follows LUI/ORI instruction pair.
-  return reinterpret_cast<Address>(
-    pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
 }
 
 
@@ -144,12 +140,9 @@
   // Provide a "natural pointer" to the embedded object,
   // which can be de-referenced during heap iteration.
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  // TODO(mips): Commenting out, to simplify arch-independent changes.
-  // GC won't work like this, but this commit is for asm/disasm/sim.
-  // reconstructed_obj_ptr_ =
-  //   reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-  // return &reconstructed_obj_ptr_;
-  return NULL;
+  reconstructed_obj_ptr_ =
+      reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  return &reconstructed_obj_ptr_;
 }
 
 
@@ -161,11 +154,8 @@
 
 Address* RelocInfo::target_reference_address() {
   ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  // TODO(mips): Commenting out, to simplify arch-independent changes.
-  // GC won't work like this, but this commit is for asm/disasm/sim.
-  // reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
-  // return &reconstructed_adr_ptr_;
-  return NULL;
+  reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+  return &reconstructed_adr_ptr_;
 }
 
 
@@ -236,8 +226,9 @@
   Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
   bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
                          (instr1 & kOpcodeMask) == ORI &&
-                         (instr2 & kOpcodeMask) == SPECIAL &&
-                         (instr2 & kFunctionFieldMask) == JALR);
+                         ((instr2 & kOpcodeMask) == JAL ||
+                          ((instr2 & kOpcodeMask) == SPECIAL &&
+                           (instr2 & kFunctionFieldMask) == JALR)));
   return patched_return;
 }
 
@@ -251,26 +242,23 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    // RelocInfo is needed when pointer must be updated/serialized, such as
-    // UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
-    // It is ignored by visitors that do not need it.
-    // Commenting out, to simplify arch-independednt changes.
-    // GC won't work like this, but this commit is for asm/disasm/sim.
-    // visitor->VisitPointer(target_object_address(), this);
+    Object** p = target_object_address();
+    Object* orig = *p;
+    visitor->VisitPointer(p);
+    if (*p != orig) {
+      set_target_object(*p);
+    }
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    // RelocInfo is needed when external-references must be serialized by
-    // Serializer Visitor in serialize.cc. It is ignored by visitors that
-    // do not need it.
-    // Commenting out, to simplify arch-independednt changes.
-    // Serializer won't work like this, but this commit is for asm/disasm/sim.
-    // visitor->VisitExternalReference(target_reference_address(), this);
+    visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
                IsPatchedReturnSequence()) ||
-             (RelocInfo::IsDebugBreakSlot(mode) &&
+              (RelocInfo::IsDebugBreakSlot(mode) &&
                IsPatchedDebugBreakSlotSequence())) &&
              Isolate::Current()->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
@@ -287,7 +275,9 @@
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitPointer(heap, target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -296,7 +286,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
@@ -305,7 +295,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Assembler
+// Assembler.
 
 
 void Assembler::CheckBuffer() {
@@ -323,7 +313,9 @@
 
 
 void Assembler::emit(Instr x) {
-  CheckBuffer();
+  if (!is_buffer_growth_blocked()) {
+    CheckBuffer();
+  }
   *reinterpret_cast<Instr*>(pc_) = x;
   pc_ += kInstrSize;
   CheckTrampolinePoolQuick();
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 7d00da1..e01a0ca 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #include "v8.h"
@@ -43,13 +43,53 @@
 namespace v8 {
 namespace internal {
 
-CpuFeatures::CpuFeatures()
-    : supported_(0),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+
+// Get the CPU features enabled by the build. For cross compilation the
+// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
+// can be defined to enable FPU instructions when building the
+// snapshot.
+static uint64_t CpuFeaturesImpliedByCompiler() {
+  uint64_t answer = 0;
+#ifdef CAN_USE_FPU_INSTRUCTIONS
+  answer |= 1u << FPU;
+#endif  // def CAN_USE_FPU_INSTRUCTIONS
+
+#ifdef __mips__
+  // If the compiler is allowed to use FPU then we can use FPU too in our code
+  // generation even when generating snapshots.  This won't work for cross
+  // compilation.
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+  answer |= 1u << FPU;
+#endif  // defined(__mips_hard_float) && __mips_hard_float != 0
+#endif  // def __mips__
+
+  return answer;
 }
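
The feature state is just a bit mask: each CPU feature is a bit index, the build-time and runtime probes OR bits into supported_, and code generation later tests the bit before emitting the corresponding instructions (see the CpuFeatures::IsEnabled(FPU) asserts further down in this file). A minimal sketch of that pattern, with names local to the example:

#include <cstdint>

struct FeatureMaskSketch {
  uint64_t bits = 0;
  void Add(int feature) { bits |= uint64_t{1} << feature; }
  bool IsEnabled(int feature) const { return (bits >> feature) & 1; }
};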
 
-void CpuFeatures::Probe(bool portable) {
+
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
+
+  // Get the features implied by the OS and the compiler settings. This is the
+  // minimal set of features which is also allowed for generated code in the
+  // snapshot.
+  supported_ |= OS::CpuFeaturesImpliedByPlatform();
+  supported_ |= CpuFeaturesImpliedByCompiler();
+
+  if (Serializer::enabled()) {
+    // No probing for features if we might serialize (generate snapshot).
+    return;
+  }
+
   // If the compiler is allowed to use fpu then we can use fpu too in our
   // code generation.
 #if !defined(__mips__)
@@ -58,19 +98,13 @@
       supported_ |= 1u << FPU;
   }
 #else
-  if (portable && Serializer::enabled()) {
-    supported_ |= OS::CpuFeaturesImpliedByPlatform();
-    return;  // No features if we might serialize.
-  }
-
+  // Probe for additional features not already known to be available.
   if (OS::MipsCpuHasFeature(FPU)) {
     // This implementation also sets the FPU flags if
     // runtime detection of FPU returns true.
     supported_ |= 1u << FPU;
     found_by_runtime_probing_ |= 1u << FPU;
   }
-
-  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
 
@@ -138,7 +172,8 @@
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo.
 
-const int RelocInfo::kApplyMask = 0;
+const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
+                                  1 << RelocInfo::INTERNAL_REFERENCE;
 
 
 bool RelocInfo::IsCodedSpecially() {
@@ -235,12 +270,10 @@
 static const int kMinimalBufferSize = 4 * KB;
 
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       positions_recorder_(this),
-      allow_peephole_optimization_(false) {
-  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
-  allow_peephole_optimization_ = FLAG_peephole_optimization;
+      emit_debug_code_(FLAG_debug_code) {
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -275,14 +308,24 @@
   last_trampoline_pool_end_ = 0;
   no_trampoline_pool_before_ = 0;
   trampoline_pool_blocked_nesting_ = 0;
-  next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
+  // We leave space (16 * kTrampolineSlotsSize)
+  // for BlockTrampolinePoolScope buffer.
+  next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
+  internal_trampoline_exception_ = false;
+  last_bound_pos_ = 0;
+
+  trampoline_emitted_ = false;
+  unbound_labels_count_ = 0;
+  block_buffer_growth_ = false;
+
+  ClearRecordedAstId();
 }
 
 
 Assembler::~Assembler() {
   if (own_buffer_) {
     if (isolate()->assembler_spare_buffer() == NULL &&
-      buffer_size_ == kMinimalBufferSize) {
+        buffer_size_ == kMinimalBufferSize) {
       isolate()->set_assembler_spare_buffer(buffer_);
     } else {
       DeleteArray(buffer_);
@@ -316,13 +359,92 @@
 }
 
 
-Register Assembler::GetRt(Instr instr) {
+Register Assembler::GetRtReg(Instr instr) {
   Register rt;
-  rt.code_ = (instr & kRtMask) >> kRtShift;
+  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
   return rt;
 }
 
 
+Register Assembler::GetRsReg(Instr instr) {
+  Register rs;
+  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+  return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+  Register rd;
+  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+  return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+  return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+  return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+  return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+  return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+  return (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+  return instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+  return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+  return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+  return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetFunction(Instr instr) {
+  return (instr & kFunctionFieldMask) >> kFunctionShift;
+}
+
+
+uint32_t Assembler::GetFunctionField(Instr instr) {
+  return instr & kFunctionFieldMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+  return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+  return instr & ~kImm16Mask;
+}
+
+
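
For reference, the accessors above just mask a fixed field out of the 32-bit instruction word and shift it down to bit 0. The R-type layout they decode is opcode [31:26], rs [25:21], rt [20:16], rd [15:11], sa [10:6], funct [5:0]. A standalone sketch of one accessor, with the shift and mask restated locally (they mirror kRsShift and kRsFieldMask):

#include <cstdint>

static const int kRsShiftLocal = 21;                    // rs is bits 25..21
static const uint32_t kRsFieldMaskLocal = 0x1fu << kRsShiftLocal;

static uint32_t GetRsSketch(uint32_t instr) {
  return (instr & kRsFieldMaskLocal) >> kRsShiftLocal;
}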
 bool Assembler::IsPop(Instr instr) {
   return (instr & ~kRtMask) == kPopRegPattern;
 }
@@ -371,13 +493,15 @@
 // code is converted to an 18-bit value addressing bytes, hence the -4 value.
 
 const int kEndOfChain = -4;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
 
 
 bool Assembler::IsBranch(Instr instr) {
-  uint32_t opcode   = ((instr & kOpcodeMask));
-  uint32_t rt_field = ((instr & kRtFieldMask));
-  uint32_t rs_field = ((instr & kRsFieldMask));
-  uint32_t label_constant = (instr & ~kImm16Mask);
+  uint32_t opcode   = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rs_field = GetRsField(instr);
+  uint32_t label_constant = GetLabelConst(instr);
   // Checks if the instruction is a branch.
   return opcode == BEQ ||
       opcode == BNE ||
@@ -386,7 +510,7 @@
       opcode == BEQL ||
       opcode == BNEL ||
       opcode == BLEZL ||
-      opcode == BGTZL||
+      opcode == BGTZL ||
       (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                             rt_field == BLTZAL || rt_field == BGEZAL)) ||
       (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
@@ -394,13 +518,69 @@
 }
 
 
+bool Assembler::IsBeq(Instr instr) {
+  return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+  return GetOpcodeField(instr) == BNE;
+}
+
+
+bool Assembler::IsJump(Instr instr) {
+  uint32_t opcode   = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rd_field = GetRdField(instr);
+  uint32_t function_field = GetFunctionField(instr);
+  // Checks if the instruction is a jump.
+  return opcode == J || opcode == JAL ||
+      (opcode == SPECIAL && rt_field == 0 &&
+      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+}
+
+
+bool Assembler::IsJ(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  // Checks if the instruction is a jump.
+  return opcode == J;
+}
+
+
+bool Assembler::IsJal(Instr instr) {
+  return GetOpcodeField(instr) == JAL;
+}
+
+bool Assembler::IsJr(Instr instr) {
+  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+}
+
+bool Assembler::IsJalr(Instr instr) {
+  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+}
+
+
+bool Assembler::IsLui(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  // Checks if the instruction is a load upper immediate.
+  return opcode == LUI;
+}
+
+
+bool Assembler::IsOri(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  // Checks if the instruction is an or immediate (ORI).
+  return opcode == ORI;
+}
+
+
 bool Assembler::IsNop(Instr instr, unsigned int type) {
   // See Assembler::nop(type).
   ASSERT(type < 32);
-  uint32_t opcode = ((instr & kOpcodeMask));
-  uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
-  uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
-  uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+  uint32_t opcode = GetOpcodeField(instr);
+  uint32_t rt = GetRt(instr);
+  uint32_t rs = GetRs(instr);
+  uint32_t sa = GetSa(instr);
 
   // nop(type) == sll(zero_reg, zero_reg, type);
   // Technically all these values will be 0 but
@@ -465,6 +645,11 @@
 }
 
 
+bool Assembler::IsAndImmediate(Instr instr) {
+  return GetOpcodeField(instr) == ANDI;
+}
+
+
 int Assembler::target_at(int32_t pos) {
   Instr instr = instr_at(pos);
   if ((instr & ~kImm16Mask) == 0) {
@@ -476,17 +661,47 @@
        return (imm18 + pos);
      }
   }
-  // Check we have a branch instruction.
-  ASSERT(IsBranch(instr));
+  // Check we have a branch or jump instruction.
+  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
   // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
   // the compiler uses arithmetic shifts for signed integers.
-  int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+  if (IsBranch(instr)) {
+    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
 
-  if (imm18 == kEndOfChain) {
-    // EndOfChain sentinel is returned directly, not relative to pc or pos.
-    return kEndOfChain;
+    if (imm18 == kEndOfChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    } else {
+      return pos + kBranchPCOffset + imm18;
+    }
+  } else if (IsLui(instr)) {
+    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+    ASSERT(IsOri(instr_ori));
+    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+
+    if (imm == kEndOfJumpChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    } else {
+      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+      int32_t delta = instr_address - imm;
+      ASSERT(pos > delta);
+      return pos - delta;
+    }
   } else {
-    return pos + kBranchPCOffset + imm18;
+    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+    if (imm28 == kEndOfJumpChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    } else {
+      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+      instr_address &= kImm28Mask;
+      int32_t delta = instr_address - imm28;
+      ASSERT(pos > delta);
+      return pos - delta;
+    }
   }
 }
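
The ((instr & kImm16Mask) << 16) >> 14 pattern in the branch case above does two things at once: it sign-extends the 16-bit branch field and converts the word offset into a byte offset (a multiply by 4). It depends on right shifts of negative signed values being arithmetic, which is exactly what the "Do NOT change this to <<2" comment is guarding. A minimal illustration, with the mask restated locally:

#include <cstdint>

static const int32_t kImm16MaskLocal = 0xffff;  // mirrors kImm16Mask

// Return the signed byte offset encoded in a branch instruction's low 16 bits.
static int32_t BranchFieldToByteOffset(int32_t instr) {
  // The left shift parks the field in the top 16 bits; the arithmetic right
  // shift by 14 brings it back down sign-extended and scaled words -> bytes.
  return ((instr & kImm16MaskLocal) << 16) >> 14;
}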
 
@@ -501,15 +716,41 @@
     return;
   }
 
-  ASSERT(IsBranch(instr));
-  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
-  ASSERT((imm18 & 3) == 0);
+  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+  if (IsBranch(instr)) {
+    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+    ASSERT((imm18 & 3) == 0);
 
-  instr &= ~kImm16Mask;
-  int32_t imm16 = imm18 >> 2;
-  ASSERT(is_int16(imm16));
+    instr &= ~kImm16Mask;
+    int32_t imm16 = imm18 >> 2;
+    ASSERT(is_int16(imm16));
 
-  instr_at_put(pos, instr | (imm16 & kImm16Mask));
+    instr_at_put(pos, instr | (imm16 & kImm16Mask));
+  } else if (IsLui(instr)) {
+    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+    ASSERT(IsOri(instr_ori));
+    uint32_t imm = (uint32_t)buffer_ + target_pos;
+    ASSERT((imm & 3) == 0);
+
+    instr_lui &= ~kImm16Mask;
+    instr_ori &= ~kImm16Mask;
+
+    instr_at_put(pos + 0 * Assembler::kInstrSize,
+                 instr_lui | ((imm & kHiMask) >> kLuiShift));
+    instr_at_put(pos + 1 * Assembler::kInstrSize,
+                 instr_ori | (imm & kImm16Mask));
+  } else {
+    uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+    imm28 &= kImm28Mask;
+    ASSERT((imm28 & 3) == 0);
+
+    instr &= ~kImm26Mask;
+    uint32_t imm26 = imm28 >> 2;
+    ASSERT(is_uint26(imm26));
+
+    instr_at_put(pos, instr | (imm26 & kImm26Mask));
+  }
 }
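
Going the other way, target_at_put() re-encodes a resolved target as a signed 16-bit word count relative to the instruction after the branch (kBranchPCOffset). A sketch of just that encoding step, with the constants restated locally:

#include <cassert>
#include <cstdint>

static const int32_t kBranchPCOffsetLocal = 4;   // branches are delay-slot relative
static const uint32_t kImm16MaskLocal = 0xffff;

// Patch the low 16 bits of a branch at byte position 'pos' to reach 'target_pos'.
static uint32_t EncodeBranchSketch(uint32_t instr, int32_t pos, int32_t target_pos) {
  int32_t imm18 = target_pos - (pos + kBranchPCOffsetLocal);
  assert((imm18 & 3) == 0);                      // offsets must be word aligned
  int32_t imm16 = imm18 >> 2;                    // bytes -> words
  return (instr & ~kImm16MaskLocal) | (imm16 & kImm16MaskLocal);
}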
 
 
@@ -539,28 +780,33 @@
 
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
+  int32_t trampoline_pos = kInvalidSlotPos;
+  if (L->is_linked() && !trampoline_emitted_) {
+    unbound_labels_count_--;
+    next_buffer_check_ += kTrampolineSlotsSize;
+  }
+
   while (L->is_linked()) {
     int32_t fixup_pos = L->pos();
     int32_t dist = pos - fixup_pos;
     next(L);  // Call next before overwriting link with target at fixup_pos.
-    if (dist > kMaxBranchOffset) {
-      do {
-        int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
+    Instr instr = instr_at(fixup_pos);
+    if (IsBranch(instr)) {
+      if (dist > kMaxBranchOffset) {
+        if (trampoline_pos == kInvalidSlotPos) {
+          trampoline_pos = get_trampoline_entry(fixup_pos);
+          CHECK(trampoline_pos != kInvalidSlotPos);
+        }
         ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
         target_at_put(fixup_pos, trampoline_pos);
         fixup_pos = trampoline_pos;
         dist = pos - fixup_pos;
-      } while (dist > kMaxBranchOffset);
-    } else if (dist < -kMaxBranchOffset) {
-      do {
-        int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
-        ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
-        target_at_put(fixup_pos, trampoline_pos);
-        fixup_pos = trampoline_pos;
-        dist = pos - fixup_pos;
-      } while (dist < -kMaxBranchOffset);
-    };
-    target_at_put(fixup_pos, pos);
+      }
+      target_at_put(fixup_pos, pos);
+    } else {
+      ASSERT(IsJ(instr) || IsLui(instr));
+      target_at_put(fixup_pos, pos);
+    }
   }
   L->bind_to(pos);
 
@@ -571,27 +817,6 @@
 }
 
 
-void Assembler::link_to(Label* L, Label* appendix) {
-  if (appendix->is_linked()) {
-    if (L->is_linked()) {
-      // Append appendix to L's list.
-      int fixup_pos;
-      int link = L->pos();
-      do {
-        fixup_pos = link;
-        link = target_at(fixup_pos);
-      } while (link > 0);
-      ASSERT(link == kEndOfChain);
-      target_at_put(fixup_pos, appendix->pos());
-    } else {
-      // L is empty, simply use appendix.
-      *L = *appendix;
-    }
-  }
-  appendix->Unuse();  // Appendix should not be used anymore.
-}
-
-
 void Assembler::bind(Label* L) {
   ASSERT(!L->is_bound());  // Label can only be bound once.
   bind_to(L, pc_offset());
@@ -601,14 +826,20 @@
 void Assembler::next(Label* L) {
   ASSERT(L->is_linked());
   int link = target_at(L->pos());
-  ASSERT(link > 0 || link == kEndOfChain);
   if (link == kEndOfChain) {
     L->Unuse();
-  } else if (link > 0) {
+  } else {
+    ASSERT(link >= 0);
     L->link_to(link);
   }
 }
 
+bool Assembler::is_near(Label* L) {
+  if (L->is_bound()) {
+    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+  }
+  return false;
+}
 
 // We have to use a temporary register for things that can be relocated even
 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
@@ -652,7 +883,7 @@
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
       | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -666,7 +897,7 @@
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | fmt | (rt.code() << kRtShift)
       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -679,7 +910,7 @@
                                  FPUControlRegister fs,
                                  SecondaryField func) {
   ASSERT(fs.is_valid() && rt.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr =
       opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
   emit(instr);
@@ -714,16 +945,15 @@
                                   FPURegister ft,
                                   int32_t j) {
   ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
       | (j & kImm16Mask);
   emit(instr);
 }
 
 
-// Registers are in the order of the instruction encoding, from left to right.
 void Assembler::GenInstrJump(Opcode opcode,
-                              uint32_t address) {
+                             uint32_t address) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   ASSERT(is_uint26(address));
   Instr instr = opcode | address;
@@ -732,106 +962,60 @@
 }
 
 
-// Returns the next free label entry from the next trampoline pool.
-int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
-  int trampoline_count = trampolines_.length();
-  int32_t label_entry = 0;
-  ASSERT(trampoline_count > 0);
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+  int32_t trampoline_entry = kInvalidSlotPos;
 
-  if (next_pool) {
-    for (int i = 0; i < trampoline_count; i++) {
-      if (trampolines_[i].start() > pos) {
-       label_entry = trampolines_[i].take_label();
-       break;
-      }
+  if (!internal_trampoline_exception_) {
+    if (trampoline_.start() > pos) {
+      trampoline_entry = trampoline_.take_slot();
     }
-  } else {  //  Caller needs a label entry from the previous pool.
-    for (int i = trampoline_count-1; i >= 0; i--) {
-      if (trampolines_[i].end() < pos) {
-       label_entry = trampolines_[i].take_label();
-       break;
-      }
-    }
-  }
-  return label_entry;
-}
 
-
-// Returns the next free trampoline entry from the next trampoline pool.
-int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
-  int trampoline_count = trampolines_.length();
-  int32_t trampoline_entry = 0;
-  ASSERT(trampoline_count > 0);
-
-  if (next_pool) {
-    for (int i = 0; i < trampoline_count; i++) {
-      if (trampolines_[i].start() > pos) {
-       trampoline_entry = trampolines_[i].take_slot();
-       break;
-      }
-    }
-  } else {  // Caller needs a trampoline entry from the previous pool.
-    for (int i = trampoline_count-1; i >= 0; i--) {
-      if (trampolines_[i].end() < pos) {
-       trampoline_entry = trampolines_[i].take_slot();
-       break;
-      }
+    if (kInvalidSlotPos == trampoline_entry) {
+      internal_trampoline_exception_ = true;
     }
   }
   return trampoline_entry;
 }
 
 
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+uint32_t Assembler::jump_address(Label* L) {
   int32_t target_pos;
-  int32_t pc_offset_v = pc_offset();
 
   if (L->is_bound()) {
     target_pos = L->pos();
-    int32_t dist = pc_offset_v - target_pos;
-    if (dist > kMaxBranchOffset) {
-      do {
-        int32_t trampoline_pos = get_trampoline_entry(target_pos);
-        ASSERT((trampoline_pos - target_pos) > 0);
-        ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
-        target_at_put(trampoline_pos, target_pos);
-        target_pos = trampoline_pos;
-        dist = pc_offset_v - target_pos;
-      } while (dist > kMaxBranchOffset);
-    } else if (dist < -kMaxBranchOffset) {
-      do {
-        int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
-        ASSERT((target_pos - trampoline_pos) > 0);
-        ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
-        target_at_put(trampoline_pos, target_pos);
-        target_pos = trampoline_pos;
-        dist = pc_offset_v - target_pos;
-      } while (dist < -kMaxBranchOffset);
-    }
   } else {
     if (L->is_linked()) {
       target_pos = L->pos();  // L's link.
-      int32_t dist = pc_offset_v - target_pos;
-      if (dist > kMaxBranchOffset) {
-        do {
-          int32_t label_pos = get_label_entry(target_pos);
-          ASSERT((label_pos - target_pos) < kMaxBranchOffset);
-          label_at_put(L, label_pos);
-          target_pos = label_pos;
-          dist = pc_offset_v - target_pos;
-        } while (dist > kMaxBranchOffset);
-      } else if (dist < -kMaxBranchOffset) {
-        do {
-          int32_t label_pos = get_label_entry(target_pos, false);
-          ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
-          label_at_put(L, label_pos);
-          target_pos = label_pos;
-          dist = pc_offset_v - target_pos;
-        } while (dist < -kMaxBranchOffset);
-      }
       L->link_to(pc_offset());
     } else {
       L->link_to(pc_offset());
+      return kEndOfJumpChain;
+    }
+  }
+
+  uint32_t imm = (uint32_t)buffer_ + target_pos;
+  ASSERT((imm & 3) == 0);
+
+  return imm;
+}
+
+
+int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+  int32_t target_pos;
+
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
       return kEndOfChain;
     }
   }
@@ -860,6 +1044,10 @@
     } else {
       target_pos = kEndOfChain;
       instr_at_put(at_offset, 0);
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
     }
     L->link_to(at_offset);
   }
@@ -938,7 +1126,12 @@
 
 
 void Assembler::j(int32_t target) {
-  ASSERT(is_uint28(target) && ((target & 3) == 0));
+#ifdef DEBUG
+  // Get pc of delay slot.
+  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+  ASSERT(in_range && ((target & 3) == 0));
+#endif
   GenInstrJump(J, target >> 2);
 }
 
@@ -954,8 +1147,13 @@
 
 
 void Assembler::jal(int32_t target) {
+#ifdef DEBUG
+  // Get pc of delay slot.
+  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+  ASSERT(in_range && ((target & 3) == 0));
+#endif
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(is_uint28(target) && ((target & 3) == 0));
   GenInstrJump(JAL, target >> 2);
 }
 
@@ -968,6 +1166,32 @@
 }
 
 
+void Assembler::j_or_jr(int32_t target, Register rs) {
+  // Get pc of delay slot.
+  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+
+  if (in_range) {
+      j(target);
+  } else {
+      jr(t9);
+  }
+}
+
+
+void Assembler::jal_or_jalr(int32_t target, Register rs) {
+  // Get pc of delay slot.
+  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+
+  if (in_range) {
+      jal(target);
+  } else {
+      jalr(t9);
+  }
+}
+
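The in_range test used by j(), jal(), j_or_jr() and jal_or_jalr() above relies on MIPS J-type instructions only reaching targets in the same 256 MB region as the delay-slot pc: shifting the XOR of the two addresses right by kImm26Bits + kImmFieldShift (26 + 2 = 28) leaves only the top four address bits, which select that region. A minimal standalone sketch of the same predicate (the helper name SameJumpRegion is the editor's, not part of this patch):

#include <cassert>
#include <cstdint>

static bool SameJumpRegion(uint32_t delay_slot_pc, uint32_t target) {
  const int kImm26Bits = 26;     // J-type immediate width.
  const int kImmFieldShift = 2;  // Instructions are word-aligned.
  // Identical top 4 bits => same 256 MB region => a direct j/jal is legal.
  return ((delay_slot_pc ^ target) >> (kImm26Bits + kImmFieldShift)) == 0;
}

int main() {
  assert(SameJumpRegion(0x10001000u, 0x1ffffffcu));   // Same 256 MB region.
  assert(!SameJumpRegion(0x10001000u, 0x20000000u));  // Crosses a boundary.
  return 0;
}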
+
 //-------Data-processing-instructions---------
 
 // Arithmetic.
@@ -979,157 +1203,6 @@
 
 void Assembler::addiu(Register rd, Register rs, int32_t j) {
   GenInstrImmediate(ADDIU, rs, rd, j);
-
-  // Eliminate pattern: push(r), pop().
-  //   addiu(sp, sp, Operand(-kPointerSize));
-  //   sw(src, MemOperand(sp, 0);
-  //   addiu(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  if (can_peephole_optimize(3) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
-      (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
-    pc_ -= 3 * kInstrSize;
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
-
-  // Eliminate pattern: push(ry), pop(rx).
-  //   addiu(sp, sp, -kPointerSize)
-  //   sw(ry, MemOperand(sp, 0)
-  //   lw(rx, MemOperand(sp, 0)
-  //   addiu(sp, sp, kPointerSize);
-  // Both instructions can be eliminated if ry = rx.
-  // If ry != rx, a register copy from ry to rx is inserted
-  // after eliminating the push and the pop instructions.
-  if (can_peephole_optimize(4)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
-    Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(push_instr) &&
-        IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
-        // For consecutive push and pop on different registers,
-        // we delete both the push & pop and insert a register move.
-        // push ry, pop rx --> mov rx, ry.
-        Register reg_pushed, reg_popped;
-        reg_pushed = GetRt(push_instr);
-        reg_popped = GetRt(pop_instr);
-        pc_ -= 4 * kInstrSize;
-        // Insert a mov instruction, which is better than a pair of push & pop.
-        or_(reg_popped, reg_pushed, zero_reg);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
-                 pc_offset());
-        }
-      } else {
-        // For consecutive push and pop on the same register,
-        // both the push and the pop can be deleted.
-        pc_ -= 4 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(5)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
-    Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(mem_write_instr) &&
-        pre_push_sp_set == kPushInstruction &&
-        IsPop(mem_read_instr) &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((IsLwRegFpOffset(lw_instr) ||
-        IsLwRegFpNegOffset(lw_instr))) {
-        if ((mem_write_instr & kRtMask) ==
-              (mem_read_instr & kRtMask)) {
-          // Pattern: push & pop from/to same register,
-          // with a fp+offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp, -4
-          // sw rx, [sp, #0]!
-          // lw rz, [fp, #-24]
-          // lw rx, [sp, 0],
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(rx == rz)
-          //   delete all
-          // else
-          //   lw rz, [fp, #-24]
-
-          if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
-            pc_ -= 5 * kInstrSize;
-          } else {
-            pc_ -= 5 * kInstrSize;
-            // Reinsert back the lw rz.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
-          }
-        } else {
-          // Pattern: push & pop from/to different registers
-          // with a fp + offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp ,-4
-          // sw rx, [sp, 0]
-          // lw rz, [fp, #-24]
-          // lw ry, [sp, 0]
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(ry == rz)
-          //   mov ry, rx;
-          // else if(rx != rz)
-          //   lw rz, [fp, #-24]
-          //   mov ry, rx
-          // else if((ry != rz) || (rx == rz)) becomes:
-          //   mov ry, rx
-          //   lw rz, [fp, #-24]
-
-          Register reg_pushed, reg_popped;
-          if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if ((mem_write_instr & kRtMask)
-                                != (lw_instr & kRtMask)) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            emit(lw_instr);
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if (((mem_read_instr & kRtMask)
-                                     != (lw_instr & kRtMask)) ||
-                    ((mem_write_instr & kRtMask)
-                                     == (lw_instr & kRtMask)) ) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
-          }
-        }
-      }
-    }
-  }
 }
 
 
@@ -1317,54 +1390,6 @@
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
   }
-
-  if (can_peephole_optimize(2)) {
-    Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if ((IsSwRegFpOffset(sw_instr) &&
-         IsLwRegFpOffset(lw_instr)) ||
-       (IsSwRegFpNegOffset(sw_instr) &&
-         IsLwRegFpNegOffset(lw_instr))) {
-      if ((lw_instr & kLwSwInstrArgumentMask) ==
-            (sw_instr & kLwSwInstrArgumentMask)) {
-        // Pattern: Lw/sw same fp+offset, same register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw rx, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-
-        pc_ -= 1 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
-        }
-      } else if ((lw_instr & kLwSwOffsetMask) ==
-                 (sw_instr & kLwSwOffsetMask)) {
-        // Pattern: Lw/sw same fp+offset, different register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw ry, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-        // mov ry, rx
-
-        Register reg_stored, reg_loaded;
-        reg_stored = GetRt(sw_instr);
-        reg_loaded = GetRt(lw_instr);
-        pc_ -= 1 * kInstrSize;
-        // Insert a mov instruction, which is better than lw.
-        or_(reg_loaded, reg_stored, zero_reg);  // Move instruction.
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset());
-        }
-      }
-    }
-  }
 }
 
 
@@ -1405,23 +1430,6 @@
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
   }
-
-  // Eliminate pattern: pop(), push(r).
-  //     addiu sp, sp, Operand(kPointerSize);
-  //     addiu sp, sp, Operand(-kPointerSize);
-  // ->  sw r, MemOpernad(sp, 0);
-  if (can_peephole_optimize(3) &&
-     // Pattern.
-     instr_at(pc_ - 1 * kInstrSize) ==
-       (kPushRegPattern | (rd.code() << kRtShift)) &&
-     instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
-     instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
-    pc_ -= 3 * kInstrSize;
-    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
 }
 
 
@@ -1443,13 +1451,37 @@
 //-------------Misc-instructions--------------
 
 // Break / Trap instructions.
-void Assembler::break_(uint32_t code) {
+void Assembler::break_(uint32_t code, bool break_as_stop) {
   ASSERT((code & ~0xfffff) == 0);
+  // We need to invalidate breaks that could be stops as well because the
+  // simulator expects a char pointer after the stop instruction.
+  // See constants-mips.h for explanation.
+  ASSERT((break_as_stop &&
+          code <= kMaxStopCode &&
+          code > kMaxWatchpointCode) ||
+         (!break_as_stop &&
+          (code > kMaxStopCode ||
+           code <= kMaxWatchpointCode)));
   Instr break_instr = SPECIAL | BREAK | (code << 6);
   emit(break_instr);
 }
 
 
+void Assembler::stop(const char* msg, uint32_t code) {
+  ASSERT(code > kMaxWatchpointCode);
+  ASSERT(code <= kMaxStopCode);
+#if defined(V8_HOST_ARCH_MIPS)
+  break_(0x54321);
+#else  // V8_HOST_ARCH_MIPS
+  BlockTrampolinePoolFor(2);
+  // The Simulator will handle the stop instruction and get the message address.
+  // On MIPS stop() is just a special kind of break_().
+  break_(code, true);
+  emit(reinterpret_cast<Instr>(msg));
+#endif
+}
+
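The ASSERTs in break_() and stop() above partition the break code space: codes up to kMaxWatchpointCode are watchpoints, codes in (kMaxWatchpointCode, kMaxStopCode] are simulator stops (which must be followed by the message pointer word, hence BlockTrampolinePoolFor(2)), and larger codes are plain breaks. A standalone sketch of that predicate, using placeholder constant values (the real ones live in constants-mips.h):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kMaxWatchpointCode = 31;  // Placeholder value.
  const uint32_t kMaxStopCode = 127;       // Placeholder value.

  // A code is a "stop" only if it lies strictly above the watchpoint range
  // and does not exceed kMaxStopCode; break_() asserts exactly this split.
  auto is_stop_code = [&](uint32_t code) {
    return code > kMaxWatchpointCode && code <= kMaxStopCode;
  };

  assert(!is_stop_code(7u));        // Watchpoint range.
  assert(is_stop_code(64u));        // Stop range: emitted as break_(code, true).
  assert(!is_stop_code(0x54321u));  // Ordinary break code.
  return 0;
}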
+
 void Assembler::tge(Register rs, Register rt, uint16_t code) {
   ASSERT(is_uint10(code));
   Instr instr = SPECIAL | TGE | rs.code() << kRsShift
@@ -1545,14 +1577,14 @@
 
 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
   Register rt;
-  rt.code_ = (cc & 0x0003) << 2 | 1;
+  rt.code_ = (cc & 0x0007) << 2 | 1;
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
 }
 
 
 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
   Register rt;
-  rt.code_ = (cc & 0x0003) << 2 | 0;
+  rt.code_ = (cc & 0x0007) << 2 | 0;
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
 }
 
@@ -1632,6 +1664,13 @@
   GenInstrRegister(COP1, CFC1, rt, fs);
 }
 
+void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
+  uint64_t i;
+  memcpy(&i, &d, 8);
+
+  *lo = i & 0xffffffff;
+  *hi = i >> 32;
+}
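DoubleAsTwoUInt32() above simply reinterprets the IEEE-754 bit pattern of a double as two 32-bit halves. A standalone copy of the same routine shows the result for 1.0 (high word 0x3ff00000, low word 0):

#include <cstdint>
#include <cstring>
#include <cstdio>

static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);     // Grab the raw IEEE-754 bit pattern.
  *lo = i & 0xffffffff;  // Low 32 mantissa bits.
  *hi = i >> 32;         // Sign, exponent and high mantissa bits.
}

int main() {
  uint32_t lo, hi;
  DoubleAsTwoUInt32(1.0, &lo, &hi);
  printf("hi=%08x lo=%08x\n", hi, lo);  // Prints: hi=3ff00000 lo=00000000
  return 0;
}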
 
 // Arithmetic.
 
@@ -1816,7 +1855,7 @@
 // Conditions.
 void Assembler::c(FPUCondition cond, SecondaryField fmt,
     FPURegister fs, FPURegister ft, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   ASSERT((fmt & ~(31 << kRsShift)) == 0);
   Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1827,7 +1866,7 @@
 
 void Assembler::fcmp(FPURegister src1, const double src2,
       FPUCondition cond) {
-  ASSERT(isolate()->cpu_features()->IsSupported(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(src2 == 0.0);
   mtc1(zero_reg, f14);
   cvt_d_w(f14, f14);
@@ -1836,7 +1875,7 @@
 
 
 void Assembler::bc1f(int16_t offset, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1844,7 +1883,7 @@
 
 
 void Assembler::bc1t(int16_t offset, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1874,6 +1913,48 @@
 }
 
 
+int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+  Instr instr = instr_at(pc);
+  ASSERT(IsJ(instr) || IsLui(instr));
+  if (IsLui(instr)) {
+    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+    ASSERT(IsOri(instr_ori));
+    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+    if (imm == kEndOfJumpChain) {
+      return 0;  // Number of instructions patched.
+    }
+    imm += pc_delta;
+    ASSERT((imm & 3) == 0);
+
+    instr_lui &= ~kImm16Mask;
+    instr_ori &= ~kImm16Mask;
+
+    instr_at_put(pc + 0 * Assembler::kInstrSize,
+                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+    instr_at_put(pc + 1 * Assembler::kInstrSize,
+                 instr_ori | (imm & kImm16Mask));
+    return 2;  // Number of instructions patched.
+  } else {
+    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+    if ((int32_t)imm28 == kEndOfJumpChain) {
+      return 0;  // Number of instructions patched.
+    }
+    imm28 += pc_delta;
+    imm28 &= kImm28Mask;
+    ASSERT((imm28 & 3) == 0);
+
+    instr &= ~kImm26Mask;
+    uint32_t imm26 = imm28 >> 2;
+    ASSERT(is_uint26(imm26));
+
+    instr_at_put(pc, instr | (imm26 & kImm26Mask));
+    return 1;  // Number of instructions patched.
+  }
+}
+
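RelocateInternalReference() above rebases absolute addresses after the buffer moves: for a lui/ori pair it reassembles the 32-bit immediate, adds pc_delta, and splits it back into 16-bit halves (the j case does the same on a 26-bit field). The arithmetic in isolation, as a standalone sketch:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kImm16Mask = 0xffff;
  const int kLuiShift = 16;

  // Immediates currently encoded in the lui/ori pair: address 0x12345678.
  uint32_t lui_imm = 0x1234;
  uint32_t ori_imm = 0x5678;

  int32_t imm = (lui_imm << kLuiShift) | ori_imm;  // Reassemble the address.
  intptr_t pc_delta = 0x100;                       // The buffer moved by 0x100.
  imm += pc_delta;                                 // Rebase it.

  lui_imm = (imm >> kLuiShift) & kImm16Mask;       // New upper half.
  ori_imm = imm & kImm16Mask;                      // New lower half.
  assert(lui_imm == 0x1234 && ori_imm == 0x5778);  // 0x12345678 + 0x100.
  return 0;
}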
+
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
 
@@ -1909,9 +1990,14 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
-  // shift by pc_delta. But on MIPS the target address it directly loaded, so
-  // we do not need to relocate here.
+  // Relocate runtime entries.
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
+      RelocateInternalReference(p, pc_delta);
+    }
+  }
 
   ASSERT(!overflow());
 }
@@ -1943,13 +2029,24 @@
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
     // Don't record external references unless the heap will be serialized.
-    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
-        !Serializer::enabled() &&
-        !FLAG_debug_code) {
-      return;
+    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+      if (!Serializer::enabled()) {
+        Serializer::TooLateToEnableNow();
+      }
+#endif
+      if (!Serializer::enabled() && !emit_debug_code()) {
+        return;
+      }
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
-    reloc_info_writer.Write(&rinfo);
+    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+      ClearRecordedAstId();
+      reloc_info_writer.Write(&reloc_info_with_ast_id);
+    } else {
+      reloc_info_writer.Write(&rinfo);
+    }
   }
 }
 
@@ -1959,16 +2056,7 @@
 }
 
 
-void Assembler::CheckTrampolinePool(bool force_emit) {
-  // Calculate the offset of the next check.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
-  int dist = pc_offset() - last_trampoline_pool_end_;
-
-  if (dist <= kMaxDistBetweenPools && !force_emit) {
-    return;
-  }
-
+void Assembler::CheckTrampolinePool() {
   // Some small sequences of instructions must not be broken up by the
   // insertion of a trampoline pool; such sequences are protected by setting
   // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
@@ -1986,29 +2074,43 @@
     return;
   }
 
-  // First we emit jump (2 instructions), then we emit trampoline pool.
-  { BlockTrampolinePoolScope block_trampoline_pool(this);
-    Label after_pool;
-    b(&after_pool);
-    nop();
-
-    int pool_start = pc_offset();
-    for (int i = 0; i < kSlotsPerTrampoline; i++) {
+  ASSERT(!trampoline_emitted_);
+  ASSERT(unbound_labels_count_ >= 0);
+  if (unbound_labels_count_ > 0) {
+    // First we emit jump (2 instructions), then we emit trampoline pool.
+    { BlockTrampolinePoolScope block_trampoline_pool(this);
+      Label after_pool;
       b(&after_pool);
       nop();
-    }
-    for (int i = 0; i < kLabelsPerTrampoline; i++) {
-      emit(0);
-    }
-    last_trampoline_pool_end_ = pc_offset() - kInstrSize;
-    bind(&after_pool);
-    trampolines_.Add(Trampoline(pool_start,
-                                kSlotsPerTrampoline,
-                                kLabelsPerTrampoline));
 
-    // Since a trampoline pool was just emitted,
-    // move the check offset forward by the standard interval.
-    next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
+      int pool_start = pc_offset();
+      for (int i = 0; i < unbound_labels_count_; i++) {
+        uint32_t imm32;
+        imm32 = jump_address(&after_pool);
+        { BlockGrowBufferScope block_buf_growth(this);
+          // Buffer growth (and relocation) must be blocked for internal
+          // references until associated instructions are emitted and available
+          // to be patched.
+          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+          lui(at, (imm32 & kHiMask) >> kLuiShift);
+          ori(at, at, (imm32 & kImm16Mask));
+        }
+        jr(at);
+        nop();
+      }
+      bind(&after_pool);
+      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+      trampoline_emitted_ = true;
+      // As we are only going to emit the trampoline once, we need to prevent
+      // any further emission.
+      next_buffer_check_ = kMaxInt;
+    }
+  } else {
+    // The number of branches to unbound labels at this point is zero, so we
+    // can push the next buffer check out as far as possible.
+    next_buffer_check_ = pc_offset() +
+        kMaxBranchOffset - kTrampolineSlotsSize * 16;
   }
   return;
 }
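The bookkeeping in branch_offset() and CheckTrampolinePool() above can be read as a simple budget: each branch to a still-unbound label reserves one trampoline slot of kTrampolineSlotsSize = 4 instructions (lui/ori/jr/nop), and next_buffer_check_ is pulled forward by that amount so the pool is emitted before any 18-bit branch offset could overflow. A standalone sketch of the arithmetic, using the constants introduced in this patch:

#include <cstdio>

int main() {
  const int kInstrSize = 4;
  const int kTrampolineSlotsSize = 4 * kInstrSize;   // lui, ori, jr, nop.
  const int kMaxBranchOffset = (1 << (18 - 1)) - 1;  // Signed 18-bit offset.

  int pc_offset = 0;
  int unbound_labels = 0;
  // Initial check position when there are no unbound labels yet.
  int next_buffer_check =
      pc_offset + kMaxBranchOffset - kTrampolineSlotsSize * 16;

  for (int i = 0; i < 3; ++i) {                 // Three branches to unbound labels.
    ++unbound_labels;
    next_buffer_check -= kTrampolineSlotsSize;  // Reserve one slot per branch.
  }

  printf("slots reserved: %d, pool check at pc offset %d\n",
         unbound_labels, next_buffer_check);
  return 0;
}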
@@ -2017,76 +2119,155 @@
 Address Assembler::target_address_at(Address pc) {
   Instr instr1 = instr_at(pc);
   Instr instr2 = instr_at(pc + kInstrSize);
-  // Check we have 2 instructions generated by li.
-  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
-         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
-                            (instr2 & kOpcodeMask) == ORI ||
-                            (instr2 & kOpcodeMask) == LUI)));
-  // Interpret these 2 instructions.
-  if (instr1 == nopInstr) {
-    if ((instr2 & kOpcodeMask) == ADDI) {
-      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
-    } else if ((instr2 & kOpcodeMask) == ORI) {
-      return reinterpret_cast<Address>(instr2 & kImm16Mask);
-    } else if ((instr2 & kOpcodeMask) == LUI) {
-      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
-    }
-  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
-    // 32 bit value.
+  // Interpret 2 instructions generated by li: lui/ori
+  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+    // Assemble the 32 bit value.
     return reinterpret_cast<Address>(
-        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
   }
 
-  // We should never get here.
+  // We should never get here, force a bad address if we do.
   UNREACHABLE();
   return (Address)0x0;
 }
 
 
+// On MIPS, a target address is stored in a lui/ori instruction pair, each of
+// which loads 16 bits of the 32-bit address into a register.
+// Patching the address must replace both instructions and flush the i-cache.
+//
+// There is an optimization below, which emits a nop when the address
+// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
+// and possibly removed.
 void Assembler::set_target_address_at(Address pc, Address target) {
-  // On MIPS we need to patch the code to generate.
-
-  // First check we have a li.
   Instr instr2 = instr_at(pc + kInstrSize);
-#ifdef DEBUG
-  Instr instr1 = instr_at(pc);
-
-  // Check we have indeed the result from a li with MustUseReg true.
-  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
-        ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
-                           (instr2 & kOpcodeMask)== ORI ||
-                           (instr2 & kOpcodeMask)== LUI)));
-#endif
-
-  uint32_t rt_code = (instr2 & kRtFieldMask);
+  uint32_t rt_code = GetRtField(instr2);
   uint32_t* p = reinterpret_cast<uint32_t*>(pc);
   uint32_t itarget = reinterpret_cast<uint32_t>(target);
 
-  if (is_int16(itarget)) {
-    // nop.
-    // addiu rt zero_reg j.
-    *p = nopInstr;
-    *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
-  } else if (!(itarget & kHiMask)) {
-    // nop.
-    // ori rt zero_reg j.
-    *p = nopInstr;
-    *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
-  } else if (!(itarget & kImm16Mask)) {
-    // nop.
-    // lui rt (kHiMask & itarget) >> kLuiShift.
-    *p = nopInstr;
-    *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-  } else {
-    // lui rt (kHiMask & itarget) >> kLuiShift.
-    // ori rt rt, (kImm16Mask & itarget).
-    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+#ifdef DEBUG
+  // Check we have the result from a li macro-instruction, using instr pair.
+  Instr instr1 = instr_at(pc);
+  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
+#endif
+
+  // Must use 2 instructions to ensure patchable code => just use lui and ori.
+  // lui rt, upper-16.
+  // ori rt rt, lower-16.
+  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+
+  // The following code is an optimization for the common case of Call()
+  // or Jump(), which is a load to a register followed by a jump through that
+  // register:
+  //     li(t9, address); jalr(t9)    (or jr(t9)).
+  // If the destination address is in the same 256 MB page as the call, it
+  // is faster to do a direct jal, or j, rather than jump through a register,
+  // since that lets the cpu pipeline prefetch the target address. However,
+  // each time the address above is patched, we have to patch the direct jal/j
+  // instruction, as well as possibly revert to jalr/jr if we now cross a
+  // 256 MB page. Note that with the jal/j instructions, we do not need to
+  // load the register, but that code is left, since it makes it easy to
+  // revert this process. A further optimization could try replacing the
+  // li sequence with nops.
+  // This optimization can only be applied if the rt-code from instr2 is the
+  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
+  // the MIPS return. Occasionally this lands after an li().
+
+  Instr instr3 = instr_at(pc + 2 * kInstrSize);
+  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
+  bool in_range =
+             ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
+  uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
+  bool patched_jump = false;
+
+#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
+  // This is a workaround for the 24k core E156 bug (affects some 34k cores too).
+  // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
+  // apply this workaround for all cores so we don't have to identify the core.
+  if (in_range) {
+    // The 24k core E156 bug has some very specific requirements, we only check
+    // the most simple one: if the address of the delay slot instruction is in
+    // the first or last 32 KB of the 256 MB segment.
+    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
+    uint32_t ipc_segment_addr = ipc & segment_mask;
+    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
+      in_range = false;
+  }
+#endif
+
+  if (IsJalr(instr3)) {
+    // Try to convert JALR to JAL.
+    if (in_range && GetRt(instr2) == GetRs(instr3)) {
+      *(p+2) = JAL | target_field;
+      patched_jump = true;
+    }
+  } else if (IsJr(instr3)) {
+    // Try to convert JR to J, skip returns (jr ra).
+    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
+    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
+      *(p+2) = J | target_field;
+      patched_jump = true;
+    }
+  } else if (IsJal(instr3)) {
+    if (in_range) {
+      // We are patching an already converted JAL.
+      *(p+2) = JAL | target_field;
+    } else {
+      // Patch JAL, but out of range, revert to JALR.
+      // JALR rs reg is the rt reg specified in the ORI instruction.
+      uint32_t rs_field = GetRt(instr2) << kRsShift;
+      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
+      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+    }
+    patched_jump = true;
+  } else if (IsJ(instr3)) {
+    if (in_range) {
+      // We are patching an already converted J (jump).
+      *(p+2) = J | target_field;
+    } else {
+      // Trying to patch J, but out of range, so just go back to JR.
+      // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
+      uint32_t rs_field = GetRt(instr2) << kRsShift;
+      *(p+2) = SPECIAL | rs_field | JR;
+    }
+    patched_jump = true;
   }
 
-  CPU::FlushICache(pc, 2 * sizeof(int32_t));
+  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
 }
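The E156 workaround in set_target_address_at() above declines the direct jal/j rewrite when the delay-slot address falls in the first or last 32 KB of its 256 MB segment. The same test in isolation, as a standalone sketch (the helper name is the editor's):

#include <cassert>
#include <cstdint>

static bool InRiskyBoundaryRegion(uint32_t delay_slot_pc) {
  const uint32_t KB = 1024;
  const uint32_t MB = 1024 * KB;
  // Bits that select the offset within a 256 MB segment, rounded to 32 KB.
  uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
  uint32_t segment_addr = delay_slot_pc & segment_mask;
  return segment_addr == 0 || segment_addr == segment_mask;
}

int main() {
  assert(InRiskyBoundaryRegion(0x10000100u));   // First 32 KB of a segment.
  assert(InRiskyBoundaryRegion(0x1fffa000u));   // Last 32 KB of a segment.
  assert(!InRiskyBoundaryRegion(0x10010000u));  // Safely inside the segment.
  return 0;
}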
 
+void Assembler::JumpLabelToJumpRegister(Address pc) {
+  // Address pc points to lui/ori instructions.
+  // Jump to label may follow at pc + 2 * kInstrSize.
+  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+#ifdef DEBUG
+  Instr instr1 = instr_at(pc);
+#endif
+  Instr instr2 = instr_at(pc + 1 * kInstrSize);
+  Instr instr3 = instr_at(pc + 2 * kInstrSize);
+  bool patched = false;
+
+  if (IsJal(instr3)) {
+    ASSERT(GetOpcodeField(instr1) == LUI);
+    ASSERT(GetOpcodeField(instr2) == ORI);
+
+    uint32_t rs_field = GetRt(instr2) << kRsShift;
+    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
+    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+    patched = true;
+  } else if (IsJ(instr3)) {
+    ASSERT(GetOpcodeField(instr1) == LUI);
+    ASSERT(GetOpcodeField(instr2) == ORI);
+
+    uint32_t rs_field = GetRt(instr2) << kRsShift;
+    *(p+2) = SPECIAL | rs_field | JR;
+    patched = true;
+  }
+
+  if (patched) {
+      CPU::FlushICache(pc+2, sizeof(Address));
+  }
+}
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 5a6e271..38e9537 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -67,12 +67,13 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister
+// Implementation of Register and FPURegister.
 
 // Core register.
 struct Register {
   static const int kNumRegisters = v8::internal::kNumRegisters;
-  static const int kNumAllocatableRegisters = 14;  // v0 through t7
+  static const int kNumAllocatableRegisters = 14;  // v0 through t7.
+  static const int kSizeInBytes = 4;
 
   static int ToAllocationIndex(Register reg) {
     return reg.code() - 2;  // zero_reg and 'at' are skipped.
@@ -126,38 +127,38 @@
 
 const Register no_reg = { -1 };
 
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
+const Register zero_reg = { 0 };  // Always zero.
+const Register at = { 1 };   // at: Reserved for synthetic instructions.
+const Register v0 = { 2 };   // v0, v1: Used when returning multiple values
+const Register v1 = { 3 };   //   from subroutines.
+const Register a0 = { 4 };   // a0 - a3: Used to pass non-FP parameters.
 const Register a1 = { 5 };
 const Register a2 = { 6 };
 const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
+const Register t0 = { 8 };   // t0 - t9: Can be used without reservation, act
+const Register t1 = { 9 };   //   as temporary registers and are allowed to
+const Register t2 = { 10 };  //   be destroyed by subroutines.
 const Register t3 = { 11 };
 const Register t4 = { 12 };
 const Register t5 = { 13 };
 const Register t6 = { 14 };
 const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
+const Register s0 = { 16 };  // s0 - s7: Subroutine register variables.
+const Register s1 = { 17 };  //   Subroutines that write to these registers
+const Register s2 = { 18 };  //   must restore their values before exiting so
+const Register s3 = { 19 };  //   that the caller can expect the values to be
+const Register s4 = { 20 };  //   preserved.
 const Register s5 = { 21 };
 const Register s6 = { 22 };
 const Register s7 = { 23 };
 const Register t8 = { 24 };
 const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
+const Register k0 = { 26 };  // k0, k1: Reserved for system calls and
+const Register k1 = { 27 };  // interrupt handlers.
+const Register gp = { 28 };  // gp: Reserved.
+const Register sp = { 29 };  // sp: Stack pointer.
+const Register s8_fp = { 30 };  // fp: Frame pointer.
+const Register ra = { 31 };  // ra: Return address pointer.
 
 
 int ToNumber(Register reg);
@@ -167,24 +168,36 @@
 // Coprocessor register.
 struct FPURegister {
   static const int kNumRegisters = v8::internal::kNumFPURegisters;
-  // f0 has been excluded from allocation. This is following ia32
-  // where xmm0 is excluded.
-  static const int kNumAllocatableRegisters = 15;
+
+  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+  // number of Double regs (64-bit regs, or FPU-reg-pairs).
+
+  // A few double registers are reserved: one as a scratch register and one to
+  // hold 0.0.
+  //  f28: 0.0
+  //  f30: scratch register.
+  static const int kNumReservedRegisters = 2;
+  static const int kNumAllocatableRegisters = kNumRegisters / 2 -
+      kNumReservedRegisters;
+
 
   static int ToAllocationIndex(FPURegister reg) {
-    ASSERT(reg.code() != 0);
     ASSERT(reg.code() % 2 == 0);
-    return (reg.code() / 2) - 1;
+    ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+    ASSERT(reg.is_valid());
+    return (reg.code() / 2);
   }
 
   static FPURegister FromAllocationIndex(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
-    return from_code((index + 1) * 2);
+    return from_code(index * 2);
   }
 
   static const char* AllocationIndexToString(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
+      "f0",
       "f2",
       "f4",
       "f6",
@@ -197,9 +210,7 @@
       "f20",
       "f22",
       "f24",
-      "f26",
-      "f28",
-      "f30"
+      "f26"
     };
     return names[index];
   }
@@ -211,6 +222,23 @@
 
   bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
   bool is(FPURegister creg) const { return code_ == creg.code_; }
+  FPURegister low() const {
+    // Find low reg of a Double-reg pair, which is the reg itself.
+    ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
+    FPURegister reg;
+    reg.code_ = code_;
+    ASSERT(reg.is_valid());
+    return reg;
+  }
+  FPURegister high() const {
+    // Find high reg of a Double-reg pair, which is reg + 1.
+    ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
+    FPURegister reg;
+    reg.code_ = code_ + 1;
+    ASSERT(reg.is_valid());
+    return reg;
+  }
+
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -227,9 +255,19 @@
   int code_;
 };
 
-typedef FPURegister DoubleRegister;
+// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
+// 32-bit registers, f0 through f31. When used as 'double' they are used
+// in pairs, starting with the even numbered register. So a double operation
+// on f0 really uses f0 and f1.
+// (Modern mips hardware also supports 32 64-bit registers, via setting
+// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
+// but it is not in common use. Someday we will want to support this in v8.)
 
-const FPURegister no_creg = { -1 };
+// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
+typedef FPURegister DoubleRegister;
+typedef FPURegister FloatRegister;
+
+const FPURegister no_freg = { -1 };
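The comment above describes the O32 pairing that the low()/high() accessors expose: a double nominally held in an even register really occupies that register and its odd successor. A tiny standalone model of that mapping (FpuReg is the editor's placeholder type, not part of this patch):

#include <cassert>

struct FpuReg { int code; };

static FpuReg Low(FpuReg d)  { assert(d.code % 2 == 0); return d; }
static FpuReg High(FpuReg d) { assert(d.code % 2 == 0); return FpuReg{d.code + 1}; }

int main() {
  FpuReg f12{12};                // A double register under the O32 ABI.
  assert(Low(f12).code == 12);   // Low 32 bits live in f12 ...
  assert(High(f12).code == 13);  // ... and the high 32 bits in f13.
  return 0;
}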
 
 const FPURegister f0 = { 0 };  // Return value in hard float mode.
 const FPURegister f1 = { 1 };
@@ -264,12 +302,11 @@
 const FPURegister f30 = { 30 };
 const FPURegister f31 = { 31 };
 
+const FPURegister kDoubleRegZero = f28;
+
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
 struct FPUControlRegister {
-  static const int kFCSRRegister = 31;
-  static const int kInvalidFPUControlRegister = -1;
-
   bool is_valid() const { return code_ == kFCSRRegister; }
   bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
   int code() const {
@@ -288,7 +325,7 @@
   int code_;
 };
 
-const FPUControlRegister no_fpucreg = { -1 };
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
 const FPUControlRegister FCSR = { kFCSRRegister };
 
 
@@ -318,7 +355,7 @@
 
  private:
   Register rm_;
-  int32_t imm32_;  // Valid if rm_ == no_reg
+  int32_t imm32_;  // Valid if rm_ == no_reg.
   RelocInfo::Mode rmode_;
 
   friend class Assembler;
@@ -330,8 +367,12 @@
 // Class MemOperand represents a memory operand in load and store instructions.
 class MemOperand : public Operand {
  public:
-
   explicit MemOperand(Register rn, int32_t offset = 0);
+  int32_t offset() const { return offset_; }
+
+  bool OffsetIsInt16Encodable() const {
+    return is_int16(offset_);
+  }
 
  private:
   int32_t offset_;
@@ -342,58 +383,101 @@
 
 // CpuFeatures keeps track of which features are supported by the target CPU.
 // Supported features must be enabled by a Scope before use.
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == FPU && !FLAG_enable_fpu) return false;
     return (supported_ & (1u << f)) != 0;
   }
 
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (1u << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+    return (enabled & (1u << f)) != 0;
   }
+#endif
 
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
+
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      unsigned mask = 1u << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-             (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= 1u << f;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
-     }
-   private:
-    unsigned old_enabled_;
-    CpuFeatures* cpu_features_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
+    }
+
+ private:
     Isolate* isolate_;
+    unsigned old_enabled_;
 #else
-   public:
+
+ public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
 
+  class TryForceFeatureScope BASE_EMBEDDED {
+   public:
+    explicit TryForceFeatureScope(CpuFeature f)
+        : old_supported_(CpuFeatures::supported_) {
+      if (CanForce()) {
+        CpuFeatures::supported_ |= (1u << f);
+      }
+    }
+
+    ~TryForceFeatureScope() {
+      if (CanForce()) {
+        CpuFeatures::supported_ = old_supported_;
+      }
+    }
+
+   private:
+    static bool CanForce() {
+      // It's only safe to temporarily force support of CPU features
+      // when there's only a single isolate, which is guaranteed when
+      // the serializer is enabled.
+      return Serializer::enabled();
+    }
+
+    const unsigned old_supported_;
+  };
+
  private:
-  CpuFeatures();
-
-  unsigned supported_;
-  unsigned enabled_;
-  unsigned found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static unsigned supported_;
+  static unsigned found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
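The DEBUG-only Scope above follows a plain save/restore discipline on the per-isolate feature mask; stripped of the Isolate plumbing it reduces to the following standalone sketch (the FPU bit position is a placeholder):

#include <cassert>

int main() {
  const unsigned kFpuBit = 1u << 1;  // Placeholder bit for the FPU feature.
  unsigned enabled = 0;              // Stand-in for isolate->enabled_cpu_features().

  {  // Equivalent of: CpuFeatures::Scope scope(FPU);
    unsigned old_enabled = enabled;
    enabled |= kFpuBit;              // Constructor ORs the feature bit in.
    assert(enabled & kFpuBit);       // IsEnabled(FPU) now reports true.
    enabled = old_enabled;           // Destructor restores the previous mask.
  }

  assert((enabled & kFpuBit) == 0);  // Outside the scope the feature is off.
  return 0;
}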
@@ -414,7 +498,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
@@ -439,10 +523,13 @@
   //
   // Note: The same Label can be used for forward and backward branches
   // but it may be bound only once.
-  void bind(Label* L);  // binds an unbound label L to the current code position
+  void bind(Label* L);  // Binds an unbound label L to current code position.
+  // Determines if Label is bound and near enough so that branch instruction
+  // can be used to reach it, instead of jump instruction.
+  bool is_near(Label* L);
 
-  // Returns the branch offset to the given label from the current code position
-  // Links the label to the current position if it is still unbound
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
   // Manages the jump elimination optimization if the second parameter is true.
   int32_t branch_offset(Label* L, bool jump_elimination_allowed);
   int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
@@ -450,6 +537,7 @@
     ASSERT((o & 3) == 0);   // Assert the offset is aligned.
     return o >> 2;
   }
+  uint32_t jump_address(Label* L);
 
   // Puts a labels target address at the given position.
   // The high 8 bits are set to zero.
@@ -459,6 +547,8 @@
   static Address target_address_at(Address pc);
   static void set_target_address_at(Address pc, Address target);
 
+  static void JumpLabelToJumpRegister(Address pc);
+
   // This sets the branch destination (which gets loaded at the call address).
   // This is for calls and branches within generated code.
   inline static void set_target_at(Address instruction_payload,
@@ -489,9 +579,13 @@
   static const int kExternalTargetSize = 0 * kInstrSize;
 
   // Number of consecutive instructions used to store 32bit constant.
-  // Used in RelocInfo::target_address_address() function to tell serializer
-  // address of the instruction that follows LUI/ORI instruction pair.
-  static const int kInstructionsFor32BitConstant = 2;
+  // Before the jump optimization, this constant was used in the
+  // RelocInfo::target_address_address() function to tell the serializer the
+  // address of the instruction that follows the LUI/ORI instruction pair. Now,
+  // with the new jump optimization, where the jump-through-register instruction
+  // that usually follows the LUI/ORI pair is substituted with J/JAL, this
+  // constant equals 3 instructions (LUI+ORI+J/JAL/JR/JALR).
+  static const int kInstructionsFor32BitConstant = 3;
 
   // Distance between the instruction referring to the address of the call
   // target and the return address.
@@ -541,14 +635,14 @@
     FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
   };
 
-  // type == 0 is the default non-marking type.
+  // Type == 0 is the default non-marking type.
   void nop(unsigned int type = 0) {
     ASSERT(type < 32);
     sll(zero_reg, zero_reg, type, true);
   }
 
 
-  //------- Branch and jump  instructions --------
+  // --------Branch-and-jump-instructions----------
   // We don't use likely variant of instructions.
   void b(int16_t offset);
   void b(Label* L) { b(branch_offset(L, false)>>2); }
@@ -571,13 +665,15 @@
   }
 
   // Never use the int16_t b(l)cond version with a branch offset
-  // instead of using the Label* version. See Twiki for infos.
+  // instead of using the Label* version.
 
   // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
   void j(int32_t target);
   void jal(int32_t target);
   void jalr(Register rs, Register rd = ra);
   void jr(Register target);
+  void j_or_jr(int32_t target, Register rs);
+  void jal_or_jalr(int32_t target, Register rs);
 
 
   //-------Data-processing-instructions---------
@@ -637,7 +733,8 @@
   //-------------Misc-instructions--------------
 
   // Break / Trap instructions.
-  void break_(uint32_t code);
+  void break_(uint32_t code, bool break_as_stop = false);
+  void stop(const char* msg, uint32_t code = kMaxStopCode);
   void tge(Register rs, Register rt, uint16_t code);
   void tgeu(Register rs, Register rt, uint16_t code);
   void tlt(Register rs, Register rt, uint16_t code);
@@ -733,8 +830,13 @@
   void fcmp(FPURegister src1, const double src2, FPUCondition cond);
 
   // Check the code size generated from label to here.
-  int InstructionsGeneratedSince(Label* l) {
-    return (pc_offset() - l->pos()) / kInstrSize;
+  int SizeOfCodeGeneratedSince(Label* label) {
+    return pc_offset() - label->pos();
+  }
+
+  // Check the number of instructions generated from label to here.
+  int InstructionsGeneratedSince(Label* label) {
+    return SizeOfCodeGeneratedSince(label) / kInstrSize;
   }
 
   // Class for scoping postponing the trampoline pool generation.
@@ -753,6 +855,25 @@
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
   };
 
+  // Class for postponing the assembly buffer growth. Typically used for
+  // sequences of instructions that must be emitted as a unit, before
+  // buffer growth (and relocation) can occur.
+  // This blocking scope is not nestable.
+  class BlockGrowBufferScope {
+   public:
+    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+      assem_->StartBlockGrowBuffer();
+    }
+    ~BlockGrowBufferScope() {
+      assem_->EndBlockGrowBuffer();
+    }
+
+   private:
+    Assembler* assem_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+  };
+
   // Debugging.
 
   // Mark address of the ExitJSFrame code.
@@ -761,10 +882,26 @@
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot();
 
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void SetRecordedAstId(unsigned ast_id) {
+    ASSERT(recorded_ast_id_ == kNoASTId);
+    recorded_ast_id_ = ast_id;
+  }
+
+  unsigned RecordedAstId() {
+    ASSERT(recorded_ast_id_ != kNoASTId);
+    return recorded_ast_id_;
+  }
+
+  void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
+
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
+  static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
   void db(uint8_t data);
@@ -774,12 +911,6 @@
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
-  bool can_peephole_optimize(int instructions) {
-    if (!allow_peephole_optimization_) return false;
-    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
-    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
-  }
-
   // Postpone the generation of the trampoline pool for the specified number of
   // instructions.
   void BlockTrampolinePoolFor(int instructions);
@@ -804,6 +935,17 @@
 
   // Check if an instruction is a branch of some kind.
   static bool IsBranch(Instr instr);
+  static bool IsBeq(Instr instr);
+  static bool IsBne(Instr instr);
+
+  static bool IsJump(Instr instr);
+  static bool IsJ(Instr instr);
+  static bool IsLui(Instr instr);
+  static bool IsOri(Instr instr);
+
+  static bool IsJal(Instr instr);
+  static bool IsJr(Instr instr);
+  static bool IsJalr(Instr instr);
 
   static bool IsNop(Instr instr, unsigned int type);
   static bool IsPop(Instr instr);
@@ -813,7 +955,23 @@
   static bool IsLwRegFpNegOffset(Instr instr);
   static bool IsSwRegFpNegOffset(Instr instr);
 
-  static Register GetRt(Instr instr);
+  static Register GetRtReg(Instr instr);
+  static Register GetRsReg(Instr instr);
+  static Register GetRdReg(Instr instr);
+
+  static uint32_t GetRt(Instr instr);
+  static uint32_t GetRtField(Instr instr);
+  static uint32_t GetRs(Instr instr);
+  static uint32_t GetRsField(Instr instr);
+  static uint32_t GetRd(Instr instr);
+  static uint32_t GetRdField(Instr instr);
+  static uint32_t GetSa(Instr instr);
+  static uint32_t GetSaField(Instr instr);
+  static uint32_t GetOpcodeField(Instr instr);
+  static uint32_t GetFunction(Instr instr);
+  static uint32_t GetFunctionField(Instr instr);
+  static uint32_t GetImmediate16(Instr instr);
+  static uint32_t GetLabelConst(Instr instr);
 
   static int32_t GetBranchOffset(Instr instr);
   static bool IsLw(Instr instr);
@@ -825,9 +983,16 @@
   static bool IsAddImmediate(Instr instr);
   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
 
-  void CheckTrampolinePool(bool force_emit = false);
+  static bool IsAndImmediate(Instr instr);
+
+  void CheckTrampolinePool();
 
  protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  unsigned recorded_ast_id_;
+
   bool emit_debug_code() const { return emit_debug_code_; }
 
   int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -853,6 +1018,7 @@
   void StartBlockTrampolinePool() {
     trampoline_pool_blocked_nesting_++;
   }
+
   void EndBlockTrampolinePool() {
     trampoline_pool_blocked_nesting_--;
   }
@@ -861,6 +1027,31 @@
     return trampoline_pool_blocked_nesting_ > 0;
   }
 
+  bool has_exception() const {
+    return internal_trampoline_exception_;
+  }
+
+  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
+
+  bool is_trampoline_emitted() const {
+    return trampoline_emitted_;
+  }
+
+  // Temporarily block automatic assembly buffer growth.
+  void StartBlockGrowBuffer() {
+    ASSERT(!block_buffer_growth_);
+    block_buffer_growth_ = true;
+  }
+
+  void EndBlockGrowBuffer() {
+    ASSERT(block_buffer_growth_);
+    block_buffer_growth_ = false;
+  }
+
+  bool is_buffer_growth_blocked() const {
+    return block_buffer_growth_;
+  }
+
  private:
   // Code buffer:
   // The buffer into which code and relocation info are generated.
@@ -897,6 +1088,9 @@
   // Keep track of the last emitted pool to guarantee a maximal distance.
   int last_trampoline_pool_end_;  // pc offset of the end of the last pool.
 
+  // Automatic growth of the assembly buffer may be blocked for some sequences.
+  bool block_buffer_growth_;  // Block growth when true.
+
   // Relocation information generation.
   // Each relocation is encoded as a variable size value.
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -977,7 +1171,6 @@
   // Labels.
   void print(Label* L);
   void bind_to(Label* L, int pos);
-  void link_to(Label* L, Label* appendix);
   void next(Label* L);
 
   // One trampoline consists of:
@@ -990,13 +1183,17 @@
   // label_count *  kInstrSize.
   class Trampoline {
    public:
-    Trampoline(int start, int slot_count, int label_count) {
+    Trampoline() {
+      start_ = 0;
+      next_slot_ = 0;
+      free_slot_count_ = 0;
+      end_ = 0;
+    }
+    Trampoline(int start, int slot_count) {
       start_ = start;
       next_slot_ = start;
       free_slot_count_ = slot_count;
-      next_label_ = start + slot_count * 2 * kInstrSize;
-      free_label_count_ = label_count;
-      end_ = next_label_ + (label_count - 1) * kInstrSize;
+      end_ = start + slot_count * kTrampolineSlotsSize;
     }
     int start() {
       return start_;
@@ -1005,41 +1202,42 @@
       return end_;
     }
     int take_slot() {
-      int trampoline_slot = next_slot_;
-      ASSERT(free_slot_count_ > 0);
-      free_slot_count_--;
-      next_slot_ += 2 * kInstrSize;
+      int trampoline_slot = kInvalidSlotPos;
+      if (free_slot_count_ <= 0) {
+        // We have run out of space in the trampoline.
+        // Make sure we fail in debug mode, so we become aware of each case
+        // when this happens.
+        ASSERT(0);
+        // The internal exception will be caught.
+      } else {
+        trampoline_slot = next_slot_;
+        free_slot_count_--;
+        next_slot_ += kTrampolineSlotsSize;
+      }
       return trampoline_slot;
     }
-    int take_label() {
-      int label_pos = next_label_;
-      ASSERT(free_label_count_ > 0);
-      free_label_count_--;
-      next_label_ += kInstrSize;
-      return label_pos;
-    }
+
    private:
     int start_;
     int end_;
     int next_slot_;
     int free_slot_count_;
-    int next_label_;
-    int free_label_count_;
   };
 
-  int32_t get_label_entry(int32_t pos, bool next_pool = true);
-  int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
-
-  static const int kSlotsPerTrampoline = 2304;
-  static const int kLabelsPerTrampoline = 8;
-  static const int kTrampolineInst =
-      2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
-  static const int kTrampolineSize = kTrampolineInst * kInstrSize;
+  int32_t get_trampoline_entry(int32_t pos);
+  int unbound_labels_count_;
+  // If the trampoline is emitted, the generated code is becoming large. As
+  // this is already a slow case that could possibly break our code generation
+  // for the extreme case, we use this information to switch to a different
+  // mode of branch instruction generation, where we use jump instructions
+  // rather than regular branch instructions.
+  bool trampoline_emitted_;
+  static const int kTrampolineSlotsSize = 4 * kInstrSize;
   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
-  static const int kMaxDistBetweenPools =
-      kMaxBranchOffset - 2 * kTrampolineSize;
+  static const int kInvalidSlotPos = -1;
 
-  List<Trampoline> trampolines_;
+  Trampoline trampoline_;
+  bool internal_trampoline_exception_;
 
   friend class RegExpMacroAssemblerMIPS;
   friend class RelocInfo;
@@ -1047,7 +1245,6 @@
   friend class BlockTrampolinePoolScope;
 
   PositionsRecorder positions_recorder_;
-  bool allow_peephole_optimization_;
   bool emit_debug_code_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index b4bab8e..d772304 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
@@ -47,97 +47,1576 @@
 void Builtins::Generate_Adaptor(MacroAssembler* masm,
                                 CFunctionId id,
                                 BuiltinExtraArguments extra_args) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : called function (only guaranteed when
+  //  --                      extra_args requires it)
+  //  -- cp                 : context
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Insert extra arguments.
+  int num_extra_args = 0;
+  if (extra_args == NEEDS_CALLED_FUNCTION) {
+    num_extra_args = 1;
+    __ push(a1);
+  } else {
+    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+  }
+
+  // JumpToExternalReference expects a0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  __ Addu(a0, a0, Operand(num_extra_args + 1));
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+
+  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(result,
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ lw(result,
+         MemOperand(result,
+                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements; if
+// JSArray::kPreallocatedArrayElements is changed, the handling of loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ lw(scratch1, FieldMemOperand(array_function,
+                                  JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(scratch3, zero_reg);
+  __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ Addu(scratch1, result, Operand(JSArray::kSize));
+  __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array (untagged)
+  // scratch2: start of next object
+  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ sw(scratch3, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kPointerSize);
+  __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ sw(scratch3, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kPointerSize);
+
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < initial_capacity; i++) {
+    __ sw(scratch3, MemOperand(scratch1));
+    __ Addu(scratch1, scratch1, kPointerSize);
+  }
+}
+
+
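To make the AllocateEmptyJSArray size computation above concrete, here is a small host-side sketch (not part of the patch). The numeric constants are plausible 32-bit values assumed for illustration, not the authoritative definitions from objects.h.

#include <cstdio>

// Illustrative 32-bit values; the real constants come from objects.h.
const int kPointerSize = 4;
const int kJSArraySize = 4 * kPointerSize;           // map, properties, elements, length
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map, length

// Mirrors JSArray::kSize + FixedArray::SizeFor(initial_capacity) as used in
// the AllocateInNewSpace call above: one block holding the JSArray followed
// by its FixedArray backing store.
int EmptyArrayAllocationSize(int initial_capacity) {
  return kJSArraySize + kFixedArrayHeaderSize + initial_capacity * kPointerSize;
}

int main() {
  // With the default of 4 pre-allocated elements this is 16 + 8 + 16 = 40 bytes.
  std::printf("%d\n", EmptyArrayAllocationSize(4));
  return 0;
}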
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array_storage and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole values; otherwise it is left uninitialized. When the
+// backing store is filled, the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array_storage,
+                            Register elements_array_end,
+                            Register scratch1,
+                            Register scratch2,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ lw(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ Branch(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested number of elements.
+  __ bind(&not_empty);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ li(elements_array_end,
+        (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
+  __ sra(scratch1, array_size, kSmiTagSize);
+  __ Addu(elements_array_end, elements_array_end, scratch1);
+  __ AllocateInNewSpace(
+      elements_array_end,
+      result,
+      scratch1,
+      scratch2,
+      gc_required,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array_storage: initial map
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(elements_array_storage,
+         FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // array_size: size of array (smi)
+  __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
+  __ sw(elements_array_storage,
+         FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ And(elements_array_storage,
+          elements_array_storage,
+          Operand(~kHeapObjectTagMask));
+  // Initialize the fixed array and fill it with holes. FixedArray length is
+  // stored as a smi.
+  // result: JSObject
+  // elements_array_storage: elements array (untagged)
+  // array_size: size of array (smi)
+  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ sw(scratch1, MemOperand(elements_array_storage));
+  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+  // Length of the FixedArray is the number of pre-allocated elements if
+  // the actual JSArray has length 0 and the size of the JSArray for non-empty
+  // JSArrays. The length of a FixedArray is stored as a smi.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+  __ movz(array_size, at, array_size);
+
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ sw(array_size, MemOperand(elements_array_storage));
+  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+  // Calculate elements array and elements array end.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // array_size: smi-tagged size of elements array
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(elements_array_end, elements_array_storage, elements_array_end);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // elements_array_end: start of next object
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+    __ Branch(&entry);
+    __ bind(&loop);
+    __ sw(scratch1, MemOperand(elements_array_storage));
+    __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+    __ bind(&entry);
+    __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
+  }
+}
+
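The size and length handling above relies on V8's smi (small integer) encoding, which the kSmiTagSize shifts implement. The following sketch shows that encoding under the usual 32-bit assumptions (kSmiTag == 0, kSmiTagSize == 1); it is illustrative only.

#include <cassert>
#include <cstdint>

// Smi encoding as assumed by the shifts above (32-bit): tag bit 0 is 0,
// the payload lives in the upper 31 bits.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
const int kSmiTagMask = (1 << kSmiTagSize) - 1;

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }  // __ sll(x, x, kSmiTagSize)
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }    // __ sra(x, x, kSmiTagSize)
bool IsSmi(int32_t word) { return (word & kSmiTagMask) == kSmiTag; }

int main() {
  int32_t smi = SmiTag(123);
  assert(IsSmi(smi));
  assert(SmiUntag(smi) == 123);
  // A heap pointer carries kHeapObjectTag == 1, so it never looks like a smi.
  assert(!IsSmi(smi | 1));
  return 0;
}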
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code, the runtime is called.
+// This function assumes the following state:
+//   a0: argc
+//   a1: constructor (built-in Array function)
+//   ra: return address
+//   sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in a1 needs to be preserved for
+// entering the generic code. In both cases argc in a0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label* call_generic_code) {
+  Counters* counters = masm->isolate()->counters();
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero arguments or one.
+  __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       a1,
+                       a2,
+                       a3,
+                       t0,
+                       t1,
+                       JSArray::kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
+  // Setup return value, remove receiver from stack and return.
+  __ mov(v0, a2);
+  __ Addu(sp, sp, Operand(kPointerSize));
+  __ Ret();
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ Branch(&argc_two_or_more, ne, a0, Operand(1));
+
+  STATIC_ASSERT(kSmiTag == 0);
+  __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
+  __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
+  __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(call_generic_code, Ugreater_equal, a2,
+            Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+
+  // a0: argc
+  // a1: constructor
+  // a2: array_size (smi)
+  // sp[0]: argument
+  AllocateJSArray(masm,
+                  a1,
+                  a2,
+                  a3,
+                  t0,
+                  t1,
+                  t2,
+                  t3,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
+
+  // Setup return value, remove receiver and argument from stack and return.
+  __ mov(v0, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ sll(a2, a0, kSmiTagSize);  // Convert argc to a smi.
+
+  // a0: argc
+  // a1: constructor
+  // a2: array_size (smi)
+  // sp[0]: last argument
+  AllocateJSArray(masm,
+                  a1,
+                  a2,
+                  a3,
+                  t0,
+                  t1,
+                  t2,
+                  t3,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
+
+  // Fill arguments as array elements. Copy from the top of the stack (last
+  // element) to the array backing store filling it backwards. Note:
+  // elements_array_end points after the backing store.
+  // a0: argc
+  // a3: JSArray
+  // t0: elements_array storage start (untagged)
+  // t1: elements_array_end (untagged)
+  // sp[0]: last argument
+
+  Label loop, entry;
+  __ Branch(&entry);
+  __ bind(&loop);
+  __ pop(a2);
+  __ Addu(t1, t1, -kPointerSize);
+  __ sw(a2, MemOperand(t1));
+  __ bind(&entry);
+  __ Branch(&loop, lt, t0, Operand(t1));
+
+  // Remove caller arguments and receiver from the stack, setup return value and
+  // return.
+  // a0: argc
+  // a3: JSArray
+  // sp[0]: receiver
+  __ Addu(sp, sp, Operand(kPointerSize));
+  __ mov(v0, a3);
+  __ Ret();
 }
 
 
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function (1)",
+              t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, "Unexpected initial map for Array function (2)",
+              t0, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->ArrayCodeGeneric();
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
 
 
 void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin and internal
+    // Array functions which always have a map.
+    // Initial map for the builtin Array function should be a map.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function (3)",
+              t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, "Unexpected initial map for Array function (4)",
+              t0, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+
+  Handle<Code> generic_construct_stub =
+      masm->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
 }
 
 
 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+  Register function = a1;
+  if (FLAG_debug_code) {
+    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+    __ Assert(eq, "Unexpected String function", function, Operand(a2));
+  }
+
+  // Load the first argument into a0 and get rid of the rest.
+  Label no_arguments;
+  __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+  // First arg = sp[(argc - 1) * 4].
+  __ Subu(a0, a0, Operand(1));
+  __ sll(a0, a0, kPointerSizeLog2);
+  __ Addu(sp, a0, sp);
+  __ lw(a0, MemOperand(sp));
+  // sp now points to args[0]; drop args[0] and the receiver.
+  __ Drop(2);
+
+  Register argument = a2;
+  Label not_cached, argument_is_string;
+  NumberToStringStub::GenerateLookupNumberStringCache(
+      masm,
+      a0,        // Input.
+      argument,  // Result.
+      a3,        // Scratch.
+      t0,        // Scratch.
+      t1,        // Scratch.
+      false,     // Is it a Smi?
+      &not_cached);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
+  __ bind(&argument_is_string);
+
+  // ----------- S t a t e -------------
+  //  -- a2     : argument converted to string
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label gc_required;
+  __ AllocateInNewSpace(JSValue::kSize,
+                        v0,  // Result.
+                        a3,  // Scratch.
+                        t0,  // Scratch.
+                        &gc_required,
+                        TAG_OBJECT);
+
+  // Initialize the String object.
+  Register map = a3;
+  __ LoadGlobalFunctionInitialMap(function, map, t0);
+  if (FLAG_debug_code) {
+    __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
+    __ Assert(eq, "Unexpected string wrapper instance size",
+        t0, Operand(JSValue::kSize >> kPointerSizeLog2));
+    __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+    __ Assert(eq, "Unexpected unused properties of string wrapper",
+        t0, Operand(zero_reg));
+  }
+  __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  // Ensure the object is fully initialized.
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+  __ Ret();
+
+  // The argument was not found in the number to string cache. Check
+  // if it's a string already before calling the conversion builtin.
+  Label convert_argument;
+  __ bind(&not_cached);
+  __ JumpIfSmi(a0, &convert_argument);
+
+  // Is it a String?
+  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ And(t0, a3, Operand(kIsNotStringMask));
+  __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
+  __ mov(argument, a0);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ Branch(&argument_is_string);
+
+  // Invoke the conversion builtin and put the result into a2.
+  __ bind(&convert_argument);
+  __ push(function);  // Preserve the function.
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ EnterInternalFrame();
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  __ LeaveInternalFrame();
+  __ pop(function);
+  __ mov(argument, v0);
+  __ Branch(&argument_is_string);
+
+  // Load the empty string into a2, remove the receiver from the
+  // stack, and jump back to the case where the argument is a string.
+  __ bind(&no_arguments);
+  __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+  __ Drop(1);
+  __ Branch(&argument_is_string);
+
+  // At this point the argument is already a string. Call runtime to
+  // create a string wrapper.
+  __ bind(&gc_required);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+  __ EnterInternalFrame();
+  __ push(argument);
+  __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  __ LeaveInternalFrame();
+  __ Ret();
 }
 
 
 void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that the function is not a smi.
+  __ And(t0, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+  // Check that the function is a JSFunction.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Jump to the function-specific construct stub.
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+  __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t9);
+
+  // a0: number of arguments
+  // a1: called object
+  __ bind(&non_function_call);
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site). The receiver is
+  // stack element argc.
+  // Set expected number of arguments to zero (not changing a0).
+  __ mov(a2, zero_reg);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
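In host-side terms, the control flow of Generate_JSConstructCall amounts to the dispatch below. The enum and helper names are hypothetical, introduced only to describe what the generated code does.

#include <cstdio>

// Hypothetical stand-ins for the runtime values inspected by the stub.
enum CalleeKind { kSmi, kJSFunction, kOtherHeapObject };

const char* DispatchConstructCall(CalleeKind callee) {
  // Smis and non-JSFunction heap objects both take the non-function path,
  // which routes through the arguments adaptor to
  // CALL_NON_FUNCTION_AS_CONSTRUCTOR with zero expected arguments.
  if (callee == kSmi || callee == kOtherHeapObject) {
    return "ArgumentsAdaptorTrampoline -> CALL_NON_FUNCTION_AS_CONSTRUCTOR";
  }
  // Otherwise jump to the function-specific construct stub found via
  // SharedFunctionInfo::kConstructStubOffset.
  return "SharedFunctionInfo construct stub";
}

int main() {
  std::printf("%s\n", DispatchConstructCall(kSmi));
  std::printf("%s\n", DispatchConstructCall(kJSFunction));
  return 0;
}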
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
+  // Should never count constructions for api objects.
+  ASSERT(!is_api_function || !count_constructions);
+
+  Isolate* isolate = masm->isolate();
+
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  // Enter a construct frame.
+  __ EnterConstructFrame();
+
+  // Preserve the two incoming parameters on the stack.
+  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+  __ MultiPushReversed(a0.bit() | a1.bit());
+
+  // Use t7 to hold undefined, which is used in several places below.
+  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+  Label rt_call, allocated;
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  if (FLAG_inline_new) {
+    Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address(isolate);
+    __ li(a2, Operand(debug_step_in_fp));
+    __ lw(a2, MemOperand(a2));
+    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+#endif
+
+    // Load the initial map and verify that it is in fact a map.
+    // a1: constructor function
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t4);
+    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+    // Check that the constructor is not constructing a JSFunction (see
+    // comments in Runtime_NewObject in runtime.cc); in that case the initial
+    // map's instance type would be JS_FUNCTION_TYPE.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+    __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+      MemOperand constructor_count =
+         FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+      __ lbu(t0, constructor_count);
+      __ Subu(t0, t0, Operand(1));
+      __ sb(t0, constructor_count);
+      __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+      __ Push(a1, a2);
+
+      __ push(a1);  // Constructor.
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(a2);
+      __ pop(a1);
+
+      __ bind(&allocate);
+    }
+
+    // Now allocate the JSObject on the heap.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+    // Allocated the JSObject, now initialize the fields. Map is set to initial
+    // map and properties and elements are set to empty fixed array.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size
+    // t4: JSObject (not tagged)
+    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+    __ mov(t5, t4);
+    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+    __ Addu(t5, t5, Operand(3*kPointerSize));
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+    // Fill all the in-object properties with appropriate filler.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size (in words)
+    // t4: JSObject (not tagged)
+    // t5: First in-object property of JSObject (not tagged)
+    __ sll(t0, a3, kPointerSizeLog2);
+    __ addu(t6, t4, t0);   // End of object.
+    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        // To allow for truncation.
+        __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+      } else {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      }
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(t5, 0));
+      __ addiu(t5, t5, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, Uless, t5, Operand(t6));
+    }
+
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed. Continue with the
+    // allocated object if not; fall through to the runtime call if it is.
+    // a1: constructor function
+    // t4: JSObject
+    // t5: start of next object (not tagged)
+    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+    // The instance sizes field contains both pre-allocated property fields
+    // and in-object properties.
+    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+    __ And(t6,
+           a0,
+           Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+    __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+    __ Addu(a3, a3, Operand(t0));
+    __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+    __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+    __ subu(a3, a3, t0);
+
+    // Done if no extra properties are to be allocated.
+    __ Branch(&allocated, eq, a3, Operand(zero_reg));
+    __ Assert(greater_equal, "Property allocation count failed.",
+        a3, Operand(zero_reg));
+
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // a1: constructor
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: start of next object
+    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+    __ AllocateInNewSpace(
+        a0,
+        t5,
+        t6,
+        a2,
+        &undo_allocation,
+        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+    // Initialize the FixedArray.
+    // a1: constructor
+    // a3: number of elements in properties array (un-tagged)
+    // t4: JSObject
+    // t5: start of next object
+    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+    __ mov(a2, t5);
+    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+    __ sll(a0, a3, kSmiTagSize);
+    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+    __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+    // Initialize the fields to undefined.
+    // a1: constructor
+    // a2: First element of FixedArray (not tagged)
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ sll(t3, a3, kPointerSizeLog2);
+    __ addu(t6, a2, t3);  // End of object.
+    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      } else if (FLAG_debug_code) {
+        __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+        __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+      }
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(a2));
+      __ addiu(a2, a2, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, less, a2, Operand(t6));
+    }
+
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject.
+    // a1: constructor function
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
+    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+    // Continue with JSObject being successfully allocated.
+    // a1: constructor function
+    // t4: JSObject
+    __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated objects unused properties.
+    // t4: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(t4, t5);
+  }
+
+  __ bind(&rt_call);
+  // Allocate the new receiver object using the runtime call.
+  // a1: constructor function
+  __ push(a1);  // Argument for Runtime_NewObject.
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(t4, v0);
+
+  // Receiver for constructor call allocated.
+  // t4: JSObject
+  __ bind(&allocated);
+  __ push(t4);
+
+  // Push the function and the allocated receiver from the stack.
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, kPointerSize));
+  __ MultiPushReversed(a1.bit() | t4.bit());
+
+  // Reload the number of arguments from the stack.
+  // a1: constructor function
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+
+  // Setup pointer to last argument.
+  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // Setup number of arguments for function call below.
+  __ srl(a0, a3, kSmiTagSize);
+
+  // Copy arguments and receiver to the expression stack.
+  // a0: number of arguments
+  // a1: constructor function
+  // a2: address of last argument (caller sp)
+  // a3: number of arguments (smi-tagged)
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  Label loop, entry;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ lw(t1, MemOperand(t0));
+  __ push(t1);
+  __ bind(&entry);
+  __ Addu(a3, a3, Operand(-2));
+  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+  // Call the function.
+  // a0: number of arguments
+  // a1: constructor function
+  if (is_api_function) {
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
+    ParameterCount expected(0);
+    __ InvokeCode(code, expected, expected,
+                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+  }
+
+  // Pop the function from the stack.
+  // v0: result
+  // sp[0]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ Pop();
+
+  // Restore context from the frame.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ And(t0, v0, Operand(kSmiTagMask));
+  __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ GetObjectType(v0, a3, a3);
+  __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ lw(v0, MemOperand(sp));
+
+  // Remove receiver from the stack, remove caller arguments, and
+  // return.
+  __ bind(&exit);
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+  __ LeaveConstructFrame();
+  __ sll(t0, a1, kPointerSizeLog2 - 1);
+  __ Addu(sp, sp, t0);
+  __ Addu(sp, sp, kPointerSize);
+  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  __ Ret();
 }
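The property-array sizing inside Generate_JSConstructStubHelper above combines three byte-sized map fields; assuming the field extraction is read correctly, the arithmetic reduces to the sketch below. The example values in main() are made up.

#include <cstdio>

// Mirrors the arithmetic done on the map fields above: the number of extra
// (out-of-object) property slots to allocate for the new JSObject.
int ExtraPropertySlots(int unused_property_fields,
                       int pre_allocated_property_fields,
                       int in_object_properties) {
  return unused_property_fields + pre_allocated_property_fields -
         in_object_properties;
}

int main() {
  // Example values only: 2 unused fields, 5 pre-allocated, 5 fit in-object,
  // so 2 slots go into a separate FixedArray (plus its 2-word header).
  std::printf("%d\n", ExtraPropertySlots(2, 5, 5));
  return 0;
}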
 
 
 void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, false, true);
 }
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, false, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from JSEntryStub::GenerateBody
+
+  // ----------- S t a t e -------------
+  //  -- a0: code entry
+  //  -- a1: function
+  //  -- a2: receiver_pointer
+  //  -- a3: argc
+  //  -- s0: argv
+  // -----------------------------------
+
+  // Clear the context before we push it when entering the JS frame.
+  __ mov(cp, zero_reg);
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Set up the context from the function argument.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Set up the roots register.
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
+  __ li(s6, Operand(roots_address));
+
+  // Push the function and the receiver onto the stack.
+  __ Push(a1, a2);
+
+  // Copy arguments to the stack in a loop.
+  // a3: argc
+  // s0: argv, i.e. points to the first arg
+  Label loop, entry;
+  __ sll(t0, a3, kPointerSizeLog2);
+  __ addu(t2, s0, t0);
+  __ b(&entry);
+  __ nop();   // Branch delay slot nop.
+  // t2 points past last arg.
+  __ bind(&loop);
+  __ lw(t0, MemOperand(s0));  // Read next parameter.
+  __ addiu(s0, s0, kPointerSize);
+  __ lw(t0, MemOperand(t0));  // Dereference handle.
+  __ push(t0);  // Push parameter.
+  __ bind(&entry);
+  __ Branch(&loop, ne, s0, Operand(t2));
+
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ mov(s1, t0);
+  __ mov(s2, t0);
+  __ mov(s3, t0);
+  __ mov(s4, t0);
+  __ mov(s5, t0);
+  // s6 holds the root address. Do not clobber.
+  // s7 is cp. Do not init.
+
+  // Invoke the code and pass argc as a0.
+  __ mov(a0, a3);
+  if (is_construct) {
+    __ Call(masm->isolate()->builtins()->JSConstructCall());
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+  }
+
+  __ LeaveInternalFrame();
+
+  __ Jump(ra);
 }
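The argument-copy loop in Generate_JSEntryTrampolineHelper above loads each argv slot and then dereferences it once more, because argv holds handles (pointers to object slots). A host-side sketch of that double indirection, with hypothetical Object and Handle types:

#include <cstdio>
#include <vector>

// Hypothetical stand-ins: an "Object" and a handle that points at a slot
// holding the object pointer, matching the extra lw in the loop above.
struct Object { int payload; };
typedef Object** Handle;  // A handle is a pointer to an Object*.

void PushArguments(Handle* argv, int argc, std::vector<Object*>* stack) {
  for (int i = 0; i < argc; i++) {
    Object** slot = argv[i];  // lw t0, MemOperand(s0): read the handle.
    stack->push_back(*slot);  // lw t0, MemOperand(t0): dereference it.
  }
}

int main() {
  Object a = {1}, b = {2};
  Object* slot_a = &a;
  Object* slot_b = &b;
  Handle argv[] = { &slot_a, &slot_b };
  std::vector<Object*> stack;
  PushArguments(argv, 2, &stack);
  std::printf("%d %d\n", stack[0]->payload, stack[1]->payload);  // 1 2
  return 0;
}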
 
 
 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSEntryTrampolineHelper(masm, false);
 }
 
 
 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSEntryTrampolineHelper(masm, true);
 }
 
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  // Call the runtime function.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  // Calculate the entry point.
+  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
 }
 
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+  // Calculate the entry point.
+  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
 }
 
 
+// These functions are called from C++ but cannot be used in live code.
 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // 1. Make sure we have at least one argument.
+  // a0: actual number of arguments
+  { Label done;
+    __ Branch(&done, ne, a0, Operand(zero_reg));
+    __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+    __ push(t2);
+    __ Addu(a0, a0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  // a0: actual number of arguments
+  Label non_function;
+  __ sll(at, a0, kPointerSizeLog2);
+  __ addu(at, sp, at);
+  __ lw(a1, MemOperand(at));
+  __ And(at, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function, eq, at, Operand(zero_reg));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // 3a. Patch the first argument if necessary when calling a function.
+  // a0: actual number of arguments
+  // a1: function
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+    // Do not transform the receiver for strict mode functions.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                                 kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+    // Do not transform the receiver for natives (compiler hints already in a3).
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+    // Compute the receiver in non-strict mode.
+    // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a2, sp, at);
+    __ lw(a2, MemOperand(a2, -kPointerSize));
+    // a0: actual number of arguments
+    // a1: function
+    // a2: first argument
+    __ JumpIfSmi(a2, &convert_to_object, t2);
+
+    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+    __ LoadRoot(a3, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ GetObjectType(a2, a3, a3);
+    __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+    __ push(a0);
+
+    __ push(a2);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a2, v0);
+
+    __ pop(a0);
+    __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+    __ LeaveInternalFrame();
+    // Restore the function to a1.
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(at, sp, at);
+    __ lw(a1, MemOperand(at));
+    __ Branch(&patch_receiver);
+
+    // Use the global receiver object from the called function as the
+    // receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+    __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a3, sp, at);
+    __ sw(a2, MemOperand(a3, -kPointerSize));
+
+    __ Branch(&shift_arguments);
+  }
+
+  // 3b. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  // a0: actual number of arguments
+  // a1: function
+  __ bind(&non_function);
+  // Restore the function in case it has been modified.
+  __ sll(at, a0, kPointerSizeLog2);
+  __ addu(a2, sp, at);
+  __ sw(a1, MemOperand(a2, -kPointerSize));
+  // Clear a1 to indicate a non-function being called.
+  __ mov(a1, zero_reg);
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // a0: actual number of arguments
+  // a1: function
+  __ bind(&shift_arguments);
+  { Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a2, sp, at);
+
+    __ bind(&loop);
+    __ lw(at, MemOperand(a2, -kPointerSize));
+    __ sw(at, MemOperand(a2));
+    __ Subu(a2, a2, Operand(kPointerSize));
+    __ Branch(&loop, ne, a2, Operand(sp));
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ Subu(a0, a0, Operand(1));
+    __ Pop();
+  }
+
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  // a0: actual number of arguments
+  // a1: function
+  { Label function;
+    __ Branch(&function, ne, a1, Operand(zero_reg));
+    __ mov(a2, zero_reg);  // expected arguments is 0 for CALL_NON_FUNCTION
+    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+    __ SetCallKind(t1, CALL_AS_METHOD);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    __ bind(&function);
+  }
+
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register a3 without checking arguments.
+  // a0: actual number of arguments
+  // a1: function
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2,
+         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ sra(a2, a2, kSmiTagSize);
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  // Check formal and actual parameter counts.
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+  ParameterCount expected(0);
+  __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
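The receiver patching in Generate_FunctionCall above can be summarized as a small decision procedure. The sketch below is a behavioural description only; the enum values and function name are hypothetical.

#include <cstdio>

// Hypothetical classification of the receiver paths taken by the stub above.
enum ReceiverKind { kUndefinedOrNull, kSmiOrPrimitive, kSpecObject };

const char* ComputeReceiverAction(bool strict_mode, bool native,
                                  ReceiverKind receiver) {
  // Strict-mode and native functions get the receiver unchanged.
  if (strict_mode || native) return "keep receiver as-is";
  if (receiver == kUndefinedOrNull) return "use global receiver";
  if (receiver == kSpecObject) return "keep receiver as-is";
  return "convert with ToObject builtin";  // Smis and other primitives.
}

int main() {
  std::printf("%s\n", ComputeReceiverAction(false, false, kUndefinedOrNull));
  std::printf("%s\n", ComputeReceiverAction(false, false, kSmiOrPrimitive));
  std::printf("%s\n", ComputeReceiverAction(true, false, kSmiOrPrimitive));
  return 0;
}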
 
 
 void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  const int kIndexOffset    = -5 * kPointerSize;
+  const int kLimitOffset    = -4 * kPointerSize;
+  const int kArgsOffset     =  2 * kPointerSize;
+  const int kRecvOffset     =  3 * kPointerSize;
+  const int kFunctionOffset =  4 * kPointerSize;
+
+  __ EnterInternalFrame();
+
+  __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+  __ push(a0);
+  __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+  __ push(a0);
+  // Returns (in v0) number of arguments to copy to stack as Smi.
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+  // Make a2 the space we have left. The stack might already be overflowed
+  // here which will cause a2 to become negative.
+  __ subu(a2, sp, a2);
+  // Check if the arguments will overflow the stack.
+  __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+  __ Branch(&okay, gt, a2, Operand(t0));  // Signed comparison.
+
+  // Out of stack space.
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ push(a1);
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  // End of stack check.
+
+  // Push current limit and index.
+  __ bind(&okay);
+  __ push(v0);  // Limit.
+  __ mov(a1, zero_reg);  // Initial index.
+  __ push(a1);
+
+  // Change context eagerly to get the right global object if necessary.
+  __ lw(a0, MemOperand(fp, kFunctionOffset));
+  __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+  // Load the shared function info while the function is still in a0.
+  __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ lw(a0, MemOperand(fp, kRecvOffset));
+
+  // Do not transform the receiver for strict mode functions.
+  __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                               kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+  // Do not transform the receiver for natives (compiler hints already in a2).
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+  // Compute the receiver in non-strict mode.
+  __ And(t0, a0, Operand(kSmiTagMask));
+  __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+  // Check if the receiver is already a JavaScript object.
+  // a0: receiver
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Convert the receiver to a regular object.
+  // a0: receiver
+  __ bind(&call_to_object);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+  __ Branch(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+  __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  // a0: receiver
+  __ bind(&push_receiver);
+  __ push(a0);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Branch(&entry);
+
+  // Load the current argument from the arguments array and push it to the
+  // stack.
+  // a0: current argument index
+  __ bind(&loop);
+  __ lw(a1, MemOperand(fp, kArgsOffset));
+  __ push(a1);
+  __ push(a0);
+
+  // Call the runtime to access the property in the arguments array.
+  __ CallRuntime(Runtime::kGetProperty, 2);
+  __ push(v0);
+
+  // Use inline caching to access the arguments.
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+  __ sw(a0, MemOperand(fp, kIndexOffset));
+
+  // Test if the copy loop has finished copying all the elements from the
+  // arguments object.
+  __ bind(&entry);
+  __ lw(a1, MemOperand(fp, kLimitOffset));
+  __ Branch(&loop, ne, a0, Operand(a1));
+  // Invoke the function.
+  ParameterCount actual(a0);
+  __ sra(a0, a0, kSmiTagSize);
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
+
+  // Tear down the internal frame and remove function, receiver and args.
+  __ LeaveInternalFrame();
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+}
+
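The stack check in Generate_FunctionApply above compares the remaining stack space against the smi-encoded argument count rescaled to bytes. A sketch of that comparison, with made-up addresses and the usual 32-bit constants assumed:

#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;
const int kPointerSizeLog2 = 2;  // 32-bit: 4-byte pointers.

// Mirrors the check above: the headroom sp - real_stack_limit must exceed the
// smi-encoded argument count rescaled to a byte count.
bool ApplyArgumentsFit(uintptr_t sp, uintptr_t real_stack_limit,
                       int32_t argc_smi) {
  intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
  intptr_t bytes_needed =
      static_cast<intptr_t>(argc_smi) << (kPointerSizeLog2 - kSmiTagSize);
  return space_left > bytes_needed;  // Signed comparison, as in the stub.
}

int main() {
  // Hypothetical numbers: 4 KB of headroom, 100 arguments (smi-encoded).
  bool fits = ApplyArgumentsFit(0x20001000u, 0x20000000u, 100 << kSmiTagSize);
  std::printf("%s\n", fits ? "fits" : "overflow");
  return 0;
}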
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ sll(a0, a0, kSmiTagSize);
+  __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
+  __ Addu(fp, sp, Operand(3 * kPointerSize));
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+  __ mov(sp, fp);
+  __ MultiPop(fp.bit() | ra.bit());
+  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(sp, sp, t0);
+  // Adjust for the receiver.
+  __ Addu(sp, sp, Operand(kPointerSize));
 }
 
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // State setup as expected by MacroAssembler::InvokePrologue.
+  // ----------- S t a t e -------------
+  //  -- a0: actual arguments count
+  //  -- a1: function (passed through to callee)
+  //  -- a2: expected arguments count
+  //  -- a3: callee code entry
+  //  -- t1: call kind information
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+
+  Label enough, too_few;
+  __ Branch(&dont_adapt_arguments, eq,
+      a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  // We use Uless as the number of arguments should always be greater than 0.
+  __ Branch(&too_few, Uless, a0, Operand(a2));
+
+  {  // Enough parameters: actual >= expected.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate copy start address into a0 and copy end address into a2.
+    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address.
+    __ sll(a2, a2, kPointerSizeLog2);
+    __ subu(a2, a0, a2);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: copy end address
+    // a3: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));
+    __ push(t0);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+    __ addiu(a0, a0, -kPointerSize);  // In delay slot.
+
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // TODO(MIPS): Optimize these loops.
+
+    // Calculate copy start address into a0 and copy end address into t3.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address. Also adjust for return address.
+    __ Addu(t3, fp, kPointerSize);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    // t3: copy end address
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
+    __ push(t0);
+    __ Subu(a0, a0, kPointerSize);
+    __ Branch(&copy, ne, a0, Operand(t3));
+
+    // Fill the remaining expected arguments with undefined.
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ sll(t2, a2, kPointerSizeLog2);
+    __ Subu(a2, fp, Operand(t2));
+    __ Addu(a2, a2, Operand(-4 * kPointerSize));  // Adjust for frame.
+
+    Label fill;
+    __ bind(&fill);
+    __ push(t0);
+    __ Branch(&fill, ne, sp, Operand(a2));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+
+  __ Call(a3);
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ Jump(a3);
 }
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 6cc272c..c3c3874 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -31,7 +31,7 @@
 
 #include "bootstrapper.h"
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "regexp-macro-assembler.h"
 
 namespace v8 {
@@ -40,24 +40,232 @@
 
 #define __ ACCESS_MASM(masm)
 
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc,
+                                          bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* rhs_not_nan,
+                                    Label* slow,
+                                    bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs);
+
+
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+                                   Register scratch1, Register scratch2,
+                                   Label* not_a_heap_number) {
+  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
+}
+
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // The ToNumber stub takes one argument in a0.
+  Label check_heap_number, call_builtin;
+  __ JumpIfNotSmi(a0, &check_heap_number);
+  __ mov(v0, a0);
+  __ Ret();
+
+  __ bind(&check_heap_number);
+  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+  __ mov(v0, a0);
+  __ Ret();
+
+  __ bind(&call_builtin);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in cp.
+  Label gc;
+
+  // Pop the function info from the stack.
+  __ pop(a3);
+
+  // Attempt to allocate new JSFunction in new space.
+  __ AllocateInNewSpace(JSFunction::kSize,
+                        v0,
+                        a1,
+                        a2,
+                        &gc,
+                        TAG_OBJECT);
+
+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
+
+  // Compute the function map in the current global context and set that
+  // as the map of the allocated object.
+  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
+  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
+  // Create a new closure through the slower runtime call.
+  __ bind(&gc);
+  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+  __ Push(cp, a3, t0);
+  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
 }
 
 
 void FastNewContextStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+  // Attempt to allocate the context in new space.
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        v0,
+                        a1,
+                        a2,
+                        &gc,
+                        TAG_OBJECT);
+
+  // Load the function from the stack.
+  __ lw(a3, MemOperand(sp, 0));
+
+  // Set up the object header.
+  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ li(a2, Operand(Smi::FromInt(length)));
+  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+  // Set up the fixed slots.
+  __ li(a1, Operand(Smi::FromInt(0)));
+  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+  // Copy the global object from the previous context.
+  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+  // Initialize the rest of the slots to undefined.
+  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
+  }
+
+  // Remove the on-stack argument and return.
+  __ mov(cp, v0);
+  __ Pop();
+  __ Ret();
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
 }
 
 
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Stack layout on entry:
+  // [sp]: constant elements.
+  // [sp + kPointerSize]: literal index.
+  // [sp + (2 * kPointerSize)]: literals array.
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
+  // Load boilerplate object into a3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a3, t0);
+  __ lw(a3, MemOperand(t0));
+  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case, eq, a3, Operand(t1));
+
+  if (FLAG_debug_code) {
+    const char* message;
+    Heap::RootListIndex expected_map_index;
+    if (mode_ == CLONE_ELEMENTS) {
+      message = "Expected (writable) fixed array";
+      expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else {
+      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      message = "Expected copy-on-write fixed array";
+      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+    }
+    __ push(a3);
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
+    __ LoadRoot(at, expected_map_index);
+    __ Assert(eq, message, a3, Operand(at));
+    __ pop(a3);
+  }
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  // Return new object in v0.
+  __ AllocateInNewSpace(size,
+                        v0,
+                        a1,
+                        a2,
+                        &slow_case,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ lw(a1, FieldMemOperand(a3, i));
+      __ sw(a1, FieldMemOperand(v0, i));
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+  }
+
+  // Return and remove the on-stack parameters.
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
 
 
@@ -97,82 +305,98 @@
   }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
-  void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
 };
 
 
 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+#ifndef BIG_ENDIAN_FLOATING_POINT
+  Register exponent = result1_;
+  Register mantissa = result2_;
+#else
+  Register exponent = result2_;
+  Register mantissa = result1_;
+#endif
+  Label not_special;
+  // Convert from Smi to integer.
+  __ sra(source_, source_, kSmiTagSize);
+  // Move sign bit from source to destination.  This works because the sign bit
+  // in the exponent word of the double has the same position and polarity as
+  // the 2's complement sign bit in a Smi.
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
+  // Subtract from 0 if source was negative.
+  __ subu(at, zero_reg, source_);
+  __ movn(source_, at, exponent);
+
+  // We have -1, 0 or 1, which we treat specially. Register source_ contains
+  // absolute value: it is either equal to 1 (special case of -1 and 1),
+  // greater than 1 (not a special case) or less than 1 (special case of 0).
+  __ Branch(&not_special, gt, source_, Operand(1));
+
+  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+  static const uint32_t exponent_word_for_1 =
+      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
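+  // Illustrative check: with kExponentBias == 1023 and kExponentShift == 20
+  // this is 0x3FF00000, the exponent word of the IEEE-754 double 1.0
+  // (0x3FF0000000000000).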
+  // Safe to use 'at' as dest reg here.
+  __ Or(at, exponent, Operand(exponent_word_for_1));
+  __ movn(exponent, at, source_);  // Write exp when source not 0.
+  // 1, 0 and -1 all have 0 for the second word.
+  __ mov(mantissa, zero_reg);
+  __ Ret();
+
+  __ bind(&not_special);
+  // Count leading zeros.
+  // Gets the wrong answer for 0, but we already checked for that case above.
+  __ clz(zeros_, source_);
+  // Compute exponent and or it into the exponent register.
+  // We use mantissa as a scratch register here.
+  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
+  __ subu(mantissa, mantissa, zeros_);
+  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
+  __ Or(exponent, exponent, mantissa);
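+  // e.g. (illustrative) for source_ == 6 (0b110) clz gives 29, so the biased
+  // exponent is 31 + 1023 - 29 == 1025, i.e. an unbiased exponent of 2,
+  // matching 6 == 1.5 * 2^2.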
+
+  // Shift up the source chopping the top bit off.
+  __ Addu(zeros_, zeros_, Operand(1));
+  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+  __ sllv(source_, source_, zeros_);
+  // Compute lower part of fraction (last 12 bits).
+  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
+  // And the top (top 20 bits).
+  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
+  __ or_(exponent, exponent, source_);
+
+  __ Ret();
 }
 
 
-class FloatingPointHelper : public AllStatic {
- public:
-
-  enum Destination {
-    kFPURegisters,
-    kCoreRegisters
-  };
-
-
-  // Loads smis from a0 and a1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
-  // is floating point registers FPU must be supported. If core registers are
-  // requested when FPU is supported f12 and f14 will be scratched.
-  static void LoadSmis(MacroAssembler* masm,
-                       Destination destination,
-                       Register scratch1,
-                       Register scratch2);
-
-  // Loads objects from a0 and a1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
-  // is floating point registers FPU must be supported. If core registers are
-  // requested when FPU is supported f12 and f14 will still be scratched. If
-  // either a0 or a1 is not a number (not smi and not heap number object) the
-  // not_number label is jumped to with a0 and a1 intact.
-  static void LoadOperands(MacroAssembler* masm,
-                           FloatingPointHelper::Destination destination,
-                           Register heap_number_map,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* not_number);
-  // Loads the number from object into dst as a 32-bit integer if possible. If
-  // the object is not a 32-bit integer control continues at the label
-  // not_int32. If FPU is supported double_scratch is used but not scratch2.
-  static void LoadNumberAsInteger(MacroAssembler* masm,
-                                  Register object,
-                                  Register dst,
-                                  Register heap_number_map,
-                                  Register scratch1,
-                                  Register scratch2,
-                                  FPURegister double_scratch,
-                                  Label* not_int32);
- private:
-  static void LoadNumber(MacroAssembler* masm,
-                         FloatingPointHelper::Destination destination,
-                         Register object,
-                         FPURegister dst,
-                         Register dst1,
-                         Register dst2,
-                         Register heap_number_map,
-                         Register scratch1,
-                         Register scratch2,
-                         Label* not_number);
-};
-
-
 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
                                    Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(scratch1, a0, kSmiTagSize);
+    __ mtc1(scratch1, f14);
+    __ cvt_d_w(f14, f14);
+    __ sra(scratch1, a1, kSmiTagSize);
+    __ mtc1(scratch1, f12);
+    __ cvt_d_w(f12, f12);
+    if (destination == kCoreRegisters) {
+      __ Move(a2, a3, f14);
+      __ Move(a0, a1, f12);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write Smi from a0 to a3 and a2 in double format.
+    __ mov(scratch1, a0);
+    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
+    __ push(ra);
+    __ Call(stub1.GetCode());
+    // Write Smi from a1 to a1 and a0 in double format.
+    __ mov(scratch1, a1);
+    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
+    __ Call(stub2.GetCode());
+    __ pop(ra);
+  }
 }
 
 
@@ -183,7 +407,14 @@
     Register scratch1,
     Register scratch2,
     Label* slow) {
-  UNIMPLEMENTED_MIPS();
+
+  // Load right operand (a0) to f14 or a2/a3.
+  LoadNumber(masm, destination,
+             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+  // Load left operand (a1) to f12 or a0/a1.
+  LoadNumber(masm, destination,
+             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
 }
 
 
@@ -197,30 +428,989 @@
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
-  UNIMPLEMENTED_MIPS();
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+
+  Label is_smi, done;
+
+  __ JumpIfSmi(object, &is_smi);
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+  // Handle loading a double from a heap number.
+  if (CpuFeatures::IsSupported(FPU) &&
+      destination == kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double from tagged HeapNumber to double register.
+
+    // ARM uses a workaround here because of the unaligned HeapNumber
+    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
+    // point in generating even more instructions.
+    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Load the double from heap number to dst1 and dst2 in double format.
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
+    __ lw(dst2, FieldMemOperand(object,
+        HeapNumber::kValueOffset + kPointerSize));
+  }
+  __ Branch(&done);
+
+  // Handle loading a double from a smi.
+  __ bind(&is_smi);
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Convert smi to double using FPU instructions.
+    __ SmiUntag(scratch1, object);
+    __ mtc1(scratch1, dst);
+    __ cvt_d_w(dst, dst);
+    if (destination == kCoreRegisters) {
+      // Load the converted smi to dst1 and dst2 in double format.
+      __ Move(dst1, dst2, dst);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write smi to dst1 and dst2 double format.
+    __ mov(scratch1, object);
+    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+    __ push(ra);
+    __ Call(stub.GetCode());
+    __ pop(ra);
+  }
+
+  __ bind(&done);
 }
 
 
-void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
-                                              Register object,
-                                              Register dst,
-                                              Register heap_number_map,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              FPURegister double_scratch,
-                                              Label* not_int32) {
-  UNIMPLEMENTED_MIPS();
+void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
+                                               Register object,
+                                               Register dst,
+                                               Register heap_number_map,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Register scratch3,
+                                               FPURegister double_scratch,
+                                               Label* not_number) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  Label is_smi;
+  Label done;
+  Label not_in_int32_range;
+
+  __ JumpIfSmi(object, &is_smi);
+  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+  __ ConvertToInt32(object,
+                    dst,
+                    scratch1,
+                    scratch2,
+                    double_scratch,
+                    &not_in_int32_range);
+  __ jmp(&done);
+
+  __ bind(&not_in_int32_range);
+  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+  __ EmitOutOfInt32RangeTruncate(dst,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3);
+
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+                                             Register int_scratch,
+                                             Destination destination,
+                                             FPURegister double_dst,
+                                             Register dst1,
+                                             Register dst2,
+                                             Register scratch2,
+                                             FPURegister single_scratch) {
+  ASSERT(!int_scratch.is(scratch2));
+  ASSERT(!int_scratch.is(dst1));
+  ASSERT(!int_scratch.is(dst2));
+
+  Label done;
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(int_scratch, single_scratch);
+    __ cvt_d_w(double_dst, single_scratch);
+    if (destination == kCoreRegisters) {
+      __ Move(dst1, dst2, double_dst);
+    }
+  } else {
+    Label fewer_than_20_useful_bits;
+    // Expected output:
+    // |         dst2            |         dst1            |
+    // | s |   exp   |              mantissa               |
+
+    // Check for zero.
+    __ mov(dst2, int_scratch);
+    __ mov(dst1, int_scratch);
+    __ Branch(&done, eq, int_scratch, Operand(zero_reg));
+
+    // Preload the sign of the value.
+    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+    // Get the absolute value of the object (as an unsigned integer).
+    Label skip_sub;
+    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+    __ Subu(int_scratch, zero_reg, int_scratch);
+    __ bind(&skip_sub);
+
+    // Get mantissa[51:20].
+
+    // Get the position of the first set bit.
+    __ clz(dst1, int_scratch);
+    __ li(scratch2, 31);
+    __ Subu(dst1, scratch2, dst1);
+
+    // Set the exponent.
+    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+    __ Ins(dst2, scratch2,
+        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+    // Clear the topmost set bit (it becomes the implicit 1 of the mantissa).
+    __ li(scratch2, Operand(1));
+    __ sllv(scratch2, scratch2, dst1);
+    __ li(at, -1);
+    __ Xor(scratch2, scratch2, at);
+    __ And(int_scratch, int_scratch, scratch2);
+
+    // Get the number of bits to set in the lower part of the mantissa.
+    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
+    // Set the higher 20 bits of the mantissa.
+    __ srlv(at, int_scratch, scratch2);
+    __ or_(dst2, dst2, at);
+    __ li(at, 32);
+    __ subu(scratch2, at, scratch2);
+    __ sllv(dst1, int_scratch, scratch2);
+    __ Branch(&done);
+
+    __ bind(&fewer_than_20_useful_bits);
+    __ li(at, HeapNumber::kMantissaBitsInTopWord);
+    __ subu(scratch2, at, dst1);
+    __ sllv(scratch2, int_scratch, scratch2);
+    __ Or(dst2, dst2, scratch2);
+    // Set dst1 to 0.
+    __ mov(dst1, zero_reg);
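+    // Worked example (illustrative): for int_scratch == 6, clz == 29, so the
+    // top set bit is at position 2 and the biased exponent is 1025 (0x401).
+    // Clearing that bit leaves 2, which lands in the upper mantissa bits,
+    // giving dst2 == 0x40180000 and dst1 == 0, i.e. the double 6.0
+    // (0x4018000000000000).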
+  }
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+                                                  Register object,
+                                                  Destination destination,
+                                                  FPURegister double_dst,
+                                                  Register dst1,
+                                                  Register dst2,
+                                                  Register heap_number_map,
+                                                  Register scratch1,
+                                                  Register scratch2,
+                                                  FPURegister single_scratch,
+                                                  Label* not_int32) {
+  ASSERT(!scratch1.is(object) && !scratch2.is(object));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!heap_number_map.is(object) &&
+         !heap_number_map.is(scratch1) &&
+         !heap_number_map.is(scratch2));
+
+  Label done, obj_is_not_smi;
+
+  __ JumpIfNotSmi(object, &obj_is_not_smi);
+  __ SmiUntag(scratch1, object);
+  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+                     scratch2, single_scratch);
+  __ Branch(&done);
+
+  __ bind(&obj_is_not_smi);
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Load the number.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double value.
+    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way so right
+    // now it makes a lot more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(single_scratch, double_dst);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion or exception.
+    __ And(scratch2, scratch2, kFCSRFlagMask);
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+
+    if (destination == kCoreRegisters) {
+      __ Move(dst1, dst2, double_dst);
+    }
+
+  } else {
+    ASSERT(!scratch1.is(object) && !scratch2.is(object));
+    // Load the double value in the destination registers.
+    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+    // Check for 0 and -0.
+    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+    __ Or(scratch1, scratch1, Operand(dst2));
+    __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+    // Check that the value can be exactly represented by a 32-bit integer.
+    // Jump to not_int32 if that's not the case.
+    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+    // dst1 and dst2 were trashed. Reload the double value.
+    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+                                            Register object,
+                                            Register dst,
+                                            Register heap_number_map,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            FPURegister double_scratch,
+                                            Label* not_int32) {
+  ASSERT(!dst.is(object));
+  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+  ASSERT(!scratch1.is(scratch2) &&
+         !scratch1.is(scratch3) &&
+         !scratch2.is(scratch3));
+
+  Label done;
+
+  // Untag the object into the destination register.
+  __ SmiUntag(dst, object);
+  // Just return if the object is a smi.
+  __ JumpIfSmi(object, &done);
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Object is a heap number.
+  // Convert the floating point value to a 32-bit integer.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double value.
+    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way so right
+    // now it makes a lot more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(double_scratch, double_scratch);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion or exception.
+    __ And(scratch2, scratch2, kFCSRFlagMask);
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    // Get the result in the destination register.
+    __ mfc1(dst, double_scratch);
+
+  } else {
+    // Load the double value in the destination registers.
+    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+    // Check for 0 and -0.
+    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
+    __ Or(dst, scratch2, Operand(dst));
+    __ Branch(&done, eq, dst, Operand(zero_reg));
+
+    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+    // Registers state after DoubleIs32BitInteger.
+    // dst: mantissa[51:20].
+    // scratch2: 1
+
+    // Shift back the higher bits of the mantissa.
+    __ srlv(dst, dst, scratch3);
+    // Set the implicit first bit.
+    __ li(at, 32);
+    __ subu(scratch3, at, scratch3);
+    __ sllv(scratch2, scratch2, scratch3);
+    __ Or(dst, dst, scratch2);
+    // Set the sign.
+    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    Label skip_sub;
+    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
+    __ Subu(dst, zero_reg, dst);
+    __ bind(&skip_sub);
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+                                               Register src1,
+                                               Register src2,
+                                               Register dst,
+                                               Register scratch,
+                                               Label* not_int32) {
+  // Get exponent alone in scratch.
+  __ Ext(scratch,
+         src1,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // Subtract the bias from the exponent.
+  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
+
+  // src1: higher (exponent) part of the double value.
+  // src2: lower (mantissa) part of the double value.
+  // scratch: unbiased exponent.
+
+  // Fast cases. Check for obvious non 32-bit integer values.
+  // Negative exponent cannot yield 32-bit integers.
+  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
+  // Exponent greater than 31 cannot yield 32-bit integers.
+  // Also, a positive value with an exponent equal to 31 is outside of the
+  // signed 32-bit integer range.
+  // Another way to put it is that if (exponent - signbit) > 30 then the
+  // number cannot be represented as an int32.
+  Register tmp = dst;
+  __ srl(at, src1, 31);
+  __ subu(tmp, scratch, at);
+  __ Branch(not_int32, gt, tmp, Operand(30));
+  // - Bits [21:0] in the mantissa are not null.
+  __ And(tmp, src2, 0x3fffff);
+  __ Branch(not_int32, ne, tmp, Operand(zero_reg));
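+  // Illustrative example for the exponent check above: 2^31 has unbiased
+  // exponent 31 and sign bit 0, so (exponent - signbit) == 31 > 30 and it is
+  // rejected, while -2^31 gives 31 - 1 == 30 and correctly passes (it is a
+  // valid int32).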
+
+  // Otherwise the exponent needs to be big enough to shift left all the
+  // non-zero bits. So we need the (30 - exponent) last bits of the 31 higher
+  // bits of the mantissa to be null. Because bits [21:0] are null, we can
+  // check instead that the (32 - exponent) last bits of the 32 higher bits of
+  // the mantissa are null.
+
+  // Get the 32 higher bits of the mantissa in dst.
+  __ Ext(dst,
+         src2,
+         HeapNumber::kMantissaBitsInTopWord,
+         32 - HeapNumber::kMantissaBitsInTopWord);
+  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+  __ or_(dst, dst, at);
+
+  // Create the mask and test the lower bits (of the higher bits).
+  __ li(at, 32);
+  __ subu(scratch, at, scratch);
+  __ li(src2, 1);
+  __ sllv(src1, src2, scratch);
+  __ Subu(src1, src1, Operand(1));
+  __ And(src1, dst, src1);
+  __ Branch(not_int32, ne, src1, Operand(zero_reg));
+}
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+    MacroAssembler* masm,
+    Token::Value op,
+    Register heap_number_result,
+    Register scratch) {
+  // Using core registers:
+  // a0: Left value (least significant part of mantissa).
+  // a1: Left value (sign, exponent, top of mantissa).
+  // a2: Right value (least significant part of mantissa).
+  // a3: Right value (sign, exponent, top of mantissa).
+
+  // Assert that heap_number_result is saved.
+  // We currently always use s0 to pass it.
+  ASSERT(heap_number_result.is(s0));
+
+  // Push the current return address before the C call.
+  __ push(ra);
+  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
+  if (!IsMipsSoftFloatABI) {
+    CpuFeatures::Scope scope(FPU);
+    // We are not using MIPS FPU instructions, and parameters for the runtime
+    // function call are prepared in a0-a3 registers, but the function we are
+    // calling is compiled with the hard-float flag and expects the hard-float
+    // ABI (parameters in f12/f14 registers). We need to copy parameters from
+    // a0-a3 registers to the f12/f14 register pairs.
+    __ Move(f12, a0, a1);
+    __ Move(f14, a2, a3);
+  }
+  // Call C routine that may not cause GC or other trouble.
+  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+                   4);
+  // Store answer in the overwritable heap number.
+  if (!IsMipsSoftFloatABI) {
+    CpuFeatures::Scope scope(FPU);
+    // Double returned in register f0.
+    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  } else {
+    // Double returned in registers v0 and v1.
+    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
+    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
+  }
+  // Place heap_number_result in v0 and return to the pushed return address.
+  __ mov(v0, heap_number_result);
+  __ pop(ra);
+  __ Ret();
 }
 
 
 // See comment for class, this does NOT work for int32's that are in Smi range.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label max_negative_int;
+  // the_int_ has the answer which is a signed int32 but not a Smi.
+  // We test for the special value that has a different exponent.
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  // Test sign, and save for later conditionals.
+  __ And(sign_, the_int_, Operand(0x80000000u));
+  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+
+  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+  uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
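+  // Illustrative: on 32-bit V8 a smi holds 31 bits, so a non-smi int32 has
+  // magnitude >= 2^30; e.g. 2^30 as a double is 0x41D0000000000000
+  // (biased exponent 1023 + 30 == 1053 == 0x41D).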
+  __ li(scratch_, Operand(non_smi_exponent));
+  // Set the sign bit in scratch_ if the value was negative.
+  __ or_(scratch_, scratch_, sign_);
+  // Subtract from 0 if the value was negative.
+  __ subu(at, zero_reg, the_int_);
+  __ movn(the_int_, at, sign_);
+  // We should be masking the implicit first digit of the mantissa away here,
+  // but it just ends up combining harmlessly with the last digit of the
+  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
+  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
+  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ srl(at, the_int_, shift_distance);
+  __ or_(scratch_, scratch_, at);
+  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kExponentOffset));
+  __ sll(scratch_, the_int_, 32 - shift_distance);
+  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kMantissaOffset));
+  __ Ret();
+
+  __ bind(&max_negative_int);
+  // The max negative int32 is stored as a positive number in the mantissa of
+  // a double because it uses a sign bit instead of using two's complement.
+  // The actual mantissa bits stored are all 0 because the implicit most
+  // significant 1 bit is not stored.
+  non_smi_exponent += 1 << HeapNumber::kExponentShift;
+  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
+  __ sw(scratch_,
+        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+  __ mov(scratch_, zero_reg);
+  __ sw(scratch_,
+        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+  __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc,
+                                          bool never_nan_nan) {
+  Label not_identical;
+  Label heap_number, return_equal;
+  Register exp_mask_reg = t5;
+
+  __ Branch(&not_identical, ne, a0, Operand(a1));
+
+  // The two objects are identical. If we know that one of them isn't NaN then
+  // we now know they test equal.
+  if (cc != eq || !never_nan_nan) {
+    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+    // so we do the second best thing - test it ourselves.
+    // They are both equal and they are not both Smis so both of them are not
+    // Smis. If it's not a heap number, then return equal.
+    if (cc == less || cc == greater) {
+      __ GetObjectType(a0, t4, t4);
+      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+    } else {
+      __ GetObjectType(a0, t4, t4);
+      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+      // Comparing JS objects with <=, >= is complicated.
+      if (cc != eq) {
+        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+        // Normally here we fall through to return_equal, but undefined is
+        // special: (undefined == undefined) == true, but
+        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
+        if (cc == less_equal || cc == greater_equal) {
+          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+          __ Branch(&return_equal, ne, a0, Operand(t2));
+          if (cc == le) {
+            // undefined <= undefined should fail.
+            __ li(v0, Operand(GREATER));
+          } else {
+            // undefined >= undefined should fail.
+            __ li(v0, Operand(LESS));
+          }
+          __ Ret();
+        }
+      }
+    }
+  }
+
+  __ bind(&return_equal);
+  if (cc == less) {
+    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
+  } else if (cc == greater) {
+    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
+  } else {
+    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
+  }
+  __ Ret();
+
+  if (cc != eq || !never_nan_nan) {
+    // For less and greater we don't have to check for NaN since the result of
+    // x < x is false regardless.  For the others here is some code to check
+    // for NaN.
+    if (cc != lt && cc != gt) {
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if it's
+      // not NaN.
+
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
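+      // e.g. (illustrative) the quiet NaN 0x7FF8000000000000 has all exponent
+      // bits set and a non-zero mantissa, whereas +Infinity
+      // (0x7FF0000000000000) has a zero mantissa and is equal to itself.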
+      // Read top bits of double representation (second word of value).
+      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+      // Test that exponent bits are all set.
+      __ And(t3, t2, Operand(exp_mask_reg));
+      // If all bits not set (ne cond), then not a NaN, objects are equal.
+      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+      // Shift out flag and all exponent bits, retaining only mantissa.
+      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+      // Or with all low-bits of mantissa.
+      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+      __ Or(v0, t3, Operand(t2));
+      // For equal we already have the right value in v0:  Return zero (equal)
+      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+      // not (it's a NaN).  For <= and >= we need to load v0 with the failing
+      // value if it's a NaN.
+      if (cc != eq) {
+        // All-zero means Infinity means equal.
+        __ Ret(eq, v0, Operand(zero_reg));
+        if (cc == le) {
+          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
+        } else {
+          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
+        }
+      }
+      __ Ret();
+    }
+    // No fall through here.
+  }
+
+  __ bind(&not_identical);
+}
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* both_loaded_as_doubles,
+                                    Label* slow,
+                                    bool strict) {
+  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  Label lhs_is_smi;
+  __ And(t0, lhs, Operand(kSmiTagMask));
+  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+  // Rhs is a Smi.
+  // Check whether the non-smi is a heap number.
+  __ GetObjectType(lhs, t4, t4);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal (lhs is already not zero).
+    __ mov(v0, lhs);
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+  }
+
+  // Rhs is a smi, lhs is a number.
+  // Convert smi rhs to double.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(at, rhs, kSmiTagSize);
+    __ mtc1(at, f14);
+    __ cvt_d_w(f14, f14);
+    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  } else {
+    // Load lhs to a double in a2, a3.
+    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
+    __ mov(t6, rhs);
+    ConvertToDoubleStub stub1(a1, a0, t6, t5);
+    __ push(ra);
+    __ Call(stub1.GetCode());
+
+    __ pop(ra);
+  }
+
+  // We now have both loaded as doubles.
+  __ jmp(both_loaded_as_doubles);
+
+  __ bind(&lhs_is_smi);
+  // Lhs is a Smi.  Check whether the non-smi is a heap number.
+  __ GetObjectType(rhs, t4, t4);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal.
+    __ li(v0, Operand(1));
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+  }
+
+  // Lhs is a smi, rhs is a number.
+  // Convert smi lhs to double.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(at, lhs, kSmiTagSize);
+    __ mtc1(at, f12);
+    __ cvt_d_w(f12, f12);
+    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  } else {
+    // Convert lhs to a double format. t5 is scratch.
+    __ mov(t6, lhs);
+    ConvertToDoubleStub stub2(a3, a2, t6, t5);
+    __ push(ra);
+    __ Call(stub2.GetCode());
+    __ pop(ra);
+    // Load rhs to a double in a1, a0.
+    if (rhs.is(a0)) {
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+    } else {
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+    }
+  }
+  // Fall through to both_loaded_as_doubles.
 }
 
 
 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
-  UNIMPLEMENTED_MIPS();
+  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Lhs and rhs are already loaded to f12 and f14 register pairs.
+    __ Move(t0, t1, f14);
+    __ Move(t2, t3, f12);
+  } else {
+    // Lhs and rhs are already loaded to GP registers.
+    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
+    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
+    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
+    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
+  }
+  Register rhs_exponent = exp_first ? t0 : t1;
+  Register lhs_exponent = exp_first ? t2 : t3;
+  Register rhs_mantissa = exp_first ? t1 : t0;
+  Register lhs_mantissa = exp_first ? t3 : t2;
+  Label one_is_nan, neither_is_nan;
+  Label lhs_not_nan_exp_mask_is_loaded;
+
+  Register exp_mask_reg = t4;
+  __ li(exp_mask_reg, HeapNumber::kExponentMask);
+  __ and_(t5, lhs_exponent, exp_mask_reg);
+  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
+
+  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
+
+  __ li(exp_mask_reg, HeapNumber::kExponentMask);
+  __ bind(&lhs_not_nan_exp_mask_is_loaded);
+  __ and_(t5, rhs_exponent, exp_mask_reg);
+
+  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
+
+  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
+
+  __ bind(&one_is_nan);
+  // NaN comparisons always fail.
+  // Load whatever we need in v0 to make the comparison fail.
+  if (cc == lt || cc == le) {
+    __ li(v0, Operand(GREATER));
+  } else {
+    __ li(v0, Operand(LESS));
+  }
+  __ Ret();  // Return.
+
+  __ bind(&neither_is_nan);
+}
+
+
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+  // f12 and f14 have the two doubles.  Neither is a NaN.
+  // Call a native function to do a comparison between two non-NaNs.
+  // Call C routine that may not cause GC or other trouble.
+  // We handle the call and return manually because we need the argument
+  // slots to be freed.
+
+  Label return_result_not_equal, return_result_equal;
+  if (cc == eq) {
+    // Doubles are not equal unless they have the same bit pattern.
+    // Exception: 0 and -0.
+    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Lhs and rhs are already loaded to f12 and f14 register pairs.
+      __ Move(t0, t1, f14);
+      __ Move(t2, t3, f12);
+    } else {
+      // Lhs and rhs are already loaded to GP registers.
+      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
+      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
+      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
+      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
+    }
+    Register rhs_exponent = exp_first ? t0 : t1;
+    Register lhs_exponent = exp_first ? t2 : t3;
+    Register rhs_mantissa = exp_first ? t1 : t0;
+    Register lhs_mantissa = exp_first ? t3 : t2;
+
+    __ xor_(v0, rhs_mantissa, lhs_mantissa);
+    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
+
+    __ subu(v0, rhs_exponent, lhs_exponent);
+    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
+    // 0, -0 case.
+    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
+    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
+    __ or_(t4, rhs_exponent, lhs_exponent);
+    __ or_(t4, t4, rhs_mantissa);
+
+    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
+
+    __ bind(&return_result_equal);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+  }
+
+  __ bind(&return_result_not_equal);
+
+  if (!CpuFeatures::IsSupported(FPU)) {
+    __ push(ra);
+    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    if (!IsMipsSoftFloatABI) {
+      // We are not using MIPS FPU instructions, and parameters for the runtime
+      // function call are prepared in a0-a3 registers, but the function we are
+      // calling is compiled with the hard-float flag and expects the hard-float
+      // ABI (parameters in f12/f14 registers). We need to copy parameters from
+      // a0-a3 registers to the f12/f14 register pairs.
+      __ Move(f12, a0, a1);
+      __ Move(f14, a2, a3);
+    }
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ pop(ra);  // Because this function returns int, result is in v0.
+    __ Ret();
+  } else {
+    CpuFeatures::Scope scope(FPU);
+    Label equal, less_than;
+    __ c(EQ, D, f12, f14);
+    __ bc1t(&equal);
+    __ nop();
+
+    __ c(OLT, D, f12, f14);
+    __ bc1t(&less_than);
+    __ nop();
+
+    // Not equal, not less, not NaN, must be greater.
+    __ li(v0, Operand(GREATER));
+    __ Ret();
+
+    __ bind(&equal);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+
+    __ bind(&less_than);
+    __ li(v0, Operand(LESS));
+    __ Ret();
+  }
+}
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs) {
+  // If either operand is a JS object or an oddball value, then they are
+  // not equal since their pointers are different.
+  // There is no test for undetectability in strict equality.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  Label first_non_object;
+  // Get the type of the first operand into a2 and compare it with
+  // FIRST_SPEC_OBJECT_TYPE.
+  __ GetObjectType(lhs, a2, a2);
+  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Return non-zero.
+  Label return_not_equal;
+  __ bind(&return_not_equal);
+  __ li(v0, Operand(1));
+  __ Ret();
+
+  __ bind(&first_non_object);
+  // Check for oddballs: true, false, null, undefined.
+  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+  __ GetObjectType(rhs, a3, a3);
+  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Check for oddballs: true, false, null, undefined.
+  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+  // Now that we have the types we might as well check for symbol-symbol.
+  // Ensure that no non-strings have the symbol bit set.
+  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(t2, a2, Operand(a3));
+  __ And(t0, t2, Operand(kIsSymbolMask));
+  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+                                       Register lhs,
+                                       Register rhs,
+                                       Label* both_loaded_as_doubles,
+                                       Label* not_heap_numbers,
+                                       Label* slow) {
+  __ GetObjectType(lhs, a3, a2);
+  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  // If first was a heap number & second wasn't, go to slow case.
+  __ Branch(slow, ne, a3, Operand(a2));
+
+  // Both are heap numbers. Load them up then jump to the code we have
+  // for that.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  } else {
+    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+    if (rhs.is(a0)) {
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+    } else {
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+    }
+  }
+  __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+                                         Register lhs,
+                                         Register rhs,
+                                         Label* possible_strings,
+                                         Label* not_both_strings) {
+  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  // a2 is object type of lhs.
+  // Ensure that no non-strings have the symbol bit set.
+  Label object_test;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(at, a2, Operand(kIsNotStringMask));
+  __ Branch(&object_test, ne, at, Operand(zero_reg));
+  __ And(at, a2, Operand(kIsSymbolMask));
+  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+  __ GetObjectType(rhs, a3, a3);
+  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+  __ And(at, a3, Operand(kIsSymbolMask));
+  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+
+  // Both are symbols. We already checked they weren't the same pointer
+  // so they are not equal.
+  __ li(v0, Operand(1));   // Non-zero indicates not equal.
+  __ Ret();
+
+  __ bind(&object_test);
+  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ GetObjectType(rhs, a2, a3);
+  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // If both objects are undetectable, they are equal.  Otherwise, they
+  // are not equal, since they are different objects and an object is not
+  // equal to undefined.
+  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+  __ and_(a0, a2, a3);
+  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
+  __ Ret();
 }
 
 
@@ -232,12 +1422,109 @@
                                                          Register scratch3,
                                                          bool object_is_smi,
                                                          Label* not_found) {
-  UNIMPLEMENTED_MIPS();
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide length by two (length is a smi).
+  __ sra(mask, mask, kSmiTagSize + 1);
+  __ Addu(mask, mask, -1);  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Isolate* isolate = masm->isolate();
+  Label is_smi;
+  Label load_result_from_cache;
+  if (!object_is_smi) {
+    __ JumpIfSmi(object, &is_smi);
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      __ CheckMap(object,
+                  scratch1,
+                  Heap::kHeapNumberMapRootIndex,
+                  not_found,
+                  DONT_DO_SMI_CHECK);
+
+      STATIC_ASSERT(8 == kDoubleSize);
+      __ Addu(scratch1,
+              object,
+              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+      __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+      __ lw(scratch1, MemOperand(scratch1, 0));
+      __ Xor(scratch1, scratch1, Operand(scratch2));
+      __ And(scratch1, scratch1, Operand(mask));
+
+      // Calculate address of entry in string cache: each entry consists
+      // of two pointer sized fields.
+      __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+      __ Addu(scratch1, number_string_cache, scratch1);
+
+      Register probe = mask;
+      __ lw(probe,
+             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+      __ JumpIfSmi(probe, not_found);
+      __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+      __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+      __ c(EQ, D, f12, f14);
+      __ bc1t(&load_result_from_cache);
+      __ nop();   // bc1t() requires explicit fill of branch delay slot.
+      __ Branch(not_found);
+    } else {
+      // Note that there is no cache check for the non-FPU case, even though
+      // it seems there could be. This may be a tiny missed optimization for
+      // non-FPU cores.
+      __ Branch(not_found);
+    }
+  }
+
+  __ bind(&is_smi);
+  Register scratch = scratch1;
+  __ sra(scratch, object, 1);   // Shift away the tag.
+  __ And(scratch, mask, Operand(scratch));
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
+  __ Addu(scratch, number_string_cache, scratch);
+
+  // Check if the entry is the smi we are looking for.
+  Register probe = mask;
+  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  __ Branch(not_found, ne, object, Operand(probe));
+
+  // Get the result from the cache.
+  __ bind(&load_result_from_cache);
+  __ lw(result,
+         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+  __ IncrementCounter(isolate->counters()->number_to_string_native(),
+                      1,
+                      scratch1,
+                      scratch2);
 }
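
For readers following the hashing comments above, here is a rough C++ model of the double-valued lookup path (the cache size, struct layout and names are assumptions made for illustration; smis simply use their value as the hash):

    #include <cstdint>
    #include <cstring>

    struct NumberStringCache {
      static constexpr uint32_t kEntries = 64;  // must be a power of two
      struct Entry { double number; const char* string; } entries[kEntries];

      // Hash for doubles: xor of the upper and lower 32-bit words.
      static uint32_t Hash(double value) {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
      }

      // The mask is the entry count minus one (length / 2 - 1 in the stub,
      // since each entry occupies two array slots).
      const char* Lookup(double value) const {
        const Entry& entry = entries[Hash(value) & (kEntries - 1)];
        return entry.number == value ? entry.string : nullptr;
      }
    };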
 
 
 void NumberToStringStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label runtime;
+
+  __ lw(a1, MemOperand(sp, 0));
+
+  // Generate code to lookup number in the number string cache.
+  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+  __ Addu(sp, sp, Operand(1 * kPointerSize));
+  __ Ret();
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
 }
 
 
@@ -245,105 +1532,1000 @@
 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
 // of the comparison.
 void CompareStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label slow;  // Call builtin.
+  Label not_smis, both_loaded_as_doubles;
+
+
+  if (include_smi_compare_) {
+    Label not_two_smis, smi_done;
+    __ Or(a2, a1, a0);
+    __ JumpIfNotSmi(a2, &not_two_smis);
+    __ sra(a1, a1, 1);
+    __ sra(a0, a0, 1);
+    __ Subu(v0, a1, a0);
+    __ Ret();
+    __ bind(&not_two_smis);
+  } else if (FLAG_debug_code) {
+    __ Or(a2, a1, a0);
+    __ And(a2, a2, kSmiTagMask);
+    __ Assert(ne, "CompareStub: unexpected smi operands.",
+        a2, Operand(zero_reg));
+  }
+
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Handle the case where the objects are identical.  Either returns the answer
+  // or goes to slow.  Only falls through if the objects were not identical.
+  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+  // If either is a Smi (we know that not both are), then they can only
+  // be strictly equal if the other is a HeapNumber.
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  __ And(t2, lhs_, Operand(rhs_));
+  __ JumpIfNotSmi(t2, &not_smis, t0);
+  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+  // 1) Return the answer.
+  // 2) Go to slow.
+  // 3) Fall through to both_loaded_as_doubles.
+  // 4) Jump to rhs_not_nan.
+  // In cases 3 and 4 we have found out we were dealing with a number-number
+  // comparison and the numbers have been loaded into f12 and f14 as doubles,
+  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
+  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+                          &both_loaded_as_doubles, &slow, strict_);
+
+  __ bind(&both_loaded_as_doubles);
+  // f12, f14 are the double representations of the left hand side
+  // and the right hand side if we have FPU. Otherwise a2, a3 represent
+  // left hand side and a0, a1 represent right hand side.
+
+  Isolate* isolate = masm->isolate();
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    Label nan;
+    __ li(t0, Operand(LESS));
+    __ li(t1, Operand(GREATER));
+    __ li(t2, Operand(EQUAL));
+
+    // Check if either rhs or lhs is NaN.
+    __ c(UN, D, f12, f14);
+    __ bc1t(&nan);
+    __ nop();
+
+    // Check if LESS condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(OLT, D, f12, f14);
+    __ movt(v0, t0);
+    // Use the previous check to conditionally store the opposite condition
+    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
+    // next check.
+    __ movf(v0, t1);
+    // Check if EQUAL condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(EQ, D, f12, f14);
+    __ movt(v0, t2);
+
+    __ Ret();
+
+    __ bind(&nan);
+    // NaN comparisons always fail.
+    // Load whatever we need in v0 to make the comparison fail.
+    if (cc_ == lt || cc_ == le) {
+      __ li(v0, Operand(GREATER));
+    } else {
+      __ li(v0, Operand(LESS));
+    }
+    __ Ret();
+  } else {
+    // Checks for NaN in the doubles we have loaded.  Can return the answer or
+    // fall through if neither is a NaN.  Also binds rhs_not_nan.
+    EmitNanCheck(masm, cc_);
+
+    // Compares two doubles that are not NaNs. Returns the answer.
+    // Never falls through.
+    EmitTwoNonNanDoubleComparison(masm, cc_);
+  }
+
+  __ bind(&not_smis);
+  // At this point we know we are dealing with two different objects,
+  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+  if (strict_) {
+    // This returns non-equal for some object types, or falls through if it
+    // was not lucky.
+    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+  }
+
+  Label check_for_symbols;
+  Label flat_string_check;
+  // Check for heap-number-heap-number comparison. Can jump to slow case,
+  // or load both doubles and jump to the code that handles
+  // that case. If the inputs are not doubles then jumps to check_for_symbols.
+  // In this case a2 will contain the type of lhs_.
+  EmitCheckForTwoHeapNumbers(masm,
+                             lhs_,
+                             rhs_,
+                             &both_loaded_as_doubles,
+                             &check_for_symbols,
+                             &flat_string_check);
+
+  __ bind(&check_for_symbols);
+  if (cc_ == eq && !strict_) {
+    // Returns an answer for two symbols or two detectable objects.
+    // Otherwise jumps to string case or not both strings case.
+    // Assumes that a2 is the type of lhs_ on entry.
+    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+  }
+
+  // Check for both being sequential ASCII strings, and inline if that is the
+  // case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+
+  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+  if (cc_ == eq) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+                                                     lhs_,
+                                                     rhs_,
+                                                     a2,
+                                                     a3,
+                                                     t0);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       lhs_,
+                                                       rhs_,
+                                                       a2,
+                                                       a3,
+                                                       t0,
+                                                       t1);
+  }
+  // Never falls through to here.
+
+  __ bind(&slow);
+  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+  // a1 (rhs) second.
+  __ Push(lhs_, rhs_);
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript native;
+  if (cc_ == eq) {
+    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    native = Builtins::COMPARE;
+    int ncr;  // NaN compare result.
+    if (cc_ == lt || cc_ == le) {
+      ncr = GREATER;
+    } else {
+      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
+      ncr = LESS;
+    }
+    __ li(a0, Operand(Smi::FromInt(ncr)));
+    __ push(a0);
+  }
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(native, JUMP_FUNCTION);
 }
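
The NaN branch above relies on a simple observation: when either operand is NaN, every ordered comparison must come out false, so the stub loads GREATER for lt/le and LESS for gt/ge. A minimal C++ sketch of that decision, assuming the usual LESS = -1, EQUAL = 0, GREATER = 1 encoding:

    #include <cassert>
    #include <cmath>

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };
    enum Cond { kLt, kLe, kGt, kGe };

    CompareResult Compare(double lhs, double rhs, Cond cc) {
      if (std::isnan(lhs) || std::isnan(rhs)) {
        // Pick whatever value makes the requested condition evaluate to false.
        return (cc == kLt || cc == kLe) ? GREATER : LESS;
      }
      if (lhs < rhs) return LESS;
      if (lhs > rhs) return GREATER;
      return EQUAL;
    }

    int main() {
      double nan = std::nan("");
      assert(Compare(nan, 1.0, kLt) == GREATER);  // "NaN < 1" must be false
      assert(Compare(nan, 1.0, kGe) == LESS);     // "NaN >= 1" must be false
    }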
 
 
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // This stub uses FPU instructions.
+  CpuFeatures::Scope scope(FPU);
+
+  Label false_result;
+  Label not_heap_number;
+  Register scratch0 = t5.is(tos_) ? t3 : t5;
+
+  // undefined -> false
+  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+  // Boolean -> its value
+  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the equal condition is satisfied.
+  __ Ret(eq, tos_, Operand(scratch0));
+
+  // Smis: 0 -> false, all others -> true
+  __ And(scratch0, tos_, tos_);
+  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
+  __ And(scratch0, tos_, Operand(kSmiTagMask));
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the not equal condition is satisfied.
+  __ Ret(eq, scratch0, Operand(zero_reg));
+
+  // 'null' -> false
+  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+  // HeapNumber => false if +0, -0, or NaN.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+
+  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+  __ fcmp(f12, 0.0, UEQ);
+
+  // "tos_" is a register, and contains a non zero value by default.
+  // Hence we only need to overwrite "tos_" with zero to return false for
+  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+  __ movt(tos_, zero_reg);
+  __ Ret();
+
+  __ bind(&not_heap_number);
+
+  // It can be an undetectable object.
+  // Undetectable => false.
+  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+
+  // JavaScript object => true.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Check for string.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
+
+  // String value => false iff empty, i.e., length is zero.
+  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+  // If length is zero, "tos_" contains zero ==> false.
+  // If length is not zero, "tos_" contains a non-zero value ==> true.
+  __ Ret();
+
+  // Return 0 in "tos_" for false.
+  __ bind(&false_result);
+  __ mov(tos_, zero_reg);
+  __ Ret();
 }
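
Two of the rules the stub implements above are easy to misread in assembly, so here is a short C++ sketch of just the heap-number and string cases (a simplification, assuming the value has already been classified):

    #include <cmath>
    #include <string>

    // HeapNumber: +0, -0 and NaN are falsy; every other double is truthy.
    bool HeapNumberToBoolean(double value) {
      return !(value == 0.0 || std::isnan(value));
    }

    // String: falsy exactly when its length is zero.
    bool StringToBoolean(const std::string& value) {
      return !value.empty();
    }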
 
 
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow).  We branch into this code (to the not_smi label) if
-// the operands were not both Smi.  The operands are in lhs and rhs.
-// To call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in a0 and a1 (for the
-// value in a1) and a2 and a3 (for the value in a0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
-                                    Label* not_smi,
-                                    Register lhs,
-                                    Register rhs,
-                                    const Builtins::JavaScript& builtin) {
-  UNIMPLEMENTED_MIPS();
+void UnaryOpStub::PrintName(StringStream* stream) {
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+  stream->Add("UnaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              UnaryOpIC::GetName(operand_type_));
 }
 
 
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value.  We truncate towards zero as required
-// by the ES spec.  If this is the case we do the bitwise op and see if the
-// result is a Smi.  If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
-// On exit the result is in v0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                                                Register lhs,
-                                                Register rhs) {
-  UNIMPLEMENTED_MIPS();
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
 }
 
 
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  // Argument is in a0 and v0 at this point, so we can overwrite a0.
+  __ li(a2, Operand(Smi::FromInt(op_)));
+  __ li(a1, Operand(Smi::FromInt(mode_)));
+  __ li(a0, Operand(Smi::FromInt(operand_type_)));
+  __ Push(v0, a2, a1, a0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
 }
 
 
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 
 
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
 }
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
-  return stub.GetCode();
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* slow) {
+  __ JumpIfNotSmi(a0, non_smi);
+
+  // The result of negating zero or the smallest negative smi is not a smi.
+  __ And(t0, a0, ~0x80000000);
+  __ Branch(slow, eq, t0, Operand(zero_reg));
+
+  // Return '0 - value'.
+  __ Subu(v0, zero_reg, a0);
+  __ Ret();
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                        Label* non_smi) {
+  __ JumpIfNotSmi(a0, non_smi);
+
+  // Flip bits and revert inverted smi-tag.
+  __ Neg(v0, a0);
+  __ And(v0, v0, ~kSmiTagMask);
+  __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+  // a0 is a heap number.  Get a new heap number in a1.
+  if (mode_ == UNARY_OVERWRITE) {
+    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+  } else {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(a1, v0);
+    __ pop(a0);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
+    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+    __ mov(v0, a1);
+  }
+  __ Ret();
+}
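
The UNARY_OVERWRITE path above negates the number by xor-ing the sign bit of the exponent word rather than doing any floating-point arithmetic. A standalone C++ sketch of the same idea (the stub applies HeapNumber::kSignMask to the exponent word; here the same bit is flipped through the full 64-bit pattern):

    #include <cstdint>
    #include <cstring>

    // Negate an IEEE-754 double by flipping its sign bit.
    double NegateByFlippingSignBit(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= 0x8000000000000000ull;
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }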
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+    MacroAssembler* masm,
+    Label* slow) {
+  Label impossible;
+
+  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+  // Convert the heap number in a0 to an untagged integer in a1.
+  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ Neg(a1, a1);
+  __ Addu(a2, a1, Operand(0x40000000));
+  __ Branch(&try_float, lt, a2, Operand(zero_reg));
+
+  // Tag the result as a smi and we're done.
+  __ SmiTag(v0, a1);
+  __ Ret();
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    // Allocate a new heap number without zapping v0, which we need if it fails.
+    __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(v0);  // Push the heap number, not the untagged int32.
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(a2, v0);  // Move the new heap number into a2.
+    // Get the heap number into v0, now that the new heap number is in a2.
+    __ pop(v0);
+    __ LeaveInternalFrame();
+
+    // Convert the heap number in v0 to an untagged integer in a1.
+    // This can't go slow-case because it's the same heap number we already
+    // converted once before.
+    __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
+    // Flip all the bits of the result (bitwise NOT via xor with -1).
+    __ Xor(a1, a1, -1);
+
+    __ bind(&heapnumber_allocated);
+    __ mov(v0, a2);  // Move newly allocated heap number to v0.
+  }
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(a1, f0);
+    __ cvt_d_w(f0, f0);
+    __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+    __ Ret();
+  } else {
+    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+    // have to set up a frame.
+    WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
+    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&impossible);
+  if (FLAG_debug_code) {
+    __ stop("Incorrect assumption in bit-not stub");
+  }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(
+    MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
+  __ push(a0);
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  __ Push(a1, a0);
+
+  __ li(a2, Operand(Smi::FromInt(MinorKey())));
+  __ li(a1, Operand(Smi::FromInt(op_)));
+  __ li(a0, Operand(Smi::FromInt(operands_type_)));
+  __ Push(a2, a1, a0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+                        masm->isolate()),
+      5,
+      1);
+}
+
+
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
     MacroAssembler* masm) {
   UNIMPLEMENTED();
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operands_type_) {
+    case BinaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case BinaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case BinaryOpIC::INT32:
+      GenerateInt32Stub(masm);
+      break;
+    case BinaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case BinaryOpIC::ODDBALL:
+      GenerateOddballStub(masm);
+      break;
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
+      GenerateStringStub(masm);
+      break;
+    case BinaryOpIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return name_;
+void BinaryOpStub::PrintName(StringStream* stream) {
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+  stream->Add("BinaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              BinaryOpIC::GetName(operands_type_));
 }
 
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
-    MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+  Register left = a1;
+  Register right = a0;
+
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+
+  ASSERT(right.is(a0));
+  STATIC_ASSERT(kSmiTag == 0);
+
+  Label not_smi_result;
+  switch (op_) {
+    case Token::ADD:
+      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+      __ RetOnNoOverflow(scratch1);
+      // No need to revert anything - right and left are intact.
+      break;
+    case Token::SUB:
+      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+      __ RetOnNoOverflow(scratch1);
+      // No need to revert anything - right and left are intact.
+      break;
+    case Token::MUL: {
+      // Remove tag from one of the operands. This way the multiplication result
+      // will be a smi if it fits the smi range.
+      __ SmiUntag(scratch1, right);
+      // Do multiplication.
+      // lo = lower 32 bits of scratch1 * left.
+      // hi = higher 32 bits of scratch1 * left.
+      __ Mult(left, scratch1);
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ mflo(scratch1);
+      __ mfhi(scratch2);
+      __ sra(scratch1, scratch1, 31);
+      __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
+      // Go slow on zero result to handle -0.
+      __ mflo(v0);
+      __ Ret(ne, v0, Operand(zero_reg));
+      // We need -0 if a negative number was multiplied by 0 to get 0.
+      // We know one of the operands was zero.
+      __ Addu(scratch2, right, left);
+      Label skip;
+      // ARM uses the 'pl' condition, which is 'ge'.
+      // Negating it results in 'lt'.
+      __ Branch(&skip, lt, scratch2, Operand(zero_reg));
+      ASSERT(Smi::FromInt(0) == 0);
+      __ mov(v0, zero_reg);
+      __ Ret();  // Return smi 0 if the non-zero one was positive.
+      __ bind(&skip);
+      // We fall through here if we multiplied a negative number by 0, because
+      // that means we should produce -0.
+      }
+      break;
+    case Token::DIV: {
+      Label done;
+      __ SmiUntag(scratch2, right);
+      __ SmiUntag(scratch1, left);
+      __ Div(scratch1, scratch2);
+      // A minor optimization: div may be calculated asynchronously, so we check
+      // for division by zero before getting the result.
+      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the divisor (right) is
+      // positive, otherwise it is a -0 case.
+      // Quotient is in 'lo', remainder is in 'hi'.
+      // Check for no remainder first.
+      __ mfhi(scratch1);
+      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+      __ mflo(scratch1);
+      __ Branch(&done, ne, scratch1, Operand(zero_reg));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ bind(&done);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      __ Ret();
+      }
+      break;
+    case Token::MOD: {
+      Label done;
+      __ SmiUntag(scratch2, right);
+      __ SmiUntag(scratch1, left);
+      __ Div(scratch1, scratch2);
+      // A minor optimization: div may be calculated asynchronously, so we check
+      // for division by 0 before calling mfhi.
+      // Check for zero on the right hand side.
+      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the dividend (left) is
+      // positive (or 0), otherwise it is a -0 case.
+      // Remainder is in 'hi'.
+      __ mfhi(scratch2);
+      __ Branch(&done, ne, scratch2, Operand(zero_reg));
+      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+      __ bind(&done);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch1, scratch2, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+      __ SmiTag(v0, scratch2);
+      __ Ret();
+      }
+      break;
+    case Token::BIT_OR:
+      __ Or(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ And(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ Xor(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::SAR:
+      // Remove tags from right operand.
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ srav(scratch1, left, scratch1);
+      // Smi tag result.
+      __ And(v0, scratch1, Operand(~kSmiTagMask));
+      __ Ret();
+      break;
+    case Token::SHR:
+      // Remove tags from operands. We can't do this on a 31 bit number
+      // because then the 0s get shifted into bit 30 instead of bit 31.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srlv(v0, scratch1, scratch2);
+      // Unsigned shift is not allowed to produce a negative number, so
+      // check the sign bit and the sign bit after Smi tagging.
+      __ And(scratch1, v0, Operand(0xc0000000));
+      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+      // Smi tag result.
+      __ SmiTag(v0);
+      __ Ret();
+      break;
+    case Token::SHL:
+      // Remove tags from operands.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ sllv(scratch1, scratch1, scratch2);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      __ Ret();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  __ bind(&not_smi_result);
 }
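
Two checks recur throughout the switch above: the MUL overflow test (the high word must equal the sign-extension of the low word) and the "+ 0x40000000" test that a 32-bit value fits in the 31-bit smi payload. A small C++ sketch of both, assuming nothing beyond 32-bit registers and a 31-bit smi range:

    #include <cstdint>

    // MUL: the 64-bit product fits in 32 bits iff the top 33 bits are
    // identical, i.e. the high word equals the sign-extension of the low word.
    bool ProductFitsInt32(int32_t left, int32_t right) {
      int64_t product = static_cast<int64_t>(left) * right;   // Mult -> hi/lo
      int32_t lo = static_cast<int32_t>(product);              // mflo
      int32_t hi = static_cast<int32_t>(product >> 32);        // mfhi
      return (lo < 0 ? -1 : 0) == hi;                          // sra by 31, compare
    }

    // DIV/MOD/SHL/SHR: a value is a valid smi payload iff it lies in
    // [-2^30, 2^30); adding 0x40000000 makes exactly that range non-negative.
    bool FitsInSmiRange(int32_t value) {
      uint32_t shifted = static_cast<uint32_t>(value) + 0x40000000u;
      return static_cast<int32_t>(shifted) >= 0;
    }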
 
 
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
-                                                    bool smi_operands,
-                                                    Label* not_numbers,
-                                                    Label* gc_required) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+                                       bool smi_operands,
+                                       Label* not_numbers,
+                                       Label* gc_required) {
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  Register scratch3 = t0;
+
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands && FLAG_debug_code) {
+    __ AbortIfNotSmi(left);
+    __ AbortIfNotSmi(right);
+  }
+
+  Register heap_number_map = t2;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
+      // depending on whether FPU is available or not.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(FPU) &&
+          op_ != Token::MOD ?
+              FloatingPointHelper::kFPURegisters :
+              FloatingPointHelper::kCoreRegisters;
+
+      // Allocate new heap number for result.
+      Register result = s0;
+      GenerateHeapResultAllocation(
+          masm, result, heap_number_map, scratch1, scratch2, gc_required);
+
+      // Load the operands.
+      if (smi_operands) {
+        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+      } else {
+        FloatingPointHelper::LoadOperands(masm,
+                                          destination,
+                                          heap_number_map,
+                                          scratch1,
+                                          scratch2,
+                                          not_numbers);
+      }
+
+      // Calculate the result.
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        // Using FPU registers:
+        // f12: Left value.
+        // f14: Right value.
+        CpuFeatures::Scope scope(FPU);
+        switch (op_) {
+        case Token::ADD:
+          __ add_d(f10, f12, f14);
+          break;
+        case Token::SUB:
+          __ sub_d(f10, f12, f14);
+          break;
+        case Token::MUL:
+          __ mul_d(f10, f12, f14);
+          break;
+        case Token::DIV:
+          __ div_d(f10, f12, f14);
+          break;
+        default:
+          UNREACHABLE();
+        }
+
+        // ARM uses a workaround here because of the unaligned HeapNumber
+        // kValueOffset. On MIPS this workaround is built into sdc1 so
+        // there's no point in generating even more instructions.
+        __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+        __ mov(v0, result);
+        __ Ret();
+      } else {
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+                                                         op_,
+                                                         result,
+                                                         scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      if (smi_operands) {
+        __ SmiUntag(a3, left);
+        __ SmiUntag(a2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in a2 and left in a3.
+        FloatingPointHelper::ConvertNumberToInt32(masm,
+                                                  left,
+                                                  a3,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  scratch3,
+                                                  f0,
+                                                  not_numbers);
+        FloatingPointHelper::ConvertNumberToInt32(masm,
+                                                  right,
+                                                  a2,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  scratch3,
+                                                  f0,
+                                                  not_numbers);
+      }
+      Label result_not_a_smi;
+      switch (op_) {
+        case Token::BIT_OR:
+          __ Or(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_XOR:
+          __ Xor(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_AND:
+          __ And(a2, a3, Operand(a2));
+          break;
+        case Token::SAR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ srav(a2, a3, a2);
+          break;
+        case Token::SHR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive answer.
+          // The code below for writing into heap numbers isn't capable of
+          // writing the register as an unsigned int, so we go to the slow case
+          // if we hit this situation.
+          if (CpuFeatures::IsSupported(FPU)) {
+            __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
+          } else {
+            __ Branch(not_numbers, lt, a2, Operand(zero_reg));
+          }
+          break;
+        case Token::SHL:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ sllv(a2, a3, a2);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      // Check that the *signed* result fits in a smi.
+      __ Addu(a3, a2, Operand(0x40000000));
+      __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+      __ SmiTag(v0, a2);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      Register result = t1;
+      if (smi_operands) {
+        __ AllocateHeapNumber(
+            result, scratch1, scratch2, heap_number_map, gc_required);
+      } else {
+        GenerateHeapResultAllocation(
+            masm, result, heap_number_map, scratch1, scratch2, gc_required);
+      }
+
+      // a2: Answer as signed int32.
+      // t1: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to v0, which is the
+      // result.
+      __ mov(v0, t1);
+
+      if (CpuFeatures::IsSupported(FPU)) {
+        // Convert the int32 in a2 to the heap number in a0. As
+        // mentioned above SHR needs to always produce a positive result.
+        CpuFeatures::Scope scope(FPU);
+        __ mtc1(a2, f0);
+        if (op_ == Token::SHR) {
+          __ Cvt_d_uw(f0, f0, f22);
+        } else {
+          __ cvt_d_w(f0, f0);
+        }
+        // ARM uses a workaround here because of the unaligned HeapNumber
+        // kValueOffset. On MIPS this workaround is built into sdc1 so
+        // there's no point in generating even more instructions.
+        __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in a2 to the heap number in v0, using
+        // a3 and a0 as scratch. v0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+        __ TailCallStub(&stub);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
 }
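
The SHR comments above hinge on the fact that a logical shift can produce a value with the top bit set (only when the effective shift count is zero), which is not representable as a signed 32-bit integer and therefore not as a smi. A tiny C++ illustration, under the assumption that the shift count is masked to 5 bits as in the stub:

    #include <cstdint>

    // JavaScript ">>>": unsigned 32-bit shift with a 5-bit shift count.
    uint32_t UnsignedShiftRight(int32_t value, uint32_t shift) {
      return static_cast<uint32_t>(value) >> (shift & 0x1f);
    }
    // UnsignedShiftRight(-1, 0) == 0xffffffff, which exceeds INT32_MAX.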
 
 
@@ -351,83 +2533,933 @@
 // generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated the code jumps to the label gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
+    Label* use_runtime,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
-  UNIMPLEMENTED_MIPS();
+  Label not_smis;
+
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+
+  // Perform combined smi check on both operands.
+  __ Or(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(scratch1, &not_smis);
+
+  // If the smi-smi operation results in a smi, a return is generated.
+  GenerateSmiSmiOperation(masm);
+
+  // If heap number results are possible, generate the result in an allocated
+  // heap number.
+  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+    GenerateFPOperation(masm, true, use_runtime, gc_required);
+  }
+  __ bind(&not_smis);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label not_smis, call_runtime;
+
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
+    // Only allow smi results.
+    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    GenerateSmiCode(masm,
+                    &call_runtime,
+                    &call_runtime,
+                    ALLOW_HEAPNUMBER_RESULTS);
+  }
+
+  // Code falls through if the result is not returned as either a smi or heap
+  // number.
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
+  // Try to add arguments as strings, otherwise, transition to the generic
+  // BinaryOpIC type.
+  GenerateAddStrings(masm);
+  GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ GetObjectType(left, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ GetObjectType(right, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::INT32);
+
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  FPURegister double_scratch = f0;
+  FPURegister single_scratch = f6;
+
+  Register heap_number_result = no_reg;
+  Register heap_number_map = t2;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  Label call_runtime;
+  // Labels for type transition, used for wrong input or output types.
+  // Both labels are currently bound to the same position. We use two
+  // different labels to differentiate the cause leading to the type transition.
+  Label transition;
+
+  // Smi-smi fast case.
+  Label skip;
+  __ Or(scratch1, left, right);
+  __ JumpIfNotSmi(scratch1, &skip);
+  GenerateSmiSmiOperation(masm);
+  // Fall through if the result is not a smi.
+  __ bind(&skip);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load both operands and check that they are 32-bit integer.
+      // Jump to type transition if they are not. The registers a0 and a1 (right
+      // and left) are preserved for the runtime call.
+      FloatingPointHelper::Destination destination =
+          (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
+              ? FloatingPointHelper::kFPURegisters
+              : FloatingPointHelper::kCoreRegisters;
+
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   right,
+                                                   destination,
+                                                   f14,
+                                                   a2,
+                                                   a3,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   f2,
+                                                   &transition);
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   left,
+                                                   destination,
+                                                   f12,
+                                                   t0,
+                                                   t1,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   f2,
+                                                   &transition);
+
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        CpuFeatures::Scope scope(FPU);
+        Label return_heap_number;
+        switch (op_) {
+          case Token::ADD:
+            __ add_d(f10, f12, f14);
+            break;
+          case Token::SUB:
+            __ sub_d(f10, f12, f14);
+            break;
+          case Token::MUL:
+            __ mul_d(f10, f12, f14);
+            break;
+          case Token::DIV:
+            __ div_d(f10, f12, f14);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        if (op_ != Token::DIV) {
+          // These operations produce an integer result.
+          // Try to return a smi if we can.
+          // Otherwise return a heap number if allowed, or jump to type
+          // transition.
+
+          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+          // On MIPS a lot of things cannot be implemented the same way, so
+          // for now it makes more sense to just do things manually.
+
+          // Save FCSR.
+          __ cfc1(scratch1, FCSR);
+          // Disable FPU exceptions.
+          __ ctc1(zero_reg, FCSR);
+          __ trunc_w_d(single_scratch, f10);
+          // Retrieve FCSR.
+          __ cfc1(scratch2, FCSR);
+          // Restore FCSR.
+          __ ctc1(scratch1, FCSR);
+
+          // Check for inexact conversion or exception.
+          __ And(scratch2, scratch2, kFCSRFlagMask);
+
+          if (result_type_ <= BinaryOpIC::INT32) {
+            // If scratch2 != 0, result does not fit in a 32-bit integer.
+            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+          }
+
+          // Check if the result fits in a smi.
+          __ mfc1(scratch1, single_scratch);
+          __ Addu(scratch2, scratch1, Operand(0x40000000));
+          // If not, try to return a heap number.
+          __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
+          // Check for minus zero. Return heap number for minus zero.
+          Label not_zero;
+          __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
+          __ mfc1(scratch2, f11);
+          __ And(scratch2, scratch2, HeapNumber::kSignMask);
+          __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
+          __ bind(&not_zero);
+
+          // Tag the result and return.
+          __ SmiTag(v0, scratch1);
+          __ Ret();
+        } else {
+          // DIV just falls through to allocating a heap number.
+        }
+
+        __ bind(&return_heap_number);
+        // Return a heap number, or fall through to type transition or runtime
+        // call if we can't.
+        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+                                                 : BinaryOpIC::INT32)) {
+          // We are using FPU registers so s0 is available.
+          heap_number_result = s0;
+          GenerateHeapResultAllocation(masm,
+                                       heap_number_result,
+                                       heap_number_map,
+                                       scratch1,
+                                       scratch2,
+                                       &call_runtime);
+          __ mov(v0, heap_number_result);
+          __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
+          __ Ret();
+        }
+
+        // A DIV operation expecting an integer result falls through
+        // to type transition.
+
+      } else {
+        // We preserved a0 and a1 to be able to call runtime.
+        // Save the left value on the stack.
+        __ Push(t1, t0);
+
+        Label pop_and_call_runtime;
+
+        // Allocate a heap number to store the result.
+        heap_number_result = s0;
+        GenerateHeapResultAllocation(masm,
+                                     heap_number_result,
+                                     heap_number_map,
+                                     scratch1,
+                                     scratch2,
+                                     &pop_and_call_runtime);
+
+        // Load the left value from the value saved on the stack.
+        __ Pop(a1, a0);
+
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(
+            masm, op_, heap_number_result, scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+
+        __ bind(&pop_and_call_runtime);
+        __ Drop(2);
+        __ Branch(&call_runtime);
+      }
+
+      break;
+    }
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      Label return_heap_number;
+      Register scratch3 = t1;
+      // Convert operands to 32-bit integers. Right in a2 and left in a3. The
+      // registers a0 and a1 (right and left) are preserved for the runtime
+      // call.
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             left,
+                                             a3,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             f0,
+                                             &transition);
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             right,
+                                             a2,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             f0,
+                                             &transition);
+
+      // The ECMA-262 standard specifies that, for shift operations, only the
+      // 5 least significant bits of the shift value should be used.
+      switch (op_) {
+        case Token::BIT_OR:
+          __ Or(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_XOR:
+          __ Xor(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_AND:
+          __ And(a2, a3, Operand(a2));
+          break;
+        case Token::SAR:
+          __ And(a2, a2, Operand(0x1f));
+          __ srav(a2, a3, a2);
+          break;
+        case Token::SHR:
+          __ And(a2, a2, Operand(0x1f));
+          __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive answer.
+          // We only get a negative result if the shift value (a2) is 0.
+          // This result cannot be represented as a signed 32-bit integer, so
+          // try to return a heap number if we can.
+          // The non-FPU code does not support this special case, so jump to
+          // the runtime if we don't support it.
+          if (CpuFeatures::IsSupported(FPU)) {
+            __ Branch((result_type_ <= BinaryOpIC::INT32)
+                        ? &transition
+                        : &return_heap_number,
+                       lt,
+                       a2,
+                       Operand(zero_reg));
+          } else {
+            __ Branch((result_type_ <= BinaryOpIC::INT32)
+                        ? &transition
+                        : &call_runtime,
+                       lt,
+                       a2,
+                       Operand(zero_reg));
+          }
+          break;
+        case Token::SHL:
+          __ And(a2, a2, Operand(0x1f));
+          __ sllv(a2, a3, a2);
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      // Check if the result fits in a smi.
+      __ Addu(scratch1, a2, Operand(0x40000000));
+      // If not, try to return a heap number. (We know the result is an int32.)
+      __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
+      // Tag the result and return.
+      __ SmiTag(v0, a2);
+      __ Ret();
+
+      __ bind(&return_heap_number);
+      heap_number_result = t1;
+      GenerateHeapResultAllocation(masm,
+                                   heap_number_result,
+                                   heap_number_map,
+                                   scratch1,
+                                   scratch2,
+                                   &call_runtime);
+
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+
+        if (op_ != Token::SHR) {
+          // Convert the result to a floating point value.
+          __ mtc1(a2, double_scratch);
+          __ cvt_d_w(double_scratch, double_scratch);
+        } else {
+          // The result must be interpreted as an unsigned 32-bit integer.
+          __ mtc1(a2, double_scratch);
+          __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
+        }
+
+        // Store the result.
+        __ mov(v0, heap_number_result);
+        __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in a2 to the heap number in v0, using
+        // a3 and a1 as scratch. v0 is preserved and returned.
+        __ mov(v0, t1);
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+        __ TailCallStub(&stub);
+      }
+
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  // We never expect DIV to yield an integer result, so we always generate
+  // type transition code for DIV operations expecting an integer result: the
+  // code will fall through to this type transition.
+  if (transition.is_linked() ||
+      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
+    __ bind(&transition);
+    GenerateTypeTransition(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (op_ == Token::ADD) {
+    // Handle string addition here, because it is the only operation
+    // that does not do a ToNumber conversion on the operands.
+    GenerateAddStrings(masm);
+  }
+
+  // Convert oddball arguments to numbers.
+  Label check, done;
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&check, ne, a1, Operand(t0));
+  if (Token::IsBitOp(op_)) {
+    __ li(a1, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(a1, Heap::kNanValueRootIndex);
+  }
+  __ jmp(&done);
+  __ bind(&check);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&done, ne, a0, Operand(t0));
+  if (Token::IsBitOp(op_)) {
+    __ li(a0, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(a0, Heap::kNanValueRootIndex);
+  }
+  __ bind(&done);
+
+  GenerateHeapNumberStub(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  Label call_runtime;
+  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  Label call_runtime, call_string_add_or_runtime;
+
+  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+
+  __ bind(&call_string_add_or_runtime);
+  if (op_ == Token::ADD) {
+    GenerateAddStrings(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD);
+  Label left_not_string, call_runtime;
+
+  Register left = a1;
+  Register right = a0;
+
+  // Check if left argument is a string.
+  __ JumpIfSmi(left, &left_not_string);
+  __ GetObjectType(left, a2, a2);
+  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ JumpIfSmi(right, &call_runtime);
+  __ GetObjectType(right, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // At least one argument is not a string.
+  __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  switch (op_) {
+    case Token::ADD:
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Register result,
     Register heap_number_map,
     Register scratch1,
     Register scratch2,
     Label* gc_required) {
-  UNIMPLEMENTED_MIPS();
+
+  // The code below will clobber result if allocation fails. To keep both
+  // arguments intact for the runtime call, result cannot be one of them.
+  ASSERT(!result.is(a0) && !result.is(a1));
+
+  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+    Label skip_allocation, allocated;
+    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+    // If the overwritable operand is already an object, we skip the
+    // allocation of a heap number.
+    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+    // Allocate a heap number for the result.
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+    __ Branch(&allocated);
+    __ bind(&skip_allocation);
+    // Use object holding the overwritable operand for result.
+    __ mov(result, overwritable_operand);
+    __ bind(&allocated);
+  } else {
+    ASSERT(mode_ == NO_OVERWRITE);
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+  }
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  __ Push(a1, a0);
 }
 
 
 
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Untagged case: double input in f4, double result goes
+  //   into f4.
+  // Tagged case: tagged input on top of stack and in a0,
+  //   tagged result (heap number) goes into v0.
+
+  Label input_not_smi;
+  Label loaded;
+  Label calculate;
+  Label invalid_cache;
+  const Register scratch0 = t5;
+  const Register scratch1 = t3;
+  const Register cache_entry = a0;
+  const bool tagged = (argument_type_ == TAGGED);
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    if (tagged) {
+      // Argument is a number and is on stack and in a0.
+      // Load argument and check if it is a smi.
+      __ JumpIfNotSmi(a0, &input_not_smi);
+
+      // Input is a smi. Convert to double and load the low and high words
+      // of the double into a2, a3.
+      __ sra(t0, a0, kSmiTagSize);
+      __ mtc1(t0, f4);
+      __ cvt_d_w(f4, f4);
+      __ Move(a2, a3, f4);
+      __ Branch(&loaded);
+
+      __ bind(&input_not_smi);
+      // Check if input is a HeapNumber.
+      __ CheckMap(a0,
+                  a1,
+                  Heap::kHeapNumberMapRootIndex,
+                  &calculate,
+                  DONT_DO_SMI_CHECK);
+      // Input is a HeapNumber. Load the low and high words into a2, a3.
+      __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+      __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+    } else {
+      // Input is untagged double in f4. Output goes to f4.
+      __ Move(a2, a3, f4);
+    }
+    __ bind(&loaded);
+    // a2 = low 32 bits of double value.
+    // a3 = high 32 bits of double value.
+    // Compute hash (the shifts are arithmetic):
+    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
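+    // Folding the upper 16 bits and then the upper 8 bits into the low bits
+    // spreads the entropy of the 64-bit value across the low bits before the
+    // mask below reduces the hash to a cache index.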
+    __ Xor(a1, a2, a3);
+    __ sra(t0, a1, 16);
+    __ Xor(a1, a1, t0);
+    __ sra(t0, a1, 8);
+    __ Xor(a1, a1, t0);
+    ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+    __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
+    // a2 = low 32 bits of double value.
+    // a3 = high 32 bits of double value.
+    // a1 = TranscendentalCache::hash(double value).
+    __ li(cache_entry, Operand(
+        ExternalReference::transcendental_cache_array_address(
+            masm->isolate())));
+    // a0 points to cache array.
+    __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+        Isolate::Current()->transcendental_cache()->caches_[0])));
+    // a0 points to the cache for the type type_.
+    // If NULL, the cache hasn't been initialized yet, so go through runtime.
+    __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+
+#ifdef DEBUG
+    // Check that the layout of cache elements matches expectations.
+    { TranscendentalCache::SubCache::Element test_elem[2];
+      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+      CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
+      CHECK_EQ(0, elem_in0 - elem_start);
+      CHECK_EQ(kIntSize, elem_in1 - elem_start);
+      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+    }
+#endif
+
+    // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
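+    // (The multiply by 12 is done as ((a1 + 2 * a1) << 2) using shifts and
+    // adds.)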
+    __ sll(t0, a1, 1);
+    __ Addu(a1, a1, t0);
+    __ sll(t0, a1, 2);
+    __ Addu(cache_entry, cache_entry, t0);
+
+    // Check if cache matches: Double value is stored in uint32_t[2] array.
+    __ lw(t0, MemOperand(cache_entry, 0));
+    __ lw(t1, MemOperand(cache_entry, 4));
+    __ lw(t2, MemOperand(cache_entry, 8));
+    __ Addu(cache_entry, cache_entry, 12);
+    __ Branch(&calculate, ne, a2, Operand(t0));
+    __ Branch(&calculate, ne, a3, Operand(t1));
+    // Cache hit. Load result, cleanup and return.
+    if (tagged) {
+      // Pop input value from stack and load result into v0.
+      __ Drop(1);
+      __ mov(v0, t2);
+    } else {
+      // Load result into f4.
+      __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+    }
+    __ Ret();
+  }  // if (CpuFeatures::IsSupported(FPU))
+
+  __ bind(&calculate);
+  if (tagged) {
+    __ bind(&invalid_cache);
+    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
+                                                   masm->isolate()),
+                                 1,
+                                 1);
+  } else {
+    if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+    CpuFeatures::Scope scope(FPU);
+
+    Label no_update;
+    Label skip_cache;
+    const Register heap_number_map = t2;
+
+    // Call C function to calculate the result and update the cache.
+    // Register a0 holds precalculated cache entry address; preserve
+    // it on the stack and pop it into register cache_entry after the
+    // call.
+    __ push(cache_entry);
+    GenerateCallCFunction(masm, scratch0);
+    __ GetCFunctionDoubleResult(f4);
+
+    // Try to update the cache. If we cannot allocate a
+    // heap number, we return the result without updating.
+    __ pop(cache_entry);
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
+    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+
+    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
+    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
+    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+
+    __ mov(v0, cache_entry);
+    __ Ret();
+
+    __ bind(&invalid_cache);
+    // The cache is invalid. Call runtime which will recreate the
+    // cache.
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
+    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
+    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
+    __ Ret();
+
+    __ bind(&skip_cache);
+    // Call C function to calculate the result and answer directly
+    // without updating the cache.
+    GenerateCallCFunction(masm, scratch0);
+    __ GetCFunctionDoubleResult(f4);
+    __ bind(&no_update);
+
+    // We return the value in f4 without adding it to the cache, but
+    // we cause a scavenging GC so that future allocations will succeed.
+    __ EnterInternalFrame();
+
+    // Allocate an aligned object larger than a HeapNumber.
+    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+    __ li(scratch0, Operand(4 * kPointerSize));
+    __ push(scratch0);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
+    __ Ret();
+  }
+}
+
+
+void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
+                                                    Register scratch) {
+  __ push(ra);
+  __ PrepareCallCFunction(2, scratch);
+  if (IsMipsSoftFloatABI) {
+    __ Move(v0, v1, f4);
+  } else {
+    __ mov_d(f12, f4);
+  }
+  switch (type_) {
+    case TranscendentalCache::SIN:
+      __ CallCFunction(
+          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+      break;
+    case TranscendentalCache::COS:
+      __ CallCFunction(
+          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+      break;
+    case TranscendentalCache::LOG:
+      __ CallCFunction(
+          ExternalReference::math_log_double_function(masm->isolate()), 2);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  __ pop(ra);
 }
 
 
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
-  UNIMPLEMENTED_MIPS();
-  return Runtime::kAbort;
+  switch (type_) {
+    // Add more cases when necessary.
+    case TranscendentalCache::SIN: return Runtime::kMath_sin;
+    case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::LOG: return Runtime::kMath_log;
+    default:
+      UNIMPLEMENTED();
+      return Runtime::kAbort;
+  }
 }
 
 
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void MathPowStub::Generate(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    Label base_not_smi;
+    Label exponent_not_smi;
+    Label convert_exponent;
+
+    const Register base = a0;
+    const Register exponent = a2;
+    const Register heapnumbermap = t1;
+    const Register heapnumber = s0;  // Callee-saved register.
+    const Register scratch = t2;
+    const Register scratch2 = t3;
+
+    // Allocate FP values in the ABI-parameter-passing regs.
+    const DoubleRegister double_base = f12;
+    const DoubleRegister double_exponent = f14;
+    const DoubleRegister double_result = f0;
+    const DoubleRegister double_scratch = f2;
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+    __ lw(base, MemOperand(sp, 1 * kPointerSize));
+    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
+
+    // Convert base to double value and store it in f0.
+    __ JumpIfNotSmi(base, &base_not_smi);
+    // Base is a Smi. Untag and convert it.
+    __ SmiUntag(base);
+    __ mtc1(base, double_scratch);
+    __ cvt_d_w(double_base, double_scratch);
+    __ Branch(&convert_exponent);
+
+    __ bind(&base_not_smi);
+    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    // Base is a heapnumber. Load it into double register.
+    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+    __ bind(&convert_exponent);
+    __ JumpIfNotSmi(exponent, &exponent_not_smi);
+    __ SmiUntag(exponent);
+
+    // The base is in a double register and the exponent is
+    // an untagged smi. Allocate a heap number and call a
+    // C function for integer exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
+    __ push(ra);
+    __ PrepareCallCFunction(3, scratch);
+    __ SetCallCDoubleArguments(double_base, exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(masm->isolate()), 3);
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2 * kPointerSize);
+
+    __ bind(&exponent_not_smi);
+    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    // Exponent is a heapnumber. Load it into double register.
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+    // The base and the exponent are in double registers.
+    // Allocate a heap number and call a C function for
+    // double exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
+    __ push(ra);
+    __ PrepareCallCFunction(4, scratch);
+    // ABI (o32) for func(double a, double b): a in f12, b in f14.
+    ASSERT(double_base.is(f12));
+    ASSERT(double_exponent.is(f14));
+    __ SetCallCDoubleArguments(double_base, double_exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(masm->isolate()), 4);
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2 * kPointerSize);
+  }
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 }
 
 
@@ -437,13 +3469,13 @@
 
 
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Throw(v0);
 }
 
 
 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                           UncatchableExceptionType type) {
-  UNIMPLEMENTED_MIPS();
+  __ ThrowUncatchable(type, v0);
 }
 
 
@@ -453,78 +3485,1681 @@
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
                               bool always_allocate) {
-  UNIMPLEMENTED_MIPS();
+  // v0: result parameter for PerformGC, if any
+  // s0: number of arguments including receiver (C callee-saved)
+  // s1: pointer to the first argument          (C callee-saved)
+  // s2: pointer to builtin function            (C callee-saved)
+
+  if (do_gc) {
+    // Move result passed in v0 into a0 to call PerformGC.
+    __ mov(a0, v0);
+    __ PrepareCallCFunction(1, a1);
+    __ CallCFunction(
+        ExternalReference::perform_gc_function(masm->isolate()), 1);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+  if (always_allocate) {
+    __ li(a0, Operand(scope_depth));
+    __ lw(a1, MemOperand(a0));
+    __ Addu(a1, a1, Operand(1));
+    __ sw(a1, MemOperand(a0));
+  }
+
+  // Prepare arguments for C routine: a0 = argc, a1 = argv
+  __ mov(a0, s0);
+  __ mov(a1, s1);
+
+  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+  // also need to reserve the 4 argument slots on the stack.
+
+  __ AssertStackIsAligned();
+
+  __ li(a2, Operand(ExternalReference::isolate_address()));
+
+  // To let the GC traverse the return address of the exit frames, we need to
+  // know where the return address is. The CEntryStub is unmovable, so
+  // we can store the address on the stack to be able to find it again and
+  // we never have to restore it, because it will not change.
+  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+    // This branch-and-link sequence is needed to find the current PC on MIPS,
+    // saved to the ra register.
+    // Use masm-> here instead of the double-underscore macro since extra
+    // coverage code can interfere with the proper calculation of ra.
+    Label find_ra;
+    masm->bal(&find_ra);  // bal exposes branch delay slot.
+    masm->nop();  // Branch delay slot nop.
+    masm->bind(&find_ra);
+
+    // Adjust the value in ra to point to the correct return location, 2nd
+    // instruction past the real call into C code (the jalr(t9)), and push it.
+    // This is the return address of the exit frame.
+    const int kNumInstructionsToJump = 6;
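+    // The six instructions are the Addu, sw, Subu, mov, jalr and delay-slot
+    // nop emitted below; the ASSERT_EQ after the call verifies this count.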
+    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
+    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
+    masm->Subu(sp, sp, kCArgsSlotsSize);
+    // Stack is still aligned.
+
+    // Call the C routine.
+    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
+    masm->jalr(t9);
+    masm->nop();    // Branch delay slot nop.
+    // Make sure the stored 'ra' points to this position.
+    ASSERT_EQ(kNumInstructionsToJump,
+              masm->InstructionsGeneratedSince(&find_ra));
+  }
+
+  // Restore stack (remove arg slots).
+  __ Addu(sp, sp, kCArgsSlotsSize);
+
+  if (always_allocate) {
+    // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
+    __ li(a2, Operand(scope_depth));
+    __ lw(a3, MemOperand(a2));
+    __ Subu(a3, a3, Operand(1));
+    __ sw(a3, MemOperand(a2));
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
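+  // A failure object has kFailureTag in its low tag bits; per the assert
+  // above, adding 1 clears those bits, so t0 == 0 below identifies a failure.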
+  __ addiu(a2, v0, 1);
+  __ andi(t0, a2, kFailureTagMask);
+  __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+
+  // Exit C frame and return.
+  // v0:v1: result
+  // sp: stack pointer
+  // fp: frame pointer
+  __ LeaveExitFrame(save_doubles_, s0);
+  __ Ret();
+
+  // Check if we should retry or throw exception.
+  Label retry;
+  __ bind(&failure_returned);
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+  __ Branch(&retry, eq, t0, Operand(zero_reg));
+
+  // Special handling of out of memory exceptions.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ Branch(throw_out_of_memory_exception, eq,
+            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+  // Retrieve the pending exception and clear the variable.
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a3, MemOperand(t0));
+  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      masm->isolate())));
+  __ lw(v0, MemOperand(t0));
+  __ sw(a3, MemOperand(t0));
+
+  // Special handling of termination exceptions which are uncatchable
+  // by JavaScript code.
+  __ Branch(throw_termination_exception, eq,
+            v0, Operand(masm->isolate()->factory()->termination_exception()));
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  __ bind(&retry);
+  // Last failure (v0) will be moved to (a0) for parameter when retrying.
 }
 
 
 void CEntryStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Called from JavaScript; parameters are on stack as if calling JS function
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
+  // fp: frame pointer    (restored after C call)
+  // sp: stack pointer    (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  // Compute the argv pointer in a callee-saved register.
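+  // s1 ends up as sp + argc * kPointerSize - kPointerSize, i.e. the address
+  // of the first argument (cf. the register comments in GenerateCore above).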
+  __ sll(s1, a0, kPointerSizeLog2);
+  __ Addu(s1, sp, s1);
+  __ Subu(s1, s1, Operand(kPointerSize));
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(save_doubles_);
+
+  // Setup argc and the builtin function in callee-saved registers.
+  __ mov(s0, a0);
+  __ mov(s2, a1);
+
+  // s0: number of arguments (C callee-saved)
+  // s1: pointer to first argument (C callee-saved)
+  // s2: pointer to builtin function (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  UNIMPLEMENTED_MIPS();
+  Label invoke, exit;
+
+  // Registers:
+  // a0: entry address
+  // a1: function
+  // a2: receiver
+  // a3: argc
+  //
+  // Stack:
+  // 4 args slots
+  // args
+
+  // Save callee saved registers on the stack.
+  __ MultiPush(kCalleeSaved | ra.bit());
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Save callee-saved FPU registers.
+    __ MultiPushFPU(kCalleeSavedFPU);
+  }
+
+  // Load argv in s0 register.
+  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
+  if (CpuFeatures::IsSupported(FPU)) {
+    offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
+  }
+
+  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
+
+  // We build an EntryFrame.
+  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ li(t2, Operand(Smi::FromInt(marker)));
+  __ li(t1, Operand(Smi::FromInt(marker)));
+  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+                                      masm->isolate())));
+  __ lw(t0, MemOperand(t0));
+  __ Push(t3, t2, t1, t0);
+  // Setup frame pointer for the frame to be pushed.
+  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // caller fp          |
+  // function slot      | entry frame
+  // context slot       |
+  // bad fp (0xff...f)  |
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  // If this is the outermost JS call, set js_entry_sp value.
+  Label non_outermost_js;
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
+                                masm->isolate());
+  __ li(t1, Operand(ExternalReference(js_entry_sp)));
+  __ lw(t2, MemOperand(t1));
+  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
+  __ sw(fp, MemOperand(t1));
+  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  Label cont;
+  __ b(&cont);
+  __ nop();   // Branch delay slot nop.
+  __ bind(&non_outermost_js);
+  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+  __ bind(&cont);
+  __ push(t0);
+
+  // Call a faked try-block that does the invoke.
+  __ bal(&invoke);  // bal exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
+  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      masm->isolate())));
+  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
+  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+  __ b(&exit);  // b exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bal(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(t1, MemOperand(t0));
+  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      masm->isolate())));
+  __ sw(t1, MemOperand(t0));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      masm->isolate());
+    __ li(t0, Operand(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+    __ li(t0, Operand(entry));
+  }
+  __ lw(t9, MemOperand(t0));  // Deref address.
+
+  // Call JSEntryTrampoline.
+  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  __ Call(t9);
+
+  // Unlink this frame from the handler chain.
+  __ PopTryHandler();
+
+  __ bind(&exit);  // v0 holds result
+  // Check if the current stack frame is marked as the outermost JS frame.
+  Label non_outermost_js_2;
+  __ pop(t1);
+  __ Branch(&non_outermost_js_2, ne, t1,
+            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ li(t1, Operand(ExternalReference(js_entry_sp)));
+  __ sw(zero_reg, MemOperand(t1));
+  __ bind(&non_outermost_js_2);
+
+  // Restore the top frame descriptors from the stack.
+  __ pop(t1);
+  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+                                      masm->isolate())));
+  __ sw(t1, MemOperand(t0));
+
+  // Reset the stack to the callee saved registers.
+  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Restore callee-saved fpu registers.
+    __ MultiPopFPU(kCalleeSavedFPU);
+  }
+
+  // Restore callee saved registers from the stack.
+  __ MultiPop(kCalleeSaved | ra.bit());
+  // Return.
+  __ Jump(ra);
 }
 
 
-// Uses registers a0 to t0. Expected input is
-// object in a0 (or at sp+1*kPointerSize) and function in
-// a1 (or at sp), depending on whether or not
-// args_in_registers() is true.
+// Uses registers a0 to t0.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: a0 or at sp + 1 * kPointerSize.
+// * function: a1 or at sp.
+//
+// Inlined call site patching is a crankshaft-specific feature that is not
+// implemented on MIPS.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // This is a crankshaft-specific feature that has not been implemented yet.
+  ASSERT(!HasCallSiteInlineCheck());
+  // Call site inlining and patching implies arguments in registers.
+  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  // ReturnTrueFalse is only implemented for inlined call sites.
+  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+  // Fixed register usage throughout the stub:
+  const Register object = a0;  // Object (lhs).
+  Register map = a3;  // Map of the object.
+  const Register function = a1;  // Function (rhs).
+  const Register prototype = t0;  // Prototype of the function.
+  const Register inline_site = t5;
+  const Register scratch = a2;
+
+  Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+  if (!HasArgsInRegisters()) {
+    __ lw(object, MemOperand(sp, 1 * kPointerSize));
+    __ lw(function, MemOperand(sp, 0));
+  }
+
+  // Check that the left hand is a JS object and load map.
+  __ JumpIfSmi(object, &not_js_object);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+  // If there is a call site cache don't look in the global cache, but do the
+  // real lookup and update the call site cache.
+  if (!HasCallSiteInlineCheck()) {
+    Label miss;
+    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(t1));
+    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+    __ bind(&miss);
+  }
+
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ JumpIfSmi(prototype, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
+  if (!HasCallSiteInlineCheck()) {
+    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  // Register mapping: a3 is object map and t0 is function prototype.
+  // Get prototype of object into a2.
+  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+  // We don't need map any more. Use it as a scratch register.
+  Register scratch2 = map;
+  map = no_reg;
+
+  // Loop through the prototype chain looking for the function prototype.
+  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+  __ bind(&loop);
+  __ Branch(&is_instance, eq, scratch, Operand(prototype));
+  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
+  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+  __ Branch(&loop);
+
+  __ bind(&is_instance);
+  ASSERT(Smi::FromInt(0) == 0);
+  if (!HasCallSiteInlineCheck()) {
+    __ mov(v0, zero_reg);
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&is_not_instance);
+  if (!HasCallSiteInlineCheck()) {
+    __ li(v0, Operand(Smi::FromInt(1)));
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before the null, smi and string value checks, check that the rhs is a
+  // function, since an exception needs to be thrown for a non-function rhs.
+  __ JumpIfSmi(function, &slow);
+  __ GetObjectType(function, scratch2, scratch);
+  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Null is not an instance of anything.
+  __ Branch(&object_not_null, ne, scratch,
+      Operand(masm->isolate()->factory()->null_value()));
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ JumpIfNotSmi(object, &object_not_null_or_smi);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  __ IsObjectJSStringType(object, scratch, &slow);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  // Slow-case.  Tail call builtin.
+  __ bind(&slow);
+  if (!ReturnTrueFalseObject()) {
+    if (HasArgsInRegisters()) {
+      __ Push(a0, a1);
+    }
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+  } else {
+    __ EnterInternalFrame();
+    __ Push(a0, a1);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    __ LeaveInternalFrame();
+    __ mov(a0, v0);
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+  }
 }
 
 
+Register InstanceofStub::left() { return a0; }
+
+
+Register InstanceofStub::right() { return a1; }
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // The displacement is the offset of the last parameter (if any)
+  // relative to the frame pointer.
+  static const int kDisplacement =
+      StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ JumpIfNotSmi(a1, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Check index (a1) against formal parameters count limit passed in
+  // through register a0. Use unsigned comparison to get negative
+  // check for free.
+  __ Branch(&slow, hs, a1, Operand(a0));
+
+  // Read the argument from the stack and return it.
+  __ subu(a3, a0, a1);
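+  // Both counts are smis, so a3 is a smi (2 * the element distance); the
+  // shift below scales it to a byte offset.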
+  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, fp, Operand(t3));
+  __ lw(v0, MemOperand(a3, kDisplacement));
+  __ Ret();
+
+  // Arguments adaptor case: Check index (a1) against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+  // Read the argument from the adaptor frame and return it.
+  __ subu(a3, a0, a1);
+  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, a2, Operand(t3));
+  __ lw(v0, MemOperand(a3, kDisplacement));
+  __ Ret();
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ push(a1);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
 }
 
 
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ Branch(&runtime, ne,
+            a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Patch the arguments.length and the parameters pointer in the current frame.
+  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
+  __ sll(t3, a2, 1);
+  __ Addu(a3, a3, Operand(t3));
+  __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
+  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+  // Stack layout:
+  //  sp[0] : number of parameters (tagged)
+  //  sp[4] : address of receiver argument
+  //  sp[8] : function
+  // Registers used over whole function:
+  //  t2 : allocated object (tagged)
+  //  t5 : mapped parameter count (tagged)
+
+  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+  // a1 = parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  Label adaptor_frame, try_allocate;
+  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame, eq, a2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // No adaptor, parameter count = argument count.
+  __ mov(a2, a1);
+  __ b(&try_allocate);
+  __ nop();   // Branch delay slot nop.
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sll(t6, a2, 1);
+  __ Addu(a3, a3, Operand(t6));
+  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+  // a1 = parameter count (tagged)
+  // a2 = argument count (tagged)
+  // Compute the mapped parameter count = min(a1, a2) in a1.
+  Label skip_min;
+  __ Branch(&skip_min, lt, a1, Operand(a2));
+  __ mov(a1, a2);
+  __ bind(&skip_min);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  Label param_map_size;
+  ASSERT_EQ(0, Smi::FromInt(0));
+  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
+  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
+  __ sll(t5, a1, 1);
+  __ addiu(t5, t5, kParameterMapHeaderSize);
+  __ bind(&param_map_size);
+
+  // 2. Backing store.
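+  // a2 is a smi (2 * argument_count), so shifting left by one yields
+  // argument_count * kPointerSize bytes.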
+  __ sll(t6, a2, 1);
+  __ Addu(t5, t5, Operand(t6));
+  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+
+  // Do the allocation of all three objects in one go.
+  __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+
+  // v0 = address of new object(s) (tagged)
+  // a2 = argument count (tagged)
+  // Get the arguments boilerplate from the current (global) context into t0.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+  Label skip2_ne, skip2_eq;
+  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+  __ lw(t0, MemOperand(t0, kNormalOffset));
+  __ bind(&skip2_ne);
+
+  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+  __ lw(t0, MemOperand(t0, kAliasedOffset));
+  __ bind(&skip2_eq);
+
+  // v0 = address of new object (tagged)
+  // a1 = mapped parameter count (tagged)
+  // a2 = argument count (tagged)
+  // t0 = address of boilerplate object (tagged)
+  // Copy the JS object part.
+  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+    __ lw(a3, FieldMemOperand(t0, i));
+    __ sw(a3, FieldMemOperand(v0, i));
+  }
+
+  // Setup the callee in-object property.
+  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+  const int kCalleeOffset = JSObject::kHeaderSize +
+      Heap::kArgumentsCalleeIndex * kPointerSize;
+  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  const int kLengthOffset = JSObject::kHeaderSize +
+      Heap::kArgumentsLengthIndex * kPointerSize;
+  __ sw(a2, FieldMemOperand(v0, kLengthOffset));
+
+  // Setup the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, t0 will point there, otherwise
+  // it will point to the backing store.
+  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // v0 = address of new object (tagged)
+  // a1 = mapped parameter count (tagged)
+  // a2 = argument count (tagged)
+  // t0 = address of parameter map or backing store (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  Label skip3;
+  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
+  // Move backing store address to a3, because it is
+  // expected there when filling in the unmapped arguments.
+  __ mov(a3, t0);
+  __ bind(&skip3);
+
+  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+
+  __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+  __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
+  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
+  __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+  __ sll(t6, a1, 1);
+  __ Addu(t2, t0, Operand(t6));
+  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
+  __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count.
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+  __ mov(t2, a1);
+  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
+  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ Subu(t5, t5, Operand(a1));
+  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+  __ sll(t6, t2, 1);
+  __ Addu(a3, t0, Operand(t6));
+  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
+
+  // t2 = loop variable (tagged)
+  // a1 = mapping index (tagged)
+  // a3 = address of backing store (tagged)
+  // t0 = address of parameter map (tagged)
+  // t1 = temporary scratch (among other things, for address calculation)
+  // t3 = the hole value
+  __ jmp(&parameters_test);
+
+  __ bind(&parameters_loop);
+  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
+  __ sll(t1, t2, 1);
+  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ Addu(t6, t0, t1);
+  __ sw(t5, MemOperand(t6));
+  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ Addu(t6, a3, t1);
+  __ sw(t3, MemOperand(t6));
+  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
+
+  __ bind(&skip_parameter_map);
+  // a2 = argument count (tagged)
+  // a3 = address of backing store (tagged)
+  // t1 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+  __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
+  __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+
+  Label arguments_loop, arguments_test;
+  __ mov(t5, a1);
+  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
+  __ sll(t6, t5, 1);
+  __ Subu(t0, t0, Operand(t6));
+  __ jmp(&arguments_test);
+
+  __ bind(&arguments_loop);
+  __ Subu(t0, t0, Operand(kPointerSize));
+  __ lw(t2, MemOperand(t0, 0));
+  __ sll(t6, t5, 1);
+  __ Addu(t1, a3, Operand(t6));
+  __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ Branch(&arguments_loop, lt, t5, Operand(a2));
+
+  // Return and remove the on-stack parameters.
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // a2 = argument count (tagged)
+  __ bind(&runtime);
+  __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Get the length from the frame.
+  __ lw(a1, MemOperand(sp, 0));
+  __ Branch(&try_allocate);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sw(a1, MemOperand(sp, 0));
+  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, a2, Operand(at));
+
+  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+  // Try the new space allocation. Start out with computing the size
+  // of the arguments object and the elements array in words.
+  Label add_arguments_object;
+  __ bind(&try_allocate);
+  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+  __ srl(a1, a1, kSmiTagSize);
+
+  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+  __ bind(&add_arguments_object);
+  __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+
+  // Do the allocation of both objects in one go.
+  __ AllocateInNewSpace(a1,
+                        v0,
+                        a2,
+                        a3,
+                        &runtime,
+                        static_cast<AllocationFlags>(TAG_OBJECT |
+                                                     SIZE_IN_WORDS));
+
+  // Get the arguments boilerplate from the current (global) context.
+  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+  __ lw(t0, MemOperand(t0, Context::SlotOffset(
+      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+
+  // Copy the JS object part.
+  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+  // Get the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+      Heap::kArgumentsLengthIndex * kPointerSize));
+
+  Label done;
+  __ Branch(&done, eq, a1, Operand(zero_reg));
+
+  // Get the parameters pointer from the stack.
+  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
+
+  // Setup the elements pointer in the allocated arguments object and
+  // initialize the header in the elements fixed array.
+  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+  __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
+  __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // Untag the length for the loop.
+  __ srl(a1, a1, kSmiTagSize);
+
+  // Copy the fixed array slots.
+  Label loop;
+  // Setup t0 to point to the first array slot.
+  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&loop);
+  // Pre-decrement a2 with kPointerSize on each iteration.
+  // Pre-decrement in order to skip receiver.
+  __ Addu(a2, a2, Operand(-kPointerSize));
+  __ lw(a3, MemOperand(a2));
+  // Post-increment t0 with kPointerSize on each iteration.
+  __ sw(a3, MemOperand(t0));
+  __ Addu(t0, t0, Operand(kPointerSize));
+  __ Subu(a1, a1, Operand(1));
+  __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+  // Return and remove the on-stack parameters.
+  __ bind(&done);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
 }
 
 
 void RegExpExecStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Just jump directly to runtime if native RegExp is not selected at compile
+  // time, or if the regexp entry in generated code has been turned off by the
+  // FLAG_regexp_entry_native runtime flag.
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else  // V8_INTERPRETED_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
+
+  // Stack frame on entry.
+  //  sp[0]: last_match_info (expected JSArray)
+  //  sp[4]: previous index
+  //  sp[8]: subject string
+  //  sp[12]: JSRegExp object
+
+  static const int kLastMatchInfoOffset = 0 * kPointerSize;
+  static const int kPreviousIndexOffset = 1 * kPointerSize;
+  static const int kSubjectOffset = 2 * kPointerSize;
+  static const int kJSRegExpOffset = 3 * kPointerSize;
+
+  Label runtime, invoke_regexp;
+
+  // Allocation of registers for this function. These are in callee save
+  // registers and will be preserved by the call to the native RegExp code, as
+  // this code is called using the normal C calling convention. When calling
+  // directly from generated code the native RegExp code will not do a GC and
+  // therefore the contents of these registers are safe to use after the call.
+  // MIPS - using s0..s2, since we are not using CEntry Stub.
+  Register subject = s0;
+  Register regexp_data = s1;
+  Register last_match_info_elements = s2;
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address(
+          masm->isolate());
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+  __ li(a0, Operand(address_of_regexp_stack_memory_size));
+  __ lw(a0, MemOperand(a0, 0));
+  __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+  // Check that the first argument is a JSRegExp object.
+  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+  // Check that the RegExp has been compiled (data contains a fixed array).
+  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+  if (FLAG_debug_code) {
+    __ And(t0, regexp_data, Operand(kSmiTagMask));
+    __ Check(nz,
+             "Unexpected type for RegExp data, FixedArray expected",
+             t0,
+             Operand(zero_reg));
+    __ GetObjectType(regexp_data, a0, a0);
+    __ Check(eq,
+             "Unexpected type for RegExp data, FixedArray expected",
+             a0,
+             Operand(FIXED_ARRAY_TYPE));
+  }
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fits in the static offsets vector
+  // buffer.
+  __ lw(a2,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2. This
+  // uses the assumption that smis are 2 * their untagged value.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  __ Addu(a2, a2, Operand(2));  // a2 was a smi.
+  // Check that the static offsets vector buffer is large enough.
+  __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+
+  // a2: Number of capture registers
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the second argument is a string.
+  __ lw(subject, MemOperand(sp, kSubjectOffset));
+  __ JumpIfSmi(subject, &runtime);
+  __ GetObjectType(subject, a0, a0);
+  __ And(a0, a0, Operand(kIsNotStringMask));
+  STATIC_ASSERT(kStringTag == 0);
+  __ Branch(&runtime, ne, a0, Operand(zero_reg));
+
+  // Get the length of the string to r3.
+  __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
+
+  // a2: Number of capture registers
+  // a3: Length of subject string as a smi
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the third argument is a positive smi less than the subject
+  // string length. A negative value will be greater (unsigned comparison).
+  __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
+  __ And(at, a0, Operand(kSmiTagMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ Branch(&runtime, ls, a3, Operand(a0));
+
+  // a2: Number of capture registers
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the fourth object is a JSArray object.
+  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
+  // Check that the JSArray is in fast case.
+  __ lw(last_match_info_elements,
+         FieldMemOperand(a0, JSArray::kElementsOffset));
+  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+  __ Branch(&runtime, ne, a0, Operand(
+      masm->isolate()->factory()->fixed_array_map()));
+  // Check that the last match info has space for the capture registers and the
+  // additional information.
+  __ lw(a0,
+         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+  __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
+  __ sra(at, a0, kSmiTagSize);  // Untag length for comparison.
+  __ Branch(&runtime, gt, a2, Operand(at));
+
+  // Reset offset for possibly sliced string.
+  __ mov(t0, zero_reg);
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check the representation and encoding of the subject string.
+  Label seq_string;
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  // First check for flat string.
+  __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+  __ Branch(&seq_string, eq, a1, Operand(zero_reg));
+
+  // subject: Subject string
+  // a0: instance type of subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check for flat cons string or sliced string.
+  // A flat cons string is a cons string where the second part is the empty
+  // string. In that case the subject string is just the first part of the cons
+  // string. Also in this case the first part of the cons string is known to be
+  // a sequential string or an external string.
+  // In the case of a sliced string its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
+  __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
+
+  // String is sliced.
+  __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+  __ sra(t0, t0, kSmiTagSize);
+  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+  // t0: offset of sliced string, untagged.
+  __ jmp(&check_encoding);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
+  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+  __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+  __ Branch(&runtime, ne, a0, Operand(a1));
+  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  // Is first part of cons or parent of slice a flat string?
+  __ bind(&check_encoding);
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, a0, Operand(kStringRepresentationMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+  __ bind(&seq_string);
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // a0: Instance type of subject string
+  STATIC_ASSERT(kStringEncodingMask == 4);
+  STATIC_ASSERT(kAsciiStringTag == 4);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  // Find the code object based on the assumptions above.
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
+  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
+  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains a smi (code flushing support).
+  __ JumpIfSmi(t9, &runtime);
+
+  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // t9: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Load used arguments before starting to push arguments for call to native
+  // RegExp code to avoid handling changing stack height.
+  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+  __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
+
+  // a1: previous index
+  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // t9: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // All checks done. Now push arguments for native regexp code.
+  __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+                      1, a0, a2);
+
+  // Isolates: note we add an additional parameter here (isolate pointer).
+  static const int kRegExpExecuteArguments = 8;
+  static const int kParameterRegisters = 4;
+  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are before that on the stack or in registers, meaning we
+  // treat the return address as argument 5. Thus every argument after that
+  // needs to be shifted back by 1. Since DirectCEntryStub will handle
+  // allocating space for the c argument slots, we don't need to calculate
+  // that into the argument positions on the stack. This is how the stack will
+  // look (sp meaning the value of sp at this moment):
+  // (word offsets, i.e. multiples of kPointerSize)
+  // [sp + 4] - Argument 8
+  // [sp + 3] - Argument 7
+  // [sp + 2] - Argument 6
+  // [sp + 1] - Argument 5
+  // [sp + 0] - saved ra
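+  //
+  // Put differently, the call below ends up passing (roughly) this argument
+  // list to the native RegExp entry point, mirroring the "Argument N"
+  // comments that follow:
+  //   (subject, previous_index, input_start, input_end,
+  //    static_offsets_vector, backtrack_stack_high_end, direct_call, isolate)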
+
+  // Argument 8: Pass current isolate address.
+  // CFunctionArgumentOperand handles MIPS stack argument slots.
+  __ li(a0, Operand(ExternalReference::isolate_address()));
+  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+
+  // Argument 7: Indicate that this is a direct call from JavaScript.
+  __ li(a0, Operand(1));
+  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+  // Argument 6: Start (high end) of backtracking stack memory area.
+  __ li(a0, Operand(address_of_regexp_stack_memory_address));
+  __ lw(a0, MemOperand(a0, 0));
+  __ li(a2, Operand(address_of_regexp_stack_memory_size));
+  __ lw(a2, MemOperand(a2, 0));
+  __ addu(a0, a0, a2);
+  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
+
+  // Argument 5: static offsets vector buffer.
+  __ li(a0, Operand(
+        ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
+
+  // For arguments 4 and 3 get string length, calculate start of string data
+  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
+  // Load the length from the original subject string from the previous stack
+  // frame. Therefore we have to use fp, which points exactly to two pointer
+  // sizes below the previous sp. (Because creating a new stack frame pushes
+  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+  __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+  // If slice offset is not 0, load the length from the original sliced string.
+  // Argument 4, a3: End of string data
+  // Argument 3, a2: Start of string data
+  // Prepare start and end index of the input.
+  __ sllv(t1, t0, a3);
+  __ addu(t0, t2, t1);
+  __ sllv(t1, a1, a3);
+  __ addu(a2, t0, t1);
+
+  __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
+  __ sra(t2, t2, kSmiTagSize);
+  __ sllv(t1, t2, a3);
+  __ addu(a3, t0, t1);
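+  // In summary (roughly): a2 = data start + ((slice offset + previous index)
+  // << shift), a3 = data start + ((slice offset + subject length) << shift),
+  // where shift is 0 for ASCII and 1 for two-byte strings.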
+  // Argument 2 (a1): Previous index.
+  // Already there
+
+  // Argument 1 (a0): Subject string.
+  __ mov(a0, subject);
+
+  // Locate the code entry and call it.
+  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+  DirectCEntryStub stub;
+  stub.GenerateCall(masm, t9);
+
+  __ LeaveExitFrame(false, no_reg);
+
+  // v0: result
+  // subject: subject string (callee saved)
+  // regexp_data: RegExp data (callee saved)
+  // last_match_info_elements: Last match info elements (callee saved)
+
+  // Check the result.
+
+  Label success;
+  __ Branch(&success, eq,
+            v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  Label failure;
+  __ Branch(&failure, eq,
+            v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  // If not exception it can only be retry. Handle that in the runtime system.
+  __ Branch(&runtime, ne,
+            v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code, but
+  // the exception has not been created yet. Handle that in the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  __ li(a1, Operand(
+      ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a1, MemOperand(a1, 0));
+  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      masm->isolate())));
+  __ lw(v0, MemOperand(a2, 0));
+  __ Branch(&runtime, eq, v0, Operand(a1));
+
+  __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.
+
+  // Check if the exception is a termination. If so, throw as uncatchable.
+  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+  Label termination_exception;
+  __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+  __ Throw(v0);  // Expects thrown value in v0.
+
+  __ bind(&termination_exception);
+  __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
+
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Process the result from the native regexp code.
+  __ bind(&success);
+  __ lw(a1,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  __ Addu(a1, a1, Operand(2));  // a1 was a smi.
+
+  // a1: number of capture registers
+  // subject: subject string
+  // Store the capture count.
+  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
+  __ sw(a2, FieldMemOperand(last_match_info_elements,
+                             RegExpImpl::kLastCaptureCountOffset));
+  // Store last subject and last input.
+  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
+  __ sw(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastSubjectOffset));
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+  __ sw(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastInputOffset));
+  __ mov(a3, last_match_info_elements);
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+
+  // Get the static offsets vector filled by the native regexp code.
+  ExternalReference address_of_static_offsets_vector =
+      ExternalReference::address_of_static_offsets_vector(masm->isolate());
+  __ li(a2, Operand(address_of_static_offsets_vector));
+
+  // a1: number of capture registers
+  // a2: offsets vector
+  Label next_capture, done;
+  // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+  __ Addu(a0,
+         last_match_info_elements,
+         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  __ bind(&next_capture);
+  __ Subu(a1, a1, Operand(1));
+  __ Branch(&done, lt, a1, Operand(zero_reg));
+  // Read the value from the static offsets vector buffer.
+  __ lw(a3, MemOperand(a2, 0));
+  __ addiu(a2, a2, kPointerSize);
+  // Store the smi value in the last match info.
+  __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
+  __ sw(a3, MemOperand(a0, 0));
+  __ Branch(&next_capture, USE_DELAY_SLOT);
+  __ addiu(a0, a0, kPointerSize);   // In branch delay slot.
+
+  __ bind(&done);
+
+  // Return last match info.
+  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to execute the regexp.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif  // V8_INTERPRETED_REGEXP
 }
 
 
 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  Label done;
+  __ lw(a1, MemOperand(sp, kPointerSize * 2));
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ JumpIfNotSmi(a1, &slowcase);
+  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
+  // Smi-tagging is equivalent to multiplying by 2.
+  // Allocate RegExpResult followed by FixedArray, with the total size in a2.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  // Size of JSArray with two in-object properties and the header of a
+  // FixedArray.
+  int objects_size =
+      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
+  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
+  __ Addu(a2, t1, Operand(objects_size));
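+  // a2 = number of result elements plus the object-header overhead, in words.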
+  __ AllocateInNewSpace(
+      a2,  // In: Size, in words.
+      v0,  // Out: Start of allocation (tagged).
+      a3,  // Scratch register.
+      t0,  // Scratch register.
+      &slowcase,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  // v0: Start of allocated area, object-tagged.
+  // a1: Number of elements in array, as smi.
+  // t1: Number of elements, untagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
+  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  // Set input, index and length fields from arguments.
+  __ lw(a1, MemOperand(sp, kPointerSize * 0));
+  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 1));
+  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 2));
+  __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+
+  // Fill out the elements FixedArray.
+  // v0: JSArray, tagged.
+  // a3: FixedArray, tagged.
+  // t1: Number of elements in array, untagged.
+
+  // Set map.
+  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+  // Set FixedArray length.
+  __ sll(t2, t1, kSmiTagSize);
+  __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+  // Fill contents of fixed-array with the-hole.
+  __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // Fill fixed array elements with hole.
+  // v0: JSArray, tagged.
+  // a2: the hole.
+  // a3: Start of elements in FixedArray.
+  // t1: Number of elements to fill.
+  Label loop;
+  __ sll(t1, t1, kPointerSizeLog2);  // Convert num elements to num bytes.
+  __ addu(t1, t1, a3);  // Point past last element to store.
+  __ bind(&loop);
+  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 past end of elem.
+  __ sw(a2, MemOperand(a3));
+  __ Branch(&loop, USE_DELAY_SLOT);
+  __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
+
+  __ bind(&done);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
 }
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label slow;
+
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
+    // Get the receiver from the stack.
+    // function, receiver [, arguments]
+    __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
+    // Call as function is indicated with the hole.
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&call, ne, t0, Operand(at));
+    // Patch the receiver on the stack with the global receiver object.
+    __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+    __ bind(&call);
+  }
+
+  // Get the function to call from the stack.
+  // function, receiver [, arguments]
+  __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+  // Check that the function is really a JavaScript function.
+  // a1: pushed function (to be verified)
+  __ JumpIfSmi(a1, &slow);
+  // Get the map of the function object.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Fast-case: Invoke the function now.
+  // a1: pushed function
+  ParameterCount actual(argc_);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&call_as_function, eq, t0, Operand(at));
+    __ InvokeFunction(a1,
+                      actual,
+                      JUMP_FUNCTION,
+                      NullCallWrapper(),
+                      CALL_AS_METHOD);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(a1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+  __ li(a0, Operand(argc_));  // Set up the number of arguments.
+  __ mov(a2, zero_reg);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
 }
 
 
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return name_;
+void CompareStub::PrintName(StringStream* stream) {
+  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+         (lhs_.is(a1) && rhs_.is(a0)));
+  const char* cc_name;
+  switch (cc_) {
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+  bool is_equality = cc_ == eq || cc_ == ne;
+  stream->Add("CompareStub_%s", cc_name);
+  stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
+  stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
+  if (strict_ && is_equality) stream->Add("_STRICT");
+  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+  if (!include_number_compare_) stream->Add("_NO_NUMBER");
+  if (!include_smi_compare_) stream->Add("_NO_SMI");
 }
 
 
 int CompareStub::MinorKey() {
-  UNIMPLEMENTED_MIPS();
-  return 0;
+  // Encode the parameters in a unique 16 bit value.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+         (lhs_.is(a1) && rhs_.is(a0)));
+  return ConditionField::encode(static_cast<unsigned>(cc_))
+         | RegisterField::encode(lhs_.is(a0))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeSmiCompareField::encode(include_smi_compare_);
 }
 
 
-// StringCharCodeAtGenerator
-
+// StringCharCodeAtGenerator.
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label flat_string;
+  Label ascii_string;
+  Label got_char_code;
+  Label sliced_string;
+
+  ASSERT(!t0.is(scratch_));
+  ASSERT(!t0.is(index_));
+  ASSERT(!t0.is(result_));
+  ASSERT(!t0.is(object_));
+
+  // If the receiver is a smi trigger the non-string case.
+  __ JumpIfSmi(object_, receiver_not_string_);
+
+  // Fetch the instance type of the receiver into result register.
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the non-string case.
+  __ And(t0, result_, Operand(kIsNotStringMask));
+  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
+
+  // If the index is non-smi trigger the non-smi case.
+  __ JumpIfNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
+  __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+
+  // Handle non-flat strings.
+  __ And(result_, result_, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
+  __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
+
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  Label assure_seq_string;
+  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+  __ Branch(&call_runtime_, ne, result_, Operand(t0));
+
+  // Get the first of the two strings and load its instance type.
+  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
+  __ addu(scratch_, scratch_, result_);
+  __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+
+  // Assure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // Check that parent is not an external string. Go to runtime otherwise.
+  STATIC_ASSERT(kSeqStringTag == 0);
+
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+
+  // Check for 1-byte or 2-byte string.
+  __ bind(&flat_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ And(t0, result_, Operand(kStringEncodingMask));
+  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register. We can
+  // add without shifting since the smi tag size is the log2 of the
+  // number of bytes in a two-byte character.
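+  // (A smi-tagged index is the index times two, which is exactly the byte
+  // offset of the corresponding two-byte character.)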
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+  __ Addu(scratch_, object_, Operand(scratch_));
+  __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+  __ Branch(&got_char_code);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+
+  __ srl(t0, scratch_, kSmiTagSize);
+  __ Addu(scratch_, object_, t0);
+
+  __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+  __ bind(&got_char_code);
+  __ sll(result_, result_, kSmiTagSize);
+  __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+  // Index is not a smi.
+  __ bind(&index_not_smi_);
+  // If index is a heap number, try converting it to an integer.
+  __ CheckMap(index_,
+              scratch_,
+              Heap::kHeapNumberMapRootIndex,
+              index_not_number_,
+              DONT_DO_SMI_CHECK);
+  call_helper.BeforeCall(masm);
+  // Consumed by runtime conversion function:
+  __ Push(object_, index_, index_);
+  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+  } else {
+    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    // NumberToSmi discards numbers that are not exact integers.
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
+  }
+
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+
+  __ Move(scratch_, v0);
+
+  __ pop(index_);
+  __ pop(object_);
+  // Reload the instance type.
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  call_helper.AfterCall(masm);
+  // If index is still not a smi, it must be out of range.
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  // Otherwise, return to the fast path.
+  __ Branch(&got_smi_index_);
+
+  // Call runtime. We get here when the receiver is a string and the
+  // index is a number, but the code of getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+  __ bind(&call_runtime_);
+  call_helper.BeforeCall(masm);
+  __ Push(object_, index_);
+  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+
+  __ Move(result_, v0);
+
+  call_helper.AfterCall(masm);
+  __ jmp(&exit_);
+
+  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
 }
 
 
@@ -532,13 +5167,46 @@
 // StringCharFromCodeGenerator
 
 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+  ASSERT(!t0.is(result_));
+  ASSERT(!t0.is(code_));
+
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+  __ And(t0,
+         code_,
+         Operand(kSmiTagMask |
+                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
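+  // t0 is non-zero (and we take the slow case) if code_ is not a smi or its
+  // untagged value exceeds String::kMaxAsciiCharCode.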
+  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
+
+  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+  // At this point code register contains smi tagged ASCII char code.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
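+  // t0 is now the untagged char code times kPointerSize (the cache offset).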
+  __ Addu(result_, result_, t0);
+  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case_, eq, result_, Operand(t0));
+  __ bind(&exit_);
 }
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+  __ bind(&slow_case_);
+  call_helper.BeforeCall(masm);
+  __ push(code_);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+  __ Move(result_, v0);
+
+  call_helper.AfterCall(masm);
+  __ Branch(&exit_);
+
+  __ Abort("Unexpected fallthrough from CharFromCode slow case");
 }
 
 
@@ -546,13 +5214,15 @@
 // StringCharAtGenerator
 
 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  char_code_at_generator_.GenerateFast(masm);
+  char_from_code_generator_.GenerateFast(masm);
 }
 
 
 void StringCharAtGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  char_code_at_generator_.GenerateSlow(masm, call_helper);
+  char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
@@ -626,7 +5296,24 @@
                                           Register count,
                                           Register scratch,
                                           bool ascii) {
-  UNIMPLEMENTED_MIPS();
+  Label loop;
+  Label done;
+  // This loop just copies one character at a time, as it is only used for
+  // very short strings.
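+  // In C terms this is roughly:
+  //   if (!ascii) count *= 2;                // bytes rather than characters
+  //   char* limit = dest + count;
+  //   while (dest < limit) *dest++ = *src++;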
+  if (!ascii) {
+    __ addu(count, count, count);
+  }
+  __ Branch(&done, eq, count, Operand(zero_reg));
+  __ addu(count, dest, count);  // Count now points just past last dest byte.
+
+  __ bind(&loop);
+  __ lbu(scratch, MemOperand(src));
+  __ addiu(src, src, 1);
+  __ sb(scratch, MemOperand(dest));
+  __ addiu(dest, dest, 1);
+  __ Branch(&loop, lt, dest, Operand(count));
+
+  __ bind(&done);
 }
 
 
@@ -646,7 +5333,105 @@
                                               Register scratch4,
                                               Register scratch5,
                                               int flags) {
-  UNIMPLEMENTED_MIPS();
+  bool ascii = (flags & COPY_ASCII) != 0;
+  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+  if (dest_always_aligned && FLAG_debug_code) {
+    // Check that destination is actually word aligned if the flag says
+    // that it is.
+    __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+    __ Check(eq,
+             "Destination of copy not aligned.",
+             scratch4,
+             Operand(zero_reg));
+  }
+
+  const int kReadAlignment = 4;
+  const int kReadAlignmentMask = kReadAlignment - 1;
+  // Ensure that reading an entire aligned word containing the last character
+  // of a string will not read outside the allocated area (because we pad up
+  // to kObjectAlignment).
+  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+  // Assumes word reads and writes are little endian.
+  // Nothing to do for zero characters.
+  Label done;
+
+  if (!ascii) {
+    __ addu(count, count, count);
+  }
+  __ Branch(&done, eq, count, Operand(zero_reg));
+
+  Label byte_loop;
+  // Must copy at least eight bytes, otherwise just do it one byte at a time.
+  __ Subu(scratch1, count, Operand(8));
+  __ Addu(count, dest, Operand(count));
+  Register limit = count;  // Read until src equals this.
+  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
+
+  if (!dest_always_aligned) {
+    // Align dest by byte copying. Copies between zero and three bytes.
+    __ And(scratch4, dest, Operand(kReadAlignmentMask));
+    Label dest_aligned;
+    __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
+    Label aligned_loop;
+    __ bind(&aligned_loop);
+    __ lbu(scratch1, MemOperand(src));
+    __ addiu(src, src, 1);
+    __ sb(scratch1, MemOperand(dest));
+    __ addiu(dest, dest, 1);
+    __ addiu(scratch4, scratch4, 1);
+    __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
+    __ bind(&dest_aligned);
+  }
+
+  Label simple_loop;
+
+  __ And(scratch4, src, Operand(kReadAlignmentMask));
+  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
+
+  // Loop for src/dst that are not aligned the same way.
+  // This loop uses lwl and lwr instructions. These instructions
+  // depend on the endianness, and the implementation assumes little-endian.
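+  // The lwr at src followed by the lwl at src + 3 (MemOperand(src, -1) after
+  // src has been advanced by 4) is the usual MIPS idiom for an unaligned
+  // little-endian word load into scratch1.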
+  {
+    Label loop;
+    __ bind(&loop);
+    __ lwr(scratch1, MemOperand(src));
+    __ Addu(src, src, Operand(kReadAlignment));
+    __ lwl(scratch1, MemOperand(src, -1));
+    __ sw(scratch1, MemOperand(dest));
+    __ Addu(dest, dest, Operand(kReadAlignment));
+    __ Subu(scratch2, limit, dest);
+    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+  }
+
+  __ Branch(&byte_loop);
+
+  // Simple loop.
+  // Copy words from src to dest, until less than four bytes left.
+  // Both src and dest are word aligned.
+  __ bind(&simple_loop);
+  {
+    Label loop;
+    __ bind(&loop);
+    __ lw(scratch1, MemOperand(src));
+    __ Addu(src, src, Operand(kReadAlignment));
+    __ sw(scratch1, MemOperand(dest));
+    __ Addu(dest, dest, Operand(kReadAlignment));
+    __ Subu(scratch2, limit, dest);
+    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+  }
+
+  // Copy bytes from src to dest until dest hits limit.
+  __ bind(&byte_loop);
+  // Test if dest has already reached the limit.
+  __ Branch(&done, ge, dest, Operand(limit));
+  __ lbu(scratch1, MemOperand(src));
+  __ addiu(src, src, 1);
+  __ sb(scratch1, MemOperand(dest));
+  __ addiu(dest, dest, 1);
+  __ Branch(&byte_loop);
+
+  __ bind(&done);
 }
 
 
@@ -659,88 +5444,1523 @@
                                                         Register scratch4,
                                                         Register scratch5,
                                                         Label* not_found) {
-  UNIMPLEMENTED_MIPS();
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits, as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
+  __ Branch(&not_array_index,
+            Ugreater,
+            scratch,
+            Operand(static_cast<int>('9' - '0')));
+  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
+
+  // If the check failed, combine both characters into a single halfword.
+  // This is required by the contract of the method: code at the
+  // not_found branch expects this combination in the c1 register.
+  Label tmp;
+  __ sll(scratch1, c2, kBitsPerByte);
+  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
+  __ Or(c1, c1, scratch1);
+  __ bind(&tmp);
+  __ Branch(not_found,
+            Uless_equal,
+            scratch,
+            Operand(static_cast<int>('9' - '0')));
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  StringHelper::GenerateHashInit(masm, hash, c1);
+  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+  StringHelper::GenerateHashGetHash(masm, hash);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ sll(scratch, c2, kBitsPerByte);
+  __ Or(chars, chars, scratch);
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load symbol table.
+  // Load address of first element of the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ sra(mask, mask, 1);
+  __ Addu(mask, mask, -1);
+
+  // Calculate untagged address of the first element of the symbol table.
+  Register first_symbol_table_element = symbol_table;
+  __ Addu(first_symbol_table_element, symbol_table,
+         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+  // Registers.
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string
+  // mask:  capacity mask
+  // first_symbol_table_element: address of the first element of
+  //                             the symbol table
+  // undefined: the undefined object
+  // scratch: -
+
+  // Perform a number of probes in the symbol table.
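+  // Each probe inspects the entry at index (hash + GetProbeOffset(i)) & mask.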
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  Register candidate = scratch5;  // Scratch register contains candidate.
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    if (i > 0) {
+      __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+    } else {
+      __ mov(candidate, hash);
+    }
+
+    __ And(candidate, candidate, Operand(mask));
+
+    // Load the entry from the symbol table.
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+    __ sll(scratch, candidate, kPointerSizeLog2);
+    __ Addu(scratch, scratch, first_symbol_table_element);
+    __ lw(candidate, MemOperand(scratch));
+
+    // If the entry is undefined, no string with this hash can be found.
+    Label is_string;
+    __ GetObjectType(candidate, scratch, scratch);
+    __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
+
+    __ Branch(not_found, eq, undefined, Operand(candidate));
+    // Must be null (deleted entry).
+    if (FLAG_debug_code) {
+      __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+      __ Assert(eq, "oddball in symbol table is not undefined or null",
+          scratch, Operand(candidate));
+    }
+    __ jmp(&next_probe[i]);
+
+    __ bind(&is_string);
+
+    // Check that the candidate is a non-external ASCII string.  The instance
+    // type is still in the scratch register from the GetObjectType
+    // operation.
+    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
+
+    // If length is not 2 the string is not a candidate.
+    __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+    __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
+
+    // Check if the two characters match.
+    // Assumes that word load is little endian.
+    __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+    __ bind(&next_probe[i]);
+  }
+
+  // No matching 2 character string found by probing.
+  __ jmp(not_found);
+
+  // Scratch register contains result when we fall through to here.
+  Register result = candidate;
+  __ bind(&found_in_symbol_table);
+  __ mov(v0, result);
 }
 
 
 void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                      Register hash,
-                                      Register character) {
-  UNIMPLEMENTED_MIPS();
+                                    Register hash,
+                                    Register character) {
+  // hash = seed + character + ((seed + character) << 10);
+  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+  // Untag smi seed and add the character.
+  __ SmiUntag(hash);
+  __ addu(hash, hash, character);
+  __ sll(at, hash, 10);
+  __ addu(hash, hash, at);
+  // hash ^= hash >> 6;
+  __ sra(at, hash, 6);
+  __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                              Register hash,
-                                              Register character) {
-  UNIMPLEMENTED_MIPS();
+                                            Register hash,
+                                            Register character) {
+  // hash += character;
+  __ addu(hash, hash, character);
+  // hash += hash << 10;
+  __ sll(at, hash, 10);
+  __ addu(hash, hash, at);
+  // hash ^= hash >> 6;
+  __ sra(at, hash, 6);
+  __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                         Register hash) {
-  UNIMPLEMENTED_MIPS();
+                                       Register hash) {
+  // hash += hash << 3;
+  __ sll(at, hash, 3);
+  __ addu(hash, hash, at);
+  // hash ^= hash >> 11;
+  __ sra(at, hash, 11);
+  __ xor_(hash, hash, at);
+  // hash += hash << 15;
+  __ sll(at, hash, 15);
+  __ addu(hash, hash, at);
+
+  // if (hash == 0) hash = 27;
+  __ ori(at, zero_reg, 27);
+  __ movz(hash, at, hash);
 }
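+
+// Taken together, the three hash helpers above compute (roughly):
+//   hash = seed + first_char;                // GenerateHashInit
+//   hash += hash << 10;  hash ^= hash >> 6;
+//   hash += next_char;                       // GenerateHashAddCharacter
+//   hash += hash << 10;  hash ^= hash >> 6;
+//   hash += hash << 3;   hash ^= hash >> 11; hash += hash << 15;  // GetHash
+//   if (hash == 0) hash = 27;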
 
 
 void SubStringStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label sub_string_runtime;
+  // Stack frame on entry.
+  //  ra: return address
+  //  sp[0]: to
+  //  sp[4]: from
+  //  sp[8]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  static const int kToOffset = 0 * kPointerSize;
+  static const int kFromOffset = 1 * kPointerSize;
+  static const int kStringOffset = 2 * kPointerSize;
+
+  Register to = t2;
+  Register from = t3;
+
+  // Check bounds and smi-ness.
+  __ lw(to, MemOperand(sp, kToOffset));
+  __ lw(from, MemOperand(sp, kFromOffset));
+  STATIC_ASSERT(kFromOffset == kToOffset + 4);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+  __ JumpIfNotSmi(from, &sub_string_runtime);
+  __ JumpIfNotSmi(to, &sub_string_runtime);
+
+  __ sra(a3, from, kSmiTagSize);  // Remove smi tag.
+  __ sra(t5, to, kSmiTagSize);  // Remove smi tag.
+
+  // a3: from index (untagged smi)
+  // t5: to index (untagged smi)
+
+  __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg));  // From < 0.
+
+  __ subu(a2, t5, a3);
+  __ Branch(&sub_string_runtime, gt, a3, Operand(t5));  // Fail if from > to.
+
+  // Special handling of sub-strings of length 1 and 2. One character strings
+  // are handled in the runtime system (looked up in the single character
+  // cache). Two character strings are looked for in the symbol cache in
+  // generated code.
+  __ Branch(&sub_string_runtime, lt, a2, Operand(2));
+
+  // Both to and from are smis.
+
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Make sure first argument is a sequential (or flat) string.
+  __ lw(v0, MemOperand(sp, kStringOffset));
+  __ Branch(&sub_string_runtime, eq, v0, Operand(kSmiTagMask));
+
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ And(t4, v0, Operand(kIsNotStringMask));
+
+  __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+
+  // Short-cut for the case of trivial substring.
+  Label return_v0;
+  // v0: original string
+  // a2: result string length
+  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
+  __ sra(t0, t0, 1);
+  __ Branch(&return_v0, eq, a2, Operand(t0));
+
+  Label create_slice;
+  if (FLAG_string_slices) {
+    __ Branch(&create_slice, ge, a2, Operand(SlicedString::kMinLength));
+  }
+
+  // v0: original string
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  Label seq_string;
+  __ And(t0, a1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
+
+  // Slices and external strings go to runtime.
+  __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
+
+  // Sequential strings are handled directly.
+  __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much).
+  __ lw(v0, FieldMemOperand(v0, ConsString::kFirstOffset));
+  __ lw(t0, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  // Cons, slices and external strings go to runtime.
+  __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // v0: original string
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
+  __ Branch(&sub_string_runtime, lt, t0, Operand(to));  // Fail if to > length.
+  to = no_reg;
+
+  // v0: original string or left hand side of the original cons string.
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Check for flat ASCII string.
+  Label non_ascii_flat;
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+
+  __ And(t4, a1, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
+
+  Label result_longer_than_two;
+  __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+
+  // Sub string of length 2 requested.
+  // Get the two characters forming the sub string.
+  __ Addu(v0, v0, Operand(a3));
+  __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+  __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to lookup two character string in symbol table.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
+  Counters* counters = masm->isolate()->counters();
+  __ jmp(&return_v0);
+
+  // a2: result string length.
+  // a3: two characters combined into halfword in little endian byte order.
+  __ bind(&make_two_character_string);
+  __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+  __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+  __ jmp(&return_v0);
+
+  __ bind(&result_longer_than_two);
+
+  // Locate 'from' character of string.
+  __ Addu(t1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ sra(t4, from, 1);
+  __ Addu(t1, t1, t4);
+
+  // Allocate the result.
+  __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+
+  // v0: result string
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: first character of substring to copy
+  // t3: (a.k.a. from): from offset (smi)
+  // Locate first character of result.
+  __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+  // v0: result string
+  // a1: first character of result string
+  // a2: result string length
+  // t1: first character of substring to copy
+  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharactersLong(
+      masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+  __ jmp(&return_v0);
+
+  __ bind(&non_ascii_flat);
+  // a2: result string length
+  // t1: string
+  // t3: (a.k.a. from): from offset (smi)
+  // Check for flat two byte string.
+
+  // Locate 'from' character of string.
+  __ Addu(t1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi, it is twice the untagged value, which matches the
+  // size of a two-byte character.
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ Addu(t1, t1, Operand(from));
+
+  // Allocate the result.
+  __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
+
+  // v0: result string
+  // a2: result string length
+  // t1: first character of substring to copy
+  // Locate first character of result.
+  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  from = no_reg;
+
+  // v0: result string.
+  // a1: first character of result.
+  // a2: result length.
+  // t1: first character of substring to copy.
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharactersLong(
+      masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+  __ jmp(&return_v0);
+
+  if (FLAG_string_slices) {
+    __ bind(&create_slice);
+    // v0: original string
+    // a1: instance type
+    // a2: length
+    // a3: from index (untagged smi)
+    // t2 (a.k.a. to): to (smi)
+    // t3 (a.k.a. from): from offset (smi)
+    Label allocate_slice, sliced_string, seq_string;
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ And(t4, a1, Operand(kStringRepresentationMask));
+    __ Branch(&seq_string, eq, t4, Operand(zero_reg));
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ And(t4, a1, Operand(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
+
+    __ And(t4, a1, Operand(kSlicedNotConsMask));
+    __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
+    __ LoadRoot(t5, Heap::kEmptyStringRootIndex);
+    __ Branch(&sub_string_runtime, ne, t1, Operand(t5));
+    __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+    __ addu(t3, t3, t1);
+    __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ mov(t1, v0);
+
+    __ bind(&allocate_slice);
+    // a1: instance type of original string
+    // a2: length
+    // t1: underlying subject string
+    // t3 (a.k.a. from): from offset (smi)
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ And(t4, a1, Operand(kStringEncodingMask));
+    __ Branch(&two_byte_slice, eq, t4, Operand(zero_reg));
+    __ AllocateAsciiSlicedString(v0, a2, a3, t0, &sub_string_runtime);
+    __ jmp(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(v0, a2, a3, t0, &sub_string_runtime);
+    __ bind(&set_slice_header);
+    __ sw(t3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+  }
+
+  __ bind(&return_v0);
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to create the sub string.
+  __ bind(&sub_string_runtime);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2,
+                                                      Register scratch3) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
+  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+  __ bind(&strings_not_equal);
+  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  // Compare characters.
+  __ bind(&compare_chars);
+
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, length, scratch2, scratch3, v0,
+                                &strings_not_equal);
+
+  // Characters are equal.
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
 }
 
 
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register right,
                                                         Register left,
+                                                        Register right,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         Register scratch4) {
-  UNIMPLEMENTED_MIPS();
+  Label result_not_equal, compare_lengths;
+  // Find minimum length and length difference.
+  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Subu(scratch3, scratch1, Operand(scratch2));
+  Register length_delta = scratch3;
+  __ slt(scratch4, scratch2, scratch1);
+  __ movn(scratch1, scratch2, scratch4);
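+  // scratch4 = (right length < left length); movn then selects the smaller
+  // length into scratch1.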
+  Register min_length = scratch1;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+  // Compare loop.
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, min_length, scratch2, scratch4, v0,
+                                &result_not_equal);
+
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use length_delta as result if it's zero.
+  __ mov(scratch2, length_delta);
+  __ mov(scratch4, zero_reg);
+  __ mov(v0, zero_reg);
+
+  __ bind(&result_not_equal);
+  // Conditionally update the result based on either length_delta or
+  // the last comparison performed in the loop above.
+  Label ret;
+  __ Branch(&ret, eq, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(GREATER)));
+  __ Branch(&ret, gt, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(LESS)));
+  __ bind(&ret);
+  __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ Addu(scratch1, length,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(left, left, Operand(scratch1));
+  __ Addu(right, right, Operand(scratch1));
+  __ Subu(length, zero_reg, length);
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ Addu(scratch3, left, index);
+  __ lbu(scratch1, MemOperand(scratch3));
+  __ Addu(scratch3, right, index);
+  __ lbu(scratch2, MemOperand(scratch3));
+  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+  __ Addu(index, index, 1);
+  __ Branch(&loop, ne, index, Operand(zero_reg));
 }
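+
+// Index trick used by the loop above, sketched for a length of 3
+// (illustrative only):
+//   left += header_size + 3;  right += header_size + 3;  index = -3;
+//   loop: compare *(left + index) with *(right + index); ++index;
+//         repeat while index != 0.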
 
 
 void StringCompareStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label runtime;
+
+  Counters* counters = masm->isolate()->counters();
+
+  // Stack frame on entry.
+  //  sp[0]: right string
+  //  sp[4]: left string
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
+  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
+
+  Label not_same;
+  __ Branch(&not_same, ne, a0, Operand(a1));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ASCII strings.
+  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+
+  // Compare flat ASCII strings natively. Remove arguments from stack first.
+  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+
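+  // Note: GenerateCompareFlatAsciiStrings above always returns through v0,
+  // so the runtime path below is only reached via the sequential ASCII check
+  // branch.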
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label string_add_runtime, call_builtin;
+  Builtins::JavaScript builtin_id = Builtins::ADD;
+
+  Counters* counters = masm->isolate()->counters();
+
+  // Stack on entry:
+  // sp[0]: second argument (right).
+  // sp[4]: first argument (left).
+
+  // Load the two arguments.
+  __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
+  __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (flags_ == NO_STRING_ADD_FLAGS) {
+    __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+    // Load instance types.
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+    STATIC_ASSERT(kStringTag == 0);
+    // If either is not a string, go to runtime.
+    __ Or(t4, t0, Operand(t1));
+    __ And(t4, t4, Operand(kIsNotStringMask));
+    __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+  } else {
+    // Here at least one of the arguments is definitely a string.
+    // We convert the one that is not known to be a string.
+    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+      GenerateConvertArgument(
+          masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+      builtin_id = Builtins::STRING_ADD_RIGHT;
+    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+      GenerateConvertArgument(
+          masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+      builtin_id = Builtins::STRING_ADD_LEFT;
+    }
+  }
+
+  // Both arguments are strings.
+  // a0: first string
+  // a1: second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  {
+    Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
+    // These tests use a zero-length check on the string length (a Smi).
+    // Assert that Smi::FromInt(0) is really 0.
+    STATIC_ASSERT(kSmiTag == 0);
+    ASSERT(Smi::FromInt(0) == 0);
+    __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
+    __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
+    __ mov(v0, a0);       // Assume we'll return first string (from a0).
+    __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
+    __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
+    __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
+    __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
+    __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
+
+    __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+    __ Addu(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
+
+    __ bind(&strings_not_empty);
+  }
+
+  // Untag both string-lengths.
+  __ sra(a2, a2, kSmiTagSize);
+  __ sra(a3, a3, kSmiTagSize);
+
+  // Both strings are non-empty.
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // Look at the length of the result of adding the two strings.
+  Label string_add_flat_result, longer_than_two;
+  // Adding two lengths can't overflow.
+  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+  __ Addu(t2, a2, Operand(a3));
+  // Use the symbol table when adding two one character strings, as it
+  // helps later optimizations to return a symbol here.
+  __ Branch(&longer_than_two, ne, t2, Operand(2));
+
+  // Check that both strings are non-external ASCII strings.
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
+                                                  &string_add_runtime);
+
+  // Get the two characters forming the sub string.
+  __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+  __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two-character string in the symbol table. If it is
+  // not found, just allocate a new one.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&make_two_character_string);
+  // The resulting string has length 2, and the first characters of the two
+  // strings are combined into a single halfword in the a2 register. So we
+  // can fill the resulting string with a single halfword store instruction
+  // (which assumes the processor is in little-endian mode) instead of two
+  // loops.
+  __ li(t2, Operand(2));
+  __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+  __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&longer_than_two);
+  // Check if resulting string will be flat.
+  __ Branch(&string_add_flat_result, lt, t2,
+           Operand(String::kMinNonFlatLength));
+  // Handle exceptionally long strings in the runtime system.
+  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+  ASSERT(IsPowerOf2(String::kMaxLength + 1));
+  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
+  __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+
+  // If result is not supposed to be flat, allocate a cons string object.
+  // If both strings are ASCII the result is an ASCII cons string.
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  Label non_ascii, allocated, ascii_data;
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+  __ And(t4, t0, Operand(t1));
+  __ And(t4, t4, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
+
+  // Allocate an ASCII cons string.
+  __ bind(&ascii_data);
+  __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ bind(&allocated);
+  // Fill the fields of the cons string.
+  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii);
+  // At least one of the strings is two-byte. Check whether it happens
+  // to contain only ASCII characters.
+  // t0: first instance type.
+  // t1: second instance type.
+  // Branch to ascii_data if _both_ instances have the kAsciiDataHintMask
+  // bit set.
+  __ And(at, t0, Operand(kAsciiDataHintMask));
+  __ and_(at, at, t1);
+  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
+
+  __ xor_(t0, t0, t1);
+  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
+  // Allocate a two byte cons string.
+  __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ Branch(&allocated);
+
+  // Handle creating a flat result. First check that both strings are
+  // sequential and that they have the same encoding.
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t2: sum of lengths.
+  __ bind(&string_add_flat_result);
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  // Check that both strings are sequential, meaning that we
+  // branch to runtime if either string tag is non-zero.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ Or(t4, t0, Operand(t1));
+  __ And(t4, t4, Operand(kStringRepresentationMask));
+  __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+
+  // Now check if both strings have the same encoding (ASCII/Two-byte).
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type
+  // t1: second string instance type
+  // t2: sum of lengths.
+  Label non_ascii_string_add_flat_result;
+  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
+  __ xor_(t3, t1, t0);
+  __ And(t3, t3, Operand(kStringEncodingMask));
+  __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
+  // And see if it's ASCII (0) or two-byte (1).
+  __ And(t3, t0, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+
+  // Both strings are sequential ASCII strings. We also know that they are
+  // short (since the sum of the lengths is less than kMinNonFlatLength).
+  // t2: length of resulting flat string
+  __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a0: first character of first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: first character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
+
+  // Load second argument and locate first character.
+  __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a1: first character of second string.
+  // a3: length of second string.
+  // t2: next character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_string_add_flat_result);
+  // Both strings are sequential two byte strings.
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: sum of length of strings.
+  __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t3: result string.
+
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a0: first character of first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: first character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
+
+  // Locate first character of second argument.
+  __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a1: first character of second string.
+  // a3: length of second string.
+  // t2: next character of result (after copy of first string).
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
+
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to add the two strings.
+  __ bind(&string_add_runtime);
+  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+  if (call_builtin.is_linked()) {
+    __ bind(&call_builtin);
+    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+  }
+}
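+
+// High-level outline of the stub above (illustrative summary):
+//   1. If either argument is empty, return the other one.
+//   2. If the combined length is 2, look the pair up in the symbol table,
+//      falling back to allocating a fresh two-character ASCII string.
+//   3. If the combined length is >= String::kMinNonFlatLength (and within
+//      String::kMaxLength), allocate a cons string (ASCII or two-byte).
+//   4. Otherwise allocate a flat sequential string and copy both parts.
+//   5. Overlong or non-sequential inputs fall back to Runtime::kStringAdd.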
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+                                            int stack_offset,
+                                            Register arg,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Register scratch4,
+                                            Label* slow) {
+  // First check if the argument is already a string.
+  Label not_string, done;
+  __ JumpIfSmi(arg, &not_string);
+  __ GetObjectType(arg, scratch1, scratch1);
+  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
+
+  // Check the number to string cache.
+  Label not_cached;
+  __ bind(&not_string);
+  // Puts the cached result into scratch1.
+  NumberToStringStub::GenerateLookupNumberStringCache(masm,
+                                                      arg,
+                                                      scratch1,
+                                                      scratch2,
+                                                      scratch3,
+                                                      scratch4,
+                                                      false,
+                                                      &not_cached);
+  __ mov(arg, scratch1);
+  __ sw(arg, MemOperand(sp, stack_offset));
+  __ jmp(&done);
+
+  // Check if the argument is a safe string wrapper.
+  __ bind(&not_cached);
+  __ JumpIfSmi(arg, slow);
+  __ GetObjectType(arg, scratch1, scratch2);  // map -> scratch1.
+  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ And(scratch2, scratch2, scratch4);
+  __ Branch(slow, ne, scratch2, Operand(scratch4));
+  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+  __ sw(arg, MemOperand(sp, stack_offset));
+
+  __ bind(&done);
 }
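+
+// Conversion order used above (summary): keep the argument if it is already
+// a string; otherwise try the number-to-string cache; otherwise unwrap a
+// String wrapper (JSValue) marked safe for the default valueOf; anything
+// else is handed to the builtin via the slow path.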
 
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::SMIS);
+  Label miss;
+  __ Or(a2, a1, a0);
+  __ JumpIfNotSmi(a2, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    __ Subu(v0, a0, a1);
+  } else {
+    // Untag before subtracting to avoid handling overflow.
+    __ SmiUntag(a1);
+    __ SmiUntag(a0);
+    __ Subu(v0, a1, a0);
+  }
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
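+
+// Result convention used above (illustrative summary): for equality the stub
+// returns the tagged difference of the operands (zero iff equal, sign
+// ignored); for ordering it untags both smis first, so the 31-bit difference
+// in v0 cannot overflow and its sign gives the comparison result.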
 
 
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  Label generic_stub;
+  Label unordered;
+  Label miss;
+  __ And(a2, a1, Operand(a0));
+  __ JumpIfSmi(a2, &generic_stub);
+
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+
+  // Inlining the double comparison and falling back to the general compare
+  // stub if NaN is involved or FPU is unsupported.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    // Load left and right operand.
+    __ Subu(a2, a1, Operand(kHeapObjectTag));
+    __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+    __ Subu(a2, a0, Operand(kHeapObjectTag));
+    __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+
+    Label fpu_eq, fpu_lt, fpu_gt;
+    // Compare operands (test if unordered).
+    __ c(UN, D, f0, f2);
+    // Don't base result on status bits when a NaN is involved.
+    __ bc1t(&unordered);
+    __ nop();
+
+    // Test if equal.
+    __ c(EQ, D, f0, f2);
+    __ bc1t(&fpu_eq);
+    __ nop();
+
+    // Test if unordered or less (unordered case is already handled).
+    __ c(ULT, D, f0, f2);
+    __ bc1t(&fpu_lt);
+    __ nop();
+
+    // Otherwise it's greater.
+    __ bc1f(&fpu_gt);
+    __ nop();
+
+    // Return a result of -1, 0, or 1.
+    __ bind(&fpu_eq);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+
+    __ bind(&fpu_lt);
+    __ li(v0, Operand(LESS));
+    __ Ret();
+
+    __ bind(&fpu_gt);
+    __ li(v0, Operand(GREATER));
+    __ Ret();
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+  __ bind(&generic_stub);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
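+
+// FPU path above, sketched as C-like pseudo-code (illustrative only):
+//   if (isnan(left) || isnan(right)) goto generic_stub;  // unordered
+//   if (left == right) return EQUAL;
+//   return left < right ? LESS : GREATER;
+// Without FPU support the generic CompareStub is used directly.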
+
+
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are symbols.
+  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(tmp1, tmp1, Operand(tmp2));
+  __ And(tmp1, tmp1, kIsSymbolMask);
+  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(a0));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(v0, right);
+  // Symbols are compared by identity.
+  __ Ret(ne, left, Operand(right));
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+  Register tmp3 = t0;
+  Register tmp4 = t1;
+  Register tmp5 = t2;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ Or(tmp3, tmp1, tmp2);
+  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+  // Fast check for identical strings.
+  Label left_ne_right;
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+  __ mov(v0, zero_reg);  // In the delay slot.
+  __ Ret();
+  __ bind(&left_ne_right);
+
+  // Handle not identical strings.
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  ASSERT(GetCondition() == eq);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(tmp3, tmp1, Operand(tmp2));
+  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+  Label is_symbol;
+  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
+  __ mov(v0, a0);  // In the delay slot.
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(a0));
+  __ Ret();
+  __ bind(&is_symbol);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+                                                  &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2, tmp3);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ Push(left, right);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::OBJECTS);
+  Label miss;
+  __ And(a2, a1, Operand(a0));
+  __ JumpIfSmi(a2, &miss);
+
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+  ASSERT(GetCondition() == eq);
+  __ Subu(v0, a0, Operand(a1));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Push(a1, a0);
+  __ push(ra);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                             masm->isolate());
+  __ EnterInternalFrame();
+  __ Push(a1, a0);
+  __ li(t0, Operand(Smi::FromInt(op_)));
+  __ push(t0);
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+  // Compute the entry point of the rewritten stub.
+  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore registers.
+  __ pop(ra);
+  __ pop(a0);
+  __ pop(a1);
+  __ Jump(a2);
 }
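+
+// Miss protocol sketch (illustrative): the operands and the Smi-tagged op_
+// are passed to IC::kCompareIC_Miss, which returns the code object of the
+// next specialized stub; the stub then restores a0, a1 and ra and jumps to
+// that code's entry point (Code::kHeaderSize adjusted for the heap tag).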
 
 
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
-                                Register receiver,
-                                Register key,
-                                Register elements_map,
-                                Register elements,
-                                Register scratch1,
-                                Register scratch2,
-                                Register result,
-                                Label* not_pixel_array,
-                                Label* key_not_smi,
-                                Label* out_of_range) {
-  UNIMPLEMENTED_MIPS();
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  // No need to pop or drop anything, LeaveExitFrame will restore the old
+  // stack, thus dropping the allocated space for the return value.
+  // The saved ra is after the reserved stack space for the 4 args.
+  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
+
+  if (FLAG_debug_code && EnableSlowAsserts()) {
+    // In case of an error the return address may point to a memory area
+    // filled with kZapValue by the GC.
+    // Dereference the address and check for this.
+    __ lw(t0, MemOperand(t9));
+    __ Assert(ne, "Received invalid return address.", t0,
+        Operand(reinterpret_cast<uint32_t>(kZapValue)));
+  }
+  __ Jump(t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    ExternalReference function) {
+  __ li(t9, Operand(function));
+  this->GenerateCall(masm, t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    Register target) {
+  __ Move(t9, target);
+  __ AssertStackIsAligned();
+  // Allocate space for arg slots.
+  __ Subu(sp, sp, kCArgsSlotsSize);
+
+  // Block the trampoline pool through the whole function to make sure the
+  // number of generated instructions is constant.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+
+  // We need to get the current 'pc' value, which is not available on MIPS.
+  Label find_ra;
+  masm->bal(&find_ra);  // ra = pc + 8.
+  masm->nop();  // Branch delay slot nop.
+  masm->bind(&find_ra);
+
+  const int kNumInstructionsToJump = 6;
+  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
+  // Push return address (accessible to GC through exit frame pc).
+  // This spot for ra was reserved in EnterExitFrame.
+  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
+  masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+                    RelocInfo::CODE_TARGET), true);
+  // Call the function.
+  masm->Jump(t9);
+  // Make sure the stored 'ra' points to this position.
+  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+}
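+
+// Return-address trick used above (illustrative): the current pc is not
+// directly readable on MIPS, so 'bal find_ra' materializes it in ra. Adding
+// kNumInstructionsToJump * kPointerSize makes the saved ra point just past
+// the 'Jump(t9)' below; that saved slot is what DirectCEntryStub::Generate
+// reloads into t9 and jumps to once the C call returns.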
+
+
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register receiver,
+    Register properties,
+    String* name,
+    Register scratch0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+  // not equal to the name and kProbes-th slot is not used (its name is the
+  // undefined value), it guarantees the hash table doesn't contain the
+  // property. It's true even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
+    __ Subu(index, index, Operand(1));
+    __ And(index, index, Operand(
+         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // index *= 3.
+    __ mov(at, index);
+    __ sll(index, index, 1);
+    __ Addu(index, index, at);
+
+    Register entity_name = scratch0;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    Register tmp = properties;
+
+    __ sll(scratch0, index, 1);
+    __ Addu(tmp, properties, scratch0);
+    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    ASSERT(!tmp.is(entity_name));
+    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+    __ Branch(done, eq, entity_name, Operand(tmp));
+
+    if (i != kInlinedProbes - 1) {
+      // Stop if found the property.
+      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
+
+      // Check if the entry name is not a symbol.
+      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+      __ lbu(entity_name,
+             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+      __ And(scratch0, entity_name, Operand(kIsSymbolMask));
+      __ Branch(miss, eq, scratch0, Operand(zero_reg));
+
+      // Restore the properties.
+      __ lw(properties,
+            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+    }
+  }
+
+  const int spill_mask =
+      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
+       a2.bit() | a1.bit() | a0.bit());
+
+  __ MultiPush(spill_mask);
+  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ li(a1, Operand(Handle<String>(name)));
+  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, eq, v0, Operand(zero_reg));
+  __ Branch(miss, ne, v0, Operand(zero_reg));
+  return result;
+}
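+
+// Probe sequence used above (illustrative): for probe i the slot index is
+//   index = (hash + StringDictionary::GetProbeOffset(i)) & (capacity - 1)
+// and each entry occupies StringDictionary::kEntrySize (== 3) words, so the
+// multiply-by-3 is done as index + (index << 1). Only kInlinedProbes probes
+// are inlined here; the full probing loop lives in the stub called at the
+// end.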
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register scratch1,
+                                                        Register scratch2) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  // Compute the capacity mask.
+  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
+  __ Subu(scratch1, scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ Addu(scratch2, scratch2, Operand(
+           StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    }
+    __ srl(scratch2, scratch2, String::kHashShift);
+    __ And(scratch2, scratch1, scratch2);
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+
+    __ mov(at, scratch2);
+    __ sll(scratch2, scratch2, 1);
+    __ Addu(scratch2, scratch2, at);
+
+    // Check if the key is identical to the name.
+    __ sll(at, scratch2, 2);
+    __ Addu(scratch2, elements, at);
+    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ Branch(done, eq, name, Operand(at));
+  }
+
+  const int spill_mask =
+      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
+       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+      ~(scratch1.bit() | scratch2.bit());
+
+  __ MultiPush(spill_mask);
+  __ Move(a0, elements);
+  __ Move(a1, name);
+  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ mov(scratch2, a2);
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, ne, v0, Operand(zero_reg));
+  __ Branch(miss, eq, v0, Operand(zero_reg));
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  result (v0): lookup result; see "Returns" below.
+  //  dictionary (a0): StringDictionary to probe.
+  //  key (a1): the key (a string) to look up.
+  //  index (a2): will hold the index of the entry if the lookup is
+  //              successful; may alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.
+
+  Register result = v0;
+  Register dictionary = a0;
+  Register key = a1;
+  Register index = a2;
+  Register mask = a3;
+  Register hash = t0;
+  Register undefined = t1;
+  Register entry_key = t2;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ sra(mask, mask, kSmiTagSize);
+  __ Subu(mask, mask, Operand(1));
+
+  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ Addu(index, hash, Operand(
+           StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    } else {
+      __ mov(index, hash);
+    }
+    __ srl(index, index, String::kHashShift);
+    __ And(index, mask, index);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // index *= 3.
+    __ mov(at, index);
+    __ sll(index, index, 1);
+    __ Addu(index, index, at);
+
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ sll(index, index, 2);
+    __ Addu(index, index, dictionary);
+    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Having undefined at this place means the name is not contained.
+    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
+
+    // Stop if found the property.
+    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a symbol.
+      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ lbu(entry_key,
+             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ And(result, entry_key, Operand(kIsSymbolMask));
+      __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ mov(result, zero_reg);
+    __ Ret();
+  }
+
+  __ bind(&in_dictionary);
+  __ li(result, 1);
+  __ Ret();
+
+  __ bind(&not_in_dictionary);
+  __ mov(result, zero_reg);
+  __ Ret();
 }
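+
+// Note on the probe counts above (summary): callers inline the first
+// kInlinedProbes probes and only call this stub when those fail, so the
+// loop here starts at kInlinedProbes and runs up to kTotalProbes before
+// giving up.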
 
 
@@ -749,4 +6969,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
-
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 675730a..aa224bc 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,206 +39,111 @@
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public CodeStub {
  public:
-  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
-      : type_(type) {}
+  enum ArgumentType {
+    TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
+    UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+  };
+
+  TranscendentalCacheStub(TranscendentalCache::Type type,
+                          ArgumentType argument_type)
+      : type_(type), argument_type_(argument_type) { }
   void Generate(MacroAssembler* masm);
  private:
   TranscendentalCache::Type type_;
+  ArgumentType argument_type_;
+  void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
+
   Major MajorKey() { return TranscendentalCache; }
-  int MinorKey() { return type_; }
+  int MinorKey() { return type_ | argument_type_; }
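+  // As the names suggest, type_ fits in the low kTranscendentalTypeBits bits
+  // while argument_type_ is 0 or 1 shifted just above them, so OR-ing the
+  // two cannot collide.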
   Runtime::FunctionId RuntimeFunction();
 };
 
 
-class ToBooleanStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Register tos_;
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return tos_.code(); }
-};
-
-
-class GenericBinaryOpStub : public CodeStub {
- public:
-  static const int kUnknownIntValue = -1;
-
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      Register lhs,
-                      Register rhs,
-                      int constant_rhs = kUnknownIntValue)
+  UnaryOpStub(Token::Value op,
+              UnaryOverwriteMode mode,
+              UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
       : op_(op),
         mode_(mode),
-        lhs_(lhs),
-        rhs_(rhs),
-        constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
-        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
-        name_(NULL) { }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        lhs_(LhsRegister(RegisterBits::decode(key))),
-        rhs_(RhsRegister(RegisterBits::decode(key))),
-        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
-        runtime_operands_type_(type_info),
-        name_(NULL) { }
+        operand_type_(operand_type) {
+  }
 
  private:
   Token::Value op_;
-  OverwriteMode mode_;
-  Register lhs_;
-  Register rhs_;
-  int constant_rhs_;
-  bool specialized_on_rhs_;
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-  char* name_;
+  UnaryOverwriteMode mode_;
 
-  static const int kMaxKnownRhs = 0x40000000;
-  static const int kKnownRhsKeyBits = 6;
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
 
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 6> {};
-  class TypeInfoBits: public BitField<int, 8, 3> {};
-  class RegisterBits: public BitField<bool, 11, 1> {};
-  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
+  virtual void PrintName(StringStream* stream);
 
-  Major MajorKey() { return GenericBinaryOp; }
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
   int MinorKey() {
-    ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
-           (lhs_.is(a1) && rhs_.is(a0)));
-    // Encode the parameters in a unique 16 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt())
-           | TypeInfoBits::encode(runtime_operands_type_)
-           | RegisterBits::encode(lhs_.is(a0));
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
   }
 
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switch statements more often.
   void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                             Register lhs,
-                             Register rhs);
-  void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                               Label* not_smi,
-                               Register lhs,
-                               Register rhs,
-                               const Builtins::JavaScript& builtin);
+
   void GenerateTypeTransition(MacroAssembler* masm);
 
-  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
-    if (constant_rhs == kUnknownIntValue) return false;
-    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
-    if (op == Token::MOD) {
-      if (constant_rhs <= 1) return false;
-      if (constant_rhs <= 10) return true;
-      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
-      return false;
-    }
-    return false;
-  }
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
 
-  int MinorKeyForKnownInt() {
-    if (!specialized_on_rhs_) return 0;
-    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
-    ASSERT(IsPowerOf2(constant_rhs_));
-    int key = 12;
-    int d = constant_rhs_;
-    while ((d & 1) == 0) {
-      key++;
-      d >>= 1;
-    }
-    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
-    return key;
-  }
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
 
-  int KnownBitsForMinorKey(int key) {
-    if (!key) return 0;
-    if (key <= 11) return key - 1;
-    int d = 1;
-    while (key != 12) {
-      key--;
-      d <<= 1;
-    }
-    return d;
-  }
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
 
-  Register LhsRegister(bool lhs_is_a0) {
-    return lhs_is_a0 ? a0 : a1;
-  }
-
-  Register RhsRegister(bool lhs_is_a0) {
-    return lhs_is_a0 ? a1 : a0;
-  }
-
-  bool HasSmiSmiFastPath() {
-    return op_ != Token::DIV;
-  }
-
-  bool ShouldGenerateSmiCode() {
-    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
+    return UnaryOpIC::ToState(operand_type_);
   }
 
-  const char* GetName();
-
   virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(runtime_operands_type_);
+    code->set_unary_op_type(operand_type_);
   }
-
-#ifdef DEBUG
-  void Print() {
-    if (!specialized_on_rhs_) {
-      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
-    } else {
-      PrintF("GenericBinaryOpStub (%s by %d)\n",
-             Token::String(op_),
-             constant_rhs_);
-    }
-  }
-#endif
 };
 
-class TypeRecordingBinaryOpStub: public CodeStub {
+
+class BinaryOpStub: public CodeStub {
  public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
-        name_(NULL) {
-    UNIMPLEMENTED_MIPS();
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED) {
+    use_fpu_ = CpuFeatures::IsSupported(FPU);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_fpu_(FPUBits::decode(key)),
         operands_type_(operands_type),
-        result_type_(result_type),
-        name_(NULL) { }
+        result_type_(result_type) { }
 
  private:
   enum SmiCodeGenerateHeapNumberResults {
@@ -251,32 +156,19 @@
   bool use_fpu_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
-           "(mode %d, runtime_type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
-  }
-#endif
+  virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class FPUBits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
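+  // (Reading RRRTTTVOOOOOOOMM from the least significant end: MM = mode,
+  //  OOOOOOO = op, V = FPU flag, TTT = operand type, RRR = result type.)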
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -293,6 +185,7 @@
                            Label* not_numbers,
                            Label* gc_required);
   void GenerateSmiCode(MacroAssembler* masm,
+                       Label* use_runtime,
                        Label* gc_required,
                        SmiCodeGenerateHeapNumberResults heapnumber_results);
   void GenerateLoadArguments(MacroAssembler* masm);
@@ -301,7 +194,9 @@
   void GenerateSmiStub(MacroAssembler* masm);
   void GenerateInt32Stub(MacroAssembler* masm);
   void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
   void GenerateCallRuntime(MacroAssembler* masm);
@@ -316,15 +211,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -334,24 +229,36 @@
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+  // Omit left string check in stub (left is definitely a string).
+  NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+  // Omit right string check in stub (right is definitely a string).
+  NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+  // Omit both string checks in stub.
+  NO_STRING_CHECK_IN_STUB =
+      NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
 };
 
 
 class StringAddStub: public CodeStub {
  public:
-  explicit StringAddStub(StringAddFlags flags) {
-    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
-  }
+  explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
  private:
   Major MajorKey() { return StringAdd; }
-  int MinorKey() { return string_check_ ? 0 : 1; }
+  int MinorKey() { return flags_; }
 
   void Generate(MacroAssembler* masm);
 
-  // Should the stub check whether arguments are strings?
-  bool string_check_;
+  void GenerateConvertArgument(MacroAssembler* masm,
+                               int stack_offset,
+                               Register arg,
+                               Register scratch1,
+                               Register scratch2,
+                               Register scratch3,
+                               Register scratch4,
+                               Label* slow);
+
+  const StringAddFlags flags_;
 };
 
 
@@ -372,7 +279,6 @@
   StringCompareStub() { }
 
   // Compare two flat ASCII strings and returns result in v0.
-  // Does not use the stack.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -381,11 +287,28 @@
                                               Register scratch3,
                                               Register scratch4);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns result
+  // in v0.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register length,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Label* chars_not_equal);
 };
 
 
@@ -423,12 +346,6 @@
   }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
-  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
 };
 
 
@@ -455,14 +372,6 @@
   int MinorKey() { return 0; }
 
   void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("NumberToStringStub\n");
-  }
-#endif
 };
 
 
@@ -480,30 +389,217 @@
   int MinorKey() { return 0; }
 
   bool NeedsImmovableCode() { return true; }
+};
 
-  const char* GetName() { return "RegExpCEntryStub"; }
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of a compacting GC (which can move code objects), we need
+// to keep the code which called into native code pinned in memory. Currently
+// the simplest approach is to generate such a stub early enough that it can
+// never be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+  DirectCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+  void GenerateCall(MacroAssembler* masm,
+                    ExternalReference function);
+  void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+  Major MajorKey() { return DirectCEntry; }
+  int MinorKey() { return 0; }
+
+  bool NeedsImmovableCode() { return true; }
+};
+
+class FloatingPointHelper : public AllStatic {
+ public:
+  enum Destination {
+    kFPURegisters,
+    kCoreRegisters
+  };
+
+
+  // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1 respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will be
+  // scratched.
+  static void LoadSmis(MacroAssembler* masm,
+                       Destination destination,
+                       Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1 respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will still be
+  // scratched. If either a0 or a1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with a0 and a1 intact.
+  static void LoadOperands(MacroAssembler* masm,
+                           FloatingPointHelper::Destination destination,
+                           Register heap_number_map,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* not_number);
+
+  // Convert the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1.
+  static void ConvertNumberToInt32(MacroAssembler* masm,
+                                   Register object,
+                                   Register dst,
+                                   Register heap_number_map,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   FPURegister double_scratch,
+                                   Label* not_int32);
+
+  // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+  // the result either in |double_dst| or |dst2:dst1|, depending on
+  // |destination|.
+  // Warning: The value in |int_scratch| will be changed in the process!
+  static void ConvertIntToDouble(MacroAssembler* masm,
+                                 Register int_scratch,
+                                 Destination destination,
+                                 FPURegister double_dst,
+                                 Register dst1,
+                                 Register dst2,
+                                 Register scratch2,
+                                 FPURegister single_scratch);
+
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm,
+                                      Register object,
+                                      Destination destination,
+                                      FPURegister double_dst,
+                                      Register dst1,
+                                      Register dst2,
+                                      Register heap_number_map,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      FPURegister single_scratch,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when FPU is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm,
+                                Register object,
+                                Register dst,
+                                Register heap_number_map,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                FPURegister double_scratch,
+                                Label* not_int32);
+
+  // Generates non-FPU code to check if a double can be exactly represented by
+  // a 32-bit integer. This does not check for 0 or -0, which need to be
+  // checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+  // through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: the higher 32 bits of the mantissa (mantissa[51:20]).
+  // - src2: contains 1.
+  // - other registers are clobbered.
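+  // (Conceptually this is the host-side check "(double)(int32_t)d == d",
+  // performed here on the raw exponent/mantissa words because no FPU is
+  // available; the 0/-0 cases are excluded as noted above.)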
+  static void DoubleIs32BitInteger(MacroAssembler* masm,
+                                   Register src1,
+                                   Register src2,
+                                   Register dst,
+                                   Register scratch,
+                                   Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when FPU is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in v0.
+  // Register heap_number_result must hold a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // a0: Left value (least significant part of mantissa).
+  // a1: Left value (sign, exponent, top of mantissa).
+  // a2: Right value (least significant part of mantissa).
+  // a3: Right value (sign, exponent, top of mantissa).
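+  // (Each double is thus split across a register pair, presumably matching
+  // the MIPS O32 soft-float calling convention for double arguments.)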
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);
+
+ private:
+  static void LoadNumber(MacroAssembler* masm,
+                         FloatingPointHelper::Destination destination,
+                         Register object,
+                         FPURegister dst,
+                         Register dst1,
+                         Register dst2,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         Label* not_number);
 };
 
 
-// Generate code to load an element from a pixel array. The receiver is
-// assumed not to be a smi and to have elements; the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated. If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
-                                Register receiver,
-                                Register key,
-                                Register elements_map,
-                                Register elements,
-                                Register scratch1,
-                                Register scratch2,
-                                Register result,
-                                Label* not_pixel_array,
-                                Label* key_not_smi,
-                                Label* out_of_range);
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register receiver,
+      Register properties,
+      String* name,
+      Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
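+  // (The first kInlinedProbes probes are expected to be emitted inline at the
+  // call site; the remaining probes, up to kTotalProbes, are handled by the
+  // stub itself.)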
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return LookupModeBits::encode(mode_);
+  }
+
+  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+  LookupMode mode_;
+};
 
 
 } }  // namespace v8::internal
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
deleted file mode 100644
index be9ae9e..0000000
--- a/src/mips/codegen-mips-inl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
-#define V8_MIPS_CODEGEN_MIPS_INL_H_
-
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() {
-  __ b(&entry_label_);
-  __ nop();
-}
-
-
-// Note: this has been hacked for submission. MIPS branches require two
-// additional operands: Register src1, const Operand& src2.
-void DeferredCode::Branch(Condition cond) {
-  __ Branch(&entry_label_, cond, zero_reg, Operand(0));
-}
-
-
-void Reference::GetValueAndSpill() {
-  GetValue();
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_CODEGEN_MIPS_INL_H_
-
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index c1149df..4400b64 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,61 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-mips-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  // On MIPS you either have a completely spilled frame or you
-  // handle it yourself, but at the moment there's no automation
-  // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -90,1124 +47,6 @@
 }
 
 
-// -----------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      previous_(owner->state()) {
-  owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
-                           JumpTarget* true_target,
-                           JumpTarget* false_target)
-    : CodeGenState(owner),
-      true_target_(true_target),
-      false_target_(false_target) {
-  owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
-                                           Slot* slot,
-                                           TypeInfo type_info)
-    : CodeGenState(owner),
-      slot_(slot) {
-  owner->set_state(this);
-  old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
-  owner()->set_type_info(slot_, old_type_info_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      cc_reg_(cc_always),
-      state_(NULL),
-      loop_nesting_(0),
-      type_info_(NULL),
-      function_return_(JumpTarget::BIDIRECTIONAL),
-      function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// a1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
-  UNIMPLEMENTED_MIPS();
-  return 0;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Register tmp,
-    Register tmp2,
-    JumpTarget* slow) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-void CodeGenerator::LoadCondition(Expression* x,
-                                  JumpTarget* true_target,
-                                  JumpTarget* false_target,
-                                  bool force_cc) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Load(Expression* x) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  UNIMPLEMENTED_MIPS();
-  return EAGER_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::~Reference() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
-                              JumpTarget* false_target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           OverwriteMode overwrite_mode,
-                                           GenerateInlineSmi inline_smi,
-                                           int constant_rhs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             int value,
-                             bool reversed,
-                             OverwriteMode overwrite_mode,
-                             Register tos)
-      : op_(op),
-        value_(value),
-        reversed_(reversed),
-        overwrite_mode_(overwrite_mode),
-        tos_register_(tos) {
-    set_comment("[ DeferredInlinedSmiOperation");
-  }
-
-  virtual void Generate();
-  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
-  // Exit(). Currently on MIPS SaveRegisters() and RestoreRegisters() are empty
-  // methods, it is the responsibility of the deferred code to save and restore
-  // registers.
-  virtual bool AutoSaveAndRestore() { return false; }
-
-  void JumpToNonSmiInput(Condition cond, Register cmp1, const Operand& cmp2);
-  void JumpToAnswerOutOfRange(Condition cond,
-                              Register cmp1,
-                              const Operand& cmp2);
-
- private:
-  void GenerateNonSmiInput();
-  void GenerateAnswerOutOfRange();
-  void WriteNonSmiAnswer(Register answer,
-                         Register heap_number,
-                         Register scratch);
-
-  Token::Value op_;
-  int value_;
-  bool reversed_;
-  OverwriteMode overwrite_mode_;
-  Register tos_register_;
-  Label non_smi_input_;
-  Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32bits integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond,
-                                                   Register cmp1,
-                                                   const Operand& cmp2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// For bit operations the result is always 32bits so we handle the case where
-// the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond,
-                                                        Register cmp1,
-                                                        const Operand& cmp2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is nowhere.  The tos_register_ is not used by the
-// virtual frame.  On exit the answer is in the tos_register_ and the virtual
-// frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
-                                                   Register heap_number,
-                                                   Register scratch) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
-                                 Handle<Object> value,
-                                 bool reversed,
-                                 OverwriteMode mode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// On MIPS we load registers condReg1 and condReg2 with the values which should
-// be compared. With the CodeGenerator::cc_reg_ condition, functions will be
-// able to evaluate correctly the condition. (eg CodeGenerator::Branch)
-void CodeGenerator::Comparison(Condition cc,
-                               Expression* left,
-                               Expression* right,
-                               bool strict) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CheckStack() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                                      TypeofState typeof_state,
-                                                      JumpTarget* slow) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst, Register cache, Register key)
-      : dst_(dst), cache_(cache), key_(key) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
-  DeferredCountOperation(Register value,
-                         bool is_increment,
-                         bool is_postfix,
-                         int target_size)
-      : value_(value),
-        is_increment_(is_increment),
-        is_postfix_(is_postfix),
-        target_size_(target_size) {}
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register value_;
-  bool is_increment_;
-  bool is_postfix_;
-  int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name,
-                                          bool is_contextual)
-      : receiver_(receiver),
-        name_(name),
-        is_contextual_(is_contextual),
-        is_dont_delete_(false) {
-    set_comment(is_contextual
-                ? "[ DeferredReferenceGetNamedValue (contextual)"
-                : "[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  void set_is_dont_delete(bool value) {
-    ASSERT(is_contextual_);
-    is_dont_delete_ = value;
-  }
-
- private:
-  Register receiver_;
-  Handle<String> name_;
-  bool is_contextual_;
-  bool is_dont_delete_;
-};
-
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceGetKeyedValue(Register key, Register receiver)
-      : key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register key_;
-  Register receiver_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver)
-      : value_(value), key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceSetNamedValue(Register value,
-                                 Register receiver,
-                                 Handle<String> name)
-      : value_(value), receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceSetNamedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register receiver_;
-  Handle<String> name_;
-};
-
-
-void DeferredReferenceSetNamedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
-                                   WriteBarrierCharacter wb_info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-// -----------------------------------------------------------------------------
-// Reference support.
-
-
-Handle<String> Reference::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return Handle<String>();
-}
-
-
-void Reference::DupIfPersist() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::GetValue() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return name_;
-}
-
-
-#undef __
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 0a2cd45..a8de9c8 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,204 +37,16 @@
 namespace v8 {
 namespace internal {
 
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
-// Use floating-point coprocessor instructions. This flag is raised when
-// -mhard-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = false;
-#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = true;
-#else
-static const bool IsMipsSoftFloatABI = true;
-#endif
-
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types is important, see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name. Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Generate code to pop a reference, push the value of the reference,
-  // and then spill the stack frame.
-  inline void GetValueAndSpill();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The  value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
-  // This is in preparation for something that uses the reference on the stack.
-  // If we need this reference afterwards get then dup it now.  Otherwise mark
-  // it as used.
-  inline void DupIfPersist();
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  // Keep the reference on the stack after get, so it can be used by set later.
-  bool persist_after_get_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair).  It is threaded through the
-// call stack.  Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  virtual ~CodeGenState();
-
-  virtual JumpTarget* true_target() const { return NULL; }
-  virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
-  inline CodeGenerator* owner() { return owner_; }
-  inline CodeGenState* previous() const { return previous_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own pair of branch labels.
-  ConditionCodeGenState(CodeGenerator* owner,
-                        JumpTarget* true_target,
-                        JumpTarget* false_target);
-
-  virtual JumpTarget* true_target() const { return true_target_; }
-  virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
-  TypeInfoCodeGenState(CodeGenerator* owner,
-                       Slot* slot_number,
-                       TypeInfo info);
-  virtual ~TypeInfoCodeGenState();
-
-  virtual JumpTarget* true_target() const { return previous()->true_target(); }
-  virtual JumpTarget* false_target() const {
-    return previous()->false_target();
-  }
-
- private:
-  Slot* slot_;
-  TypeInfo old_type_info_;
-};
-
 
 // -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -----------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
  public:
-  // Compilation mode.  Either the compiler is used as the primary
-  // compiler and needs to setup everything or the compiler is used as
-  // the secondary compiler for split compilation and has to handle
-  // bailouts.
-  enum Mode {
-    PRIMARY,
-    SECONDARY
-  };
-
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -248,9 +60,7 @@
   // Print the code after compiling it.
   static void PrintCode(Handle<Code> code, CompilationInfo* info);
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
-#endif
 
   static void SetFunctionInfo(Handle<JSFunction> fun,
                               FunctionLiteral* lit,
@@ -261,50 +71,14 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  TypeInfo type_info(Slot* slot) {
-    int index = NumberOfSlot(slot);
-    if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
-    return (*type_info_)[index];
-  }
-
-  TypeInfo set_type_info(Slot* slot, TypeInfo info) {
-    int index = NumberOfSlot(slot);
-    ASSERT(index >= kInvalidSlotNumber);
-    if (index != kInvalidSlotNumber) {
-      TypeInfo previous_value = (*type_info_)[index];
-      (*type_info_)[index] = info;
-      return previous_value;
-    }
-    return TypeInfo::Unknown();
-  }
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
   // Constants related to patching of inlined load/store.
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     // This is in correlation with the padding in MacroAssembler::Abort.
     return FLAG_debug_code ? 45 : 20;
   }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
+
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
+
   static int GetInlinedNamedStoreInstructionsAfterPatch() {
     ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
     // Magic number 5: instruction count after patched map load:
@@ -313,317 +87,6 @@
   }
 
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-
-  // Construction/Destruction.
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors.
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  static const int kInvalidSlotNumber = -1;
-
-  int NumberOfSlot(Slot* slot);
-  // State
-  bool has_cc() const { return cc_reg_ != cc_always; }
-
-  JumpTarget* true_target() const { return state_->true_target(); }
-  JumpTarget* false_target() const { return state_->false_target(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which can not be done more than once).  The return value should
-  // be in v0.
-  void GenerateReturnSequence();
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  void StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  MemOperand SlotOperand(Slot* slot, Register tmp);
-
-  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
-                                               Register tmp,
-                                               Register tmp2,
-                                               JumpTarget* slow);
-
-  void LoadCondition(Expression* x,
-                     JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool force_cc);
-  void Load(Expression* x);
-  void LoadGlobal();
-  void LoadGlobalReceiver(Register scratch);
-
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Store a keyed property. Key and receiver are on the stack and the value is
-  // in a0. Result is returned in r0.
-  void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                         TypeofState typeof_state,
-                                         JumpTarget* slow);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the stack to a slot.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Load a named property, returning it in v0. The receiver is passed on the
-  // stack, and remains there.
-  void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // Store to a named property. If the store is contextual, value is passed on
-  // the frame and consumed. Otherwise, receiver and value are passed on the
-  // frame and consumed. The result is returned in v0.
-  void EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a keyed property, leaving it in v0. The receiver and key are
-  // passed on the stack, and remain there.
-  void EmitKeyedLoad();
-
-  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(Token::Value op,
-                              OverwriteMode overwrite_mode,
-                              GenerateInlineSmi inline_smi,
-                              int known_rhs =
-                                GenericBinaryOpStub::kUnknownIntValue);
-
-  void VirtualFrameBinaryOperation(Token::Value op,
-                                   OverwriteMode overwrite_mode,
-                                   int known_rhs =
-                                      GenericBinaryOpStub::kUnknownIntValue);
-
-  void SmiOperation(Token::Value op,
-                    Handle<Object> value,
-                    bool reversed,
-                    OverwriteMode mode);
-
-  void Comparison(Condition cc,
-                  Expression* left,
-                  Expression* right,
-                  bool strict = false);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  // Control flow
-  void Branch(bool if_true, JumpTarget* target);
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  static Handle<Code> ComputeLazyCompile(int argc);
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-  void GenerateSubString(ZoneList<Expression*>* args);
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source code is generated for. Source
-  // positions are collected by the assembler and emitted with the relocation
-  // information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.
-  bool HasValidEntryRegisters();
-#endif
-
-  List<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  Condition cc_reg_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  Vector<TypeInfo>* type_info_;
-  // Jump targets
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-  friend class InlineRuntimeFunctionsTable;
-  friend class LCodeGen;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 16e49c9..d0a7af5 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Registers
+// Registers.
 
 
 // These register names are defined in a way to match the native disassembler
@@ -145,7 +145,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Instruction
+// Instructions.
 
 bool Instruction::IsForbiddenInBranchDelay() const {
   const int op = OpcodeFieldRaw();
@@ -191,6 +191,7 @@
   const int op = OpcodeFieldRaw();
   switch (op) {
     case JAL:
+      return true;
     case REGIMM:
       switch (RtFieldRaw()) {
         case BGEZAL:
@@ -272,7 +273,7 @@
         case MOVCI:
           return kRegisterType;
         default:
-          UNREACHABLE();
+          return kUnsupported;
       };
       break;
     case SPECIAL2:
@@ -281,7 +282,7 @@
         case CLZ:
           return kRegisterType;
         default:
-          UNREACHABLE();
+          return kUnsupported;
       };
       break;
     case SPECIAL3:
@@ -290,18 +291,18 @@
         case EXT:
           return kRegisterType;
         default:
-          UNREACHABLE();
+          return kUnsupported;
       };
       break;
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (RsFieldRawNoAssert()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // Branch on coprocessor condition.
           return kImmediateType;
         default:
           return kRegisterType;
       };
       break;
-    // 16 bits Immediate type instructions. eg: addi dest, src, imm16
+    // 16 bits Immediate type instructions. eg: addi dest, src, imm16.
     case REGIMM:
     case BEQ:
     case BNE:
@@ -336,12 +337,12 @@
     case SWC1:
     case SDC1:
       return kImmediateType;
-    // 26 bits immediate type instructions. eg: j imm26
+    // 26 bits immediate type instructions. eg: j imm26.
     case J:
     case JAL:
       return kJumpType;
     default:
-      UNREACHABLE();
+      return kUnsupported;
   };
   return kUnsupported;
 }
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index b20e9a2..d76ae59 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,6 +47,19 @@
 #endif
 
 
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// Not using floating-point coprocessor instructions. This flag is raised when
+// -msoft-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = true;
+#else
+static const bool IsMipsSoftFloatABI = true;
+#endif
+
+
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate MIPS32 instructions.
 //
@@ -58,7 +71,7 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Registers and FPURegister.
+// Registers and FPURegisters.
 
 // Number of general purpose registers.
 static const int kNumRegisters = 32;
@@ -80,8 +93,27 @@
 static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
 
 // FCSR constants.
-static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
-static const uint32_t kFCSRFlagShift = 2;
+static const uint32_t kFCSRInexactFlagBit = 2;
+static const uint32_t kFCSRUnderflowFlagBit = 3;
+static const uint32_t kFCSROverflowFlagBit = 4;
+static const uint32_t kFCSRDivideByZeroFlagBit = 5;
+static const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+static const uint32_t kFCSRFlagMask =
+    kFCSRInexactFlagMask |
+    kFCSRUnderflowFlagMask |
+    kFCSROverflowFlagMask |
+    kFCSRDivideByZeroFlagMask |
+    kFCSRInvalidOpFlagMask;
+
+static const uint32_t kFCSRExceptionFlagMask =
+    kFCSRFlagMask ^ kFCSRInexactFlagMask;
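A minimal standalone sketch (not part of this patch) that simply re-derives the mask values defined above, so the bit layout is easy to verify at a glance:

#include <cassert>
#include <cstdint>

int main() {
  // Same bit positions as the kFCSR*FlagBit constants in the patch.
  const uint32_t kInexact      = 1u << 2;
  const uint32_t kUnderflow    = 1u << 3;
  const uint32_t kOverflow     = 1u << 4;
  const uint32_t kDivideByZero = 1u << 5;
  const uint32_t kInvalidOp    = 1u << 6;

  const uint32_t kFlagMask =
      kInexact | kUnderflow | kOverflow | kDivideByZero | kInvalidOp;
  // XOR-ing the inexact bit back out leaves only the "real" exceptions,
  // matching kFCSRExceptionFlagMask.
  const uint32_t kExceptionFlagMask = kFlagMask ^ kInexact;

  assert(kFlagMask == 0x7c);
  assert(kExceptionFlagMask == 0x78);
  assert((kExceptionFlagMask & kInexact) == 0);
  return 0;
}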
 
 // Helper functions for converting between register numbers and names.
 class Registers {
@@ -101,7 +133,6 @@
   static const int32_t kMinValue = 0x80000000;
 
  private:
-
   static const char* names_[kNumSimuRegisters];
   static const RegisterAlias aliases_[];
 };
@@ -121,7 +152,6 @@
   };
 
  private:
-
   static const char* names_[kNumFPURegisters];
   static const RegisterAlias aliases_[];
 };
@@ -133,8 +163,6 @@
 // On MIPS all instructions are 32 bits.
 typedef int32_t Instr;
 
-typedef unsigned char byte_;
-
 // Special Software Interrupt codes when used in the presence of the MIPS
 // simulator.
 enum SoftwareInterruptCodes {
@@ -142,6 +170,18 @@
   call_rt_redirected = 0xfffff
 };
 
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+//   the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+//   instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+//   debugger.
+static const uint32_t kMaxWatchpointCode = 31;
+static const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
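A hedged sketch (not part of this patch) of how a simulator could bucket a break code using the two constants above; the exact boundary handling in the real simulator may differ:

#include <cassert>
#include <cstdint>
#include <string>

static std::string ClassifyBreakCode(uint32_t code) {
  const uint32_t kMaxWatchpointCode = 31;
  const uint32_t kMaxStopCode = 127;
  if (code <= kMaxWatchpointCode) return "watchpoint";  // Print and continue.
  if (code <= kMaxStopCode) return "stop";              // Assembler::stop().
  return "break";                                       // Drop into debugger.
}

int main() {
  assert(ClassifyBreakCode(0) == "watchpoint");
  assert(ClassifyBreakCode(64) == "stop");
  assert(ClassifyBreakCode(200) == "break");
  return 0;
}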
+
+
 // ----- Fields offset and length.
 static const int kOpcodeShift   = 26;
 static const int kOpcodeBits    = 6;
@@ -161,6 +201,12 @@
 static const int kImm16Bits  = 16;
 static const int kImm26Shift = 0;
 static const int kImm26Bits  = 26;
+static const int kImm28Shift = 0;
+static const int kImm28Bits  = 28;
+
+// In branches and jumps immediate fields point to words, not bytes,
+// and are therefore shifted by 2.
+static const int kImmFieldShift = 2;
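A small standalone sketch (not part of this patch) of the scaling described above, assuming the standard MIPS branch semantics: target = delay-slot pc + sign-extended imm16 counted in words.

#include <cassert>
#include <cstdint>

static uint32_t BranchTarget(uint32_t delay_slot_pc, int16_t imm16) {
  // The 16-bit field counts words, so scale by 4 (i.e. by 1 << kImmFieldShift).
  return delay_slot_pc + static_cast<uint32_t>(static_cast<int32_t>(imm16) * 4);
}

int main() {
  assert(BranchTarget(0x00400004u, 3) == 0x00400010u);   // Forward 3 words.
  assert(BranchTarget(0x00400004u, -1) == 0x00400000u);  // Back 1 word.
  return 0;
}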
 
 static const int kFsShift       = 11;
 static const int kFsBits        = 5;
@@ -175,11 +221,12 @@
 static const int kFBtrueShift   = 16;
 static const int kFBtrueBits    = 1;
 
-// ----- Miscellianous useful masks.
+// ----- Miscellaneous useful masks.
 // Instruction bit masks.
 static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
 static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
 static const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
 static const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
 static const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
 static const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
@@ -190,7 +237,7 @@
 static const int  kHiMask       =   0xffff << 16;
 static const int  kLoMask       =   0xffff;
 static const int  kSignMask     =   0x80000000;
-
+static const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
 
 // ----- MIPS Opcodes and Function Fields.
 // We use this presentation to stay close to the table representation in
@@ -215,7 +262,7 @@
   XORI      =   ((1 << 3) + 6) << kOpcodeShift,
   LUI       =   ((1 << 3) + 7) << kOpcodeShift,
 
-  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class
+  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class.
   BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
   BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
   BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
@@ -247,12 +294,12 @@
 enum SecondaryField {
   // SPECIAL Encoding of Function Field.
   SLL       =   ((0 << 3) + 0),
+  MOVCI     =   ((0 << 3) + 1),
   SRL       =   ((0 << 3) + 2),
   SRA       =   ((0 << 3) + 3),
   SLLV      =   ((0 << 3) + 4),
   SRLV      =   ((0 << 3) + 6),
   SRAV      =   ((0 << 3) + 7),
-  MOVCI     =   ((0 << 3) + 1),
 
   JR        =   ((1 << 3) + 0),
   JALR      =   ((1 << 3) + 1),
@@ -393,7 +440,7 @@
 
   cc_always     = 16,
 
-  // aliases
+  // Aliases.
   carry         = Uless,
   not_carry     = Ugreater_equal,
   zero          = equal,
@@ -455,14 +502,38 @@
 
 // ----- Coprocessor conditions.
 enum FPUCondition {
-  F,    // False
-  UN,   // Unordered
-  EQ,   // Equal
-  UEQ,  // Unordered or Equal
-  OLT,  // Ordered or Less Than
-  ULT,  // Unordered or Less Than
-  OLE,  // Ordered or Less Than or Equal
-  ULE   // Unordered or Less Than or Equal
+  kNoFPUCondition = -1,
+
+  F     = 0,  // False.
+  UN    = 1,  // Unordered.
+  EQ    = 2,  // Equal.
+  UEQ   = 3,  // Unordered or Equal.
+  OLT   = 4,  // Ordered or Less Than.
+  ULT   = 5,  // Unordered or Less Than.
+  OLE   = 6,  // Ordered or Less Than or Equal.
+  ULE   = 7   // Unordered or Less Than or Equal.
+};
+
+
+// FPU rounding modes.
+enum FPURoundingMode {
+  RN = 0 << 0,  // Round to Nearest.
+  RZ = 1 << 0,  // Round towards zero.
+  RP = 2 << 0,  // Round towards Plus Infinity.
+  RM = 3 << 0,  // Round towards Minus Infinity.
+
+  // Aliases.
+  kRoundToNearest = RN,
+  kRoundToZero = RZ,
+  kRoundToPlusInf = RP,
+  kRoundToMinusInf = RM
+};
+
+static const uint32_t kFPURoundingModeMask = 3 << 0;
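A tiny sketch (not part of this patch) of how the mask above isolates the rounding mode from an FCSR value; the FCSR value used here is illustrative only:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kFPURoundingModeMask = 3;  // Low two FCSR bits.
  const uint32_t RZ = 1;                    // Round towards zero, as above.
  uint32_t fcsr = 0x01000004u | RZ;         // Other FCSR bits set, mode = RZ.
  assert((fcsr & kFPURoundingModeMask) == RZ);
  return 0;
}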
+
+enum CheckForInexactConversion {
+  kCheckForInexactConversion,
+  kDontCheckForInexactConversion
 };
 
 
@@ -494,7 +565,7 @@
 extern const Instr kPushInstruction;
 // sw(r, MemOperand(sp, 0))
 extern const Instr kPushRegPattern;
-//  lw(r, MemOperand(sp, 0))
+// lw(r, MemOperand(sp, 0))
 extern const Instr kPopRegPattern;
 extern const Instr kLwRegFpOffsetPattern;
 extern const Instr kSwRegFpOffsetPattern;
@@ -673,7 +744,7 @@
 
   inline int32_t Imm26Value() const {
     ASSERT(InstructionType() == kJumpType);
-    return Bits(kImm16Shift + kImm26Bits - 1, kImm26Shift);
+    return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
   }
 
   // Say if the instruction should not be used in a branch delay slot.
@@ -687,7 +758,7 @@
   // reference to an instruction is to convert a pointer. There is no way
   // to allocate or create instances of class Instruction.
   // Use the At(pc) function to create references to Instruction.
-  static Instruction* At(byte_* pc) {
+  static Instruction* At(byte* pc) {
     return reinterpret_cast<Instruction*>(pc);
   }
 
@@ -700,11 +771,9 @@
 // -----------------------------------------------------------------------------
 // MIPS assembly various constants.
 
-
-static const int kArgsSlotsSize  = 4 * Instruction::kInstrSize;
-static const int kArgsSlotsNum   = 4;
 // C/C++ argument slots size.
-static const int kCArgsSlotsSize = 4 * Instruction::kInstrSize;
+static const int kCArgSlotCount = 4;
+static const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
 // JS argument slots size.
 static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
 // Assembly builtins argument slots size.
@@ -720,4 +789,3 @@
 } }   // namespace v8::internal
 
 #endif    // #ifndef V8_MIPS_CONSTANTS_H_
-
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 36f577b..26e95fb 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -48,19 +48,25 @@
 
 
 void CPU::Setup() {
-  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
-  cpu_features->Probe(true);
-  if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return CpuFeatures::IsSupported(FPU);
 }
 
 
 void CPU::FlushICache(void* start, size_t size) {
+  // Nothing to do, flushing no instructions.
+  if (size == 0) {
+    return;
+  }
+
 #if !defined (USE_SIMULATOR)
   int res;
 
-  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
   res = syscall(__NR_cacheflush, start, size, ICACHE);
 
   if (res) {
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 35df69b..e323c50 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 
 namespace v8 {
@@ -40,106 +40,259 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 bool BreakLocationIterator::IsDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  return Debug::IsDebugBreakAtReturn(rinfo());
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
+  // Mips return sequence:
+  // mov sp, fp
+  // lw fp, sp(0)
+  // lw ra, sp(4)
+  // addiu sp, sp, 8
+  // addiu sp, sp, N
+  // jr ra
+  // nop (in branch delay slot)
+
+  // Make sure this constant matches the number of instructions we emit.
+  ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+  CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+  // li and Call pseudo-instructions emit two instructions each.
+  patcher.masm()->li(v8::internal::t9,
+      Operand(reinterpret_cast<int32_t>(
+          Isolate::Current()->debug()->debug_break_return()->entry())));
+  patcher.masm()->Call(v8::internal::t9);
+  patcher.masm()->nop();
+  patcher.masm()->nop();
+  patcher.masm()->nop();
+
+  // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+  // patcher.masm()->bkpt(0);
 }
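The instruction count asserted above works out as follows; a trivial standalone check (not part of this patch), relying on the comment that li and Call each emit two instructions:

#include <cassert>

int main() {
  const int kLiInstructions = 2;    // li expands to two instructions (lui/ori).
  const int kCallInstructions = 2;  // Call also emits two instructions.
  const int kPaddingNops = 3;       // The three explicit nops emitted above.
  const int kJSReturnSequenceInstructions = 7;
  assert(kLiInstructions + kCallInstructions + kPaddingNops ==
         kJSReturnSequenceInstructions);
  return 0;
}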
 
 
 // Restore the JS frame exit code.
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kJSReturnSequenceInstructions);
 }
 
 
 // A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call psuedo-instrunction (liu/ori/jalr)
+// having been patched with a li/call pseudo-instruction sequence (lui/ori/jalr).
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  ASSERT(IsDebugBreakSlot());
+  // Check whether the debug break slot instructions have been patched.
+  return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(IsDebugBreakSlot());
+  // Patch the code changing the debug break slot code from:
+  //   nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  // to a call to the debug break slot code.
+  //   li t9, address   (lui t9 / ori t9 instruction pair)
+  //   call t9          (jalr t9 / nop instruction pair)
+  CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+  patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+      Isolate::Current()->debug()->debug_break_slot()->entry())));
+  patcher.masm()->Call(v8::internal::t9);
 }
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(IsDebugBreakSlot());
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kDebugBreakSlotInstructions);
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList object_regs,
+                                          RegList non_object_regs) {
+  __ EnterInternalFrame();
+
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non-object values
+  // are stored as smis so that they are left untouched by the GC.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  if ((object_regs | non_object_regs) != 0) {
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ And(at, reg, 0xc0000000);
+          __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+        }
+        __ sll(reg, reg, kSmiTagSize);
+      }
+    }
+    __ MultiPush(object_regs | non_object_regs);
+  }
+
+#ifdef DEBUG
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+  __ mov(a0, zero_reg);  // No arguments.
+  __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+  CEntryStub ceb(1);
+  __ CallStub(&ceb);
+
+  // Restore the register values from the expression stack.
+  if ((object_regs | non_object_regs) != 0) {
+    __ MultiPop(object_regs | non_object_regs);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ srl(reg, reg, kSmiTagSize);
+      }
+      if (FLAG_debug_code &&
+          (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+        __ li(reg, kDebugZapValue);
+      }
+    }
+  }
+
+  __ LeaveInternalFrame();
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+  __ li(t9, Operand(
+      ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+  __ lw(t9, MemOperand(t9));
+  __ Jump(t9);
+}
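A standalone sketch (not part of this patch) of the smi trick used by the helper above for non-object registers; kSmiTagSize and the 0xc0000000 check mirror the patch, the sample value is arbitrary:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;
  uint32_t raw = 0x1234u;                  // Any value with the top two bits clear.
  assert((raw & 0xc0000000u) == 0);        // The FLAG_debug_code assertion above.
  uint32_t tagged = raw << kSmiTagSize;    // sll reg, reg, kSmiTagSize.
  assert((tagged & 1) == 0);               // Low tag bit clear: the GC treats it
                                           // as a smi, not a heap pointer.
  assert((tagged >> kSmiTagSize) == raw);  // srl restores the original value.
  return 0;
}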
+
+
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC load (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  // Registers a0 and a2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC store (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  // Registers a0, a1, and a2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra  : return address
+  //  -- a0  : key
+  //  -- a1  : receiver
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
 }
 
 
 void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC call (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a2: name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
 }
 
 
 void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for construct call (from builtins-mips.cc).
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
 }
 
 
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // In places other than IC call sites it is expected that v0 is TOS and
+  // holds an object. This is not generally the case, so this helper should be
+  // used with care.
+  Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
 }
 
 
 void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
 void Debug::GenerateSlot(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+  // the trampoline pool in the debug break slot code.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+  Label check_codesize;
+  __ bind(&check_codesize);
+  __ RecordDebugBreakSlot();
+  for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+    __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+  }
+  ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+            masm->InstructionsGeneratedSince(&check_codesize));
 }
 
 
 void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // In the places where a debug break slot is inserted, no registers can
+  // contain object pointers.
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
 void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
 }
 
 
 void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
 }
 
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 4b69859..18b6231 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -39,7 +39,7 @@
 namespace internal {
 
 
-int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 10;
 
 
 int Deoptimizer::patch_size() {
@@ -78,6 +78,11 @@
 }
 
 
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  UNIMPLEMENTED();
+}
+
+
 void Deoptimizer::EntryGenerator::Generate() {
   UNIMPLEMENTED();
 }
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index b7ceb2b..fde0c58 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,7 +33,7 @@
 //
 //   NameConverter converter;
 //   Disassembler d(converter);
-//   for (byte_* pc = begin; pc < end;) {
+//   for (byte* pc = begin; pc < end;) {
 //     v8::internal::EmbeddedVector<char, 256> buffer;
 //     byte* prev_pc = pc;
 //     pc += d.InstructionDecode(buffer, pc);
@@ -85,7 +85,7 @@
 
   // Writes one disassembled instruction into 'buffer' (0-terminated).
   // Returns the length of the disassembled machine instruction in bytes.
-  int InstructionDecode(byte_* instruction);
+  int InstructionDecode(byte* instruction);
 
  private:
   // Bottleneck functions to print into the out_buffer.
@@ -103,6 +103,8 @@
   void PrintFd(Instruction* instr);
   void PrintSa(Instruction* instr);
   void PrintSd(Instruction* instr);
+  void PrintSs1(Instruction* instr);
+  void PrintSs2(Instruction* instr);
   void PrintBc(Instruction* instr);
   void PrintCc(Instruction* instr);
   void PrintFunction(Instruction* instr);
@@ -110,7 +112,7 @@
   void PrintUImm16(Instruction* instr);
   void PrintSImm16(Instruction* instr);
   void PrintXImm16(Instruction* instr);
-  void PrintImm26(Instruction* instr);
+  void PrintXImm26(Instruction* instr);
   void PrintCode(Instruction* instr);   // For break and trap instructions.
   // Printing of instruction name.
   void PrintInstructionName(Instruction* instr);
@@ -212,13 +214,29 @@
 }
 
 
-// Print the integer value of the rd field, (when it is not used as reg).
+// Print the integer value of the rd field, when it is not used as reg.
 void Decoder::PrintSd(Instruction* instr) {
   int sd = instr->RdValue();
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
 }
 
 
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+  int ss = instr->RdValue();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+  int ss = instr->RdValue();
+  int pos = instr->SaValue();
+  out_buffer_pos_ +=
+      OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
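A standalone sketch (not part of this patch) of the field arithmetic the two printers above rely on, assuming the standard MIPS32r2 ext/ins encodings (ext: rd = size - 1, sa = pos; ins: rd = pos + size - 1, sa = pos):

#include <cassert>

int main() {
  // ext rt, rs, pos=3, size=8  ->  rd field holds size - 1 = 7.
  int ext_rd = 7;
  assert(ext_rd + 1 == 8);           // PrintSs1 prints ss + 1.

  // ins rt, rs, pos=4, size=5  ->  rd field holds 4 + 5 - 1 = 8, sa holds 4.
  int ins_rd = 8, ins_sa = 4;
  assert(ins_rd - ins_sa + 1 == 5);  // PrintSs2 prints ss - pos + 1.
  return 0;
}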
+
+
 // Print the integer value of the cc field for the bc1t/f instructions.
 void Decoder::PrintBc(Instruction* instr) {
   int cc = instr->FBccValue();
@@ -242,7 +260,7 @@
 
 // Print 16-bit signed immediate value.
 void Decoder::PrintSImm16(Instruction* instr) {
-  int32_t imm = ((instr->Imm16Value())<<16)>>16;
+  int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
 }
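A standalone, fully portable sketch (not part of this patch) of what the "(imm << 16) >> 16" shift pair above computes, namely sign extension of the low 16 bits:

#include <cassert>
#include <cstdint>

static int32_t SignExtend16(uint32_t raw) {
  // Portable equivalent of the shift pair: fold the sign bit back in.
  uint32_t low = raw & 0xffffu;
  return static_cast<int32_t>(low ^ 0x8000u) - 0x8000;
}

int main() {
  assert(SignExtend16(0xffffu) == -1);      // All-ones field is -1.
  assert(SignExtend16(0x8000u) == -32768);  // Most negative 16-bit value.
  assert(SignExtend16(0x1234u) == 0x1234);  // Positive values pass through.
  return 0;
}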
 
@@ -255,9 +273,9 @@
 
 
 // Print 26-bit immediate value.
-void Decoder::PrintImm26(Instruction* instr) {
-  int32_t imm = instr->Imm26Value();
-  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+void Decoder::PrintXImm26(Instruction* instr) {
+  uint32_t imm = instr->Imm26Value() << kImmFieldShift;
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
 }
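A standalone sketch (not part of this patch) of why the 26-bit field is scaled by kImmFieldShift before being printed, assuming the standard MIPS J/JAL target computation where the upper four bits come from the delay-slot pc:

#include <cassert>
#include <cstdint>

static uint32_t JumpTarget(uint32_t delay_slot_pc, uint32_t imm26) {
  const uint32_t kJumpAddrMask = (1u << 28) - 1;  // Low 28 bits of the target.
  uint32_t low28 = (imm26 << 2) & kJumpAddrMask;  // imm26 << kImmFieldShift.
  return (delay_slot_pc & ~kJumpAddrMask) | low28;
}

int main() {
  assert(JumpTarget(0x00400008u, 0x100007u) == 0x0040001cu);
  return 0;
}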
 
 
@@ -298,15 +316,15 @@
 // complexity of FormatOption.
 int Decoder::FormatRegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'r');
-  if (format[1] == 's') {  // 'rs: Rs register
+  if (format[1] == 's') {  // 'rs: Rs register.
     int reg = instr->RsValue();
     PrintRegister(reg);
     return 2;
-  } else if (format[1] == 't') {  // 'rt: rt register
+  } else if (format[1] == 't') {  // 'rt: rt register.
     int reg = instr->RtValue();
     PrintRegister(reg);
     return 2;
-  } else if (format[1] == 'd') {  // 'rd: rd register
+  } else if (format[1] == 'd') {  // 'rd: rd register.
     int reg = instr->RdValue();
     PrintRegister(reg);
     return 2;
@@ -320,15 +338,15 @@
 // complexity of FormatOption.
 int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'f');
-  if (format[1] == 's') {  // 'fs: fs register
+  if (format[1] == 's') {  // 'fs: fs register.
     int reg = instr->FsValue();
     PrintFPURegister(reg);
     return 2;
-  } else if (format[1] == 't') {  // 'ft: ft register
+  } else if (format[1] == 't') {  // 'ft: ft register.
     int reg = instr->FtValue();
     PrintFPURegister(reg);
     return 2;
-  } else if (format[1] == 'd') {  // 'fd: fd register
+  } else if (format[1] == 'd') {  // 'fd: fd register.
     int reg = instr->FdValue();
     PrintFPURegister(reg);
     return 2;
@@ -345,12 +363,12 @@
 // characters that were consumed from the formatting string.
 int Decoder::FormatOption(Instruction* instr, const char* format) {
   switch (format[0]) {
-    case 'c': {   // 'code for break or trap instructions
+    case 'c': {   // 'code for break or trap instructions.
       ASSERT(STRING_STARTS_WITH(format, "code"));
       PrintCode(instr);
       return 4;
     }
-    case 'i': {   // 'imm16u or 'imm26
+    case 'i': {   // 'imm16u or 'imm26.
       if (format[3] == '1') {
         ASSERT(STRING_STARTS_WITH(format, "imm16"));
         if (format[5] == 's') {
@@ -365,18 +383,18 @@
         }
         return 6;
       } else {
-        ASSERT(STRING_STARTS_WITH(format, "imm26"));
-        PrintImm26(instr);
-        return 5;
+        ASSERT(STRING_STARTS_WITH(format, "imm26x"));
+        PrintXImm26(instr);
+        return 6;
       }
     }
-    case 'r': {   // 'r: registers
+    case 'r': {   // 'r: registers.
       return FormatRegister(instr, format);
     }
-    case 'f': {   // 'f: FPUregisters
+    case 'f': {   // 'f: FPUregisters.
       return FormatFPURegister(instr, format);
     }
-    case 's': {   // 'sa
+    case 's': {   // 'sa.
       switch (format[1]) {
         case 'a': {
           ASSERT(STRING_STARTS_WITH(format, "sa"));
@@ -388,6 +406,17 @@
           PrintSd(instr);
           return 2;
         }
+        case 's': {
+          if (format[2] == '1') {
+              ASSERT(STRING_STARTS_WITH(format, "ss1"));  /* ext size */
+              PrintSs1(instr);
+              return 3;
+          } else {
+              ASSERT(STRING_STARTS_WITH(format, "ss2"));  /* ins size */
+              PrintSs2(instr);
+              return 3;
+          }
+        }
       }
     }
     case 'b': {   // 'bc - Special for bc1 cc field.
@@ -432,29 +461,29 @@
 
 void Decoder::DecodeTypeRegister(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (instr->RsFieldRaw()) {
         case BC1:   // bc1 handled in DecodeTypeImmediate.
           UNREACHABLE();
           break;
         case MFC1:
-          Format(instr, "mfc1   'rt, 'fs");
+          Format(instr, "mfc1    'rt, 'fs");
           break;
         case MFHC1:
-          Format(instr, "mfhc1  'rt, 'fs");
+          Format(instr, "mfhc1   'rt, 'fs");
           break;
         case MTC1:
-          Format(instr, "mtc1   'rt, 'fs");
+          Format(instr, "mtc1    'rt, 'fs");
           break;
         // These are called "fs" too, although they are not FPU registers.
         case CTC1:
-          Format(instr, "ctc1   'rt, 'fs");
+          Format(instr, "ctc1    'rt, 'fs");
           break;
         case CFC1:
-          Format(instr, "cfc1   'rt, 'fs");
+          Format(instr, "cfc1    'rt, 'fs");
           break;
         case MTHC1:
-          Format(instr, "mthc1  'rt, 'fs");
+          Format(instr, "mthc1   'rt, 'fs");
           break;
         case D:
           switch (instr->FunctionFieldRaw()) {
@@ -480,7 +509,7 @@
               Format(instr, "neg.d   'fd, 'fs");
               break;
             case SQRT_D:
-              Format(instr, "sqrt.d   'fd, 'fs");
+              Format(instr, "sqrt.d  'fd, 'fs");
               break;
             case CVT_W_D:
               Format(instr, "cvt.w.d 'fd, 'fs");
@@ -592,134 +621,134 @@
     case SPECIAL:
       switch (instr->FunctionFieldRaw()) {
         case JR:
-          Format(instr, "jr   'rs");
+          Format(instr, "jr      'rs");
           break;
         case JALR:
-          Format(instr, "jalr 'rs");
+          Format(instr, "jalr    'rs");
           break;
         case SLL:
           if ( 0x0 == static_cast<int>(instr->InstructionBits()))
             Format(instr, "nop");
           else
-            Format(instr, "sll  'rd, 'rt, 'sa");
+            Format(instr, "sll     'rd, 'rt, 'sa");
           break;
         case SRL:
           if (instr->RsValue() == 0) {
-            Format(instr, "srl  'rd, 'rt, 'sa");
+            Format(instr, "srl     'rd, 'rt, 'sa");
           } else {
             if (mips32r2) {
-              Format(instr, "rotr  'rd, 'rt, 'sa");
+              Format(instr, "rotr    'rd, 'rt, 'sa");
             } else {
               Unknown(instr);
             }
           }
           break;
         case SRA:
-          Format(instr, "sra  'rd, 'rt, 'sa");
+          Format(instr, "sra     'rd, 'rt, 'sa");
           break;
         case SLLV:
-          Format(instr, "sllv 'rd, 'rt, 'rs");
+          Format(instr, "sllv    'rd, 'rt, 'rs");
           break;
         case SRLV:
           if (instr->SaValue() == 0) {
-            Format(instr, "srlv 'rd, 'rt, 'rs");
+            Format(instr, "srlv    'rd, 'rt, 'rs");
           } else {
             if (mips32r2) {
-              Format(instr, "rotrv 'rd, 'rt, 'rs");
+              Format(instr, "rotrv   'rd, 'rt, 'rs");
             } else {
               Unknown(instr);
             }
           }
           break;
         case SRAV:
-          Format(instr, "srav 'rd, 'rt, 'rs");
+          Format(instr, "srav    'rd, 'rt, 'rs");
           break;
         case MFHI:
-          Format(instr, "mfhi 'rd");
+          Format(instr, "mfhi    'rd");
           break;
         case MFLO:
-          Format(instr, "mflo 'rd");
+          Format(instr, "mflo    'rd");
           break;
         case MULT:
-          Format(instr, "mult 'rs, 'rt");
+          Format(instr, "mult    'rs, 'rt");
           break;
         case MULTU:
-          Format(instr, "multu  'rs, 'rt");
+          Format(instr, "multu   'rs, 'rt");
           break;
         case DIV:
-          Format(instr, "div  'rs, 'rt");
+          Format(instr, "div     'rs, 'rt");
           break;
         case DIVU:
-          Format(instr, "divu 'rs, 'rt");
+          Format(instr, "divu    'rs, 'rt");
           break;
         case ADD:
-          Format(instr, "add  'rd, 'rs, 'rt");
+          Format(instr, "add     'rd, 'rs, 'rt");
           break;
         case ADDU:
-          Format(instr, "addu 'rd, 'rs, 'rt");
+          Format(instr, "addu    'rd, 'rs, 'rt");
           break;
         case SUB:
-          Format(instr, "sub  'rd, 'rs, 'rt");
+          Format(instr, "sub     'rd, 'rs, 'rt");
           break;
         case SUBU:
-          Format(instr, "sub  'rd, 'rs, 'rt");
+          Format(instr, "subu    'rd, 'rs, 'rt");
           break;
         case AND:
-          Format(instr, "and  'rd, 'rs, 'rt");
+          Format(instr, "and     'rd, 'rs, 'rt");
           break;
         case OR:
           if (0 == instr->RsValue()) {
-            Format(instr, "mov  'rd, 'rt");
+            Format(instr, "mov     'rd, 'rt");
           } else if (0 == instr->RtValue()) {
-            Format(instr, "mov  'rd, 'rs");
+            Format(instr, "mov     'rd, 'rs");
           } else {
-            Format(instr, "or   'rd, 'rs, 'rt");
+            Format(instr, "or      'rd, 'rs, 'rt");
           }
           break;
         case XOR:
-          Format(instr, "xor  'rd, 'rs, 'rt");
+          Format(instr, "xor     'rd, 'rs, 'rt");
           break;
         case NOR:
-          Format(instr, "nor  'rd, 'rs, 'rt");
+          Format(instr, "nor     'rd, 'rs, 'rt");
           break;
         case SLT:
-          Format(instr, "slt  'rd, 'rs, 'rt");
+          Format(instr, "slt     'rd, 'rs, 'rt");
           break;
         case SLTU:
-          Format(instr, "sltu 'rd, 'rs, 'rt");
+          Format(instr, "sltu    'rd, 'rs, 'rt");
           break;
         case BREAK:
           Format(instr, "break, code: 'code");
           break;
         case TGE:
-          Format(instr, "tge  'rs, 'rt, code: 'code");
+          Format(instr, "tge     'rs, 'rt, code: 'code");
           break;
         case TGEU:
-          Format(instr, "tgeu 'rs, 'rt, code: 'code");
+          Format(instr, "tgeu    'rs, 'rt, code: 'code");
           break;
         case TLT:
-          Format(instr, "tlt  'rs, 'rt, code: 'code");
+          Format(instr, "tlt     'rs, 'rt, code: 'code");
           break;
         case TLTU:
-          Format(instr, "tltu 'rs, 'rt, code: 'code");
+          Format(instr, "tltu    'rs, 'rt, code: 'code");
           break;
         case TEQ:
-          Format(instr, "teq  'rs, 'rt, code: 'code");
+          Format(instr, "teq     'rs, 'rt, code: 'code");
           break;
         case TNE:
-          Format(instr, "tne  'rs, 'rt, code: 'code");
+          Format(instr, "tne     'rs, 'rt, code: 'code");
           break;
         case MOVZ:
-          Format(instr, "movz 'rd, 'rs, 'rt");
+          Format(instr, "movz    'rd, 'rs, 'rt");
           break;
         case MOVN:
-          Format(instr, "movn 'rd, 'rs, 'rt");
+          Format(instr, "movn    'rd, 'rs, 'rt");
           break;
         case MOVCI:
           if (instr->Bit(16)) {
-            Format(instr, "movt 'rd, 'rs, 'Cc");
+            Format(instr, "movt    'rd, 'rs, 'bc");
           } else {
-            Format(instr, "movf 'rd, 'rs, 'Cc");
+            Format(instr, "movf    'rd, 'rs, 'bc");
           }
           break;
         default:
@@ -729,10 +758,10 @@
     case SPECIAL2:
       switch (instr->FunctionFieldRaw()) {
         case MUL:
-          Format(instr, "mul  'rd, 'rs, 'rt");
+          Format(instr, "mul     'rd, 'rs, 'rt");
           break;
         case CLZ:
-          Format(instr, "clz  'rd, 'rs");
+          Format(instr, "clz     'rd, 'rs");
           break;
         default:
           UNREACHABLE();
@@ -742,7 +771,7 @@
       switch (instr->FunctionFieldRaw()) {
         case INS: {
           if (mips32r2) {
-            Format(instr, "ins  'rt, 'rs, 'sd, 'sa");
+            Format(instr, "ins     'rt, 'rs, 'sa, 'ss2");
           } else {
             Unknown(instr);
           }
@@ -750,7 +779,7 @@
         }
         case EXT: {
           if (mips32r2) {
-            Format(instr, "ext  'rt, 'rs, 'sd, 'sa");
+            Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
           } else {
             Unknown(instr);
           }
@@ -785,16 +814,16 @@
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
-          Format(instr, "bltz 'rs, 'imm16u");
+          Format(instr, "bltz    'rs, 'imm16u");
           break;
         case BLTZAL:
-          Format(instr, "bltzal 'rs, 'imm16u");
+          Format(instr, "bltzal  'rs, 'imm16u");
           break;
         case BGEZ:
-          Format(instr, "bgez 'rs, 'imm16u");
+          Format(instr, "bgez    'rs, 'imm16u");
           break;
         case BGEZAL:
-          Format(instr, "bgezal 'rs, 'imm16u");
+          Format(instr, "bgezal  'rs, 'imm16u");
           break;
         default:
           UNREACHABLE();
@@ -802,90 +831,90 @@
     break;  // Case REGIMM.
     // ------------- Branch instructions.
     case BEQ:
-      Format(instr, "beq  'rs, 'rt, 'imm16u");
+      Format(instr, "beq     'rs, 'rt, 'imm16u");
       break;
     case BNE:
-      Format(instr, "bne  'rs, 'rt, 'imm16u");
+      Format(instr, "bne     'rs, 'rt, 'imm16u");
       break;
     case BLEZ:
-      Format(instr, "blez 'rs, 'imm16u");
+      Format(instr, "blez    'rs, 'imm16u");
       break;
     case BGTZ:
-      Format(instr, "bgtz 'rs, 'imm16u");
+      Format(instr, "bgtz    'rs, 'imm16u");
       break;
     // ------------- Arithmetic instructions.
     case ADDI:
-      Format(instr, "addi   'rt, 'rs, 'imm16s");
+      Format(instr, "addi    'rt, 'rs, 'imm16s");
       break;
     case ADDIU:
-      Format(instr, "addiu  'rt, 'rs, 'imm16s");
+      Format(instr, "addiu   'rt, 'rs, 'imm16s");
       break;
     case SLTI:
-      Format(instr, "slti   'rt, 'rs, 'imm16s");
+      Format(instr, "slti    'rt, 'rs, 'imm16s");
       break;
     case SLTIU:
-      Format(instr, "sltiu  'rt, 'rs, 'imm16u");
+      Format(instr, "sltiu   'rt, 'rs, 'imm16u");
       break;
     case ANDI:
-      Format(instr, "andi   'rt, 'rs, 'imm16x");
+      Format(instr, "andi    'rt, 'rs, 'imm16x");
       break;
     case ORI:
-      Format(instr, "ori    'rt, 'rs, 'imm16x");
+      Format(instr, "ori     'rt, 'rs, 'imm16x");
       break;
     case XORI:
-      Format(instr, "xori   'rt, 'rs, 'imm16x");
+      Format(instr, "xori    'rt, 'rs, 'imm16x");
       break;
     case LUI:
-      Format(instr, "lui    'rt, 'imm16x");
+      Format(instr, "lui     'rt, 'imm16x");
       break;
     // ------------- Memory instructions.
     case LB:
-      Format(instr, "lb     'rt, 'imm16s('rs)");
+      Format(instr, "lb      'rt, 'imm16s('rs)");
       break;
     case LH:
-      Format(instr, "lh     'rt, 'imm16s('rs)");
+      Format(instr, "lh      'rt, 'imm16s('rs)");
       break;
     case LWL:
-      Format(instr, "lwl    'rt, 'imm16s('rs)");
+      Format(instr, "lwl     'rt, 'imm16s('rs)");
       break;
     case LW:
-      Format(instr, "lw     'rt, 'imm16s('rs)");
+      Format(instr, "lw      'rt, 'imm16s('rs)");
       break;
     case LBU:
-      Format(instr, "lbu    'rt, 'imm16s('rs)");
+      Format(instr, "lbu     'rt, 'imm16s('rs)");
       break;
     case LHU:
-      Format(instr, "lhu    'rt, 'imm16s('rs)");
+      Format(instr, "lhu     'rt, 'imm16s('rs)");
       break;
     case LWR:
-      Format(instr, "lwr    'rt, 'imm16s('rs)");
+      Format(instr, "lwr     'rt, 'imm16s('rs)");
       break;
     case SB:
-      Format(instr, "sb     'rt, 'imm16s('rs)");
+      Format(instr, "sb      'rt, 'imm16s('rs)");
       break;
     case SH:
-      Format(instr, "sh     'rt, 'imm16s('rs)");
+      Format(instr, "sh      'rt, 'imm16s('rs)");
       break;
     case SWL:
-      Format(instr, "swl    'rt, 'imm16s('rs)");
+      Format(instr, "swl     'rt, 'imm16s('rs)");
       break;
     case SW:
-      Format(instr, "sw     'rt, 'imm16s('rs)");
+      Format(instr, "sw      'rt, 'imm16s('rs)");
       break;
     case SWR:
-      Format(instr, "swr    'rt, 'imm16s('rs)");
+      Format(instr, "swr     'rt, 'imm16s('rs)");
       break;
     case LWC1:
-      Format(instr, "lwc1   'ft, 'imm16s('rs)");
+      Format(instr, "lwc1    'ft, 'imm16s('rs)");
       break;
     case LDC1:
-      Format(instr, "ldc1   'ft, 'imm16s('rs)");
+      Format(instr, "ldc1    'ft, 'imm16s('rs)");
       break;
     case SWC1:
-      Format(instr, "swc1   'ft, 'imm16s('rs)");
+      Format(instr, "swc1    'ft, 'imm16s('rs)");
       break;
     case SDC1:
-      Format(instr, "sdc1   'ft, 'imm16s('rs)");
+      Format(instr, "sdc1    'ft, 'imm16s('rs)");
       break;
     default:
       UNREACHABLE();
@@ -897,10 +926,10 @@
 void Decoder::DecodeTypeJump(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
     case J:
-      Format(instr, "j    'imm26");
+      Format(instr, "j       'imm26x");
       break;
     case JAL:
-      Format(instr, "jal  'imm26");
+      Format(instr, "jal     'imm26x");
       break;
     default:
       UNREACHABLE();
@@ -909,7 +938,7 @@
 
 
 // Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte_* instr_ptr) {
+int Decoder::InstructionDecode(byte* instr_ptr) {
   Instruction* instr = Instruction::At(instr_ptr);
   // Print raw instruction bytes.
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -929,6 +958,7 @@
       break;
     }
     default: {
+      Format(instr, "UNSUPPORTED");
       UNSUPPORTED_MIPS();
     }
   }
@@ -944,15 +974,13 @@
 
 namespace disasm {
 
-using v8::internal::byte_;
-
-const char* NameConverter::NameOfAddress(byte_* addr) const {
+const char* NameConverter::NameOfAddress(byte* addr) const {
   v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
   return tmp_buffer_.start();
 }
 
 
-const char* NameConverter::NameOfConstant(byte_* addr) const {
+const char* NameConverter::NameOfConstant(byte* addr) const {
   return NameOfAddress(addr);
 }
 
@@ -968,12 +996,12 @@
 
 
 const char* NameConverter::NameOfByteCPURegister(int reg) const {
-  UNREACHABLE();  // MIPS does not have the concept of a byte register
+  UNREACHABLE();  // MIPS does not have the concept of a byte register.
   return "nobytereg";
 }
 
 
-const char* NameConverter::NameInCode(byte_* addr) const {
+const char* NameConverter::NameInCode(byte* addr) const {
   // The default name converter is called for unknown code. So we will not try
   // to access any memory.
   return "";
@@ -990,25 +1018,25 @@
 
 
 int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
-                                    byte_* instruction) {
+                                    byte* instruction) {
   v8::internal::Decoder d(converter_, buffer);
   return d.InstructionDecode(instruction);
 }
 
 
 // The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
   return -1;
 }
 
 
-void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
   NameConverter converter;
   Disassembler d(converter);
-  for (byte_* pc = begin; pc < end;) {
+  for (byte* pc = begin; pc < end;) {
     v8::internal::EmbeddedVector<char, 128> buffer;
     buffer[0] = '\0';
-    byte_* prev_pc = pc;
+    byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
     fprintf(f, "%p    %08x      %s\n",
             prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index e2e0c91..faaa0e0 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -38,8 +38,7 @@
 
 
 Address ExitFrame::ComputeStackPointer(Address fp) {
-  UNIMPLEMENTED_MIPS();
-  return fp;
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
 
 
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index f507590..2c83893 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -30,7 +30,6 @@
 #ifndef V8_MIPS_FRAMES_MIPS_H_
 #define V8_MIPS_FRAMES_MIPS_H_
 
-
 namespace v8 {
 namespace internal {
 
@@ -40,13 +39,22 @@
 static const int kNumRegs = 32;
 
 static const RegList kJSCallerSaved =
-  1 << 2 |  // v0
-  1 << 4 |  // a0
-  1 << 5 |  // a1
-  1 << 6 |  // a2
-  1 << 7;   // a3
+  1 << 2  |  // v0
+  1 << 3  |  // v1
+  1 << 4  |  // a0
+  1 << 5  |  // a1
+  1 << 6  |  // a2
+  1 << 7  |  // a3
+  1 << 8  |  // t0
+  1 << 9  |  // t1
+  1 << 10 |  // t2
+  1 << 11 |  // t3
+  1 << 12 |  // t4
+  1 << 13 |  // t5
+  1 << 14 |  // t6
+  1 << 15;   // t7
 
-static const int kNumJSCallerSaved = 5;
+static const int kNumJSCallerSaved = 14;
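A hedged sketch (not part of this patch) of the relationship between the bitmask above and register codes: the n-th JS caller-saved code is the position of the n-th set bit (codes 2-15 for v0, v1, a0-a3, t0-t7). The real JSCallerSavedCode helper may be implemented differently; this only illustrates the mapping:

#include <cassert>
#include <cstdint>

static int NthSetBit(uint32_t mask, int n) {
  for (int code = 0; code < 32; code++) {
    if (mask & (1u << code)) {
      if (n == 0) return code;
      n--;
    }
  }
  return -1;  // Fewer than n + 1 bits set.
}

int main() {
  const uint32_t kJSCallerSaved = 0x0000fffcu;  // Bits 2..15, as listed above.
  assert(NthSetBit(kJSCallerSaved, 0) == 2);    // v0
  assert(NthSetBit(kJSCallerSaved, 13) == 15);  // t7
  return 0;
}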
 
 
 // Return the code of the n-th caller-saved register available to JavaScript
@@ -56,19 +64,30 @@
 
 // Callee-saved registers preserved when switching from C to JavaScript.
 static const RegList kCalleeSaved =
-  // Saved temporaries.
-  1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
-  1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
-  // gp, sp, fp
-  1 << 28 | 1 << 29 | 1 << 30;
+  1 << 16 |  // s0
+  1 << 17 |  // s1
+  1 << 18 |  // s2
+  1 << 19 |  // s3
+  1 << 20 |  // s4
+  1 << 21 |  // s5
+  1 << 22 |  // s6 (roots in JavaScript code)
+  1 << 23 |  // s7 (cp in JavaScript code)
+  1 << 30;   // fp/s8
 
-static const int kNumCalleeSaved = 11;
+static const int kNumCalleeSaved = 9;
 
+static const RegList kCalleeSavedFPU =
+  1 << 20 |  // f20
+  1 << 22 |  // f22
+  1 << 24 |  // f24
+  1 << 26 |  // f26
+  1 << 28 |  // f28
+  1 << 30;   // f30
 
+static const int kNumCalleeSavedFPU = 6;
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
-// TODO(mips): Only 8 registers may actually be sufficient. Revisit.
-static const int kNumSafepointRegisters = 16;
+static const int kNumSafepointRegisters = 24;
 
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
@@ -79,15 +98,53 @@
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+static const int kUndefIndex = -1;
+// Maps each register code to the stack index at which that register is saved.
+static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+  kUndefIndex,  // zero_reg
+  kUndefIndex,  // at
+  0,   // v0
+  1,   // v1
+  2,   // a0
+  3,   // a1
+  4,   // a2
+  5,   // a3
+  6,   // t0
+  7,   // t1
+  8,   // t2
+  9,   // t3
+  10,  // t4
+  11,  // t5
+  12,  // t6
+  13,  // t7
+  14,  // s0
+  15,  // s1
+  16,  // s2
+  17,  // s3
+  18,  // s4
+  19,  // s5
+  20,  // s6
+  21,  // s7
+  kUndefIndex,  // t8
+  kUndefIndex,  // t9
+  kUndefIndex,  // k0
+  kUndefIndex,  // k1
+  kUndefIndex,  // gp
+  kUndefIndex,  // sp
+  22,  // fp
+  kUndefIndex
+};
+
 
 // ----------------------------------------------------
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset  = 0 * kPointerSize;
-  static const int kStateOffset = 1 * kPointerSize;
-  static const int kFPOffset    = 2 * kPointerSize;
-  static const int kPCOffset    = 3 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kStateOffset   = 1 * kPointerSize;
+  static const int kContextOffset = 2 * kPointerSize;
+  static const int kFPOffset      = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
   static const int kSize = kPCOffset + kPointerSize;
 };
@@ -101,22 +158,24 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -1 * kPointerSize;
-  // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
-  static const int kCodeOffset = -1 * kPointerSize;
-  static const int kSPOffset = -1 * kPointerSize;
+  // See some explanation in MacroAssembler::EnterExitFrame.
+  // This marks the top of the extra allocated stack space.
+  static const int kStackSpaceOffset = -3 * kPointerSize;
 
-  // TODO(mips): Use a patched sp value on the stack instead.
-  // A marker of 0 indicates that double registers are saved.
-  static const int kMarkerOffset = -2 * kPointerSize;
+  static const int kCodeOffset = -2 * kPointerSize;
+
+  static const int kSPOffset = -1 * kPointerSize;
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
   // The calling JS function is between FP and PC.
   static const int kCallerPCOffset = +1 * kPointerSize;
 
+  // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+  static const int kCallerSPOffset = +2 * kPointerSize;
+
   // FP-relative displacement of the caller's SP.
-  static const int kCallerSPDisplacement = +3 * kPointerSize;
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
 };
 
 
@@ -134,8 +193,6 @@
   static const int kRArgsSlotsSize = 4 * kPointerSize;
   static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
 
-  // C/C++ argument slots size.
-  static const int kCArgsSlotsSize = 4 * kPointerSize;
   // JS argument slots size.
   static const int kJSArgsSlotsSize = 0 * kPointerSize;
   // Assembly builtins argument slots size.
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 87507ff..9a210c4 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -38,7 +38,7 @@
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -53,6 +53,74 @@
 
 #define __ ACCESS_MASM(masm_)
 
+
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
+// A patch site is a location in the code that can be patched. This class has
+// a number of methods to emit the patchable code and a method, EmitPatchInfo,
+// to record a marker back to the patchable code. The marker is an
+// andi at, rx, #yyy instruction, where x * 0x0000ffff + yyy (the raw 16-bit
+// immediate value) is the delta from the pc to the first instruction of the
+// patchable code.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  // When initially emitting this, ensure that a jump is always generated to
+  // skip the inlined smi code.
+  void EmitJumpIfNotSmi(Register reg, Label* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Always taken before patched.
+    __ Branch(target, eq, at, Operand(zero_reg));
+  }
+
+  // When initially emitting this, ensure that a jump is never generated to
+  // skip the inlined smi code.
+  void EmitJumpIfSmi(Register reg, Label* target) {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Never taken before patched.
+    __ Branch(target, ne, at, Operand(zero_reg));
+  }
+
+  void EmitPatchInfo() {
+    if (patch_site_.is_bound()) {
+      int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+      Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+      __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+#ifdef DEBUG
+      info_emitted_ = true;
+#endif
+    } else {
+      __ nop();  // Signals no inlined code.
+    }
+  }
+
+ private:
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
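A standalone sketch (not part of this patch) of the delta encoding described in the JumpPatchSite comment above: EmitPatchInfo splits the delta as reg_code * 0xffff + imm16, so a decoder can rebuild it from the andi's rs field and immediate. The helper names here are illustrative only:

#include <cassert>

static int EncodeRegCode(int delta) { return delta / 0xffff; }
static int EncodeImm16(int delta) { return delta % 0xffff; }
static int DecodeDelta(int reg_code, int imm16) {
  return reg_code * 0xffff + imm16;
}

int main() {
  const int deltas[] = {0, 5, 0xfffe, 0xffff, 0x12345};
  for (int delta : deltas) {
    assert(DecodeDelta(EncodeRegCode(delta), EncodeImm16(delta)) == delta);
  }
  return 0;
}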
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
@@ -68,189 +136,545 @@
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-mips.h for its layout.
 void FullCodeGenerator::Generate(CompilationInfo* info) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(info_ == NULL);
+  info_ = info;
+  scope_ = info->scope();
+  SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop-at");
+  }
+#endif
+
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). t1 is zero for method calls and non-zero for
+  // function calls.
+  if (info->is_strict_mode() || info->is_native()) {
+    Label ok;
+    __ Branch(&ok, eq, t1, Operand(zero_reg));
+    int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ sw(a2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
+  int locals_count = info->scope()->num_stack_slots();
+
+  __ Push(ra, fp, cp, a1);
+  if (locals_count > 0) {
+    // Load undefined value here, so the value is ready for the loop
+    // below.
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  }
+  // Adjust fp to point to caller's fp.
+  __ Addu(fp, sp, Operand(2 * kPointerSize));
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    for (int i = 0; i < locals_count; i++) {
+      __ push(at);
+    }
+  }
+
+  bool function_in_register = true;
+
+  // Possibly allocate a local context.
+  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment cmnt(masm_, "[ Allocate local context");
+    // Argument to NewContext is the function, which is in a1.
+    __ push(a1);
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    function_in_register = false;
+    // Context is returned in both v0 and cp.  It replaces the context
+    // passed to us.  It's saved in the stack and kept live in cp.
+    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = info->scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                                 (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ lw(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        __ li(a1, Operand(Context::SlotOffset(var->index())));
+        __ addu(a2, cp, a1);
+        __ sw(a0, MemOperand(a2, 0));
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use two more registers to avoid
+        // clobbering cp.
+        __ mov(a2, cp);
+        __ RecordWrite(a2, a1, a3);
+      }
+    }
+  }
+
+  Variable* arguments = scope()->arguments();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (!function_in_register) {
+      // Load this again, if it's used by the local context below.
+      __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    } else {
+      __ mov(a3, a1);
+    }
+    // Receiver is just before the parameters on the caller's stack.
+    int num_parameters = info->scope()->num_parameters();
+    int offset = num_parameters * kPointerSize;
+    __ Addu(a2, fp,
+           Operand(StandardFrameConstants::kCallerSPOffset + offset));
+    __ li(a1, Operand(Smi::FromInt(num_parameters)));
+    __ Push(a3, a2, a1);
+
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite the receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+    ArgumentsAccessStub::Type type;
+    if (is_strict_mode()) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+    }
+    ArgumentsAccessStub stub(type);
+    __ CallStub(&stub);
+
+    SetVar(arguments, v0, a1, a2);
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+
+  } else {
+    PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+    { Comment cmnt(masm_, "[ Declarations");
+      // For named function expressions, declare the function name as a
+      // constant.
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+
+    { Comment cmnt(masm_, "[ Stack check");
+      PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+      Label ok;
+      __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+      __ Branch(&ok, hs, sp, Operand(t0));
+      StackCheckStub stub;
+      __ CallStub(&stub);
+      __ bind(&ok);
+    }
+
+    { Comment cmnt(masm_, "[ Body");
+      ASSERT(loop_depth() == 0);
+      VisitStatements(function()->body());
+      ASSERT(loop_depth() == 0);
+    }
+  }
+
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  }
+  EmitReturnSequence();
 }
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(Smi::FromInt(0) == 0);
+  __ mov(v0, zero_reg);
 }
 
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Stack check");
+  Label ok;
+  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(t0));
+  StackCheckStub stub;
+  // Record a mapping of this PC offset to the OSR id.  This is used to find
+  // the AST id from the unoptimized code in order to use it as a key into
+  // the deoptimization input data found in the optimized code.
+  RecordStackCheck(stmt->OsrEntryId());
+
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  // Record a mapping of the OSR id to this PC.  This is used if the OSR
+  // entry becomes the target of a bailout.  We don't expect it to be, but
+  // we want it to work if it is.
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
 
 void FullCodeGenerator::EmitReturnSequence() {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Return sequence");
+  if (return_label_.is_bound()) {
+    __ Branch(&return_label_);
+  } else {
+    __ bind(&return_label_);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in v0.
+      __ push(v0);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+
+#ifdef DEBUG
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+#endif
+    // Make sure that the constant pool is not emitted inside of the return
+    // sequence.
+    { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting these instructions, as we rely on
+      // the code size here.
+      int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+      CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+      __ RecordJSReturn();
+      masm_->mov(sp, fp);
+      masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+      masm_->Addu(sp, sp, Operand(sp_delta));
+      masm_->Jump(ra);
+    }
+
+#ifdef DEBUG
+    // Check that the size of the code used for returning is large enough
+    // for the debugger's requirements.
+    ASSERT(Assembler::kJSReturnSequenceInstructions <=
+           masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+  }
 }
 
 
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
 }
 
 
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+  __ push(result_register());
 }
 
 
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+  // For simplicity we always test the accumulator register.
+  codegen()->GetVar(result_register(), var);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->DoTest(this);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  __ LoadRoot(result_register(), index);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  __ LoadRoot(result_register(), index);
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(this);
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  __ li(result_register(), Operand(lit));
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  // Immediates cannot be pushed directly.
+  __ li(result_register(), Operand(lit));
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ li(result_register(), Operand(lit));
+    codegen()->DoTest(this);
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  __ Drop(count);
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
 }
 
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ sw(reg, MemOperand(sp, 0));
 }
 
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->DoTest(this);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(materialize_true == materialize_false);
+  __ bind(materialize_true);
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(at, Heap::kTrueValueRootIndex);
+  __ push(at);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(at, Heap::kFalseValueRootIndex);
+  __ push(at);
+  __ bind(&done);
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(at, value_root_index);
+  __ push(at);
 }
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (flag) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  }
 }
 
 
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+                               Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
+  if (CpuFeatures::IsSupported(FPU)) {
+    ToBooleanStub stub(result_register());
+    __ CallStub(&stub);
+    __ mov(at, zero_reg);
+  } else {
+    // Call the runtime to find the boolean value of the source and then
+    // translate it into control flow to the pair of labels.
+    __ push(result_register());
+    __ CallRuntime(Runtime::kToBool, 1);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+  }
+  Split(ne, v0, Operand(at), if_true, if_false, fall_through);
 }
 
 
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void FullCodeGenerator::Split(Condition cc,
-//                               Register lhs,
-//                               const Operand&  rhs,
-//                               Label* if_true,
-//                               Label* if_false,
-//                               Label* fall_through) {
 void FullCodeGenerator::Split(Condition cc,
+                              Register lhs,
+                              const Operand&  rhs,
                               Label* if_true,
                               Label* if_false,
                               Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
+  if (if_false == fall_through) {
+    __ Branch(if_true, cc, lhs, rhs);
+  } else if (if_true == fall_through) {
+    __ Branch(if_false, NegateCondition(cc), lhs, rhs);
+  } else {
+    __ Branch(if_true, cc, lhs, rhs);
+    __ Branch(if_false);
+  }
 }
 
 
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  ASSERT(var->IsStackAllocated());
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
+  }
+  return MemOperand(fp, offset);
 }
 
 
-void FullCodeGenerator::Move(Register destination, Slot* source) {
-  UNIMPLEMENTED_MIPS();
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+  // Use destination as scratch.
+  MemOperand location = VarOperand(var, dest);
+  __ lw(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+                               Register src,
+                               Register scratch0,
+                               Register scratch1) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  ASSERT(!scratch0.is(src));
+  ASSERT(!scratch0.is(scratch1));
+  ASSERT(!scratch1.is(src));
+  MemOperand location = VarOperand(var, scratch0);
+  __ sw(src, location);
+  // Emit the write barrier code if the location is in the heap.
+  if (var->IsContextSlot()) {
+    __ RecordWrite(scratch0,
+                   Operand(Context::SlotOffset(var->index())),
+                   scratch1,
+                   src);
+  }
 }
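Editor's note: a minimal sketch, not part of this change, of the offset arithmetic in StackOperand above. Only the shape of the computation is taken from the code; pointer_size and local0_offset are placeholders for kPointerSize and JavaScriptFrameConstants::kLocal0Offset.

// Illustrative sketch (not part of the change): fp-relative slot offsets as
// StackOperand computes them.  Higher variable indexes sit at lower addresses;
// parameters are adjusted upwards past the receiver and the other parameters,
// locals downwards to the locals area.
static int StackSlotOffset(bool is_parameter, int index, int num_parameters,
                           int pointer_size, int local0_offset) {
  int offset = -index * pointer_size;
  if (is_parameter) {
    offset += (num_parameters + 1) * pointer_size;  // +1 is the receiver slot
  } else {
    offset += local0_offset;
  }
  return offset;  // used as MemOperand(fp, offset)
}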
 
 
@@ -258,452 +682,3506 @@
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
-  UNIMPLEMENTED_MIPS();
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  Label skip;
+  if (should_normalize) __ Branch(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
+  if (should_normalize) {
+    __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+    Split(eq, a0, Operand(t0), if_true, if_false, NULL);
+    __ bind(&skip);
+  }
 }
 
 
-void FullCodeGenerator::Move(Slot* dst,
-                             Register src,
-                             Register scratch1,
-                             Register scratch2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
+void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
                                         Variable::Mode mode,
-                                        FunctionLiteral* function) {
-  UNIMPLEMENTED_MIPS();
+                                        FunctionLiteral* function,
+                                        int* global_count) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      ++(*global_count);
+      break;
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (function != NULL) {
+        Comment cmnt(masm_, "[ Declaration");
+        VisitForAccumulatorValue(function);
+        __ sw(result_register(), StackOperand(variable));
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+          Comment cmnt(masm_, "[ Declaration");
+          __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+          __ sw(t0, StackOperand(variable));
+      }
+      break;
+
+    case Variable::CONTEXT:
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+        __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
+        __ Check(ne, "Declaration in with context.",
+                 a1, Operand(t0));
+        __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
+        __ Check(ne, "Declaration in catch context.",
+                 a1, Operand(t0));
+      }
+      if (function != NULL) {
+        Comment cmnt(masm_, "[ Declaration");
+        VisitForAccumulatorValue(function);
+        __ sw(result_register(), ContextOperand(cp, variable->index()));
+        int offset = Context::SlotOffset(variable->index());
+        // We know that we have written a function, which is not a smi.
+        __ mov(a1, cp);
+        __ RecordWrite(a1, Operand(offset), a2, result_register());
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+          Comment cmnt(masm_, "[ Declaration");
+          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+          __ sw(at, ContextOperand(cp, variable->index()));
+          // No write barrier since the_hole_value is in old space.
+          PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      }
+      break;
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ Declaration");
+      __ li(a2, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ li(a1, Operand(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (function != NULL) {
+        __ Push(cp, a2, a1);
+        // Push initial value for function declaration.
+        VisitForStackValue(function);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+          __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+          __ Push(cp, a2, a1, a0);
+      } else {
+        ASSERT(Smi::FromInt(0) == 0);
+        __ mov(a0, zero_reg);  // Smi::FromInt(0) indicates no initial value.
+        __ Push(cp, a2, a1, a0);
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
-  UNIMPLEMENTED_MIPS();
-}
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
 
 
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ li(a1, Operand(pairs));
+  __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
+  __ Push(cp, a1, a0);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
 }
 
 
 void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForAccumulatorValue(clause->label());
+    __ mov(a0, result_register());  // CompareStub requires args in a0, a1.
+
+    // Perform the comparison as if via '==='.
+    __ lw(a1, MemOperand(sp, 0));  // Switch value.
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
+    if (inline_smi_code) {
+      Label slow_case;
+      __ or_(a2, a1, a0);
+      patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+
+      __ Branch(&next_test, ne, a1, Operand(a0));
+      __ Drop(1);  // Switch value is no longer needed.
+      __ Branch(clause->body_target());
+
+      __ bind(&slow_case);
+    }
+
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    patch_site.EmitPatchInfo();
+
+    __ Branch(&next_test, ne, v0, Operand(zero_reg));
+    __ Drop(1);  // Switch value is no longer needed.
+    __ Branch(clause->body_target());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ Branch(nested_statement.break_label());
+  } else {
+    __ Branch(default_clause->body_target());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target());
+    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_label());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
 void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForAccumulatorValue(stmt->enumerable());
+  __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(at));
+  Register null_value = t1;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(null_value));
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(a0, &convert);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ bind(&convert);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);
+  __ bind(&done_convert);
+  __ push(a0);
+
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  Label next, call_runtime;
+  // Preload a couple of values used in the loop.
+  Register empty_fixed_array_value = t2;
+  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = t3;
+  __ LoadRoot(empty_descriptor_array_value,
+              Heap::kEmptyDescriptorArrayRootIndex);
+  __ mov(a1, a0);
+  __ bind(&next);
+
+  // Check that there are no elements.  Register a1 contains the
+  // current JS object we've reached through the prototype chain.
+  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in a2 for the subsequent
+  // prototype load.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (a3).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  __ Branch(&check_prototype, eq, a1, Operand(a0));
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+  // Load the prototype from the map and loop if non-null.
+  __ bind(&check_prototype);
+  __ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+  __ Branch(&next, ne, a1, Operand(null_value));
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
+  __ lw(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(a0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ mov(a2, v0);
+  __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  __ Branch(&fixed_array, ne, a1, Operand(at));
+
+  // We got a map in register v0. Get the enumeration cache from it.
+  __ bind(&use_cache);
+  __ LoadInstanceDescriptors(v0, a1);
+  __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
+  __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(v0);  // Map.
+  __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ li(a0, Operand(Smi::FromInt(0)));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(a2, a1, a0);
+  __ jmp(&loop);
+
+  // We got a fixed array in register v0. Iterate through that.
+  __ bind(&fixed_array);
+  __ li(a1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(a1, v0);
+  __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ li(a0, Operand(Smi::FromInt(0)));
+  __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  // Load the current count to a0, load the length to a1.
+  __ lw(a0, MemOperand(sp, 0 * kPointerSize));
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(loop_statement.break_label(), hs, a0, Operand(a1));
+
+  // Get the current entry of the array into register a3.
+  __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+  __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
+  __ lw(a3, MemOperand(t0));  // Current entry.
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register a2.
+  __ lw(a2, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ lw(a1, MemOperand(sp, 4 * kPointerSize));
+  __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&update_each, eq, t0, Operand(a2));
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property
+  // any more. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(a1);  // Enumerable.
+  __ push(a3);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ mov(a3, result_register());
+  __ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register a3.
+  __ bind(&update_each);
+  __ mov(result_register(), a3);
+  // Perform the assignment as if via '='.
+  { EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
+  }
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing the index
+  // (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_label());
+  __ pop(a0);
+  __ Addu(a0, a0, Operand(Smi::FromInt(1)));
+  __ push(a0);
+
+  EmitStackCheck(stmt);
+  __ Branch(&loop);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_label());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
 }
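Editor's note: for readability, a sketch (not part of this change) of the five stack slots the for-in code above keeps live between the loop condition and the final __ Drop(5). The enumerator names are invented; the layout is read off the MemOperand offsets in the loop.

// Illustrative sketch (not part of the change): sp-relative layout of the
// for-in loop state, one kPointerSize slot each.
enum ForInStackSlot {
  kIndexSlot      = 0,  // current index (smi)
  kLengthSlot     = 1,  // length of the key array (smi)
  kKeysSlot       = 2,  // enum cache bridge cache or fixed array of keys
  kMapSlot        = 3,  // expected map, or Smi 0 to force the slow check
  kEnumerableSlot = 4   // the object being iterated over
};
// Each slot is read as MemOperand(sp, slot * kPointerSize).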
 
 
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
-  UNIMPLEMENTED_MIPS();
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      !pretenure &&
+      scope()->is_function_scope() &&
+      info->num_literals() == 0) {
+    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    __ li(a0, Operand(info));
+    __ push(a0);
+    __ CallStub(&stub);
+  } else {
+    __ li(a0, Operand(info));
+    __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
+                              : Heap::kFalseValueRootIndex);
+    __ Push(cp, a0, a1);
+    __ CallRuntime(Runtime::kNewClosure, 3);
+  }
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ VariableProxy");
+  EmitVariableLoad(expr);
 }
 
 
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Label* slow) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+                                                      TypeofState typeof_state,
+                                                      Label* slow) {
+  Register current = cp;
+  Register next = a1;
+  Register temp = a2;
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      // Load next context in chain.
+      __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      current = next;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
+    Label loop, fast;
+    if (!current.is(next)) {
+      __ Move(next, current);
+    }
+    __ bind(&loop);
+    // Terminate at global context.
+    __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+    __ LoadRoot(t0, Heap::kGlobalContextMapRootIndex);
+    __ Branch(&fast, eq, temp, Operand(t0));
+    // Check that extension is NULL.
+    __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+    __ Branch(slow, ne, temp, Operand(zero_reg));
+    // Load next context in chain.
+    __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+    __ Branch(&loop);
+    __ bind(&fast);
+  }
+
+  __ lw(a0, GlobalObjectOperand());
+  __ li(a2, Operand(var->name()));
+  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+      ? RelocInfo::CODE_TARGET
+      : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  __ Call(ic, mode);
 }
 
 
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow,
-    Label* done) {
-  UNIMPLEMENTED_MIPS();
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+                                                                Label* slow) {
+  ASSERT(var->IsContextSlot());
+  Register context = cp;
+  Register next = a3;
+  Register temp = t0;
+
+  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      context = next;
+    }
+  }
+  // Check that last extension is NULL.
+  __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ Branch(slow, ne, temp, Operand(zero_reg));
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+  return ContextOperand(context, var->index());
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow) {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+                                                  TypeofState typeof_state,
+                                                  Label* slow,
+                                                  Label* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    __ Branch(done);
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+    Variable* local = var->local_if_not_shadowed();
+    __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
+    if (local->mode() == Variable::CONST) {
+      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+      __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+      __ movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
+    }
+    __ Branch(done);
+  }
 }
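Editor's note: a short sketch, not part of this change, that restates the branchless hole check used above for CONST slots; it only spells out the effect of the subu/movz pair.

// Illustrative sketch (not part of the change): subu makes 'at' zero exactly
// when the loaded value is the hole, and movz then overwrites v0 with
// undefined only in that case; that is, if (value == the_hole) value = undefined.
static int SelectIfHole(int value, int hole, int undef) {
  int at = value - hole;             // subu: zero iff value == hole
  return (at == 0) ? undef : value;  // movz: move only when 'at' is zero
}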
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+  // Record position before possible IC call.
+  SetSourcePosition(proxy->position());
+  Variable* var = proxy->var();
+
+  // Three cases: global variables, lookup variables, and all other types of
+  // variables.
+  switch (var->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "Global variable");
+      // Use inline caching. Variable name is passed in a2 and the global
+      // object (receiver) in a0.
+      __ lw(a0, GlobalObjectOperand());
+      __ li(a2, Operand(var->name()));
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      context()->Plug(v0);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, var->IsContextSlot()
+                              ? "Context variable"
+                              : "Stack variable");
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        GetVar(v0, var);
+        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+        __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+        if (var->mode() == Variable::LET) {
+          Label done;
+          __ Branch(&done, ne, at, Operand(zero_reg));
+          __ li(a0, Operand(var->name()));
+          __ push(a0);
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          __ bind(&done);
+        } else {
+          __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+          __ movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
+        }
+        context()->Plug(v0);
+      }
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Label done, slow;
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      __ bind(&slow);
+      Comment cmnt(masm_, "Lookup variable");
+      __ li(a1, Operand(var->name()));
+      __ Push(cp, a1);  // Context and name.
+      __ CallRuntime(Runtime::kLoadContextSlot, 2);
+      __ bind(&done);
+      context()->Plug(v0);
+    }
+  }
 }
 
 
 void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ RegExpLiteral");
+  Label materialized;
+  // Registers will be used as follows:
+  // t1 = materialized value (RegExp literal)
+  // t0 = JS function, literals array
+  // a3 = literal index
+  // a2 = RegExp pattern
+  // a1 = RegExp flags
+  // a0 = RegExp literal clone
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ lw(t1, FieldMemOperand(t0, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, t1, Operand(at));
+
+  // Create regexp literal using runtime function.
+  // Result will be in v0.
+  __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(expr->pattern()));
+  __ li(a1, Operand(expr->flags()));
+  __ Push(t0, a3, a2, a1);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ mov(t1, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(t1);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ push(a0);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ pop(t1);
+
+  __ bind(&allocated);
+
+  // After this, registers are used as follows:
+  // v0: Newly allocated regexp.
+  // t1: Materialized regexp.
+  // a2: temp.
+  __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  __ lw(a3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(expr->constant_properties()));
+  int flags = expr->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  flags |= expr->has_function()
+      ? ObjectLiteral::kHasFunction
+      : ObjectLiteral::kNoFlags;
+  __ li(a0, Operand(Smi::FromInt(flags)));
+  __ Push(a3, a2, a1, a0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+  } else {
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  }
+
+  // If result_saved is true the result is on top of the stack.  If
+  // result_saved is false the result is in v0.
+  bool result_saved = false;
+
+  // Mark all computed expressions that are bound to a key that
+  // is shadowed by a later occurrence of the same key. For the
+  // marked expressions, no store code is emitted.
+  expr->CalculateEmitStore();
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(v0);  // Save result on stack.
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+        // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(a0, result_register());
+            __ li(a2, Operand(key->handle()));
+            __ lw(a1, MemOperand(sp));
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
+          }
+          break;
+        }
+        // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Duplicate receiver on stack.
+        __ lw(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          __ li(a0, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
+          __ push(a0);
+          __ CallRuntime(Runtime::kSetProperty, 4);
+        } else {
+          __ Drop(3);
+        }
+        break;
+      case ObjectLiteral::Property::GETTER:
+      case ObjectLiteral::Property::SETTER:
+        // Duplicate receiver on stack.
+        __ lw(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        __ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+                           Smi::FromInt(1) :
+                           Smi::FromInt(0)));
+        __ push(a1);
+        VisitForStackValue(value);
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        break;
+    }
+  }
+
+  if (expr->has_function()) {
+    ASSERT(result_saved);
+    __ lw(a0, MemOperand(sp));
+    __ push(a0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+  __ mov(a0, result_register());
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(expr->constant_elements()));
+  __ Push(a3, a2, a1);
+  if (expr->constant_elements()->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+    __ CallStub(&stub);
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
+        1, a1, a2);
+  } else if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    __ CallStub(&stub);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(v0);
+      result_saved = true;
+    }
+    VisitForAccumulatorValue(subexpr);
+
+    // Store the subexpression value in the array's elements.
+    __ lw(a1, MemOperand(sp));  // Copy of array literal.
+    __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ sw(result_register(), FieldMemOperand(a1, offset));
+
+    // Update the write barrier for the array store with v0 as the scratch
+    // register.
+    __ RecordWrite(a1, Operand(offset), a2, result_register());
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Assignment");
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // on the left-hand side.
+  if (!expr->target()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->target());
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* property = expr->target()->AsProperty();
+  if (property != NULL) {
+    assign_type = (property->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForAccumulatorValue(property->obj());
+        __ push(result_register());
+      } else {
+        VisitForStackValue(property->obj());
+      }
+      break;
+    case KEYED_PROPERTY:
+      // We need the key and receiver on both the stack and in v0 and a1.
+      if (expr->is_compound()) {
+        VisitForStackValue(property->obj());
+        VisitForAccumulatorValue(property->key());
+        __ lw(a1, MemOperand(sp, 0));
+        __ push(v0);
+      } else {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+      }
+      break;
+  }
+
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
+  if (expr->is_compound()) {
+    { AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy());
+          PrepareForBailout(expr->target(), TOS_REG);
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+          break;
+      }
+    }
+
+    Token::Value op = expr->binary_op();
+    __ push(v0);  // Left operand goes on the stack.
+    VisitForAccumulatorValue(expr->value());
+
+    OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+        ? OVERWRITE_RIGHT
+        : NO_OVERWRITE;
+    SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
+    if (ShouldInlineSmiCase(op)) {
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
+                            op,
+                            mode,
+                            expr->target(),
+                            expr->value());
+    } else {
+      EmitBinaryOp(expr->binary_operation(), op, mode);
+    }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
+  } else {
+    VisitForAccumulatorValue(expr->value());
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(v0);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
 }
 
 
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  UNIMPLEMENTED_MIPS();
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  __ mov(a0, result_register());
+  __ li(a2, Operand(key->handle()));
+  // Call load IC. It has arguments receiver and property name a0 and a2.
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  UNIMPLEMENTED_MIPS();
+  SetSourcePosition(prop->position());
+  __ mov(a0, result_register());
+  // Call keyed load IC. It has arguments key and receiver in a0 and a1.
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
-                                              Expression* left,
-                                              Expression* right) {
-  UNIMPLEMENTED_MIPS();
+                                              Expression* left_expr,
+                                              Expression* right_expr) {
+  Label done, smi_case, stub_call;
+
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+
+  // Get the arguments.
+  Register left = a1;
+  Register right = a0;
+  __ pop(left);
+  __ mov(a0, result_register());
+
+  // Perform combined smi check on both operands.
+  __ Or(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+  __ bind(&stub_call);
+  BinaryOpStub stub(op, mode);
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
+  __ jmp(&done);
+
+  __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the type
+  // recording binary operation stub; see
+  // BinaryOpStub::GenerateSmiSmiOperation for comments.
+  switch (op) {
+    case Token::SAR:
+      __ Branch(&stub_call);
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ srav(right, left, scratch1);
+      __ And(v0, right, Operand(~kSmiTagMask));
+      break;
+    case Token::SHL: {
+      __ Branch(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ sllv(scratch1, scratch1, scratch2);
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::SHR: {
+      __ Branch(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srlv(scratch1, scratch1, scratch2);
+      __ And(scratch2, scratch1, 0xc0000000);
+      __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::ADD:
+      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::SUB:
+      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::MUL: {
+      __ SmiUntag(scratch1, right);
+      __ Mult(left, scratch1);
+      __ mflo(scratch1);
+      __ mfhi(scratch2);
+      __ sra(scratch1, scratch1, 31);
+      __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
+      __ mflo(v0);
+      __ Branch(&done, ne, v0, Operand(zero_reg));
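+      // The product is zero. A zero result is only a valid smi if neither
+      // operand is negative; otherwise the correct result would be -0.0,
+      // which cannot be represented as a smi, so fall back to the stub.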
+      __ Addu(scratch2, right, left);
+      __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+      ASSERT(Smi::FromInt(0) == 0);
+      __ mov(v0, zero_reg);
+      break;
+    }
+    case Token::BIT_OR:
+      __ Or(v0, left, Operand(right));
+      break;
+    case Token::BIT_AND:
+      __ And(v0, left, Operand(right));
+      break;
+    case Token::BIT_XOR:
+      __ Xor(v0, left, Operand(right));
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
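+// Editorial sketch (not part of the original patch): with kSmiTag == 0 and a
+// one-bit tag, a 32-bit tagged smi is just (value << 1), so the Token::ADD
+// fast path above is equivalent to the following C, assuming a compiler that
+// provides __builtin_sadd_overflow:
+//
+//   // Both inputs are known to be tagged smis.
+//   int32_t AddSmis(int32_t left_tagged, int32_t right_tagged,
+//                   bool* bailout) {
+//     int32_t sum;
+//     *bailout = __builtin_sadd_overflow(left_tagged, right_tagged, &sum);
+//     return sum;  // Still a valid tagged smi when *bailout is false.
+//   }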
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
-  UNIMPLEMENTED_MIPS();
+  __ mov(a0, result_register());
+  __ pop(a1);
+  BinaryOpStub stub(op, mode);
+  JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
-  UNIMPLEMENTED_MIPS();
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      VisitForAccumulatorValue(prop->obj());
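+      // The store IC expects the value in a0, the receiver in a1, and the
+      // property name in a2.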
+      __ mov(a1, result_register());
+      __ pop(a0);  // Restore value.
+      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ Call(ic);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
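+      // The keyed store IC expects the value in a0, the key in a1, and the
+      // receiver in a2.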
+      __ mov(a1, result_register());
+      __ pop(a2);
+      __ pop(a0);  // Restore value.
+      Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+        : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ Call(ic);
+      break;
+    }
+  }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
-  UNIMPLEMENTED_MIPS();
+  if (var->IsUnallocated()) {
+    // Global var, const, or let.
+    __ mov(a0, result_register());
+    __ li(a2, Operand(var->name()));
+    __ lw(a1, GlobalObjectOperand());
+    Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+
+  } else if (op == Token::INIT_CONST) {
+    // Const initializers need a write barrier.
+    ASSERT(!var->IsParameter());  // No const parameters.
+    if (var->IsStackLocal()) {
+      Label skip;
+      __ lw(a1, StackOperand(var));
+      __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+      __ Branch(&skip, ne, a1, Operand(t0));
+      __ sw(result_register(), StackOperand(var));
+      __ bind(&skip);
+    } else {
+      ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+      // Like var declarations, const declarations are hoisted to function
+      // scope.  However, unlike var initializers, const initializers are
+      // able to drill a hole to that function context, even from inside a
+      // 'with' context.  We thus bypass the normal static scope lookup for
+      // var->IsContextSlot().
+      __ push(v0);
+      __ li(a0, Operand(var->name()));
+      __ Push(cp, a0);  // Context and name.
+      __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+    }
+
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+    // Non-initializing assignment to let variable needs a write barrier.
+    if (var->IsLookupSlot()) {
+      __ push(v0);  // Value.
+      __ li(a1, Operand(var->name()));
+      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ Push(cp, a1, a0);  // Context, name, strict mode.
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
+    } else {
+      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      Label assign;
+      MemOperand location = VarOperand(var, a1);
+      __ lw(a3, location);
+      __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+      __ Branch(&assign, ne, a3, Operand(t0));
+      __ li(a3, Operand(var->name()));
+      __ push(a3);
+      __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      // Perform the assignment.
+      __ bind(&assign);
+      __ sw(result_register(), location);
+      if (var->IsContextSlot()) {
+        // RecordWrite may destroy all its register arguments.
+        __ mov(a3, result_register());
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWrite(a1, Operand(offset), a2, a3);
+      }
+    }
+
+  } else if (var->mode() != Variable::CONST) {
+    // Assignment to var or initializing assignment to let.
+    if (var->IsStackAllocated() || var->IsContextSlot()) {
+      MemOperand location = VarOperand(var, a1);
+      if (FLAG_debug_code && op == Token::INIT_LET) {
+        // Check for an uninitialized let binding.
+        __ lw(a2, location);
+        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+        __ Check(eq, "Let binding re-initialization.", a2, Operand(t0));
+      }
+      // Perform the assignment.
+      __ sw(v0, location);
+      if (var->IsContextSlot()) {
+        __ mov(a3, v0);
+        __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3);
+      }
+    } else {
+      ASSERT(var->IsLookupSlot());
+      __ push(v0);  // Value.
+      __ li(a1, Operand(var->name()));
+      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ Push(cp, a1, a0);  // Context, name, strict mode.
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
+    }
+  }
+  // Non-initializing assignments to consts are ignored.
 }
 
 
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  // Assignment to a property, using a named store IC.
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  ASSERT(prop->key()->AsLiteral() != NULL);
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    __ push(result_register());
+    __ lw(t0, MemOperand(sp, kPointerSize));  // Receiver is now under value.
+    __ push(t0);
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+    __ pop(result_register());
+  }
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  __ mov(a0, result_register());  // Load the value.
+  __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load receiver into a1. Leave a copy on the stack if needed for turning
+  // the receiver into the fast case.
+  if (expr->ends_initialization_block()) {
+    __ lw(a1, MemOperand(sp));
+  } else {
+    __ pop(a1);
+  }
+
+  Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(v0);  // Result of assignment, saved even if not needed.
+    // Receiver is under the result value.
+    __ lw(t0, MemOperand(sp, kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(v0);
+    __ Drop(1);
+  }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  // Assignment to a property, using a keyed store IC.
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    __ push(result_register());
+    // Receiver is now under the key and value.
+    __ lw(t0, MemOperand(sp, 2 * kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+    __ pop(result_register());
+  }
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  // Call keyed store IC.
+  // The arguments are:
+  // - a0 is the value,
+  // - a1 is the key,
+  // - a2 is the receiver.
+  __ mov(a0, result_register());
+  __ pop(a1);  // Key.
+  // Load receiver into a2. Leave a copy on the stack if needed for turning
+  // the receiver into the fast case.
+  if (expr->ends_initialization_block()) {
+    __ lw(a2, MemOperand(sp));
+  } else {
+    __ pop(a2);
+  }
+
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(v0);  // Result of assignment, saved even if not needed.
+    // Receiver is under the result value.
+    __ lw(t0, MemOperand(sp, kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(v0);
+    __ Drop(1);
+  }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitProperty(Property* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+
+  if (key->IsPropertyName()) {
+    VisitForAccumulatorValue(expr->obj());
+    EmitNamedPropertyLoad(expr);
+    context()->Plug(v0);
+  } else {
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
+    __ pop(a1);
+    EmitKeyedPropertyLoad(expr);
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+    __ li(a2, Operand(name));
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+  __ Call(ic, mode, expr->id());
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+                                            Expression* key) {
+  // Load the key.
+  VisitForAccumulatorValue(key);
+
+  // Swap the name of the function and the receiver on the stack to follow
+  // the calling convention for call ICs.
+  __ pop(a1);
+  __ push(v0);
+  __ push(a1);
+
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
+  __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, v0);  // Drop the key still on the stack.
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+  // Code common for calls using the call stub.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  CallFunctionStub stub(arg_count, flags);
+  __ CallStub(&stub);
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, v0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
+  // Push copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+  } else {
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+  }
+  __ push(a1);
+
+  // Push the receiver of the enclosing function and do runtime call.
+  int receiver_offset = 2 + info_->scope()->num_parameters();
+  __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
+  __ push(a1);
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ li(a1, Operand(Smi::FromInt(strict_mode)));
+  __ push(a1);
+
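+  // Note: the caller has already pushed a copy of the function to resolve, so
+  // together with the three pushes above this forms the four arguments of the
+  // runtime call.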
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
+  Comment cmnt(masm_, "[ Call");
+  Expression* callee = expr->expression();
+  VariableProxy* proxy = callee->AsVariableProxy();
+  Property* property = callee->AsProperty();
+
+  if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+      __ push(a2);  // Reserved receiver slot.
+
+      // Push the arguments.
+      for (int i = 0; i < arg_count; i++) {
+        VisitForStackValue(args->at(i));
+      }
+
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly
+      // in generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
+        // Push the function and resolve eval.
+        __ push(v0);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
+      // Push a copy of the function (found below the arguments) and
+      // resolve eval.
+      __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ push(a1);
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      __ bind(&done);
+
+      // The runtime call returns a pair of values in v0 (function) and
+      // v1 (receiver). Touch up the stack with the right values.
+      __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ sw(v1, MemOperand(sp, arg_count * kPointerSize));
+    }
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ CallStub(&stub);
+    RecordJSReturnSite(expr);
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    context()->DropAndPlug(1, v0);
+  } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
+    // Push global object as receiver for the call IC.
+    __ lw(a0, GlobalObjectOperand());
+    __ push(a0);
+    EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+    // Call to a lookup slot (dynamically introduced variable).
+    Label slow, done;
+
+    { PreservePositionScope scope(masm()->positions_recorder());
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+    }
+
+    __ bind(&slow);
+    // Call the runtime to find the function to call (returned in v0)
+    // and the object holding it (returned in v1).
+    __ push(context_register());
+    __ li(a2, Operand(proxy->name()));
+    __ push(a2);
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ Push(v0, v1);  // Function, receiver.
+
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      Label call;
+      __ Branch(&call);
+      __ bind(&done);
+      // Push function.
+      __ push(v0);
+      // The receiver is implicitly the global receiver. Indicate this
+      // by passing the hole to the call function stub.
+      __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+      __ push(a1);
+      __ bind(&call);
+    }
+
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot. That object could be the hole if the
+    // receiver is implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+  } else if (property != NULL) {
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(property->obj());
+    }
+    if (property->key()->IsPropertyName()) {
+      EmitCallWithIC(expr,
+                     property->key()->AsLiteral()->handle(),
+                     RelocInfo::CODE_TARGET);
+    } else {
+      EmitKeyedCallWithIC(expr, property->key());
+    }
+  } else {
+    // Call to an arbitrary expression not handled specially above.
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+    }
+    // Load global receiver object.
+    __ lw(a1, GlobalObjectOperand());
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+    __ push(a1);
+    // Emit function call.
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+  }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
 
 
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+
+  // Push constructor on the stack.  If it's not a function it's used as
+  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+  // ignored.
+  VisitForStackValue(expr->expression());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(expr->position());
+
+  // Load function and argument count into a1 and a0.
+  __ li(a0, Operand(arg_count));
+  __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+
+  Handle<Code> construct_builtin =
+      isolate()->builtins()->JSConstructCall();
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ And(t0, v0, Operand(kSmiTagMask));
+  Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
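+  // A non-negative smi has both the tag bit and the sign bit clear.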
+  __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+  Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ LoadRoot(at, Heap::kNullValueRootIndex);
+  __ Branch(if_true, eq, v0, Operand(at));
+  __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(if_false, ne, at, Operand(zero_reg));
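+  // Finally, check that the instance type is within the non-callable spec
+  // object range.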
+  __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  if (FLAG_debug_code) __ AbortIfSmi(v0);
+
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ Branch(if_true, ne, t0, Operand(zero_reg));
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kHashTableMapRootIndex);
+  __ Branch(if_false, eq, a2, Operand(t0));
+
+  // Look for valueOf symbol in the descriptor array, and indicate false if
+  // found. The type is not checked, so if it is a transition it is a false
+  // negative.
+  __ LoadInstanceDescriptors(a1, t0);
+  __ lw(a3, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: descriptor array
+  // a3: length of descriptor array
+  // Calculate the end of the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kPointerSize == 4);
+  __ Addu(a2, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a2, a2, t1);
+
+  // Calculate location of the first key name.
+  __ Addu(t0,
+          t0,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+                  DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // valueOf symbol, the result is false.
+  Label entry, loop;
+  // The use of t2 to store the valueOf symbol assumes that it is not otherwise
+  // used in the loop below.
+  __ li(t2, Operand(FACTORY->value_of_symbol()));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ lw(a3, MemOperand(t0, 0));
+  __ Branch(if_false, eq, a3, Operand(t2));
+  __ Addu(t0, t0, Operand(kPointerSize));
+  __ bind(&entry);
+  __ Branch(&loop, ne, t0, Operand(a2));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is
+  // false.
+  __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+  __ JumpIfSmi(a2, if_false);
+  __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ lw(a3, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+  __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ Branch(if_false, ne, a2, Operand(a3));
+
+  // Set the bit in the map to indicate that it has been checked as safe for
+  // the default valueOf, and set a true result.
+  __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ jmp(if_true);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a2);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(if_false);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_ARRAY_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // Get the frame pointer for the calling frame.
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne,
+            a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ pop(a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in a1 and the formal
+  // parameter count in a0.
+  VisitForAccumulatorValue(args->at(0));
+  __ mov(a1, v0);
+  __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&exit, ne, a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForAccumulatorValue(args->at(0));
+
+  // If the object is a smi, we return null.
+  __ JumpIfSmi(v0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ GetObjectType(v0, v0, a1);  // Map is now in v0.
+  __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+
+  // Check if the constructor in the map is a function.
+  __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+  // v0 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ lw(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ Branch(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(v0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForStackValue(args->at(1));
+    VisitForStackValue(args->at(2));
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  // Save the new heap number in callee-saved register s0, since
+  // we call out to external C code below.
+  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+
+  // Allocate a heap number.
+  __ CallRuntime(Runtime::kNumberAlloc, 0);
+  __ mov(s0, v0);   // Save result in s0, so it survives the C function call.
+
+  __ bind(&heapnumber_allocated);
+
+  // Convert 32 random bits in v0 to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+  if (CpuFeatures::IsSupported(FPU)) {
+    __ PrepareCallCFunction(1, a0);
+    __ li(a0, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+    CpuFeatures::Scope scope(FPU);
+    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+    __ li(a1, Operand(0x41300000));
+    // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+    __ Move(f12, v0, a1);
+    // Move 0x4130000000000000 to FPU.
+    __ Move(f14, zero_reg, a1);
+    // Subtract and store the result in the heap number.
+    __ sub_d(f0, f12, f14);
+    __ sdc1(f0, MemOperand(s0, HeapNumber::kValueOffset - kHeapObjectTag));
+    __ mov(v0, s0);
+  } else {
+    __ PrepareCallCFunction(2, a0);
+    __ mov(a0, s0);
+    __ li(a1, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(
+        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
+  }
+
+  context()->Plug(v0);
 }
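+// Editorial sketch (not part of the original patch): the FPU path above uses a
+// standard exponent trick. With 'bits' holding the 32 random bits, it is
+// equivalent to the following C (assuming <string.h> for memcpy):
+//
+//   uint64_t one_times_2_20 = 0x4130000000000000ULL;  // 1.0 x 2^20
+//   uint64_t with_random = one_times_2_20 | bits;     // 1.(20 0s)(bits) x 2^20
+//   double x, y;
+//   memcpy(&x, &with_random, sizeof x);
+//   memcpy(&y, &one_times_2_20, sizeof y);
+//   double random = x - y;  // == bits * 2^-32, uniformly in [0.0, 1.0)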
 
 
 void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ JumpIfSmi(v0, &done);
+  // If the object is not a value type, return the object.
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
+
+  __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  MathPowStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
+  __ pop(a1);  // v0 = value. a1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ JumpIfSmi(a1, &done);
+
+  // If the object is not a value type, return the value.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
+
+  // Store the value.
+  __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForStackValue(args->at(0));
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label done;
+  StringCharFromCodeGenerator generator(v0, a1);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(a1);
 }
 
 
 void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register scratch = a2;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharCodeAtGenerator generator(object,
+                                      index,
+                                      scratch,
+                                      result,
+                                      &need_conversion,
+                                      &need_conversion,
+                                      &index_out_of_range,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // NaN.
+  __ LoadRoot(result, Heap::kNanValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Load the undefined value into the result register, which will
+  // trigger conversion.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ li(result, Operand(Smi::FromInt(0)));
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
+  }
+  VisitForAccumulatorValue(args->last());  // Function.
+
+  // InvokeFunction requires the function in a1. Move it in there.
+  __ mov(a1, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(a1, count, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  RegExpConstructResultStub stub;
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  Label done;
+  Label slow_case;
+  Register object = a0;
+  Register index1 = a1;
+  Register index2 = a2;
+  Register elements = a3;
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+
+  __ lw(object, MemOperand(sp, 2 * kPointerSize));
+  // Fetch the map and check whether the array is in the fast case.
+  // Check that the object doesn't require security checks and
+  // has no indexed interceptor.
+  __ GetObjectType(object, scratch1, scratch2);
+  __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
+  // Map is now in scratch1.
+
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+  __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
+
+  // Check the object's elements are in fast case and writable.
+  __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow_case, ne, scratch1, Operand(scratch2));
+
+  // Check that both indices are smis.
+  __ lw(index1, MemOperand(sp, 1 * kPointerSize));
+  __ lw(index2, MemOperand(sp, 0));
+  __ JumpIfNotBothSmi(index1, index2, &slow_case);
+
+  // Check that both indices are valid.
+  Label not_hi;
+  __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
+  __ Branch(&slow_case, ls, scratch1, Operand(index1));
+  __ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
+  __ Branch(&slow_case, ls, scratch1, Operand(index2));
+  __ bind(&not_hi);
+
+  // Bring the address of the elements into index1 and index2.
+  __ Addu(scratch1, elements,
+      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(index1, scratch1, index1);
+  __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(index2, scratch1, index2);
+
+  // Swap elements.
+  __ lw(scratch1, MemOperand(index1, 0));
+  __ lw(scratch2, MemOperand(index2, 0));
+  __ sw(scratch1, MemOperand(index2, 0));
+  __ sw(scratch2, MemOperand(index1, 0));
+
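+  // Update the write barrier for both stores, unless the elements array is in
+  // new space.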
+  Label new_space;
+  __ InNewSpace(elements, scratch1, eq, &new_space);
+  // Possible optimization: check that both values are smis
+  // (OR them together and test against the smi tag mask).
+
+  __ mov(scratch1, elements);
+  __ RecordWriteHelper(elements, index1, scratch2);
+  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+
+  __ bind(&new_space);
+  // We are done. Drop elements from the stack, and return undefined.
+  __ Drop(3);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&slow_case);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      isolate()->global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+    context()->Plug(v0);
+    return;
+  }
+
+  VisitForAccumulatorValue(args->at(1));
+
+  Register key = v0;
+  Register cache = a1;
+  __ lw(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ lw(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ lw(cache,
+         ContextOperand(
+             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ lw(cache,
+         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
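+  // Only the entry under the cache's finger is checked here: the key is
+  // stored at the finger offset and the cached value in the slot after it.
+  // All other lookups go through the runtime.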
+  Label done, not_found;
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // a2 now holds finger offset as a smi.
+  __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // a3 now points to the start of fixed array elements.
+  __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(a3, a3, at);
+  // a3 now points to key of indexed element of cache.
+  __ lw(a2, MemOperand(a3));
+  __ Branch(&not_found, ne, key, Operand(a2));
+
+  __ lw(v0, MemOperand(a3, kPointerSize));
+  __ Branch(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  Register right = v0;
+  Register left = a1;
+  Register tmp = a2;
+  Register tmp2 = a3;
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));  // Result (right) in v0.
+  __ pop(left);
+
+  Label done, fail, ok;
+  __ Branch(&ok, eq, left, Operand(right));
+  // Fail if either is a non-HeapObject.
+  __ And(tmp, left, Operand(right));
+  __ And(at, tmp, Operand(kSmiTagMask));
+  __ Branch(&fail, eq, at, Operand(zero_reg));
+  __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+  __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ Branch(&fail, ne, tmp, Operand(tmp2));
+  __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
+  __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
+  __ Branch(&ok, eq, tmp, Operand(tmp2));
+  __ bind(&fail);
+  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  __ jmp(&done);
+  __ bind(&ok);
+  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+  __ bind(&done);
+
+  context()->Plug(v0);
 }
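
In effect the code treats two values as equivalent regexps when they are the same object, or when both are JSRegExp instances sharing the same map and the same data array. A small C++ model with stand-in types (not V8 classes):

    // 'MapModel' and 'RegExpModel' are illustrative stand-ins only.
    struct MapModel {};
    struct RegExpModel {
      const MapModel* map;
      const void* data;      // Models the JSRegExp data slot.
      bool is_regexp;
    };

    bool IsRegExpEquivalentModel(const RegExpModel* left,
                                 const RegExpModel* right) {
      if (left == right) return true;                   // Identical objects.
      if (left == NULL || right == NULL) return false;  // Smi / missing value.
      if (!left->is_regexp) return false;               // Left must be a regexp.
      if (left->map != right->map) return false;        // Same map => same type.
      return left->data == right->data;                 // Same compiled data.
    }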
 
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotString(v0);
+  }
+
+  __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ IndexFromHash(v0, v0);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  Label bailout, done, one_char_separator, long_separator,
+      non_trivial_array, not_size_one_array, loop,
+      empty_separator_loop, one_char_separator_loop,
+      one_char_separator_loop_entry, long_separator_loop;
+
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(0));
+
+  // All aliases of the same register have disjoint lifetimes.
+  Register array = v0;
+  Register elements = no_reg;  // Will be v0.
+  Register result = no_reg;  // Will be v0.
+  Register separator = a1;
+  Register array_length = a2;
+  Register result_pos = no_reg;  // Will be a2.
+  Register string_length = a3;
+  Register string = t0;
+  Register element = t1;
+  Register elements_end = t2;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  Register scratch3 = t4;
+  Register scratch4 = v1;
+
+  // Separator operand is on the stack.
+  __ pop(separator);
+
+  // Check that the array is a JSArray.
+  __ JumpIfSmi(array, &bailout);
+  __ GetObjectType(array, scratch1, scratch2);
+  __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
+
+  // Check that the array has fast elements.
+  __ CheckFastElements(scratch1, scratch2, &bailout);
+
+  // If the array has length zero, return the empty string.
+  __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+  __ SmiUntag(array_length);
+  __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
+  __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+  __ Branch(&done);
+
+  __ bind(&non_trivial_array);
+
+  // Get the FixedArray containing array's elements.
+  elements = array;
+  __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+  array = no_reg;  // End of array's live range.
+
+  // Check that all array elements are sequential ASCII strings, and
+  // accumulate the sum of their lengths, as a smi-encoded value.
+  __ mov(string_length, zero_reg);
+  __ Addu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(elements_end, array_length, kPointerSizeLog2);
+  __ Addu(elements_end, element, elements_end);
+  // Loop condition: while (element < elements_end).
+  // Live values in registers:
+  //   elements: Fixed array of strings.
+  //   array_length: Length of the fixed array of strings (not smi)
+  //   separator: Separator string
+  //   string_length: Accumulated sum of string lengths (smi).
+  //   element: Current array element.
+  //   elements_end: Array end.
+  if (FLAG_debug_code) {
+    __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
+        array_length, Operand(zero_reg));
+  }
+  __ bind(&loop);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ JumpIfSmi(string, &bailout);
+  __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+  __ Branch(&loop, lt, element, Operand(elements_end));
+
+  // If array_length is 1, return elements[0], a string.
+  __ Branch(&not_size_one_array, ne, array_length, Operand(1));
+  __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+  __ Branch(&done);
+
+  __ bind(&not_size_one_array);
+
+  // Live values in registers:
+  //   separator: Separator string
+  //   array_length: Length of the array.
+  //   string_length: Sum of string lengths (smi).
+  //   elements: FixedArray of strings.
+
+  // Check that the separator is a flat ASCII string.
+  __ JumpIfSmi(separator, &bailout);
+  __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+  // Add (separator length times array_length) - separator length to the
+  // string_length to get the length of the result string. array_length is not
+  // smi but the other values are, so the result is a smi.
+  __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ Subu(string_length, string_length, Operand(scratch1));
+  __ Mult(array_length, scratch1);
+  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+  // zero.
+  __ mfhi(scratch2);
+  __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
+  __ mflo(scratch2);
+  __ And(scratch3, scratch2, Operand(0x80000000));
+  __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+  __ SmiUntag(string_length);
+
+  // Get first element in the array to free up the elements register to be used
+  // for the result.
+  __ Addu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  result = elements;  // End of live range for elements.
+  elements = no_reg;
+  // Live values in registers:
+  //   element: First array element
+  //   separator: Separator string
+  //   string_length: Length of result string (not smi)
+  //   array_length: Length of the array.
+  __ AllocateAsciiString(result,
+                         string_length,
+                         scratch1,
+                         scratch2,
+                         elements_end,
+                         &bailout);
+  // Prepare for looping. Set up elements_end to end of the array. Set
+  // result_pos to the position of the result where to write the first
+  // character.
+  __ sll(elements_end, array_length, kPointerSizeLog2);
+  __ Addu(elements_end, element, elements_end);
+  result_pos = array_length;  // End of live range for array_length.
+  array_length = no_reg;
+  __ Addu(result_pos,
+          result,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+  // Check the length of the separator.
+  __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ li(at, Operand(Smi::FromInt(1)));
+  __ Branch(&one_char_separator, eq, scratch1, Operand(at));
+  __ Branch(&long_separator, gt, scratch1, Operand(at));
+
+  // Empty separator case.
+  __ bind(&empty_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+
+  // Copy next array element to the result.
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  // One-character separator case.
+  __ bind(&one_char_separator);
+  // Replace separator with its ascii character value.
+  __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+  __ jmp(&one_char_separator_loop_entry);
+
+  __ bind(&one_char_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Single separator ascii char (in lower byte).
+
+  // Copy the separator character to the result.
+  __ sb(separator, MemOperand(result_pos));
+  __ Addu(result_pos, result_pos, 1);
+
+  // Copy next array element to the result.
+  __ bind(&one_char_separator_loop_entry);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  // Long separator case (separator is more than one character). Entry is at the
+  // label long_separator below.
+  __ bind(&long_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Separator string.
+
+  // Copy the separator to the result.
+  __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string,
+          separator,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+
+  __ bind(&long_separator);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  __ bind(&bailout);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ bind(&done);
+  context()->Plug(v0);
 }
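
Stripped of the smi/overflow checks and the three copy-loop variants, the fast path above amounts to: verify the inputs, compute the exact result size up front, allocate once, and copy. A plain C++ sketch of that structure (std::string stands in for the allocated SeqAsciiString):

    #include <string>
    #include <vector>

    // Size-then-copy model of the fast join. The real code bails out to the
    // generic runtime join unless every element is a flat ASCII string and no
    // length computation overflows; those checks are omitted here.
    std::string FastJoinModel(const std::vector<std::string>& elements,
                              const std::string& separator) {
      if (elements.empty()) return std::string();
      size_t length = separator.size() * (elements.size() - 1);
      for (size_t i = 0; i < elements.size(); ++i) length += elements[i].size();
      std::string result;
      result.reserve(length);          // Mirrors the single AllocateAsciiString.
      for (size_t i = 0; i < elements.size(); ++i) {
        if (i != 0) result.append(separator);
        result.append(elements[i]);
      }
      return result;
    }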
 
 
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  UNIMPLEMENTED_MIPS();
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+
+  if (expr->is_jsruntime()) {
+    // Prepare for calling JS runtime function.
+    __ lw(a0, GlobalObjectOperand());
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
+    __ push(a0);
+  }
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  if (expr->is_jsruntime()) {
+    // Call the JS runtime function.
+    __ li(a2, Operand(expr->name()));
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+    Handle<Code> ic =
+        isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+    __ Call(ic, mode, expr->id());
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // Call the C runtime function.
+    __ CallRuntime(expr->function(), arg_count);
+  }
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+      if (property != NULL) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+        __ push(a1);
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(v0);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
+        // Delete of an unqualified identifier is disallowed in strict mode
+        // but "delete this" is allowed.
+        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        if (var->IsUnallocated()) {
+          __ lw(a2, GlobalObjectOperand());
+          __ li(a1, Operand(var->name()));
+          __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+          __ Push(a2, a1, a0);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(v0);
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+          // Result of deleting non-global, non-dynamic variables is false.
+          // The subexpression does not have side effects.
+          context()->Plug(var->is_this());
+        } else {
+          // Non-global variable.  Call the runtime to try to delete from the
+          // context where the variable was introduced.
+          __ push(context_register());
+          __ li(a2, Operand(var->name()));
+          __ push(a2);
+          __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+          context()->Plug(v0);
+        }
+      } else {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        context()->Plug(true);
+      }
+      break;
+    }
+
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      VisitForEffect(expr->expression());
+      context()->Plug(Heap::kUndefinedValueRootIndex);
+      break;
+    }
+
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      if (context()->IsEffect()) {
+        // Unary NOT has no side effects so it's only necessary to visit the
+        // subexpression.  Match the optimizing compiler by not branching.
+        VisitForEffect(expr->expression());
+      } else {
+        Label materialize_true, materialize_false;
+        Label* if_true = NULL;
+        Label* if_false = NULL;
+        Label* fall_through = NULL;
+
+        // Notice that the labels are swapped.
+        context()->PrepareTest(&materialize_true, &materialize_false,
+                               &if_false, &if_true, &fall_through);
+        if (context()->IsTest()) ForwardBailoutToChild(expr);
+        VisitForControl(expr->expression(), if_true, if_false, fall_through);
+        context()->Plug(if_false, if_true);  // Labels swapped.
+      }
+      break;
+    }
+
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      { StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
+      __ CallRuntime(Runtime::kTypeof, 1);
+      context()->Plug(v0);
+      break;
+    }
+
+    case Token::ADD: {
+      Comment cmt(masm_, "[ UnaryOperation (ADD)");
+      VisitForAccumulatorValue(expr->expression());
+      Label no_conversion;
+      __ JumpIfSmi(result_register(), &no_conversion);
+      __ mov(a0, result_register());
+      ToNumberStub convert_stub;
+      __ CallStub(&convert_stub);
+      __ bind(&no_conversion);
+      context()->Plug(result_register());
+      break;
+    }
+
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+      break;
+
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
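
The ADD (unary '+') case above only calls ToNumber when the operand is not already a smi. As integer arithmetic on the tagged representation (the mask below assumes the usual 32-bit smi encoding with tag bit 0):

    #include <cstdint>

    // 'to_number' models the ToNumberStub call; a smi passes through unchanged.
    int32_t UnaryPlusModel(int32_t tagged_value,
                           int32_t (*to_number)(int32_t)) {
      const int32_t kSmiTagMaskModel = 1;  // Assumed: smis have a clear low bit.
      if ((tagged_value & kSmiTagMaskModel) == 0) return tagged_value;
      return to_number(tagged_value);
    }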
+
+
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in a0.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  __ mov(a0, result_register());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CountOperation");
+  SetSourcePosition(expr->position());
+
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
+  // Expression can only be a property, a global or a (parameter or local)
+  // slot.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->expression()->AsProperty();
+  // In case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate expression and get value.
+  if (assign_type == VARIABLE) {
+    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
+  } else {
+    // Reserve space for result of postfix operation.
+    if (expr->is_postfix() && !context()->IsEffect()) {
+      __ li(at, Operand(Smi::FromInt(0)));
+      __ push(at);
+    }
+    if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForAccumulatorValue(prop->obj());
+      __ push(v0);
+      EmitNamedPropertyLoad(prop);
+    } else {
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ lw(a1, MemOperand(sp, 0));
+      __ push(v0);
+      EmitKeyedPropertyLoad(prop);
+    }
+  }
+
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load has a side effect.
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
+  }
+
+  // Call ToNumber only if operand is not a smi.
+  Label no_conversion;
+  __ JumpIfSmi(v0, &no_conversion);
+  __ mov(a0, v0);
+  ToNumberStub convert_stub;
+  __ CallStub(&convert_stub);
+  __ bind(&no_conversion);
+
+  // Save result for postfix expressions.
+  if (expr->is_postfix()) {
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(v0);
+          break;
+        case NAMED_PROPERTY:
+          __ sw(v0, MemOperand(sp, kPointerSize));
+          break;
+        case KEYED_PROPERTY:
+          __ sw(v0, MemOperand(sp, 2 * kPointerSize));
+          break;
+      }
+    }
+  }
+  __ mov(a0, result_register());
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
+  __ li(a1, Operand(Smi::FromInt(count_value)));
+
+  if (ShouldInlineSmiCase(expr->op())) {
+    __ AdduAndCheckForOverflow(v0, a0, a1, t0);
+    __ BranchOnOverflow(&stub_call, t0);  // Do stub on overflow.
+
+    // We could eliminate this smi check if we split the code at
+    // the first smi check before calling ToNumber.
+    patch_site.EmitJumpIfSmi(v0, &done);
+    __ bind(&stub_call);
+  }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
+  BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  patch_site.EmitPatchInfo();
+  __ bind(&done);
+
+  // Store the value returned in v0.
+  switch (assign_type) {
+    case VARIABLE:
+      if (expr->is_postfix()) {
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(v0);
+        }
+        // For all contexts except the effect context we have the result on
+        // top of the stack.
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(v0);
+      }
+      break;
+    case NAMED_PROPERTY: {
+      __ mov(a0, result_register());  // Value.
+      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));  // Name.
+      __ pop(a1);  // Receiver.
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ mov(a0, result_register());  // Value.
+      __ pop(a1);  // Key.
+      __ pop(a2);  // Receiver.
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+  }
 }
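
The inlined smi case above works directly on the tagged value: Smi::FromInt(+1 or -1) is added to the operand, and the stub is taken on overflow or a non-smi operand. A simplified C++ model of that fast path, with the tag layout assumed as in the sketch earlier (payload shifted left by one):

    #include <cstdint>

    // Returns true and writes the tagged result when the fast path applies;
    // returns false where the BinaryOpStub fallback would be taken.
    bool SmiCountFastPathModel(int32_t tagged_operand, int delta /* +1 or -1 */,
                               int32_t* tagged_result) {
      if ((tagged_operand & 1) != 0) return false;           // Not a smi.
      int64_t sum = static_cast<int64_t>(tagged_operand) +
                    (static_cast<int64_t>(delta) << 1);      // Tagged delta.
      if (sum < INT32_MIN || sum > INT32_MAX) return false;  // 32-bit overflow.
      *tagged_result = static_cast<int32_t>(sum);
      return true;
    }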
 
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!context()->IsEffect());
+  ASSERT(!context()->IsTest());
+  VariableProxy* proxy = expr->AsVariableProxy();
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
+    Comment cmnt(masm_, "Global variable");
+    __ lw(a0, GlobalObjectOperand());
+    __ li(a2, Operand(proxy->name()));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    // Use a regular load, not a contextual load, to avoid a reference
+    // error.
+    __ Call(ic);
+    PrepareForBailout(expr, TOS_REG);
+    context()->Plug(v0);
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    __ li(a0, Operand(proxy->name()));
+    __ Push(cp, a0);
+    __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
+    __ bind(&done);
+
+    context()->Plug(v0);
+  } else {
+    // This expression cannot throw a reference error at the top level.
+    VisitInCurrentContext(expr);
+  }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(expr);
+  }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  if (check->Equals(isolate()->heap()->number_symbol())) {
+    __ JumpIfSmi(v0, if_true);
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->string_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => false.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg),
+          if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (FLAG_harmony_typeof &&
+             check->Equals(isolate()->heap()->null_symbol())) {
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => true.
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->function_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    __ GetObjectType(v0, a1, v0);  // Leave map in a1.
+    Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
+  } else if (check->Equals(isolate()->heap()->object_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    if (!FLAG_harmony_typeof) {
+      __ LoadRoot(at, Heap::kNullValueRootIndex);
+      __ Branch(if_true, eq, v0, Operand(at));
+    }
+    // Check for JS objects => true.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Check for undetectable objects => false.
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else {
+    if (if_false != fall_through) __ jmp(if_false);
+  }
 }
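
Reduced to the value categories it distinguishes, the dispatch above behaves like the following C++ sketch (the enum is illustrative; FLAG_harmony_typeof only affects how null is classified):

    #include <string>

    enum ValueKindModel {
      kSmiV, kHeapNumberV, kStringV, kTrueV, kFalseV,
      kNullV, kUndefinedV, kUndetectableV, kCallableV, kPlainObjectV
    };

    bool TypeofCheckModel(const std::string& check, ValueKindModel v,
                          bool harmony_typeof) {
      if (check == "number")    return v == kSmiV || v == kHeapNumberV;
      if (check == "string")    return v == kStringV;
      if (check == "boolean")   return v == kTrueV || v == kFalseV;
      if (check == "null")      return harmony_typeof && v == kNullV;
      if (check == "undefined") return v == kUndefinedV || v == kUndetectableV;
      if (check == "function")  return v == kCallableV;
      if (check == "object")    return v == kPlainObjectV ||
                                       (!harmony_typeof && v == kNullV);
      return false;  // Any other literal can never match.
    }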
 
 
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
-                                          Expression* left,
-                                          Expression* right,
-                                          Label* if_true,
-                                          Label* if_false,
-                                          Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  Split(eq, v0, Operand(at), if_true, if_false, fall_through);
 }
 
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CompareOperation");
+  SetSourcePosition(expr->position());
+
+  // Always perform the comparison for its control flow.  Pack the result
+  // into the expression's context after the comparison is performed.
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
+
+  Token::Value op = expr->op();
+  VisitForStackValue(expr->left());
+  switch (op) {
+    case Token::IN:
+      VisitForStackValue(expr->right());
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+      Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
+      break;
+
+    case Token::INSTANCEOF: {
+      VisitForStackValue(expr->right());
+      InstanceofStub stub(InstanceofStub::kNoFlags);
+      __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      // The stub returns 0 for true.
+      Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+      break;
+    }
+
+    default: {
+      VisitForAccumulatorValue(expr->right());
+      Condition cc = eq;
+      switch (op) {
+        case Token::EQ_STRICT:
+        case Token::EQ:
+          cc = eq;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::LT:
+          cc = lt;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = lt;
+          __ mov(a1, result_register());
+          __ pop(a0);
+          break;
+        case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = ge;
+          __ mov(a1, result_register());
+          __ pop(a0);
+          break;
+        case Token::GTE:
+          cc = ge;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::IN:
+        case Token::INSTANCEOF:
+        default:
+          UNREACHABLE();
+      }
+
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
+      if (inline_smi_code) {
+        Label slow_case;
+        __ Or(a2, a0, Operand(a1));
+        patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+        Split(cc, a1, Operand(a0), if_true, if_false, NULL);
+        __ bind(&slow_case);
+      }
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      patch_site.EmitPatchInfo();
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
+    }
+  }
+
+  // Convert the result of the comparison into one expected for this
+  // expression's context.
+  context()->Plug(if_true, if_false);
 }
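
The GT and LTE cases above swap the operand registers because the compare IC only implements the lt/ge style conditions and converts its inputs in a fixed order; mirroring the operands keeps the ECMA-262 left-to-right conversion order observable. As plain arithmetic:

    // Illustrative only: GT and LTE are expressed through the mirrored
    // condition so the generic comparison sees (right, left).
    bool LessThanModel(int left, int right)     { return left < right; }
    bool GreaterThanModel(int left, int right)  { return LessThanModel(right, left); }
    bool LessEqualModel(int left, int right)    { return right >= left; }
    bool GreaterEqualModel(int left, int right) { return left >= right; }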
 
 
 void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CompareToNull");
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ mov(a0, result_register());
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  if (expr->is_strict()) {
+    Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
+  } else {
+    __ Branch(if_true, eq, a0, Operand(a1));
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ Branch(if_true, eq, a0, Operand(a1));
+    __ And(at, a0, Operand(kSmiTagMask));
+    __ Branch(if_false, eq, at, Operand(zero_reg));
+    // It can be an undetectable object.
+    __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  }
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  context()->Plug(v0);
 }
 
 
 Register FullCodeGenerator::result_register() {
-  UNIMPLEMENTED_MIPS();
   return v0;
 }
 
 
 Register FullCodeGenerator::context_register() {
-  UNIMPLEMENTED_MIPS();
   return cp;
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ sw(value, MemOperand(fp, frame_offset));
 }
 
 
 void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(dst, ContextOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* declaration_scope = scope()->DeclarationScope();
+  if (declaration_scope->is_global_scope()) {
+    // Contexts nested in the global context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code.  Pass a smi sentinel and let the runtime look up the empty
+    // function.
+    __ li(at, Operand(Smi::FromInt(0)));
+  } else if (declaration_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code.  Fetch it from the context.
+    __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+  } else {
+    ASSERT(declaration_scope->is_function_scope());
+    __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+  __ push(at);
 }
 
 
@@ -711,12 +4189,56 @@
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!result_register().is(a1));
+  // Store result register while executing finally block.
+  __ push(result_register());
+  // Cook the return address in ra to the stack (smi-encoded Code* delta).
+  __ Subu(a1, ra, Operand(masm_->CodeObject()));
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(0 == kSmiTag);
+  __ Addu(a1, a1, Operand(a1));  // Convert to smi.
+  __ push(a1);
 }
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!result_register().is(a1));
+  // Restore the cooked return address from the stack.
+  __ pop(a1);
+  // Restore the result register from the stack.
+  __ pop(result_register());
+  // Uncook the return address and return.
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  __ sra(a1, a1, 1);  // Un-smi-tag value.
+  __ Addu(at, a1, Operand(masm_->CodeObject()));
+  __ Jump(at);
+}
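
The cooking arithmetic in EnterFinallyBlock/ExitFinallyBlock, written out as plain integers: the return address is stored as a smi-encoded offset from the code object so the GC can relocate the code while the value sits on the stack. A sketch, where 'code_object_base' stands in for masm_->CodeObject():

    #include <cstdint>

    uintptr_t CookReturnAddress(uintptr_t ra, uintptr_t code_object_base) {
      uintptr_t delta = ra - code_object_base;
      return delta << 1;                        // Smi-encode (tag 0, tag size 1).
    }

    uintptr_t UncookReturnAddress(uintptr_t cooked, uintptr_t code_object_base) {
      return (cooked >> 1) + code_object_base;  // Un-smi-tag and rebase.
    }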
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+    int* stack_depth,
+    int* context_length) {
+  // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally
+  // code, we can restore it directly from there for the finally code
+  // rather than iteratively unwinding contexts via their previous
+  // links.
+  __ Drop(*stack_depth);  // Down to the handler block.
+  if (*context_length > 0) {
+    // Restore the context to its dedicated register and the stack.
+    __ lw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ PopTryHandler();
+  __ Call(finally_entry_);
+
+  *stack_depth = 0;
+  *context_length = 0;
+  return previous_;
 }
 
 
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index fa8a7bb..a76c215 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "code-stubs.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -48,52 +48,674 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+                                            Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+                                                  Register receiver,
+                                                  Register elements,
+                                                  Register scratch0,
+                                                  Register scratch1,
+                                                  Label* miss) {
+  // Register usage:
+  //   receiver: holds the receiver on entry and is unchanged.
+  //   elements: holds the property dictionary on fall through.
+  // Scratch registers:
+  //   scratch0: used to hold the receiver map.
+  //   scratch1: used to hold the receiver instance type, receiver bit mask
+  //     and elements map.
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the receiver is a valid JS object.
+  __ GetObjectType(receiver, scratch0, scratch1);
+  __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // If this assert fails, we have to check the upper bound too.
+  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
+
+  // Check that the global object does not require access checks.
+  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
+  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+                           (1 << Map::kHasNamedInterceptor)));
+  __ Branch(miss, ne, scratch1, Operand(zero_reg));
+
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
+  __ Branch(miss, ne, scratch1, Operand(scratch0));
+}
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. It may be the same register as elements or
+//           name, in which case that register is clobbered on the non-miss
+//           path.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The entry address left in scratch2 by
+// StringDictionaryLookupStub::GeneratePositiveLookup() is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss,
+                                   Register elements,
+                                   Register name,
+                                   Register result,
+                                   Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at,
+         scratch1,
+         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  __ lw(result,
+        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
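
Each dictionary entry is a (key, value, details) triple, and the load only succeeds when the details word marks the property as a NORMAL one. A small C++ model; the mask and encoding below are stand-ins for the real PropertyDetails fields:

    #include <cstdint>

    struct DictEntryModel {
      uint32_t key;      // A symbol, in the real dictionary.
      int32_t value;
      uint32_t details;  // Smi-tagged bit field: type, attributes, index.
    };

    const uint32_t kTypeMaskModel = 0x7u << 1;  // Assumed TypeField position.

    // Returns false where the generated code would jump to the miss label.
    bool DictionaryLoadModel(const DictEntryModel& entry, int32_t* result) {
      if ((entry.details & kTypeMaskModel) != 0) return false;  // Not NORMAL.
      *result = entry.value;
      return true;
    }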
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The entry address left in scratch2 by
+// StringDictionaryLookupStub::GeneratePositiveLookup() is used.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+                                    Label* miss,
+                                    Register elements,
+                                    Register name,
+                                    Register value,
+                                    Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ sw(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1);
+}
+
+
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
 void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
+                                         support_wrappers);
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register map,
+                                           Register scratch,
+                                           int interceptor_bit,
+                                           Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ Branch(slow, ne, at, Operand(zero_reg));
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+                                  Register receiver,
+                                  Register key,
+                                  Register elements,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  Register result,
+                                  Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used to hold elements map and elements length.
+  //            Holds the elements map if not_fast_array branch is taken.
+  //
+  // scratch2 - used to hold the loaded value.
+
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode (not dictionary).
+    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(not_fast_array, ne, scratch1, Operand(at));
+  } else {
+    __ AssertFastElements(elements);
+  }
+
+  // Check that the key (index) is within bounds.
+  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+  // Fast case: Do the load.
+  __ Addu(scratch1, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // The key is a smi.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(at, at, scratch1);
+  __ lw(scratch2, MemOperand(at));
+
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ Branch(out_of_range, eq, scratch2, Operand(at));
+  __ mov(result, scratch2);
+}
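
Condensed to its checks, the fast keyed load is: fast-mode elements, index in bounds, and the slot must not hold the hole (a hole means the prototype chain has to be consulted). A plain C++ model:

    #include <cstdint>
    #include <vector>

    // 'hole' stands in for the the_hole sentinel; false means the caller takes
    // the not_fast_array / out_of_range path and ends up in the generic lookup.
    bool FastArrayLoadModel(const std::vector<int32_t>& elements,
                            bool fast_mode, size_t key, int32_t hole,
                            int32_t* result) {
      if (!fast_mode) return false;              // Dictionary-mode elements.
      if (key >= elements.size()) return false;  // Out of range.
      if (elements[key] == hole) return false;   // Hole: consult GetProperty.
      *result = elements[key];
      return true;
    }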
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if the key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+                                   Register key,
+                                   Register map,
+                                   Register hash,
+                                   Label* index_string,
+                                   Label* not_symbol) {
+  // The key is not a smi.
+  // Is it a string?
+  __ GetObjectType(key, map, hash);
+  __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+
+  // Is the string an array index, with cached numeric value?
+  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+  __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(index_string, eq, at, Operand(zero_reg));
+
+  // Is the string a symbol?
+  // map: key map
+  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(at, hash, Operand(kIsSymbolMask));
+  __ Branch(not_symbol, eq, at, Operand(zero_reg));
 }
 
 
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);
 
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a1    : receiver
+  //  -- a2    : name
+  // -----------------------------------
+  Label number, non_number, non_string, boolean, probe, miss;
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         MONOMORPHIC,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  // If the stub cache probing failed, the receiver might be a value.
+  // For value objects, the stub cache is keyed on the map of the prototype
+  // of the corresponding wrapper (JSValue), so load that prototype into the
+  // receiver register and probe again with it.
+  //
+  // Check for number.
+  __ JumpIfSmi(a1, &number, t1);
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
+  __ bind(&number);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::NUMBER_FUNCTION_INDEX, a1);
+  __ Branch(&probe);
+
+  // Check for string.
+  __ bind(&non_number);
+  __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::STRING_FUNCTION_INDEX, a1);
+  __ Branch(&probe);
+
+  // Check for boolean.
+  __ bind(&non_string);
+  __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+  __ Branch(&boolean, eq, a1, Operand(t0));
+  __ LoadRoot(t1, Heap::kFalseValueRootIndex);
+  __ Branch(&miss, ne, a1, Operand(t1));
+  __ bind(&boolean);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
+
+  // Probe the stub cache for the value object.
+  __ bind(&probe);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  __ bind(&miss);
 }
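
When the first stub-cache probe misses and the receiver is a primitive, the code re-probes with the prototype of the matching wrapper function. The mapping, as an illustrative C++ switch (the enums are stand-ins for the real type checks):

    enum ReceiverKindModel { kSmiR, kHeapNumberR, kStringR, kBooleanR, kOtherR };
    enum ProbeTargetModel { kNumberPrototypeP, kStringPrototypeP,
                            kBooleanPrototypeP, kMissP };

    ProbeTargetModel SecondProbeTarget(ReceiverKindModel kind) {
      switch (kind) {
        case kSmiR:
        case kHeapNumberR: return kNumberPrototypeP;
        case kStringR:     return kStringPrototypeP;
        case kBooleanR:    return kBooleanPrototypeP;
        default:           return kMissP;  // Fall through to the miss handler.
      }
    }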
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+                                     int argc,
+                                     Label* miss,
+                                     Register scratch) {
+  // a1: function
+
+  // Check that the value isn't a smi.
+  __ JumpIfSmi(a1, miss);
+
+  // Check that the value is a JSFunction.
+  __ GetObjectType(a1, scratch, scratch);
+  __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(a1, actual, JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+
+  // a0: elements
+  // Search the dictionary - put result in register a1.
+  GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
+
+  GenerateFunctionTailCall(masm, argc, &miss, t0);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+}
+
+
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  if (id == IC::kCallIC_Miss) {
+    __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
+  } else {
+    __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
+  }
+
+  // Get the receiver of the function from the stack.
+  __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ Push(a3, a2);
+
+  // Call the entry.
+  __ li(a0, Operand(2));
+  __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+
+  // Move result to a1 and leave the internal frame.
+  __ mov(a1, v0);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  // This can happen only for regular CallIC but not KeyedCallIC.
+  if (id == IC::kCallIC_Miss) {
+    Label invoke, global;
+    __ lw(a2, MemOperand(sp, argc * kPointerSize));
+    __ andi(t0, a2, kSmiTagMask);
+    __ Branch(&invoke, eq, t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, a3);
+    __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+    __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+
+    // Patch the receiver on the stack.
+    __ bind(&global);
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a2, MemOperand(sp, argc * kPointerSize));
+    __ bind(&invoke);
+  }
+  // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  ParameterCount actual(argc);
+  __ InvokeFunction(a1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
+}
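// Illustrative host-C++ sketch, not part of the patch: the receiver patching
// done for CallIC misses above.  When the receiver on the stack is a global or
// builtins object, it is replaced with that object's global receiver before
// invoking the function; KeyedCallIC misses skip this step.  The struct is a
// hypothetical stand-in for the tagged object the stub inspects in place.
struct GlobalLikeObjectSketch {
  bool is_global_or_builtins;
  int global_receiver;  // stands in for the object's global receiver field
};

int PatchCallReceiver(const GlobalLikeObjectSketch& receiver, int raw_receiver) {
  if (receiver.is_global_or_builtins) return receiver.global_receiver;
  return raw_receiver;  // ordinary receivers are left untouched
}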
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+}
+
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
 void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
 void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  Label do_call, slow_call, slow_load, slow_reload_receiver;
+  Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+  Label index_smi, index_string;
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(a2, &check_string);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
+
+  GenerateFastArrayLoad(
+      masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
+
+  __ bind(&do_call);
+  // receiver in a1 is not used after this point.
+  // a2: key
+  // a1: function
+
+  GenerateFunctionTailCall(masm, argc, &slow_call, a0);
+
+  __ bind(&check_number_dictionary);
+  // a2: key
+  // a3: elements map
+  // t0: elements pointer
+  // Check whether the elements is a number dictionary.
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow_load, ne, a3, Operand(at));
+  __ sra(a0, a2, kSmiTagSize);
+  // a0: untagged index
+  __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
+  __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
+  __ jmp(&do_call);
+
+  __ bind(&slow_load);
+  // This branch is taken when calling KeyedCallIC_Miss is neither required
+  // nor beneficial.
+  __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
+  __ EnterInternalFrame();
+  __ push(a2);  // Save the key.
+  __ Push(a1, a2);  // Pass the receiver and the key.
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(a2);  // Restore the key.
+  __ LeaveInternalFrame();
+  __ mov(a1, v0);
+  __ jmp(&do_call);
+
+  __ bind(&check_string);
+  GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+
+  // The key is known to be a symbol.
+  // If the receiver is a regular JS object with slow properties then do
+  // a quick inline probe of the receiver's dictionary.
+  // Otherwise do the monomorphic cache probe.
+  GenerateKeyedLoadReceiverCheck(
+      masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
+
+  __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
+
+  GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
+  __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
+  __ jmp(&do_call);
+
+  __ bind(&lookup_monomorphic_cache);
+  __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
+  // Fall through on miss.
+
+  __ bind(&slow_call);
+  // This branch is taken if:
+  // - the receiver requires boxing or access check,
+  // - the key is neither smi nor symbol,
+  // - the value loaded is not a function,
+  // - there is hope that the runtime will create a monomorphic call stub,
+  //   that will get fetched next time.
+  __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
+  GenerateMiss(masm, argc);
+
+  __ bind(&index_string);
+  __ IndexFromHash(a3, a2);
+  // Now jump to the place where smi keys are handled.
+  __ jmp(&index_smi);
 }
 
 
 void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Check if the name is a string.
+  Label miss;
+  __ JumpIfSmi(a2, &miss);
+  __ IsObjectJSStringType(a2, a0, &miss);
+
+  GenerateCallNormal(masm, argc);
+  __ bind(&miss);
+  GenerateMiss(masm, argc);
 }
 
 
@@ -101,122 +723,790 @@
 Object* LoadIC_Miss(Arguments args);
 
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a0, a2, a3, t0, t1);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+
+  // a1: elements
+  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+  __ Ret();
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);
+
+  __ mov(a3, a0);
+  __ Push(a3, a2);
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+                                                Register object,
+                                                Register key,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                Label* unmapped_case,
+                                                Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the map check
+  // later, we do not need to check for interceptors or whether it
+  // requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ GetObjectType(object, scratch1, scratch2);
+  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+  // Check that the key is a positive smi.
+  __ And(scratch1, key, Operand(0x80000001));
+  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+  // Load element index and check whether it is the hole.
+  const int kOffset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+  __ li(scratch3, Operand(kPointerSize >> 1));
+  __ mul(scratch3, key, scratch3);
+  __ Addu(scratch3, scratch3, Operand(kOffset));
+
+  __ Addu(scratch2, scratch1, scratch3);
+  __ lw(scratch2, MemOperand(scratch2));
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  __ li(scratch3, Operand(kPointerSize >> 1));
+  __ mul(scratch3, scratch2, scratch3);
+  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+  __ Addu(scratch2, scratch1, scratch3);
+  return MemOperand(scratch2);
 }
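// Illustrative host-C++ sketch, not part of the patch: a model of the aliased
// ("non-strict") arguments lookup that the mapped/unmapped helpers above
// generate.  ParameterMapSketch and its fields are hypothetical; in the stubs
// the same data lives in a FixedArray whose slot 0 holds the context, slot 1
// the arguments backing store, and slots 2..n a context index or the-hole.
#include <cstddef>
#include <optional>
#include <vector>

struct ParameterMapSketch {
  std::vector<int> context;                      // slot 0: context values
  std::vector<int> backing_store;                // slot 1: unmapped arguments
  std::vector<std::optional<int>> mapped_slots;  // slots 2..n: context indices
};

std::optional<int> LoadArgument(const ParameterMapSketch& map, size_t key) {
  if (key < map.mapped_slots.size() && map.mapped_slots[key].has_value()) {
    // Mapped case: the slot stores an index into the context.
    return map.context[*map.mapped_slots[key]];
  }
  if (key < map.backing_store.size()) {
    // Unmapped case: read straight from the backing store.
    return map.backing_store[key];
  }
  return std::nullopt;  // out of range: slow case / runtime call
}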
 
 
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+              DONT_DO_SMI_CHECK);
+  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
+  __ li(scratch, Operand(kPointerSize >> 1));
+  __ mul(scratch, key, scratch);
+  __ Addu(scratch,
+          scratch,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(scratch, backing_store, scratch);
+  return MemOperand(scratch);
 }
 
 
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow, notin;
+  MemOperand mapped_location =
+      GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
+  __ lw(v0, mapped_location);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in a2.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
+  __ lw(a2, unmapped_location);
+  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+  __ Branch(&slow, eq, a2, Operand(a3));
+  __ mov(v0, a2);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+  Label slow, notin;
+  MemOperand mapped_location =
+      GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
+  __ sw(a0, mapped_location);
+  // Verify that mapped_location is a register-based MemOperand with no offset.
+  ASSERT_EQ(mapped_location.offset(), 0);
+  __ RecordWrite(a3, mapped_location.rm(), t5);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in a3.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
+  __ sw(a0, unmapped_location);
+  ASSERT_EQ(unmapped_location.offset(), 0);
+  __ RecordWrite(a3, unmapped_location.rm(), t5);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+                                             int argc) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label slow, notin;
+  // Load receiver.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+  MemOperand mapped_location =
+      GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
+  __ lw(a1, mapped_location);
+  GenerateFunctionTailCall(masm, argc, &slow, a3);
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in a3.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
+  __ lw(a1, unmapped_location);
+  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+  __ Branch(&slow, eq, a1, Operand(a3));
+  GenerateFunctionTailCall(masm, argc, &slow, a3);
+  __ bind(&slow);
+  GenerateMiss(masm, argc);
 }
 
 
 Object* KeyedLoadIC_Miss(Arguments args);
 
 
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+
+  __ Push(a1, a0);
+
+  // Perform tail call to the entry.
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow, check_string, index_smi, index_string, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = a0;
+  Register receiver = a1;
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_string);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(a2, a3, &check_number_dictionary);
+
+  GenerateFastArrayLoad(
+      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
+
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+
+  // Check whether the elements is a number dictionary.
+  // a0: key
+  // a3: elements map
+  // t0: elements
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow, ne, a3, Operand(at));
+  __ sra(a2, a0, kSmiTagSize);
+  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
+  __ Ret();
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+                      1,
+                      a2,
+                      a3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_string);
+  GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
+
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&probe_dictionary, eq, t0, Operand(at));
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
+  __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ sra(at, t0, String::kHashShift);
+  __ xor_(a3, a3, at);
+  __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
+
+  // Load the key (consisting of map and symbol) from the cache and
+  // check for match.
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+  __ li(t0, Operand(cache_keys));
+  __ sll(at, a3, kPointerSizeLog2 + 1);
+  __ addu(t0, t0, at);
+  __ lw(t1, MemOperand(t0));  // Load the cached map.
+  __ Addu(t0, t0, Operand(kPointerSize));  // Advance t0 to the cached symbol.
+  __ Branch(&slow, ne, a2, Operand(t1));
+  __ lw(t1, MemOperand(t0));
+  __ Branch(&slow, ne, a0, Operand(t1));
+
+  // Get field offset.
+  // a0     : key
+  // a1     : receiver
+  // a2     : receiver's map
+  // a3     : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+  __ li(t0, Operand(cache_field_offsets));
+  __ sll(at, a3, kPointerSizeLog2);
+  __ addu(at, t0, at);
+  __ lw(t1, MemOperand(at));
+  __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+  __ Subu(t1, t1, t2);
+  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
+  // Load in-object property.
+  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ addu(t2, t2, t1);  // Index from start of object.
+  __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
+  __ sll(at, t2, kPointerSizeLog2);
+  __ addu(at, a1, at);
+  __ lw(v0, MemOperand(at));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ sll(t0, t1, kPointerSizeLog2);
+  __ Addu(t0, t0, a1);
+  __ lw(v0, MemOperand(t0));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // a1: receiver
+  // a0: key
+  // a3: elements
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
+  // Load the property to v0.
+  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  __ bind(&index_string);
+  __ IndexFromHash(a3, key);
+  // Now jump to the place where smi keys are handled.
+  __ Branch(&index_smi);
 }
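// Illustrative host-C++ sketch, not part of the patch: the keyed lookup cache
// probed above through cache_keys / cache_field_offsets.  Capacity, shift
// amounts and names are placeholders; the real cache keys entries by a hash of
// the receiver map and the string's hash field and keeps the matching field
// offsets in a parallel table indexed by the same hash.
#include <cstdint>
#include <optional>

struct KeyedLookupCacheSketch {
  static const int kCapacity = 64;  // must be a power of two
  struct Key { uintptr_t map; uintptr_t symbol; };
  Key keys_[kCapacity] = {};
  int field_offsets_[kCapacity] = {};

  static int Hash(uintptr_t map, uint32_t name_hash_field) {
    // Mix the map pointer with the string hash (the assembly uses
    // kMapHashShift / kHashShift), then mask to the table capacity.
    return static_cast<int>(((map >> 2) ^ (name_hash_field >> 2)) &
                            (kCapacity - 1));
  }

  std::optional<int> Lookup(uintptr_t map, uintptr_t symbol,
                            uint32_t name_hash_field) const {
    int index = Hash(map, name_hash_field);
    if (keys_[index].map == map && keys_[index].symbol == symbol) {
      return field_offsets_[index];  // hit: property offset for this map/name
    }
    return std::nullopt;             // miss: fall through to the slow path
  }
};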
 
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key (index)
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label miss;
+
+  Register receiver = a1;
+  Register index = a0;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+  Register result = v0;
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm, false);
 }
 
 
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                               StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(a2, a1, a0);
+  __ li(a1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes.
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                    StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label slow, fast, array, extra, exit;
+
+  // Register usage.
+  Register value = a0;
+  Register key = a1;
+  Register receiver = a2;
+  Register elements = a3;  // Elements array of the receiver.
+  // t0 is used the way ip is used in the ARM version.
+  // t3-t4 are used as temporaries.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+
+  // Get the map of the object.
+  __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+  // Check if the object is a JS array or not.
+  __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+  __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+  // Check that the object is some kind of JSObject.
+  __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
+  __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+
+  // Object case: Check key against length in the elements array.
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check that the object is in fast mode and writable.
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&fast, lo, key, Operand(t0));
+  // Fall through to the slow case if the index >= length.
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+
+  // Entry registers are intact.
+  // a0: value.
+  // a1: key.
+  // a2: receiver.
+
+  GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+
+  __ bind(&extra);
+  // Only support writing to array[array.length].
+  __ Branch(&slow, ne, key, Operand(t0));
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&slow, hs, key, Operand(t0));
+  // Calculate key + 1 as smi.
+  STATIC_ASSERT(0 == kSmiTag);
+  __ Addu(t3, key, Operand(Smi::FromInt(1)));
+  __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&fast);
+
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is the length is always a smi.
+
+  __ bind(&array);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
+
+  // Check the key against the length in the array.
+  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&extra, hs, key, Operand(t0));
+  // Fall through to fast case.
+
+  __ bind(&fast);
+  // Fast case, store the value to the elements backing store.
+  __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t4, t4, Operand(t1));
+  __ sw(value, MemOperand(t4));
+  // Skip write barrier if the written value is a smi.
+  __ JumpIfSmi(value, &exit);
+
+  // Update write barrier for the elements array address.
+  __ Subu(t3, t4, Operand(elements));
+
+  __ RecordWrite(elements, Operand(t3), t4, t5);
+  __ bind(&exit);
+
+  __ mov(v0, a0);  // Return the value written.
+  __ Ret();
 }
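// Illustrative host-C++ sketch, not part of the patch: a simplified outline of
// the control flow in the generic keyed store above.  StoreOutcome and the
// boolean parameters are hypothetical stand-ins; the stub performs the same
// checks on tagged values and jumps between the fast/extra/array/slow labels
// instead of returning an enum, and it also bails out for access-checked
// receivers and proxies (not modelled here).
#include <cstdint>

enum class StoreOutcome { kFastStore, kGrowAndStore, kSlowRuntime };

StoreOutcome ClassifyKeyedStore(bool key_is_smi, bool receiver_is_js_array,
                                bool elements_are_fast_fixed_array,
                                uint32_t index, uint32_t elements_length,
                                uint32_t array_length) {
  if (!key_is_smi || !elements_are_fast_fixed_array) {
    return StoreOutcome::kSlowRuntime;
  }
  if (!receiver_is_js_array) {
    // Plain JSObject: only in-bounds writes take the fast path.
    return index < elements_length ? StoreOutcome::kFastStore
                                   : StoreOutcome::kSlowRuntime;
  }
  if (index < array_length) return StoreOutcome::kFastStore;
  // Writing exactly at array.length with spare capacity grows the array by one.
  if (index == array_length && index < elements_length) {
    return StoreOutcome::kGrowAndStore;
  }
  return StoreOutcome::kSlowRuntime;
}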
 
 
 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a1, &slow);
+
+  // Check that the key is an array index, that is Uint32.
+  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+  // Get the map of the receiver.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
+  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
+  // Everything is fine, call runtime.
+  __ Push(a1, a0);  // Receiver, key.
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(ExternalReference(
+       IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(a2, a1, a0);
+
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  // We can't use MultiPush as the order of the registers is important.
+  __ Push(a2, a1, a0);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver from the stack and probe the stub cache.
+  Code::Flags flags =
+      Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  __ Push(a1, a2, a0);
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
+                                            masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // Value must be a number; only smis are accepted as the most common case.
+
+  Label miss;
+
+  Register receiver = a1;
+  Register value = a0;
+  Register scratch = a3;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ GetObjectType(receiver, scratch, scratch);
+  __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Check that elements are FixedArray.
+  // We rely on StoreIC_ArrayLength below to deal with all types of
+  // fast elements (including COW).
+  __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+  __ GetObjectType(scratch, scratch, scratch);
+  __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+  // Check that value is a smi.
+  __ JumpIfNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ Push(receiver, value);
+
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
+                                            masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+
+  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  __ Push(a1, a2, a0);
+
+  __ li(a1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(a1, a0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
@@ -224,18 +1514,119 @@
 
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
-  UNIMPLEMENTED_MIPS();
-  return kNoCondition;
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return lt;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return ge;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
 }
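// Illustrative host-C++ sketch, not part of the patch: why Token::GT and
// Token::LTE map to the "reversed" conditions above.  The compare stub swaps
// its operands for those tokens, so evaluating x > y as y < x (and x <= y as
// y >= x) preserves the ECMA-262 left-to-right conversion order.
bool EvaluateGreaterThan(double x, double y) {
  // Operands swapped, condition 'lt': x > y  <=>  y < x.
  return y < x;
}

bool EvaluateLessThanOrEqual(double x, double y) {
  // Operands swapped, condition 'ge': x <= y  <=>  y >= x.
  return y >= x;
}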
 
 
 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
-  UNIMPLEMENTED_MIPS();
+  HandleScope scope;
+  Handle<Code> rewritten;
+  State previous_state = GetState();
+  State state = TargetState(previous_state, false, x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+
+  // Activate inlined smi code.
+  if (previous_state == UNINITIALIZED) {
+    PatchInlinedSmiCode(address());
+  }
 }
 
 
 void PatchInlinedSmiCode(Address address) {
-  // Currently there is no smi inlining in the MIPS full code generator.
+  Address andi_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(andi_instruction_address);
+  if (!Assembler::IsAndImmediate(instr)) {
+    return;
+  }
+
+  // The delta to the start of the map check instruction and the
+  // condition code used at the patched jump.
+  int delta = Assembler::GetImmediate16(instr);
+  delta += Assembler::GetRs(instr) * kImm16Mask;
+  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+  // signals that nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
+           address, andi_instruction_address, delta);
+  }
+#endif
+
+  Address patch_address =
+      andi_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+  ASSERT(Assembler::IsBranch(branch_instr));
+  if (Assembler::IsBeq(branch_instr)) {
+    // This is patching a "jump if not smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+    patcher.ChangeBranchCondition(ne);
+  } else {
+    ASSERT(Assembler::IsBne(branch_instr));
+    // This is patching a "jump if smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+    patcher.ChangeBranchCondition(eq);
+  }
 }
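// Illustrative host-C++ sketch, not part of the patch: how the inlined-smi
// marker is decoded above.  The andi that follows the call doubles as a
// marker: its 16-bit immediate plus its rs field scaled by kImm16Mask gives
// the distance back to the patch site, and a zero delta means nothing was
// inlined.  InlinedSmiMarker is a hypothetical stand-in for the decoded
// instruction fields.
struct InlinedSmiMarker {
  bool is_andi;  // an andi at, rx, #imm follows the call site
  int imm16;     // low 16 bits of the encoded delta
  int rs;        // register field, extends the delta beyond 16 bits
};

// Returns the delta (in instructions) back to the patch site, or 0 when the
// site should be left untouched.
int DecodeInlinedSmiDelta(const InlinedSmiMarker& marker) {
  const int kImm16Mask = 0xFFFF;
  if (!marker.is_andi) return 0;
  return marker.imm16 + marker.rs * kImm16Mask;
}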
 
 
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
deleted file mode 100644
index bd6d60b..0000000
--- a/src/mips/jump-target-mips.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
-    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
-    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-
-void JumpTarget::DoJump() {
-  UNIMPLEMENTED_MIPS();
-}
-
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void JumpTarget::DoBranch(Condition cc, Hint ignored,
-//     Register src1, const Operand& src2) {
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::Call() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::DoBind() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-#undef BRANCH_ARGS_CHECK
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 345d912..2aec684 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index e11dfab..ebc1e43 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -78,7 +78,7 @@
 
   bool HasEnvironment() const {
     UNIMPLEMENTED();
-    return NULL;
+    return false;
   }
 
   virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
@@ -213,15 +213,13 @@
 
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(CompilationInfo* info, HGraph* graph) { }
+  explicit LChunk(HGraph* graph) { }
 
   HGraph* graph() const {
     UNIMPLEMENTED();
     return NULL;
   }
 
-  CompilationInfo* info() const { return NULL; }
-
   const ZoneList<LPointerMap*>* pointer_maps() const {
     UNIMPLEMENTED();
     return NULL;
@@ -271,6 +269,11 @@
 
   void MarkEmptyBlocks() { UNIMPLEMENTED(); }
 
+  CompilationInfo* info() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
 #ifdef DEBUG
   void Verify() { UNIMPLEMENTED(); }
 #endif
@@ -279,7 +282,7 @@
 
 class LChunkBuilder BASE_EMBEDDED {
  public:
-  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
+  LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
 
   // Build the sequence for the graph.
   LChunk* Build() {
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index bd4ab48..1c0af5d 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -25,106 +25,28 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <limits.h>  // For LONG_MIN, LONG_MAX
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
 
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime.h"
 
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      code_object_(HEAP->undefined_value()) {
-}
-
-
-// Arguments macros
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
-#define COND_ARGS cond, r1, r2
-
-#define REGISTER_TARGET_BODY(Name) \
-void MacroAssembler::Name(Register target, \
-                          BranchDelaySlot bd) { \
-  Name(Operand(target), bd); \
-} \
-void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
-                          BranchDelaySlot bd) { \
-  Name(Operand(target), COND_ARGS, bd); \
-}
-
-
-#define INT_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
-                          BranchDelaySlot bd) { \
-  Name(Operand(target, rmode), bd); \
-} \
-void MacroAssembler::Name(intptr_t target, \
-                          RelocInfo::Mode rmode, \
-                          COND_TYPED_ARGS, \
-                          BranchDelaySlot bd) { \
-  Name(Operand(target, rmode), COND_ARGS, bd); \
-}
-
-
-#define BYTE_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
-                          BranchDelaySlot bd) { \
-  Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
-} \
-void MacroAssembler::Name(byte* target, \
-                          RelocInfo::Mode rmode, \
-                          COND_TYPED_ARGS, \
-                          BranchDelaySlot bd) { \
-  Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
-}
-
-
-#define CODE_TARGET_BODY(Name) \
-void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
-                          BranchDelaySlot bd) { \
-  Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
-} \
-void MacroAssembler::Name(Handle<Code> target, \
-                          RelocInfo::Mode rmode, \
-                          COND_TYPED_ARGS, \
-                          BranchDelaySlot bd) { \
-  Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
-}
-
-
-REGISTER_TARGET_BODY(Jump)
-REGISTER_TARGET_BODY(Call)
-INT_PTR_TARGET_BODY(Jump)
-INT_PTR_TARGET_BODY(Call)
-BYTE_PTR_TARGET_BODY(Jump)
-BYTE_PTR_TARGET_BODY(Call)
-CODE_TARGET_BODY(Jump)
-CODE_TARGET_BODY(Call)
-
-#undef COND_TYPED_ARGS
-#undef COND_ARGS
-#undef REGISTER_TARGET_BODY
-#undef BYTE_PTR_TARGET_BODY
-#undef CODE_TARGET_BODY
-
-
-void MacroAssembler::Ret(BranchDelaySlot bd) {
-  Jump(Operand(ra), bd);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
-    BranchDelaySlot bd) {
-  Jump(Operand(ra), cond, r1, r2, bd);
+      allow_stub_calls_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -161,7 +83,7 @@
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register address,
                                        Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Check that the object is not in new space.
     Label not_in_new_space;
     InNewSpace(object, scratch, ne, &not_in_new_space);
@@ -191,6 +113,82 @@
 }
 
 
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  ASSERT(num_unsaved >= 0);
+  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  MultiPop(kSafepointSavedRegisters);
+  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+  PushSafepointRegisters();
+  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+    FPURegister reg = FPURegister::FromAllocationIndex(i);
+    sdc1(reg, MemOperand(sp, i * kDoubleSize));
+  }
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+    FPURegister reg = FPURegister::FromAllocationIndex(i);
+    ldc1(reg, MemOperand(sp, i * kDoubleSize));
+  }
+  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+  PopSafepointRegisters();
+}
+
+
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+                                                             Register dst) {
+  sw(src, SafepointRegistersAndDoublesSlot(dst));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  sw(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  lw(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that lowest encodings are closest to the stack pointer.
+  return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  // General purpose registers are pushed last on the stack.
+  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+  return MemOperand(sp, doubles_size + register_offset);
+}
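// Illustrative host-C++ sketch, not part of the patch: the stack layout
// assumed by the safepoint slot helpers above.  Doubles are pushed below the
// general-purpose registers, so a GP register's combined slot is its plain
// slot offset plus the size of the whole block of saved doubles.  The
// constants are placeholders, not the real MIPS values.
namespace safepoint_sketch {

const int kPointerSize = 4;
const int kDoubleSize = 8;
const int kNumAllocatableDoubles = 12;

int RegisterSlotOffset(int register_stack_index) {
  return register_stack_index * kPointerSize;
}

int RegisterAndDoublesSlotOffset(int register_stack_index) {
  const int doubles_size = kNumAllocatableDoubles * kDoubleSize;
  return doubles_size + RegisterSlotOffset(register_stack_index);
}

}  // namespace safepoint_sketch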
+
+
+
+
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
@@ -230,7 +228,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     li(object, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
@@ -262,7 +260,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     li(object, Operand(BitCast<int32_t>(kZapValue)));
     li(address, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch, Operand(BitCast<int32_t>(kZapValue)));
@@ -271,7 +269,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Allocation support
+// Allocation support.
 
 
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
@@ -297,15 +295,15 @@
   lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
-    Push(holder_reg);  // Temporarily save holder on the stack.
+    push(holder_reg);  // Temporarily save holder on the stack.
     // Read the first word and compare to the global_context_map.
     lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
     LoadRoot(at, Heap::kGlobalContextMapRootIndex);
     Check(eq, "JSGlobalObject::global_context should be a global context.",
           holder_reg, Operand(at));
-    Pop(holder_reg);  // Restore holder.
+    pop(holder_reg);  // Restore holder.
   }
 
   // Check if both contexts are the same.
@@ -313,9 +311,9 @@
   Branch(&same_contexts, eq, scratch, Operand(at));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
-    Push(holder_reg);  // Temporarily save holder on the stack.
+    push(holder_reg);  // Temporarily save holder on the stack.
     mov(holder_reg, at);  // Move at to its holding place.
     LoadRoot(at, Heap::kNullValueRootIndex);
     Check(ne, "JSGlobalProxy::context() should not be null.",
@@ -326,7 +324,7 @@
     Check(eq, "JSGlobalObject::global_context should be a global context.",
           holder_reg, Operand(at));
     // Restore at is not needed. at is reloaded below.
-    Pop(holder_reg);  // Restore holder.
+    pop(holder_reg);  // Restore holder.
     // Restore at to holder's context.
     lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
   }
@@ -345,8 +343,128 @@
 }
 
 
+void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
+  // First of all we assign the hash seed to scratch.
+  LoadRoot(scratch, Heap::kHashSeedRootIndex);
+  SmiUntag(scratch);
+
+  // Xor original key with a seed.
+  xor_(reg0, reg0, scratch);
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  nor(scratch, reg0, zero_reg);
+  sll(at, reg0, 15);
+  addu(reg0, scratch, at);
+
+  // hash = hash ^ (hash >> 12);
+  srl(at, reg0, 12);
+  xor_(reg0, reg0, at);
+
+  // hash = hash + (hash << 2);
+  sll(at, reg0, 2);
+  addu(reg0, reg0, at);
+
+  // hash = hash ^ (hash >> 4);
+  srl(at, reg0, 4);
+  xor_(reg0, reg0, at);
+
+  // hash = hash * 2057;
+  li(scratch, Operand(2057));
+  mul(reg0, reg0, scratch);
+
+  // hash = hash ^ (hash >> 16);
+  srl(at, reg0, 16);
+  xor_(reg0, reg0, at);
+}
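// Illustrative host-C++ version, not part of the patch, of the seeded integer
// hash emitted above, mirroring the comment that it must be kept in sync with
// ComputeIntegerHash in utils.h.  Taking the seed as a parameter is a
// simplification; the stub reads it from the heap roots and untags it.
#include <cstdint>

uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // i.e. (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // i.e. hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}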
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+                                              Register elements,
+                                              Register key,
+                                              Register result,
+                                              Register reg0,
+                                              Register reg1,
+                                              Register reg2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'key' or 'result'.
+  //            Unchanged on bailout so 'key' or 'result' can be used
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // reg0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // reg1 - Used to hold the capacity mask of the dictionary.
+  //
+  // reg2 - Used for the index into the dictionary.
+  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
+  Label done;
+
+  GetNumberHash(reg0, reg1);
+
+  // Compute the capacity mask.
+  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  sra(reg1, reg1, kSmiTagSize);
+  Subu(reg1, reg1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use reg2 for index calculations and keep the hash intact in reg0.
+    mov(reg2, reg0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+    }
+    and_(reg2, reg2, reg1);
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    sll(at, reg2, 1);  // 2x.
+    addu(reg2, reg2, at);  // reg2 = reg2 * 3.
+
+    // Check if the key is identical to the name.
+    sll(at, reg2, kPointerSizeLog2);
+    addu(reg2, elements, at);
+
+    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
+    if (i != kProbes - 1) {
+      Branch(&done, eq, key, Operand(at));
+    } else {
+      Branch(miss, ne, key, Operand(at));
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a normal property.
+  // reg2: elements + (index * kPointerSize).
+  const int kDetailsOffset =
+      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+  Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+  lw(result, FieldMemOperand(reg2, kValueOffset));
+}
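
The probe loop above unrolls a standard open-addressing lookup over a table whose entries are three words (key, value, details; kEntrySize == 3). A self-contained model of what it does, with the probe step written as i + i*i per the comment in the generated code (the exact step lives in SeededNumberDictionary::GetProbeOffset), and with the details check omitted:

    #include <stdint.h>

    // table: capacity (power of two) buckets of 3 words each: key, value, details.
    // Returns the word index of the value slot, or -1 for a miss after kProbes probes.
    int LookupNumberDict(const uint32_t* table, uint32_t capacity,
                         uint32_t hash, uint32_t key) {
      const int kProbes = 4;
      const int kEntrySize = 3;
      uint32_t mask = capacity - 1;                       // kCapacityOffset - 1
      for (int i = 0; i < kProbes; i++) {
        uint32_t index = (hash + i + i * i) & mask;       // masked probe index
        const uint32_t* entry = table + index * kEntrySize;
        if (entry[0] == key) {
          return static_cast<int>(index * kEntrySize + 1);  // value follows the key
        }
      }
      return -1;  // the generated code branches to 'miss' after the last probe
    }
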
+
+
 // ---------------------------------------------------------------------------
-// Instruction macros
+// Instruction macros.
 
 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
@@ -500,6 +618,15 @@
 }
 
 
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+  ASSERT(rt.is_reg());
+  ASSERT(!at.is(rs));
+  ASSERT(!at.is(rt.rm()));
+  li(at, -1);
+  xor_(rs, rt.rm(), at);
+}
+
+
 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     slt(rd, rs, rt.rm());
@@ -581,78 +708,121 @@
     }
     // We always need the same number of instructions, as we may need to patch
     // this code to load another value, which may require 2 instructions.
-    if (is_int16(j.imm32_)) {
-      nop();
-      addiu(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & kHiMask)) {
-      nop();
-      ori(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & kImm16Mask)) {
-      nop();
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
-    } else {
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
-      ori(rd, rd, (j.imm32_ & kImm16Mask));
-    }
+    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+    ori(rd, rd, (j.imm32_ & kImm16Mask));
   }
 }
 
 
-// Exception-generating instructions and debugging support
-void MacroAssembler::stop(const char* msg) {
-  // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
-  // We use the 0x54321 value to be able to find it easily when reading memory.
-  break_(0x54321);
-}
-
-
 void MacroAssembler::MultiPush(RegList regs) {
-  int16_t NumSaved = 0;
-  int16_t NumToPush = NumberOfBitsSet(regs);
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kPointerSize;
 
-  addiu(sp, sp, -4 * NumToPush);
+  Subu(sp, sp, Operand(stack_offset));
   for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
-      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+      stack_offset -= kPointerSize;
+      sw(ToRegister(i), MemOperand(sp, stack_offset));
     }
   }
 }
 
 
 void MacroAssembler::MultiPushReversed(RegList regs) {
-  int16_t NumSaved = 0;
-  int16_t NumToPush = NumberOfBitsSet(regs);
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kPointerSize;
 
-  addiu(sp, sp, -4 * NumToPush);
+  Subu(sp, sp, Operand(stack_offset));
   for (int16_t i = 0; i < kNumRegisters; i++) {
     if ((regs & (1 << i)) != 0) {
-      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+      stack_offset -= kPointerSize;
+      sw(ToRegister(i), MemOperand(sp, stack_offset));
     }
   }
 }
 
 
 void MacroAssembler::MultiPop(RegList regs) {
-  int16_t NumSaved = 0;
+  int16_t stack_offset = 0;
 
   for (int16_t i = 0; i < kNumRegisters; i++) {
     if ((regs & (1 << i)) != 0) {
-      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+      lw(ToRegister(i), MemOperand(sp, stack_offset));
+      stack_offset += kPointerSize;
     }
   }
-  addiu(sp, sp, 4 * NumSaved);
+  addiu(sp, sp, stack_offset);
 }
 
 
 void MacroAssembler::MultiPopReversed(RegList regs) {
-  int16_t NumSaved = 0;
+  int16_t stack_offset = 0;
 
   for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
-      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+      lw(ToRegister(i), MemOperand(sp, stack_offset));
+      stack_offset += kPointerSize;
     }
   }
-  addiu(sp, sp, 4 * NumSaved);
+  addiu(sp, sp, stack_offset);
+}
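
A small worked example of the offset bookkeeping in MultiPush/MultiPop above; the register numbers are the usual MIPS o32 assignments (a0 = 4, a1 = 5, ra = 31), used here only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const int kPointerSize = 4;
      // Hypothetical RegList: a0 (reg 4), a1 (reg 5) and ra (reg 31).
      uint32_t regs = (1u << 4) | (1u << 5) | (1u << 31);

      int num_to_push = 0;
      for (int i = 0; i < 32; i++) num_to_push += (regs >> i) & 1;
      int stack_offset = num_to_push * kPointerSize;   // sp is lowered by 12 bytes

      // Walk from the highest register down, pre-decrementing the offset, so the
      // highest-numbered register lands at the highest address (ra@8, a1@4, a0@0).
      for (int i = 31; i >= 0; i--) {
        if ((regs >> i) & 1) {
          stack_offset -= kPointerSize;
          printf("sw r%d, %d(sp)\n", i, stack_offset);
        }
      }
      return 0;
    }

MultiPop walks in the opposite direction with a post-incremented offset, so the same layout is read back and sp is restored by the total size at the end.
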
+
+
+void MacroAssembler::MultiPushFPU(RegList regs) {
+  CpuFeatures::Scope scope(FPU);
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kDoubleSize;
+
+  Subu(sp, sp, Operand(stack_offset));
+  for (int16_t i = kNumRegisters; i > 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kDoubleSize;
+      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPushReversedFPU(RegList regs) {
+  CpuFeatures::Scope scope(FPU);
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kDoubleSize;
+
+  Subu(sp, sp, Operand(stack_offset));
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kDoubleSize;
+      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPopFPU(RegList regs) {
+  CpuFeatures::Scope scope(FPU);
+  int16_t stack_offset = 0;
+
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      stack_offset += kDoubleSize;
+    }
+  }
+  addiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::MultiPopReversedFPU(RegList regs) {
+  CpuFeatures::Scope scope(FPU);
+  int16_t stack_offset = 0;
+
+  for (int16_t i = kNumRegisters; i > 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      stack_offset += kDoubleSize;
+    }
+  }
+  addiu(sp, sp, stack_offset);
 }
 
 
@@ -661,15 +831,20 @@
                          uint16_t pos,
                          uint16_t size) {
   ASSERT(pos < 32);
-  ASSERT(pos + size < 32);
+  ASSERT(pos + size < 33);
 
   if (mips32r2) {
     ext_(rt, rs, pos, size);
   } else {
     // Move rs to rt and shift it left then right to get the
     // desired bitfield on the right side and zeroes on the left.
-    sll(rt, rs, 32 - (pos + size));
-    srl(rt, rt, 32 - size);
+    int shift_left = 32 - (pos + size);
+    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
+
+    int shift_right = 32 - size;
+    if (shift_right > 0) {
+      srl(rt, rt, shift_right);
+    }
   }
 }
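
The non-mips32r2 fallback in Ext() is the usual shift-left/shift-right field extraction; a C++ sketch, where the size == 32 guard plays the same role as the new shift_right > 0 check (a C shift by 32 is undefined, just as the degenerate srl is unnecessary):

    #include <stdint.h>
    #include <assert.h>

    // Extract `size` bits of `x` starting at bit `pos` (pos + size <= 32).
    uint32_t ExtractBits(uint32_t x, int pos, int size) {
      x <<= 32 - (pos + size);            // acts as a move when pos + size == 32
      if (size < 32) x >>= 32 - size;     // skip when the whole word is the field
      return x;
    }

    int main() {
      assert(ExtractBits(0xABCD1234u, 8, 8) == 0x12u);   // byte 1
      assert(ExtractBits(0xABCD1234u, 31, 1) == 1u);     // sign bit, as in Cvt_d_uw below
      return 0;
    }
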
 
@@ -711,28 +886,32 @@
 }
 
 
-void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
-  // Move the data from fs to t4.
-  mfc1(t4, fs);
-  return Cvt_d_uw(fd, t4);
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+                              FPURegister fs,
+                              FPURegister scratch) {
+  // Move the data from fs to t8.
+  mfc1(t8, fs);
+  Cvt_d_uw(fd, t8, scratch);
 }
 
 
-void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+                              Register rs,
+                              FPURegister scratch) {
   // Convert rs to a FP value in fd (and fd + 1).
   // We do this by converting rs minus the MSB to avoid sign conversion,
-  // then adding 2^31-1 and 1 to the result.
+  // then adding 2^31 to the result (if needed).
 
-  ASSERT(!fd.is(f20));
+  ASSERT(!fd.is(scratch));
   ASSERT(!rs.is(t9));
-  ASSERT(!rs.is(t8));
+  ASSERT(!rs.is(at));
 
-  // Save rs's MSB to t8
-  And(t8, rs, 0x80000000);
+  // Save rs's MSB to t9.
+  Ext(t9, rs, 31, 1);
   // Remove rs's MSB.
-  And(t9, rs, 0x7FFFFFFF);
-  // Move t9 to fd
-  mtc1(t9, fd);
+  Ext(at, rs, 0, 31);
+  // Move the result to fd.
+  mtc1(at, fd);
 
   // Convert fd to a real FP value.
   cvt_d_w(fd, fd);
@@ -741,41 +920,39 @@
 
   // If rs's MSB was 0, it's done.
   // Otherwise we need to add that to the FP register.
-  Branch(&conversion_done, eq, t8, Operand(zero_reg));
+  Branch(&conversion_done, eq, t9, Operand(zero_reg));
 
-  // First load 2^31 - 1 into f20.
-  Or(t9, zero_reg, 0x7FFFFFFF);
-  mtc1(t9, f20);
+  // Load 2^31 into f20 as its float representation.
+  li(at, 0x41E00000);
+  mtc1(at, FPURegister::from_code(scratch.code() + 1));
+  mtc1(zero_reg, scratch);
+  // Add it to fd.
+  add_d(fd, fd, scratch);
 
-  // Convert it to FP and add it to fd.
-  cvt_d_w(f20, f20);
-  add_d(fd, fd, f20);
-  // Now add 1.
-  Or(t9, zero_reg, 1);
-  mtc1(t9, f20);
-
-  cvt_d_w(f20, f20);
-  add_d(fd, fd, f20);
   bind(&conversion_done);
 }
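
The magic constant 0x41E00000 loaded above is just the high word of the IEEE-754 double 2^31 (sign 0, biased exponent 1023 + 31 = 1054 = 0x41E, zero mantissa), so writing it to the upper half of the register pair and zero to the lower half materializes 2^31 without an integer-to-float conversion. A quick check:

    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    int main() {
      double two_31 = 2147483648.0;  // 2^31
      uint64_t bits;
      memcpy(&bits, &two_31, sizeof(bits));
      assert((uint32_t)(bits >> 32) == 0x41E00000u);  // high word (mtc1 into scratch + 1)
      assert((uint32_t)(bits & 0xFFFFFFFFu) == 0u);   // low word  (zero_reg)
      return 0;
    }
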
 
 
-void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
-  Trunc_uw_d(fs, t4);
-  mtc1(t4, fd);
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+                                FPURegister fs,
+                                FPURegister scratch) {
+  Trunc_uw_d(fs, t8, scratch);
+  mtc1(t8, fd);
 }
 
 
-void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
-  ASSERT(!fd.is(f22));
-  ASSERT(!rs.is(t6));
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+                                Register rs,
+                                FPURegister scratch) {
+  ASSERT(!fd.is(scratch));
+  ASSERT(!rs.is(at));
 
-  // Load 2^31 into f22.
-  Or(t6, zero_reg, 0x80000000);
-  Cvt_d_uw(f22, t6);
-
-  // Test if f22 > fd.
-  c(OLT, D, fd, f22);
+  // Load 2^31 into scratch as its float representation.
+  li(at, 0x41E00000);
+  mtc1(at, FPURegister::from_code(scratch.code() + 1));
+  mtc1(zero_reg, scratch);
+  // Test if scratch > fd.
+  c(OLT, D, fd, scratch);
 
   Label simple_convert;
   // If fd < 2^31 we can convert it normally.
@@ -783,18 +960,17 @@
 
   // First we subtract 2^31 from fd, then trunc it to rs
   // and add 2^31 to rs.
-
-  sub_d(f22, fd, f22);
-  trunc_w_d(f22, f22);
-  mfc1(rs, f22);
-  or_(rs, rs, t6);
+  sub_d(scratch, fd, scratch);
+  trunc_w_d(scratch, scratch);
+  mfc1(rs, scratch);
+  Or(rs, rs, 1 << 31);
 
   Label done;
   Branch(&done);
   // Simple conversion.
   bind(&simple_convert);
-  trunc_w_d(f22, fd);
-  mfc1(rs, f22);
+  trunc_w_d(scratch, fd);
+  mfc1(rs, scratch);
 
   bind(&done);
 }
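
A C++ sketch of the unsigned truncation implemented above: values below 2^31 go through the signed truncation directly, larger ones are shifted into signed range first and the top bit is OR-ed back afterwards (negative, NaN and out-of-range inputs are not special-cased in this sketch):

    #include <stdint.h>
    #include <assert.h>

    uint32_t TruncUint32(double fd) {
      const double k2Pow31 = 2147483648.0;      // 2^31, loaded above via 0x41E00000
      if (fd < k2Pow31) {                       // c(OLT, D, fd, scratch) -> simple_convert
        return static_cast<uint32_t>(static_cast<int32_t>(fd));          // trunc_w_d
      }
      // sub_d, trunc_w_d, then Or(rs, rs, 1 << 31) puts the high bit back.
      return static_cast<uint32_t>(static_cast<int32_t>(fd - k2Pow31)) | (1u << 31);
    }

    int main() {
      assert(TruncUint32(12.9) == 12u);
      assert(TruncUint32(3000000000.5) == 3000000000u);
      return 0;
    }
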
@@ -839,7 +1015,7 @@
   Subu(scratch2, scratch2, Operand(zero_exponent));
   // Dest already has a Smi zero.
   Branch(&done, lt, scratch2, Operand(zero_reg));
-  if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (!CpuFeatures::IsSupported(FPU)) {
     // We have a shifted exponent between 0 and 30 in scratch2.
     srl(dest, scratch2, HeapNumber::kExponentShift);
     // We now have the exponent in dest.  Subtract from 30 to get
@@ -848,7 +1024,7 @@
     subu(dest, at, dest);
   }
   bind(&right_exponent);
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // MIPS FPU instructions implementing double precision to integer
     // conversion using round to zero. Since the FP value was qualified
@@ -898,6 +1074,147 @@
 }
 
 
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+                                                 Register input_high,
+                                                 Register input_low,
+                                                 Register scratch) {
+  Label done, normal_exponent, restore_sign;
+  // Extract the biased exponent in result.
+  Ext(result,
+      input_high,
+      HeapNumber::kExponentShift,
+      HeapNumber::kExponentBits);
+
+  // Check for Infinity and NaNs, which should return 0.
+  Subu(scratch, result, HeapNumber::kExponentMask);
+  movz(result, zero_reg, scratch);
+  Branch(&done, eq, scratch, Operand(zero_reg));
+
+  // Express exponent as delta to (number of mantissa bits + 31).
+  Subu(result,
+       result,
+       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+  // If the delta is strictly positive, all bits would be shifted away,
+  // which means that we can return 0.
+  Branch(&normal_exponent, le, result, Operand(zero_reg));
+  mov(result, zero_reg);
+  Branch(&done);
+
+  bind(&normal_exponent);
+  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+  // Calculate shift.
+  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+  // Save the sign.
+  Register sign = result;
+  result = no_reg;
+  And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+  // to check for this specific case.
+  Label high_shift_needed, high_shift_done;
+  Branch(&high_shift_needed, lt, scratch, Operand(32));
+  mov(input_high, zero_reg);
+  Branch(&high_shift_done);
+  bind(&high_shift_needed);
+
+  // Set the implicit 1 before the mantissa part in input_high.
+  Or(input_high,
+     input_high,
+     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+  // Shift the mantissa bits to the correct position.
+  // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32-bit range.
+  sllv(input_high, input_high, scratch);
+
+  bind(&high_shift_done);
+
+  // Replace the shifted bits with bits from the lower mantissa word.
+  Label pos_shift, shift_done;
+  li(at, 32);
+  subu(scratch, at, scratch);
+  Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+  // Negate scratch.
+  Subu(scratch, zero_reg, scratch);
+  sllv(input_low, input_low, scratch);
+  Branch(&shift_done);
+
+  bind(&pos_shift);
+  srlv(input_low, input_low, scratch);
+
+  bind(&shift_done);
+  Or(input_high, input_high, Operand(input_low));
+  // Restore sign if necessary.
+  mov(scratch, sign);
+  result = sign;
+  sign = no_reg;
+  Subu(result, zero_reg, input_high);
+  movz(result, input_high, scratch);
+  bind(&done);
+}
+
+
+void MacroAssembler::EmitECMATruncate(Register result,
+                                      FPURegister double_input,
+                                      FPURegister single_scratch,
+                                      Register scratch,
+                                      Register input_high,
+                                      Register input_low) {
+  CpuFeatures::Scope scope(FPU);
+  ASSERT(!input_high.is(result));
+  ASSERT(!input_low.is(result));
+  ASSERT(!input_low.is(input_high));
+  ASSERT(!scratch.is(result) &&
+         !scratch.is(input_high) &&
+         !scratch.is(input_low));
+  ASSERT(!single_scratch.is(double_input));
+
+  Label done;
+  Label manual;
+
+  // Clear cumulative exception flags and save the FCSR.
+  Register scratch2 = input_high;
+  cfc1(scratch2, FCSR);
+  ctc1(zero_reg, FCSR);
+  // Try a conversion to a signed integer.
+  trunc_w_d(single_scratch, double_input);
+  mfc1(result, single_scratch);
+  // Retrieve and restore the FCSR.
+  cfc1(scratch, FCSR);
+  ctc1(scratch2, FCSR);
+  // Check for overflow and NaNs.
+  And(scratch,
+      scratch,
+      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+  // If we had no exceptions we are done.
+  Branch(&done, eq, scratch, Operand(zero_reg));
+
+  // Load the double value and perform a manual truncation.
+  Move(input_low, input_high, double_input);
+  EmitOutOfInt32RangeTruncate(result,
+                              input_high,
+                              input_low,
+                              scratch);
+  bind(&done);
+}
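
EmitECMATruncate first tries the FPU trunc and only falls back to the manual path when FCSR reports overflow, underflow or an invalid operation; what both paths ultimately have to produce is ECMA-262 ToInt32: truncate toward zero, reduce modulo 2^32, reinterpret as a signed 32-bit value, with NaN and infinities mapping to 0. A reference implementation of that semantics:

    #include <cmath>
    #include <cstdint>

    int32_t ECMAToInt32(double d) {
      if (!std::isfinite(d)) return 0;           // NaN and +/-Infinity map to +0
      double t = std::trunc(d);                  // round toward zero
      double m = std::fmod(t, 4294967296.0);     // reduce modulo 2^32 (sign follows t)
      if (m < 0) m += 4294967296.0;              // bring into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));  // two's-complement wrap
    }
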
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+                                         Register src,
+                                         int num_least_bits) {
+  Ext(dst, src, kSmiTagSize, num_least_bits);
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+                                           Register src,
+                                           int num_least_bits) {
+  And(dst, src, Operand((1 << num_least_bits) - 1));
+}
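
GetLeastBitsFromSmi pulls the low bits of the untagged value straight out of the Smi: with kSmiTagSize == 1 (a Smi is the integer shifted left by one), Ext(dst, src, 1, n) is simply (smi >> 1) & ((1 << n) - 1). For example:

    #include <stdint.h>
    #include <assert.h>

    int main() {
      int32_t value = 42;
      int32_t smi = value << 1;                    // Smi encoding on 32-bit V8 (tag size 1)
      int n = 3;
      int32_t low = (smi >> 1) & ((1 << n) - 1);   // what Ext(dst, src, kSmiTagSize, 3) yields
      assert(low == (value & 7));                  // == 2
      return 0;
    }
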
+
+
 // Emulated conditional branches do not emit a nop in the branch delay slot.
 //
 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
@@ -906,7 +1223,54 @@
     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
 
 
+bool MacroAssembler::UseAbsoluteCodePointers() {
+  if (is_trampoline_emitted()) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+  BranchShort(offset, bdslot);
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  BranchShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Jr(L, bdslot);
+  } else {
+    BranchShort(L, bdslot);
+  }
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Label skip;
+    Condition neg_cond = NegateCondition(cond);
+    BranchShort(&skip, neg_cond, rs, rt);
+    Jr(L, bdslot);
+    bind(&skip);
+  } else {
+    BranchShort(L, cond, rs, rt, bdslot);
+  }
+}
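
The Branch/BranchShort split above implements MIPS "long branches": a conditional branch only encodes a signed 16-bit instruction offset, so when the label may be out of range (or a trampoline pool has been emitted), the condition is inverted to skip over an absolute jump. A rough model of the reachability constraint:

    #include <stdint.h>

    // A MIPS conditional branch encodes a signed 16-bit offset counted in
    // instructions, so it reaches roughly +/-32K instructions (+/-128 KB).
    bool FitsShortBranch(int64_t branch_pc, int64_t target_pc) {
      int64_t offset = (target_pc - branch_pc) / 4;  // offset in instructions
      return offset >= -32768 && offset <= 32767;
    }
    // When this is false, or UseAbsoluteCodePointers() returns true because a
    // trampoline pool was emitted, Branch() emits a short branch on the negated
    // condition over a lui/ori/jr sequence (Jr), reaching the label absolutely.
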
+
+
+void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
   b(offset);
 
   // Emit a nop in the branch delay slot if required.
@@ -915,9 +1279,9 @@
 }
 
 
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
-                            const Operand& rt,
-                            BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
+                                 const Operand& rt,
+                                 BranchDelaySlot bdslot) {
   BRANCH_ARGS_CHECK(cond, rs, rt);
   ASSERT(!rs.is(zero_reg));
   Register r2 = no_reg;
@@ -937,7 +1301,7 @@
       case ne:
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (r2.is(zero_reg)) {
           bgtz(rs, offset);
@@ -989,7 +1353,8 @@
         break;
       case Uless:
         if (r2.is(zero_reg)) {
-          b(offset);
+          // No code needs to be emitted.
+          return;
         } else {
           sltu(scratch, rs, r2);
           bne(scratch, zero_reg, offset);
@@ -1028,7 +1393,7 @@
         li(r2, rt);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (rt.imm32_ == 0) {
           bgtz(rs, offset);
@@ -1048,7 +1413,7 @@
         } else {
           r2 = scratch;
           li(r2, rt);
-          sltu(scratch, rs, r2);
+          slt(scratch, rs, r2);
           beq(scratch, zero_reg, offset);
         }
         break;
@@ -1101,7 +1466,8 @@
         break;
       case Uless:
         if (rt.imm32_ == 0) {
-          b(offset);
+          // No code needs to be emitted.
+          return;
         } else if (is_int16(rt.imm32_)) {
           sltiu(scratch, rs, rt.imm32_);
           bne(scratch, zero_reg, offset);
@@ -1132,7 +1498,7 @@
 }
 
 
-void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
   // We use branch_offset as an argument for the branch instructions to be sure
   // it is called just before generating the branch instruction, as needed.
 
@@ -1144,9 +1510,9 @@
 }
 
 
-void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
-                            const Operand& rt,
-                            BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+                                 const Operand& rt,
+                                 BranchDelaySlot bdslot) {
   BRANCH_ARGS_CHECK(cond, rs, rt);
 
   int32_t offset;
@@ -1170,7 +1536,7 @@
         offset = shifted_branch_offset(L, false);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
@@ -1234,8 +1600,8 @@
         break;
       case Uless:
         if (r2.is(zero_reg)) {
-          offset = shifted_branch_offset(L, false);
-          b(offset);
+          // No code needs to be emitted.
+          return;
         } else {
           sltu(scratch, rs, r2);
           offset = shifted_branch_offset(L, false);
@@ -1265,23 +1631,26 @@
         b(offset);
         break;
       case eq:
+        ASSERT(!scratch.is(rs));
         r2 = scratch;
         li(r2, rt);
         offset = shifted_branch_offset(L, false);
         beq(rs, r2, offset);
         break;
       case ne:
+        ASSERT(!scratch.is(rs));
         r2 = scratch;
         li(r2, rt);
         offset = shifted_branch_offset(L, false);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (rt.imm32_ == 0) {
           offset = shifted_branch_offset(L, false);
           bgtz(rs, offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           slt(scratch, r2, rs);
@@ -1298,9 +1667,10 @@
           offset = shifted_branch_offset(L, false);
           beq(scratch, zero_reg, offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
-          sltu(scratch, rs, r2);
+          slt(scratch, rs, r2);
           offset = shifted_branch_offset(L, false);
           beq(scratch, zero_reg, offset);
         }
@@ -1314,6 +1684,7 @@
           offset = shifted_branch_offset(L, false);
           bne(scratch, zero_reg, offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           slt(scratch, rs, r2);
@@ -1326,6 +1697,7 @@
           offset = shifted_branch_offset(L, false);
           blez(rs, offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           slt(scratch, r2, rs);
@@ -1339,6 +1711,7 @@
           offset = shifted_branch_offset(L, false);
           bgtz(rs, offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, r2, rs);
@@ -1355,6 +1728,7 @@
           offset = shifted_branch_offset(L, false);
           beq(scratch, zero_reg, offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, rs, r2);
@@ -1364,13 +1738,14 @@
         break;
      case Uless:
         if (rt.imm32_ == 0) {
-          offset = shifted_branch_offset(L, false);
-          b(offset);
+          // No code needs to be emitted.
+          return;
         } else if (is_int16(rt.imm32_)) {
           sltiu(scratch, rs, rt.imm32_);
           offset = shifted_branch_offset(L, false);
           bne(scratch, zero_reg, offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, rs, r2);
@@ -1383,6 +1758,7 @@
           offset = shifted_branch_offset(L, false);
           b(offset);
         } else {
+          ASSERT(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, r2, rs);
@@ -1402,11 +1778,49 @@
 }
 
 
+void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+  BranchAndLinkShort(offset, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Jalr(L, bdslot);
+  } else {
+    BranchAndLinkShort(L, bdslot);
+  }
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Label skip;
+    Condition neg_cond = NegateCondition(cond);
+    BranchShort(&skip, neg_cond, rs, rt);
+    Jalr(L, bdslot);
+    bind(&skip);
+  } else {
+    BranchAndLinkShort(L, cond, rs, rt, bdslot);
+  }
+}
+
+
 // We need to use a bgezal or bltzal, but they can't be used directly with the
 // slt instructions. We could use sub or add instead but we would miss overflow
 // cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(int16_t offset,
-                                   BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset,
+                                        BranchDelaySlot bdslot) {
   bal(offset);
 
   // Emit a nop in the branch delay slot if required.
@@ -1415,9 +1829,9 @@
 }
 
 
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
-                                   const Operand& rt,
-                                   BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
+                                        Register rs, const Operand& rt,
+                                        BranchDelaySlot bdslot) {
   BRANCH_ARGS_CHECK(cond, rs, rt);
   Register r2 = no_reg;
   Register scratch = at;
@@ -1444,7 +1858,7 @@
       bal(offset);
       break;
 
-    // Signed comparison
+    // Signed comparison.
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
@@ -1497,7 +1911,7 @@
 }
 
 
-void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
   bal(shifted_branch_offset(L, false));
 
   // Emit a nop in the branch delay slot if required.
@@ -1506,9 +1920,9 @@
 }
 
 
-void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
-                                   const Operand& rt,
-                                   BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
+                                        const Operand& rt,
+                                        BranchDelaySlot bdslot) {
   BRANCH_ARGS_CHECK(cond, rs, rt);
 
   int32_t offset;
@@ -1539,7 +1953,7 @@
       bal(offset);
       break;
 
-    // Signed comparison
+    // Signed comparison.
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
@@ -1604,137 +2018,230 @@
 }
 
 
-void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (target.is_reg()) {
-      jr(target.rm());
-  } else {
-    if (!MustUseReg(target.rmode_)) {
-        j(target.imm32_);
-    } else {
-      li(t9, target);
-      jr(t9);
-    }
-  }
-  // Emit a nop in the branch delay slot if required.
-  if (bdslot == PROTECT)
-    nop();
-}
-
-
-void MacroAssembler::Jump(const Operand& target,
-                          Condition cond, Register rs, const Operand& rt,
-                          BranchDelaySlot bdslot) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  BRANCH_ARGS_CHECK(cond, rs, rt);
-  if (target.is_reg()) {
-    if (cond == cc_always) {
-      jr(target.rm());
-    } else {
-      Branch(2, NegateCondition(cond), rs, rt);
-      jr(target.rm());
-    }
-  } else {  // Not register target.
-    if (!MustUseReg(target.rmode_)) {
-      if (cond == cc_always) {
-        j(target.imm32_);
-      } else {
-        Branch(2, NegateCondition(cond), rs, rt);
-        j(target.imm32_);  // Will generate only one instruction.
-      }
-    } else {  // MustUseReg(target)
-      li(t9, target);
-      if (cond == cc_always) {
-        jr(t9);
-      } else {
-        Branch(2, NegateCondition(cond), rs, rt);
-        jr(t9);  // Will generate only one instruction.
-      }
-    }
-  }
-  // Emit a nop in the branch delay slot if required.
-  if (bdslot == PROTECT)
-    nop();
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (target.is_reg()) {
-      jalr(target.rm());
-  } else {    // !target.is_reg()
-    if (!MustUseReg(target.rmode_)) {
-      jal(target.imm32_);
-    } else {  // MustUseReg(target)
-      li(t9, target);
-      jalr(t9);
-    }
-  }
-  // Emit a nop in the branch delay slot if required.
-  if (bdslot == PROTECT)
-    nop();
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target,
-                          Condition cond, Register rs, const Operand& rt,
-                          BranchDelaySlot bdslot) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  BRANCH_ARGS_CHECK(cond, rs, rt);
-  if (target.is_reg()) {
-    if (cond == cc_always) {
-      jalr(target.rm());
-    } else {
-      Branch(2, NegateCondition(cond), rs, rt);
-      jalr(target.rm());
-    }
-  } else {    // !target.is_reg()
-    if (!MustUseReg(target.rmode_)) {
-      if (cond == cc_always) {
-        jal(target.imm32_);
-      } else {
-        Branch(2, NegateCondition(cond), rs, rt);
-        jal(target.imm32_);  // Will generate only one instruction.
-      }
-    } else {  // MustUseReg(target)
-      li(t9, target);
-      if (cond == cc_always) {
-        jalr(t9);
-      } else {
-        Branch(2, NegateCondition(cond), rs, rt);
-        jalr(t9);  // Will generate only one instruction.
-      }
-    }
-  }
-  // Emit a nop in the branch delay slot if required.
-  if (bdslot == PROTECT)
-    nop();
-}
-
-
-void MacroAssembler::Drop(int count,
+void MacroAssembler::Jump(Register target,
                           Condition cond,
-                          Register reg,
-                          const Operand& op) {
-  if (count <= 0) {
-    return;
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (cond == cc_always) {
+    jr(target);
+  } else {
+    BRANCH_ARGS_CHECK(cond, rs, rt);
+    Branch(2, NegateCondition(cond), rs, rt);
+    jr(target);
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bd == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jump(intptr_t target,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  li(t9, Operand(target, rmode));
+  Jump(t9, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Address target,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
+}
+
+
+int MacroAssembler::CallSize(Register target,
+                             Condition cond,
+                             Register rs,
+                             const Operand& rt,
+                             BranchDelaySlot bd) {
+  int size = 0;
+
+  if (cond == cc_always) {
+    size += 1;
+  } else {
+    size += 3;
   }
 
-  Label skip;
+  if (bd == PROTECT)
+    size += 1;
 
-  if (cond != al) {
-    Branch(&skip, NegateCondition(cond), reg, op);
-  }
+  return size * kInstrSize;
+}
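
The CallSize bookkeeping above exists so the ASSERT_EQ checks in Call() can verify the emitted length. A small check of how the arithmetic works out for the common cases (kInstrSize == 4):

    #include <assert.h>

    // Mirror of CallSize(Register, ...): jalr alone is 1 instruction, a guarded
    // call adds a branch plus its delay-slot nop (3 total), and PROTECT adds the
    // nop after jalr. The Address form adds the lui/ori pair emitted by li(t9).
    int CallSizeReg(bool conditional, bool protect_delay_slot) {
      int size = conditional ? 3 : 1;
      if (protect_delay_slot) size += 1;
      return size * 4;
    }

    int main() {
      assert(CallSizeReg(false, true) == 8);            // jalr + nop
      assert(CallSizeReg(true, true) == 16);            // branch, nop, jalr, nop
      assert(CallSizeReg(false, true) + 2 * 4 == 16);   // Address form: + lui/ori
      return 0;
    }
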
 
-  if (count > 0) {
-    addiu(sp, sp, count * kPointerSize);
-  }
 
-  if (cond != al) {
-    bind(&skip);
+// Note: To call gcc-compiled C code on mips, you must call through t9.
+void MacroAssembler::Call(Register target,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  Label start;
+  bind(&start);
+  if (cond == cc_always) {
+    jalr(target);
+  } else {
+    BRANCH_ARGS_CHECK(cond, rs, rt);
+    Branch(2, NegateCondition(cond), rs, rt);
+    jalr(target);
   }
+  // Emit a nop in the branch delay slot if required.
+  if (bd == PROTECT)
+    nop();
+
+  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
+            SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Address target,
+                             RelocInfo::Mode rmode,
+                             Condition cond,
+                             Register rs,
+                             const Operand& rt,
+                             BranchDelaySlot bd) {
+  int size = CallSize(t9, cond, rs, rt, bd);
+  return size + 2 * kInstrSize;
+}
+
+
+void MacroAssembler::Call(Address target,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  Label start;
+  bind(&start);
+  int32_t target_int = reinterpret_cast<int32_t>(target);
+  // Must record previous source positions before the
+  // li() generates a new code target.
+  positions_recorder()->WriteRecordedPositions();
+  li(t9, Operand(target_int, rmode), true);
+  Call(t9, cond, rs, rt, bd);
+  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
+            SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+                             RelocInfo::Mode rmode,
+                             unsigned ast_id,
+                             Condition cond,
+                             Register rs,
+                             const Operand& rt,
+                             BranchDelaySlot bd) {
+  return CallSize(reinterpret_cast<Address>(code.location()),
+      rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+                          RelocInfo::Mode rmode,
+                          unsigned ast_id,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  Label start;
+  bind(&start);
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+    SetRecordedAstId(ast_id);
+    rmode = RelocInfo::CODE_TARGET_WITH_ID;
+  }
+  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
+  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
+            SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::Ret(Condition cond,
+                         Register rs,
+                         const Operand& rt,
+                         BranchDelaySlot bd) {
+  Jump(ra, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+
+  uint32_t imm28;
+  imm28 = jump_address(L);
+  imm28 &= kImm28Mask;
+  { BlockGrowBufferScope block_buf_growth(this);
+    // Buffer growth (and relocation) must be blocked for internal references
+    // until associated instructions are emitted and available to be patched.
+    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+    j(imm28);
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+
+  uint32_t imm32;
+  imm32 = jump_address(L);
+  { BlockGrowBufferScope block_buf_growth(this);
+    // Buffer growth (and relocation) must be blocked for internal references
+    // until associated instructions are emitted and available to be patched.
+    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+    lui(at, (imm32 & kHiMask) >> kLuiShift);
+    ori(at, at, (imm32 & kImm16Mask));
+  }
+  jr(at);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+
+  uint32_t imm32;
+  imm32 = jump_address(L);
+  { BlockGrowBufferScope block_buf_growth(this);
+    // Buffer growth (and relocation) must be blocked for internal references
+    // until associated instructions are emitted and available to be patched.
+    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+    lui(at, (imm32 & kHiMask) >> kLuiShift);
+    ori(at, at, (imm32 & kImm16Mask));
+  }
+  jalr(at);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
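
Jr and Jalr above materialize the full 32-bit label address in at with a lui/ori pair (recorded as an INTERNAL_REFERENCE so the pair can be relocated later), then jump through the register. The split is simply the upper and lower 16 bits; the constants are redefined locally here so the snippet stands alone, and the address is only illustrative:

    #include <stdint.h>
    #include <assert.h>

    int main() {
      uint32_t imm32 = 0x00401234u;                    // hypothetical label address
      uint32_t kHiMask = 0xFFFF0000u, kImm16Mask = 0x0000FFFFu;
      int kLuiShift = 16;
      uint32_t hi = (imm32 & kHiMask) >> kLuiShift;    // lui at, hi  -> at = hi << 16
      uint32_t lo = imm32 & kImm16Mask;                // ori at, at, lo
      assert(((hi << 16) | lo) == imm32);              // jr/jalr at then reaches the label
      return 0;
    }
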
 
 
@@ -1759,6 +2266,29 @@
 }
 
 
+void MacroAssembler::Drop(int count,
+                          Condition cond,
+                          Register reg,
+                          const Operand& op) {
+  if (count <= 0) {
+    return;
+  }
+
+  Label skip;
+
+  if (cond != al) {
+    Branch(&skip, NegateCondition(cond), reg, op);
+  }
+
+  addiu(sp, sp, count * kPointerSize);
+
+  if (cond != al) {
+    bind(&skip);
+  }
+}
+
+
+
 void MacroAssembler::Swap(Register reg1,
                           Register reg2,
                           Register scratch) {
@@ -1779,10 +2309,9 @@
 }
 
 
-void MacroAssembler::Move(Register dst, Register src) {
-  if (!dst.is(src)) {
-    mov(dst, src);
-  }
+void MacroAssembler::Push(Handle<Object> handle) {
+  li(at, Operand(handle));
+  push(at);
 }
 
 
@@ -1800,12 +2329,18 @@
 
 
 // ---------------------------------------------------------------------------
-// Exception handling
+// Exception handling.
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+
   // The return address is passed in register ra.
   if (try_location == IN_JAVASCRIPT) {
     if (type == TRY_CATCH_HANDLER) {
@@ -1813,19 +2348,16 @@
     } else {
       li(t0, Operand(StackHandler::TRY_FINALLY));
     }
-    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
-           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
-           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
-           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
     // Save the current handler as the next handler.
-    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
     lw(t1, MemOperand(t2));
 
     addiu(sp, sp, -StackHandlerConstants::kSize);
-    sw(ra, MemOperand(sp, 12));
-    sw(fp, MemOperand(sp, 8));
-    sw(t0, MemOperand(sp, 4));
-    sw(t1, MemOperand(sp, 0));
+    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
+    sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
+    sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
+    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
 
     // Link this handler as the new current one.
     sw(sp, MemOperand(t2));
@@ -1833,25 +2365,23 @@
   } else {
     // Must preserve a0-a3, and s0 (argv).
     ASSERT(try_location == IN_JS_ENTRY);
-    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
-           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
-           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
-           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-
     // The frame pointer does not point to a JS frame so we save NULL
     // for fp. We expect the code throwing an exception to check fp
     // before dereferencing it to restore the context.
     li(t0, Operand(StackHandler::ENTRY));
 
     // Save the current handler as the next handler.
-    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
     lw(t1, MemOperand(t2));
 
+    ASSERT(Smi::FromInt(0) == 0);  // Used for no context.
+
     addiu(sp, sp, -StackHandlerConstants::kSize);
-    sw(ra, MemOperand(sp, 12));
-    sw(zero_reg, MemOperand(sp, 8));
-    sw(t0, MemOperand(sp, 4));
-    sw(t1, MemOperand(sp, 0));
+    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
+    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
+    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
+    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
 
     // Link this handler as the new current one.
     sw(sp, MemOperand(t2));
@@ -1860,14 +2390,164 @@
 
 
 void MacroAssembler::PopTryHandler() {
-  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   pop(a1);
   Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
-  li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   sw(a1, MemOperand(at));
 }
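
The STATIC_ASSERTs above pin down the new five-word handler frame (the context slot is the addition in this change). With kPointerSize == 4, the frame that PushTryHandler builds and that PopTryHandler/Throw unwind can be pictured as the struct below, lowest address first; sp points at the 'next' field once the handler is linked in:

    #include <stdint.h>

    struct StackHandlerFrame {
      uint32_t next;     // sp + 0  : previous handler (from Isolate::kHandlerAddress)
      uint32_t state;    // sp + 4  : TRY_CATCH / TRY_FINALLY / ENTRY
      uint32_t context;  // sp + 8  : cp, or 0 for JS entry frames
      uint32_t fp;       // sp + 12 : frame pointer, or 0 for JS entry frames
      uint32_t pc;       // sp + 16 : ra, the return address
    };
    // sizeof(StackHandlerFrame) == 20 == StackHandlerConstants::kSize (5 words).
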
 
 
+void MacroAssembler::Throw(Register value) {
+  // v0 is expected to hold the exception.
+  Move(v0, value);
+
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
+  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
+                                   isolate())));
+  lw(sp, MemOperand(a3));
+
+  // Restore the next handler.
+  pop(a2);
+  sw(a2, MemOperand(a3));
+
+  // Restore context and frame pointer, discard state (a3).
+  MultiPop(a3.bit() | cp.bit() | fp.bit());
+
+  // If the handler is a JS frame, restore the context to the frame.
+  // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
+  // of them.
+  Label done;
+  Branch(&done, eq, fp, Operand(zero_reg));
+  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  bind(&done);
+
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on mips,
+    // saved to the ra register. Then adjusted for instruction count.
+    bal(&find_ra);  // bal exposes branch-delay.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
+}
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+                                      Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+
+  // v0 is expected to hold the exception.
+  Move(v0, value);
+
+  // Drop sp to the top stack handler.
+  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  lw(sp, MemOperand(a3));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  lw(a2, MemOperand(sp, kStateOffset));
+  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  lw(sp, MemOperand(sp, kNextOffset));
+  jmp(&loop);
+  bind(&done);
+
+  // Set the top handler address to the next handler past the current ENTRY handler.
+  pop(a2);
+  sw(a2, MemOperand(a3));
+
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(
+           Isolate::kExternalCaughtExceptionAddress, isolate());
+    li(a0, Operand(false, RelocInfo::NONE));
+    li(a2, Operand(external_caught));
+    sw(a0, MemOperand(a2));
+
+    // Set pending exception and v0 to out of memory exception.
+    Failure* out_of_memory = Failure::OutOfMemoryException();
+    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+    li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                        isolate())));
+    sw(v0, MemOperand(a2));
+  }
+
+  // Stack layout at this point. See also StackHandlerConstants.
+  // sp ->   state (ENTRY)
+  //         cp
+  //         fp
+  //         ra
+
+  // Restore context and frame pointer, discard state (a2).
+  MultiPop(a2.bit() | cp.bit() | fp.bit());
+
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on mips,
+    // saved to the ra register. Then adjusted for instruction count.
+    bal(&find_ra);  // bal exposes branch-delay slot.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
+}
+
+
 void MacroAssembler::AllocateInNewSpace(int object_size,
                                         Register result,
                                         Register scratch1,
@@ -1875,7 +2555,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       li(result, 0x7091);
       li(scratch1, 0x7191);
@@ -1923,7 +2603,7 @@
     lw(result, MemOperand(topaddr));
     lw(t9, MemOperand(topaddr, kPointerSize));
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. t9 is used
       // immediately below so this use of t9 does not cause difference with
       // respect to register content between debug and release mode.
@@ -1954,7 +2634,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       li(result, 0x7091);
       li(scratch1, 0x7191);
@@ -1992,7 +2672,7 @@
     lw(result, MemOperand(topaddr));
     lw(t9, MemOperand(topaddr, kPointerSize));
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. t9 is used
       // immediately below so this use of t9 does not cause difference with
       // respect to register content between debug and release mode.
@@ -2015,7 +2695,7 @@
   Branch(gc_required, Ugreater, scratch2, Operand(t9));
 
   // Update allocation top. result temporarily holds the new top.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     And(t9, scratch2, Operand(kObjectAlignmentMask));
     Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
   }
@@ -2147,6 +2827,46 @@
 }
 
 
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kSlicedStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kSlicedAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
 // Allocates a heap number or jumps to the label if the young space is full and
 // a scavenge is needed.
 void MacroAssembler::AllocateHeapNumber(Register result,
@@ -2174,8 +2894,8 @@
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* gc_required) {
-  LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-  AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
   sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
 }
 
@@ -2206,12 +2926,79 @@
 }
 
 
+void MacroAssembler::CopyBytes(Register src,
+                               Register dst,
+                               Register length,
+                               Register scratch) {
+  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+  // Align src before copying in word size chunks.
+  bind(&align_loop);
+  Branch(&done, eq, length, Operand(zero_reg));
+  bind(&align_loop_1);
+  And(scratch, src, kPointerSize - 1);
+  Branch(&word_loop, eq, scratch, Operand(zero_reg));
+  lbu(scratch, MemOperand(src));
+  Addu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Addu(dst, dst, 1);
+  Subu(length, length, Operand(1));
+  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+
+  // Copy bytes in word size chunks.
+  bind(&word_loop);
+  if (emit_debug_code()) {
+    And(scratch, src, kPointerSize - 1);
+    Assert(eq, "Expecting alignment for CopyBytes",
+        scratch, Operand(zero_reg));
+  }
+  Branch(&byte_loop, lt, length, Operand(kPointerSize));
+  lw(scratch, MemOperand(src));
+  Addu(src, src, kPointerSize);
+
+  // TODO(kalmard): check if this can be optimized to use sw in most cases.
+  // Can't use unaligned access - copy byte by byte.
+  sb(scratch, MemOperand(dst, 0));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 1));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 2));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 3));
+  Addu(dst, dst, 4);
+
+  Subu(length, length, Operand(kPointerSize));
+  Branch(&word_loop);
+
+  // Copy the last bytes if any left.
+  bind(&byte_loop);
+  Branch(&done, eq, length, Operand(zero_reg));
+  bind(&byte_loop_1);
+  lbu(scratch, MemOperand(src));
+  Addu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Addu(dst, dst, 1);
+  Subu(length, length, Operand(1));
+  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+  bind(&done);
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+                                       Register scratch,
+                                       Label* fail) {
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2220,12 +3007,27 @@
 }
 
 
+void MacroAssembler::DispatchMap(Register obj,
+                                 Register scratch,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+  bind(&fail);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Heap::RootListIndex index,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2234,8 +3036,74 @@
 }
 
 
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+  CpuFeatures::Scope scope(FPU);
+  if (IsMipsSoftFloatABI) {
+    Move(dst, v0, v1);
+  } else {
+    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
+    Move(f12, dreg);
+  } else {
+    Move(a0, a1, dreg);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+                                             DoubleRegister dreg2) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
+    if (dreg2.is(f12)) {
+      ASSERT(!dreg1.is(f14));
+      Move(f14, dreg2);
+      Move(f12, dreg1);
+    } else {
+      Move(f12, dreg1);
+      Move(f14, dreg2);
+    }
+  } else {
+    Move(a0, a1, dreg1);
+    Move(a2, a3, dreg2);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+                                             Register reg) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
+    Move(f12, dreg);
+    Move(a2, reg);
+  } else {
+    Move(a2, reg);
+    Move(a0, a1, dreg);
+  }
+}
+
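Under the o32 ABI the hard-float path above passes the first two double arguments in f12 and f14, while the soft-float path splits each double across a GPR pair (a0/a1, a2/a3) via Move(dst_low, dst_high, src). A small sketch of that split, assuming little-endian MIPS so the low word comes first; the helper name is hypothetical.

#include <cstdint>
#include <cstring>

// Sketch only: split a double into the two 32-bit words a soft-float call
// places in a GPR pair (low word -> a0/a2, high word -> a1/a3).
void SplitDoubleForSoftFloat(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  *lo = static_cast<uint32_t>(bits);        // Low 32 bits of the mantissa.
  *hi = static_cast<uint32_t>(bits >> 32);  // High 32 bits (sign/exponent).
}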
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be t1 to
+  // follow the calling convention which requires the call type to be
+  // in t1.
+  ASSERT(dst.is(t1));
+  if (call_kind == CALL_AS_FUNCTION) {
+    li(dst, Operand(Smi::FromInt(1)));
+  } else {
+    li(dst, Operand(Smi::FromInt(0)));
+  }
+}
+
+
 // -----------------------------------------------------------------------------
-// JavaScript invokes
+// JavaScript invokes.
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -2243,7 +3111,8 @@
                                     Register code_reg,
                                     Label* done,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -2278,13 +3147,11 @@
         li(a2, Operand(expected.immediate()));
       }
     }
+  } else if (actual.is_immediate()) {
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+    li(a0, Operand(actual.immediate()));
   } else {
-    if (actual.is_immediate()) {
-      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
-      li(a0, Operand(actual.immediate()));
-    } else {
-      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
-    }
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
   }
 
   if (!definitely_matches) {
@@ -2296,10 +3163,13 @@
     Handle<Code> adaptor =
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (flag == CALL_FUNCTION) {
-      Call(adaptor, RelocInfo::CODE_TARGET);
-      if (post_call_generator != NULL) post_call_generator->Generate();
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      SetCallKind(t1, call_kind);
+      Call(adaptor);
+      call_wrapper.AfterCall();
       jmp(done);
     } else {
+      SetCallKind(t1, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&regular_invoke);
@@ -2311,15 +3181,18 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                PostCallGenerator* post_call_generator) {
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
-                 post_call_generator);
+                 call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
     Call(code);
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(t1, call_kind);
     Jump(code);
   }
   // Continue here if InvokePrologue does handle the invocation due to
@@ -2332,13 +3205,17 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                CallKind call_kind) {
   Label done;
 
-  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+                 NullCallWrapper(), call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
     Call(code, rmode);
   } else {
+    SetCallKind(t1, call_kind);
     Jump(code, rmode);
   }
   // Continue here if InvokePrologue does handle the invocation due to
@@ -2350,7 +3227,8 @@
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -2365,13 +3243,14 @@
   lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
@@ -2384,7 +3263,7 @@
   if (V8::UseCrankshaft()) {
     UNIMPLEMENTED_MIPS();
   } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
   }
 }
 
@@ -2402,8 +3281,8 @@
                                             Register scratch,
                                             Label* fail) {
   lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
-  Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
+  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
 }
 
 
@@ -2478,21 +3357,147 @@
 
 
 // -----------------------------------------------------------------------------
-// Runtime calls
+// Runtime calls.
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                               Register r1, const Operand& r2) {
   ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
+}
+
+
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
+                                         Register r1, const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
+      kNoASTId, cond, r1, r2);
+  return result;
 }
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
+                                             Condition cond,
+                                             Register r1,
+                                             const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+  return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ExternalReference function, int stack_space) {
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address();
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(),
+      next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(),
+      next_address);
+
+  // Allocate HandleScope in callee-save registers.
+  li(s3, Operand(next_address));
+  lw(s0, MemOperand(s3, kNextOffset));
+  lw(s1, MemOperand(s3, kLimitOffset));
+  lw(s2, MemOperand(s3, kLevelOffset));
+  Addu(s2, s2, Operand(1));
+  sw(s2, MemOperand(s3, kLevelOffset));
+
+  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
+  // (4 bytes) will be placed. This is also built into the Simulator.
+  // Set up the pointer to the returned value (a0). It was allocated in
+  // EnterExitFrame.
+  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+
+  // Native call returns to the DirectCEntry stub which redirects to the
+  // return address pushed on stack (could have moved after GC).
+  // DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub;
+  stub.GenerateCall(this, function);
+
+  // As mentioned above, on MIPS a pointer is returned - we need to dereference
+  // it to get the actual return value (which is also a pointer).
+  lw(v0, MemOperand(v0));
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+
+  // If result is non-zero, dereference it to get the result value;
+  // otherwise set it to undefined.
+  Label skip;
+  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+  Branch(&skip, eq, v0, Operand(zero_reg));
+  lw(a0, MemOperand(v0));
+  bind(&skip);
+  mov(v0, a0);
+
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  sw(s0, MemOperand(s3, kNextOffset));
+  if (emit_debug_code()) {
+    lw(a1, MemOperand(s3, kLevelOffset));
+    Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+  }
+  Subu(s2, s2, Operand(1));
+  sw(s2, MemOperand(s3, kLevelOffset));
+  lw(at, MemOperand(s3, kLimitOffset));
+  Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+  // Check if the function scheduled an exception.
+  bind(&leave_exit_frame);
+  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+  lw(t1, MemOperand(at));
+  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+  li(s0, Operand(stack_space));
+  LeaveExitFrame(false, s0);
+  Ret();
+
+  bind(&promote_scheduled_exception);
+  MaybeObject* result = TryTailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  bind(&delete_allocated_handles);
+  sw(s1, MemOperand(s3, kLimitOffset));
+  mov(s0, v0);
+  mov(a0, v0);
+  PrepareCallCFunction(1, s1);
+  li(a0, Operand(ExternalReference::isolate_address()));
+  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+      1);
+  mov(v0, s0);
+  jmp(&leave_exit_frame);
+
+  return result;
+}
+
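The handle-scope bookkeeping around the API call above amounts to: save next and limit, bump level, make the call, restore next, drop level, and delete extensions only if limit moved. A rough C++ sketch under those assumptions; the struct and names are stand-ins rather than V8's actual HandleScope types.

// Sketch only: the save/restore pattern generated above.
struct HandleScopeDataSketch {
  void** next;
  void** limit;
  int level;
};

void* CallApiSketch(HandleScopeDataSketch* data, void* (*api_call)()) {
  void** saved_next = data->next;
  void** saved_limit = data->limit;
  data->level++;                      // Enter the scope.
  void* result = api_call();          // The actual API function.
  data->next = saved_next;            // Drop handles created by the call.
  data->level--;                      // Leave the scope.
  if (data->limit != saved_limit) {   // Extensions were allocated:
    data->limit = saved_limit;        // restore the limit and delete them
                                      // (DeleteExtensions in the runtime).
  }
  return result;
}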
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     addiu(sp, sp, num_arguments * kPointerSize);
@@ -2554,7 +3559,6 @@
 }
 
 
-
 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                             FPURegister value,
                                             Register scratch1) {
@@ -2564,6 +3568,74 @@
 }
 
 
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  ASSERT(!dst.is(overflow_dst));
+  ASSERT(!dst.is(scratch));
+  ASSERT(!overflow_dst.is(scratch));
+  ASSERT(!overflow_dst.is(left));
+  ASSERT(!overflow_dst.is(right));
+  ASSERT(!left.is(right));
+
+  if (dst.is(left)) {
+    mov(scratch, left);  // Preserve left.
+    addu(dst, left, right);  // Left is overwritten.
+    xor_(scratch, dst, scratch);  // Original left.
+    xor_(overflow_dst, dst, right);
+    and_(overflow_dst, overflow_dst, scratch);
+  } else if (dst.is(right)) {
+    mov(scratch, right);  // Preserve right.
+    addu(dst, left, right);  // Right is overwritten.
+    xor_(scratch, dst, scratch);  // Original right.
+    xor_(overflow_dst, dst, left);
+    and_(overflow_dst, overflow_dst, scratch);
+  } else {
+    addu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, dst, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  ASSERT(!dst.is(overflow_dst));
+  ASSERT(!dst.is(scratch));
+  ASSERT(!overflow_dst.is(scratch));
+  ASSERT(!overflow_dst.is(left));
+  ASSERT(!overflow_dst.is(right));
+  ASSERT(!left.is(right));
+  ASSERT(!scratch.is(left));
+  ASSERT(!scratch.is(right));
+
+  if (dst.is(left)) {
+    mov(scratch, left);  // Preserve left.
+    subu(dst, left, right);  // Left is overwritten.
+    xor_(overflow_dst, dst, scratch);  // scratch is original left.
+    xor_(scratch, scratch, right);  // Original left XOR right.
+    and_(overflow_dst, scratch, overflow_dst);
+  } else if (dst.is(right)) {
+    mov(scratch, right);  // Preserve right.
+    subu(dst, left, right);  // Right is overwritten.
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, scratch);  // Original right.
+    and_(overflow_dst, scratch, overflow_dst);
+  } else {
+    subu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
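The xor_ sequences above implement the usual sign-bit test: addition overflows exactly when the result's sign differs from the signs of both operands, and subtraction overflows exactly when the operands' signs differ and the result's sign differs from the left operand's. A small C++ sketch of the same test (overflow shows up in the sign bit, just as overflow_dst ends up negative above); the function names are illustrative.

#include <cstdint>

// Sketch only: the sign-bit overflow tests computed into overflow_dst above.
bool AddOverflows(int32_t left, int32_t right, int32_t* dst) {
  int32_t sum = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  *dst = sum;
  return ((sum ^ left) & (sum ^ right)) < 0;   // Result sign differs from both.
}

bool SubOverflows(int32_t left, int32_t right, int32_t* dst) {
  int32_t diff = static_cast<int32_t>(
      static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
  *dst = diff;
  return ((diff ^ left) & (left ^ right)) < 0;  // Signs differ, result flips.
}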
+
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // All parameters are on the stack. v0 has the return value after call.
@@ -2624,6 +3696,17 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, num_arguments);
+  return TryJumpToExternalReference(ext);
+}
+
+
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -2640,15 +3723,26 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& builtin) {
+  li(a1, Operand(builtin));
+  CEntryStub stub(1);
+  return TryTailCallStub(&stub);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeJSFlags flags,
-                                   PostCallGenerator* post_call_generator) {
+                                   InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
   GetBuiltinEntry(t9, id);
-  if (flags == CALL_JS) {
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(t9));
+    SetCallKind(t1, CALL_AS_METHOD);
     Call(t9);
-    if (post_call_generator != NULL) post_call_generator->Generate();
+    call_wrapper.AfterCall();
   } else {
-    ASSERT(flags == JUMP_JS);
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(t1, CALL_AS_METHOD);
     Jump(t9);
   }
 }
@@ -2708,18 +3802,18 @@
 
 
 // -----------------------------------------------------------------------------
-// Debugging
+// Debugging.
 
 void MacroAssembler::Assert(Condition cc, const char* msg,
                             Register rs, Operand rt) {
-  if (FLAG_debug_code)
+  if (emit_debug_code())
     Check(cc, msg, rs, rt);
 }
 
 
 void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                           Heap::RootListIndex index) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     LoadRoot(at, index);
     Check(eq, "Register did not match expected root", reg, Operand(at));
   }
@@ -2727,18 +3821,20 @@
 
 
 void MacroAssembler::AssertFastElements(Register elements) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     ASSERT(!elements.is(at));
     Label ok;
-    Push(elements);
+    push(elements);
     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
     Branch(&ok, eq, elements, Operand(at));
+    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
     LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
     Branch(&ok, eq, elements, Operand(at));
     Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
-    Pop(elements);
+    pop(elements);
   }
 }
 
@@ -2748,7 +3844,7 @@
   Label L;
   Branch(&L, cc, rs, rt);
   Abort(msg);
-  // will not return here
+  // Will not return here.
   bind(&L);
 }
 
@@ -2774,11 +3870,11 @@
   AllowStubCallsScope allow_scope(this, true);
 
   li(a0, Operand(p0));
-  Push(a0);
+  push(a0);
   li(a0, Operand(Smi::FromInt(p1 - p0)));
-  Push(a0);
+  push(a0);
   CallRuntime(Runtime::kAbort, 2);
-  // will not return here
+  // Will not return here.
   if (is_trampoline_pool_blocked()) {
     // If the calling code cares about the exact number of
     // instructions generated, we insert padding here to keep the size
@@ -2798,18 +3894,15 @@
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
-    lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
-    // Load the function context (which is the incoming, outer context).
-    lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     for (int i = 1; i < context_chain_length; i++) {
-      lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
-      lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     }
-    // The context may be an intermediate context, not a function context.
-    lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // Slot is in the current function context.
-    // The context may be an intermediate context, not a function context.
-    lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+    Move(dst, cp);
   }
 }
 
@@ -2830,9 +3923,9 @@
                                                   Register scratch) {
   // Load the initial map. The global functions all have initial maps.
   lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
     Branch(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -2862,95 +3955,93 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(Register hold_argc,
-                                    Register hold_argv,
-                                    Register hold_function,
-                                    bool save_doubles) {
-  // a0 is argc.
-  sll(t8, a0, kPointerSizeLog2);
-  addu(hold_argv, sp, t8);
-  addiu(hold_argv, hold_argv, -kPointerSize);
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+                                    int stack_space) {
+  // Setup the frame structure on the stack.
+  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
 
-  // Compute callee's stack pointer before making changes and save it as
-  // t9 register so that it is restored as sp register on exit, thereby
-  // popping the args.
-  // t9 = sp + kPointerSize * #args
-  addu(t9, sp, t8);
-
-  // Compute the argv pointer and keep it in a callee-saved register.
-  // This only seems to be needed for crankshaft and may cause problems
-  // so it's disabled for now.
-  // Subu(s6, t9, Operand(kPointerSize));
-
-  // Align the stack at this point.
-  AlignStack(0);
+  // This is how the stack will look:
+  // fp + 2 (==kCallerSPDisplacement) - old stack's end
+  // [fp + 1 (==kCallerPCOffset)] - saved old ra
+  // [fp + 0 (==kCallerFPOffset)] - saved old fp
+  // [fp - 1 (==kSPOffset)] - sp of the called function
+  // [fp - 2 (==kCodeOffset)] - CodeObject
+  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+  //   new stack (will contain saved ra)
 
   // Save registers.
-  addiu(sp, sp, -12);
-  sw(t9, MemOperand(sp, 8));
-  sw(ra, MemOperand(sp, 4));
-  sw(fp, MemOperand(sp, 0));
-  mov(fp, sp);  // Setup new frame pointer.
+  addiu(sp, sp, -4 * kPointerSize);
+  sw(ra, MemOperand(sp, 3 * kPointerSize));
+  sw(fp, MemOperand(sp, 2 * kPointerSize));
+  addiu(fp, sp, 2 * kPointerSize);  // Setup new frame pointer.
 
-  li(t8, Operand(CodeObject()));
-  Push(t8);  // Accessed from ExitFrame::code_slot.
+  if (emit_debug_code()) {
+    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  }
+
+  li(t8, Operand(CodeObject()));  // Accessed from ExitFrame::code_slot.
+  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
-  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   sw(fp, MemOperand(t8));
-  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   sw(cp, MemOperand(t8));
 
-  // Setup argc and the builtin function in callee-saved registers.
-  mov(hold_argc, a0);
-  mov(hold_function, a1);
-
-  // Optionally save all double registers.
+  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (save_doubles) {
-#ifdef DEBUG
-    int frame_alignment = ActivationFrameAlignment();
-#endif
-    // The stack alignment code above made sp unaligned, so add space for one
-    // more double register and use aligned addresses.
+    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
     ASSERT(kDoubleSize == frame_alignment);
-    // Mark the frame as containing doubles by pushing a non-valid return
-    // address, i.e. 0.
-    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
-    push(zero_reg);  // Marker and alignment word.
-    int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
+    if (frame_alignment > 0) {
+      ASSERT(IsPowerOf2(frame_alignment));
+      And(sp, sp, Operand(-frame_alignment));  // Align stack.
+    }
+    int space = FPURegister::kNumRegisters * kDoubleSize;
     Subu(sp, sp, Operand(space));
     // Remember: we only need to save every 2nd double FPU value.
     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
       FPURegister reg = FPURegister::from_code(i);
-      sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
+      sdc1(reg, MemOperand(sp, i * kDoubleSize));
     }
-    // Note that f0 will be accessible at fp - 2*kPointerSize -
-    // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
-    // alignment word were pushed after the fp.
   }
+
+  // Reserve space for the return address, stack space and an optional slot
+  // (used by the DirectCEntryStub to hold the return value if a struct is
+  // returned) and align the frame preparing for calling the runtime function.
+  ASSERT(stack_space >= 0);
+  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+  if (frame_alignment > 0) {
+    ASSERT(IsPowerOf2(frame_alignment));
+    And(sp, sp, Operand(-frame_alignment));  // Align stack.
+  }
+
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  addiu(at, sp, kPointerSize);
+  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
 
 
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+                                    Register argument_count) {
   // Optionally restore all double registers.
   if (save_doubles) {
-    // TODO(regis): Use vldrm instruction.
     // Remember: we only need to restore every 2nd double FPU value.
+    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
       FPURegister reg = FPURegister::from_code(i);
-      // Register f30-f31 is just below the marker.
-      const int offset = ExitFrameConstants::kMarkerOffset;
-      ldc1(reg, MemOperand(fp,
-          (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
+      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
     }
   }
 
   // Clear top frame.
-  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   sw(zero_reg, MemOperand(t8));
 
   // Restore current context from top and clear it in debug mode.
-  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   lw(cp, MemOperand(t8));
 #ifdef DEBUG
   sw(a3, MemOperand(t8));
@@ -2958,11 +4049,13 @@
 
   // Pop the arguments, restore registers, and return.
   mov(sp, fp);  // Respect ABI stack constraint.
-  lw(fp, MemOperand(sp, 0));
-  lw(ra, MemOperand(sp, 4));
-  lw(sp, MemOperand(sp, 8));
-  jr(ra);
-  nop();  // Branch delay slot nop.
+  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+  addiu(sp, sp, 8);
+  if (argument_count.is_valid()) {
+    sll(t8, argument_count, kPointerSizeLog2);
+    addu(sp, sp, t8);
+  }
 }
 
 
@@ -2997,36 +4090,22 @@
 }
 
 
-void MacroAssembler::AlignStack(int offset) {
-  // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
-  //     and an offset of 1 aligns to 4 modulo 8 bytes.
-#if defined(V8_HOST_ARCH_MIPS)
-  // Running on the real platform. Use the alignment as mandated by the local
-  // environment.
-  // Note: This will break if we ever start generating snapshots on one MIPS
-  // platform for another MIPS platform with a different alignment.
-  int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else  // defined(V8_HOST_ARCH_MIPS)
-  // If we are using the simulator then we should always align to the expected
-  // alignment. As the simulator is used to generate snapshots we do not know
-  // if the target platform will need alignment, so we will always align at
-  // this point here.
-  int activation_frame_alignment = 2 * kPointerSize;
-#endif  // defined(V8_HOST_ARCH_MIPS)
-  if (activation_frame_alignment != kPointerSize) {
-    // This code needs to be made more general if this assert doesn't hold.
-    ASSERT(activation_frame_alignment == 2 * kPointerSize);
-    if (offset == 0) {
-      andi(t8, sp, activation_frame_alignment - 1);
-      Push(zero_reg, eq, t8, zero_reg);
-    } else {
-      andi(t8, sp, activation_frame_alignment - 1);
-      addiu(t8, t8, -4);
-      Push(zero_reg, eq, t8, zero_reg);
-    }
-  }
-}
+void MacroAssembler::AssertStackIsAligned() {
+  if (emit_debug_code()) {
+    const int frame_alignment = ActivationFrameAlignment();
+    const int frame_alignment_mask = frame_alignment - 1;
 
+    if (frame_alignment > kPointerSize) {
+      Label alignment_as_expected;
+      ASSERT(IsPowerOf2(frame_alignment));
+      andi(at, sp, frame_alignment_mask);
+      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+      // Don't use Check here, as it will call Runtime_Abort re-entering here.
+      stop("Unexpected stack alignment");
+      bind(&alignment_as_expected);
+    }
+  }
+}
 
 
 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
@@ -3078,6 +4157,18 @@
 }
 
 
+void MacroAssembler::AbortIfNotString(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  And(t0, object, Operand(kSmiTagMask));
+  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
+  push(object);
+  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+  pop(object);
+}
+
+
 void MacroAssembler::AbortIfNotRootValue(Register src,
                                          Heap::RootListIndex root_value_index,
                                          const char* message) {
@@ -3169,19 +4260,14 @@
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Up to four simple arguments are passed in registers a0..a3.
   // Those four arguments must have reserved argument slots on the stack for
   // mips, even though those argument slots are not normally used.
   // Remaining arguments are pushed on the stack, above (higher address than)
   // the argument slots.
-  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
   int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                  0 : num_arguments - kRegisterPassedArguments) +
-                               (StandardFrameConstants::kCArgsSlotsSize /
-                               kPointerSize);
+                                kCArgSlotCount;
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
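The word count above always reserves kCArgSlotCount argument slots, even when every argument travels in a0..a3. A worked sketch of the same computation, assuming the o32 values of four register-passed arguments and four reserved slots.

// Sketch only: stack words reserved before a C call (o32 assumptions).
int StackWordsForCCall(int num_arguments) {
  const int kRegisterPassedArguments = 4;  // a0..a3.
  const int kCArgSlotCount = 4;            // Slots reserved even for a0..a3.
  int on_stack = (num_arguments <= kRegisterPassedArguments)
                     ? 0
                     : num_arguments - kRegisterPassedArguments;
  return on_stack + kCArgSlotCount;        // e.g. 6 arguments -> 2 + 4 = 6.
}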
@@ -3198,7 +4284,7 @@
 
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, at, num_arguments);
+  CallCFunctionHelper(no_reg, function, t8, num_arguments);
 }
 
 
@@ -3216,21 +4302,6 @@
                                          ExternalReference function_reference,
                                          Register scratch,
                                          int num_arguments) {
-  // Push Isolate address as the last argument.
-  if (num_arguments < kRegisterPassedArguments) {
-    Register arg_to_reg[] = {a0, a1, a2, a3};
-    Register r = arg_to_reg[num_arguments];
-    li(r, Operand(ExternalReference::isolate_address()));
-  } else {
-    int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
-                                 (StandardFrameConstants::kCArgsSlotsSize /
-                                  kPointerSize);
-    // Push Isolate address on the stack after the arguments.
-    li(scratch, Operand(ExternalReference::isolate_address()));
-    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
-  }
-  num_arguments += 1;
-
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -3257,23 +4328,20 @@
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
-  if (!function.is(t9)) {
-    mov(t9, function);
-    function = t9;
-  }
 
   if (function.is(no_reg)) {
-    li(t9, Operand(function_reference));
+    function = t9;
+    li(function, Operand(function_reference));
+  } else if (!function.is(t9)) {
+    mov(t9, function);
     function = t9;
   }
 
   Call(function);
 
-  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
   int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                 0 : num_arguments - kRegisterPassedArguments) +
-                               (StandardFrameConstants::kCArgsSlotsSize /
-                               kPointerSize);
+                               kCArgSlotCount;
 
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -3286,12 +4354,22 @@
 #undef BRANCH_ARGS_CHECK
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  lw(descriptors,
+     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi);
+  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+  bind(&not_smi);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(address, size_ + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
@@ -3309,8 +4387,8 @@
 }
 
 
-void CodePatcher::Emit(Instr x) {
-  masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+  masm()->emit(instr);
 }
 
 
@@ -3319,7 +4397,26 @@
 }
 
 
-#endif  // ENABLE_DEBUGGER_SUPPORT
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  ASSERT(Assembler::IsBranch(instr));
+  uint32_t opcode = Assembler::GetOpcodeField(instr);
+  // Currently only the 'eq' and 'ne' cond values are supported and the simple
+  // branch instructions (with opcode being the branch type).
+  // There are some special cases (see Assembler::IsBranch()) so extending this
+  // would be tricky.
+  ASSERT(opcode == BEQ ||
+         opcode == BNE ||
+         opcode == BLEZ ||
+         opcode == BGTZ ||
+         opcode == BEQL ||
+         opcode == BNEL ||
+         opcode == BLEZL ||
+         opcode == BGTZL);
+  opcode = (cond == eq) ? BEQ : BNE;
+  instr = (instr & ~kOpcodeMask) | opcode;
+  masm_.emit(instr);
+}
 
 
 } }  // namespace v8::internal
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 7ff9e17..c968ffc 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,13 +30,13 @@
 
 #include "assembler.h"
 #include "mips/assembler-mips.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declaration.
 class JumpTarget;
-class PostCallGenerator;
 
 // Reserved Register Usage Summary.
 //
@@ -53,17 +53,12 @@
 // Registers aliases
 // cp is assumed to be a callee saved register.
 const Register roots = s6;  // Roots array pointer.
-const Register cp = s7;     // JavaScript context pointer
-const Register fp = s8_fp;  // Alias fp
-// Register used for condition evaluation.
+const Register cp = s7;     // JavaScript context pointer.
+const Register fp = s8_fp;  // Alias for fp.
+// Registers used for condition evaluation.
 const Register condReg1 = s4;
 const Register condReg2 = s5;
 
-enum InvokeJSFlags {
-  CALL_JS,
-  JUMP_JS
-};
-
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -98,46 +93,17 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
-// Arguments macros
+  // Arguments macros.
 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
 #define COND_ARGS cond, r1, r2
 
-// ** Prototypes
-
-// * Prototypes for functions with no target (eg Ret()).
-#define DECLARE_NOTARGET_PROTOTYPE(Name) \
-  void Name(BranchDelaySlot bd = PROTECT); \
-  void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
-  inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
-    Name(COND_ARGS, bd); \
-  }
-
-// * Prototypes for functions with a target.
-
-// Cases when relocation may be needed.
-#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
-  void Name(target_type target, \
-            RelocInfo::Mode rmode, \
-            BranchDelaySlot bd = PROTECT); \
-  inline void Name(BranchDelaySlot bd, \
-                   target_type target, \
-                   RelocInfo::Mode rmode) { \
-    Name(target, rmode, bd); \
-  } \
-  void Name(target_type target, \
-            RelocInfo::Mode rmode, \
-            COND_TYPED_ARGS, \
-            BranchDelaySlot bd = PROTECT); \
-  inline void Name(BranchDelaySlot bd, \
-                   target_type target, \
-                   RelocInfo::Mode rmode, \
-                   COND_TYPED_ARGS) { \
-    Name(target, rmode, COND_ARGS, bd); \
-  }
-
-// Cases when relocation is not needed.
+  // Cases when relocation is not needed.
 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
   void Name(target_type target, BranchDelaySlot bd = PROTECT); \
   inline void Name(BranchDelaySlot bd, target_type target) { \
@@ -152,34 +118,44 @@
     Name(target, COND_ARGS, bd); \
   }
 
-// ** Target prototypes.
-
-#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
-  DECLARE_NORELOC_PROTOTYPE(Name, Register) \
-  DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
-  DECLARE_RELOC_PROTOTYPE(Name, byte*) \
-  DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
-
 #define DECLARE_BRANCH_PROTOTYPES(Name) \
   DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
   DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
 
+  DECLARE_BRANCH_PROTOTYPES(Branch)
+  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
 
-DECLARE_JUMP_CALL_PROTOTYPES(Jump)
-DECLARE_JUMP_CALL_PROTOTYPES(Call)
-
-DECLARE_BRANCH_PROTOTYPES(Branch)
-DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
-
-DECLARE_NOTARGET_PROTOTYPE(Ret)
-
+#undef DECLARE_BRANCH_PROTOTYPES
 #undef COND_TYPED_ARGS
 #undef COND_ARGS
-#undef DECLARE_NOTARGET_PROTOTYPE
-#undef DECLARE_NORELOC_PROTOTYPE
-#undef DECLARE_RELOC_PROTOTYPE
-#undef DECLARE_JUMP_CALL_PROTOTYPES
-#undef DECLARE_BRANCH_PROTOTYPES
+
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
+  void Jump(Register target, COND_ARGS);
+  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+  int CallSize(Register target, COND_ARGS);
+  void Call(Register target, COND_ARGS);
+  int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  int CallSize(Handle<Code> code,
+               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+               unsigned ast_id = kNoASTId,
+               COND_ARGS);
+  void Call(Handle<Code> code,
+            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            unsigned ast_id = kNoASTId,
+            COND_ARGS);
+  void Ret(COND_ARGS);
+  inline void Ret(BranchDelaySlot bd) {
+    Ret(al, zero_reg, Operand(zero_reg), bd);
+  }
+
+#undef COND_ARGS
 
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
@@ -198,9 +174,28 @@
   void Swap(Register reg1, Register reg2, Register scratch = no_reg);
 
   void Call(Label* target);
-  // May do nothing if the registers are identical.
-  void Move(Register dst, Register src);
 
+  inline void Move(Register dst, Register src) {
+    if (!dst.is(src)) {
+      mov(dst, src);
+    }
+  }
+
+  inline void Move(FPURegister dst, FPURegister src) {
+    if (!dst.is(src)) {
+      mov_d(dst, src);
+    }
+  }
+
+  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+    mfc1(dst_low, src);
+    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+  }
+
+  inline void Move(FPURegister dst, Register src_low, Register src_high) {
+    mtc1(src_low, dst);
+    mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+  }
 
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it used by v8, for example in
@@ -262,7 +257,7 @@
 
 
   // ---------------------------------------------------------------------------
-  // Inline caching support
+  // Inline caching support.
 
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
@@ -271,6 +266,17 @@
                               Register scratch,
                               Label* miss);
 
+  void GetNumberHash(Register reg0, Register scratch);
+
+  void LoadFromNumberDictionary(Label* miss,
+                                Register elements,
+                                Register key,
+                                Register result,
+                                Register reg0,
+                                Register reg1,
+                                Register reg2);
+
+
   inline void MarkCode(NopMarkerTypes type) {
     nop(type);
   }
@@ -306,7 +312,7 @@
 
 
   // ---------------------------------------------------------------------------
-  // Allocation support
+  // Allocation support.
 
   // Allocate an object in new space. The object_size is specified
   // either in bytes or in words if the allocation flag SIZE_IN_WORDS
@@ -357,6 +363,16 @@
                                Register scratch1,
                                Register scratch2,
                                Label* gc_required);
+  void AllocateTwoByteSlicedString(Register result,
+                                   Register length,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* gc_required);
+  void AllocateAsciiSlicedString(Register result,
+                                 Register length,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
 
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are clobbered also
@@ -373,7 +389,7 @@
                                    Label* gc_required);
 
   // ---------------------------------------------------------------------------
-  // Instruction macros
+  // Instruction macros.
 
 #define DEFINE_INSTRUCTION(instr)                                              \
   void instr(Register rd, Register rs, const Operand& rt);                     \
@@ -405,6 +421,7 @@
   DEFINE_INSTRUCTION(Or);
   DEFINE_INSTRUCTION(Xor);
   DEFINE_INSTRUCTION(Nor);
+  DEFINE_INSTRUCTION2(Neg);
 
   DEFINE_INSTRUCTION(Slt);
   DEFINE_INSTRUCTION(Sltu);
@@ -416,12 +433,12 @@
 #undef DEFINE_INSTRUCTION2
 
 
-  //------------Pseudo-instructions-------------
+  // ---------------------------------------------------------------------------
+  // Pseudo-instructions.
 
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
-
-  // load int32 in the rd register
+  // Load int32 in the rd register.
   void li(Register rd, Operand j, bool gen2instr = false);
   inline void li(Register rd, int32_t j, bool gen2instr = false) {
     li(rd, Operand(j), gen2instr);
@@ -430,112 +447,106 @@
     li(dst, Operand(value), gen2instr);
   }
 
-  // Exception-generating instructions and debugging support
-  void stop(const char* msg);
-
-
   // Push multiple registers on the stack.
   // Registers are saved in numerical order, with higher numbered registers
-  // saved in higher memory addresses
+  // saved in higher memory addresses.
   void MultiPush(RegList regs);
   void MultiPushReversed(RegList regs);
 
-  void Push(Register src) {
+  void MultiPushFPU(RegList regs);
+  void MultiPushReversedFPU(RegList regs);
+
+  // Lower case push() for compatibility with arch-independent code.
+  void push(Register src) {
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
 
-  // Push two registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
+  // Push a handle.
+  void Push(Handle<Object> handle);
+
+  // Push two registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2) {
     Subu(sp, sp, Operand(2 * kPointerSize));
     sw(src1, MemOperand(sp, 1 * kPointerSize));
     sw(src2, MemOperand(sp, 0 * kPointerSize));
   }
 
-  // Push three registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
-    Addu(sp, sp, Operand(3 * -kPointerSize));
+  // Push three registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3) {
+    Subu(sp, sp, Operand(3 * kPointerSize));
     sw(src1, MemOperand(sp, 2 * kPointerSize));
     sw(src2, MemOperand(sp, 1 * kPointerSize));
     sw(src3, MemOperand(sp, 0 * kPointerSize));
   }
 
-  // Push four registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2,
-            Register src3, Register src4, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
-    Addu(sp, sp, Operand(4 * -kPointerSize));
+  // Push four registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4) {
+    Subu(sp, sp, Operand(4 * kPointerSize));
     sw(src1, MemOperand(sp, 3 * kPointerSize));
     sw(src2, MemOperand(sp, 2 * kPointerSize));
     sw(src3, MemOperand(sp, 1 * kPointerSize));
     sw(src4, MemOperand(sp, 0 * kPointerSize));
   }
 
-  inline void push(Register src) { Push(src); }
-  inline void pop(Register src) { Pop(src); }
-
   void Push(Register src, Condition cond, Register tst1, Register tst2) {
-    // Since we don't have conditionnal execution we use a Branch.
+    // Since we don't have conditional execution we use a Branch.
     Branch(3, cond, tst1, Operand(tst2));
-    Addu(sp, sp, Operand(-kPointerSize));
+    Subu(sp, sp, Operand(kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
 
-
   // Pops multiple values from the stack and load them in the
   // registers specified in regs. Pop order is the opposite as in MultiPush.
   void MultiPop(RegList regs);
   void MultiPopReversed(RegList regs);
-  void Pop(Register dst) {
+
+  void MultiPopFPU(RegList regs);
+  void MultiPopReversedFPU(RegList regs);
+
+  // Lower case pop() for compatibility with arch-independent code.
+  void pop(Register dst) {
     lw(dst, MemOperand(sp, 0));
     Addu(sp, sp, Operand(kPointerSize));
   }
+
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2) {
+    ASSERT(!src1.is(src2));
+    lw(src2, MemOperand(sp, 0 * kPointerSize));
+    lw(src1, MemOperand(sp, 1 * kPointerSize));
+    Addu(sp, sp, 2 * kPointerSize);
+  }
+
   void Pop(uint32_t count = 1) {
     Addu(sp, sp, Operand(count * kPointerSize));
   }
 
-  // ---------------------------------------------------------------------------
-  // These functions are only used by crankshaft, so they are currently
-  // unimplemented.
-
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
-  void PushSafepointRegisters() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PopSafepointRegisters() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PushSafepointRegistersAndDoubles() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PopSafepointRegistersAndDoubles() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  static int SafepointRegisterStackIndex(int reg_code) {
-    UNIMPLEMENTED_MIPS();
-    return 0;
-  }
-
-  // ---------------------------------------------------------------------------
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  void PushSafepointRegistersAndDoubles();
+  void PopSafepointRegistersAndDoubles();
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
   // Convert unsigned word to double.
-  void Cvt_d_uw(FPURegister fd, FPURegister fs);
-  void Cvt_d_uw(FPURegister fd, Register rs);
+  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
 
   // Convert double to unsigned word.
-  void Trunc_uw_d(FPURegister fd, FPURegister fs);
-  void Trunc_uw_d(FPURegister fd, Register rs);
+  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
 
   // Convert the HeapNumber pointed to by source to a 32bits signed integer
   // dest. If the HeapNumber does not fit into a 32bits signed integer branch
@@ -548,8 +559,29 @@
                       FPURegister double_scratch,
                       Label *not_int32);
 
+  // Helper for EmitECMATruncate.
+  // This will truncate a floating-point value outside of the signed 32bit
+  // integer range to a 32bit signed integer.
+  // Expects the double value loaded in input_high and input_low.
+  // Exits with the answer in 'result'.
+  // Note that this code does not work for values in the 32bit range!
+  void EmitOutOfInt32RangeTruncate(Register result,
+                                   Register input_high,
+                                   Register input_low,
+                                   Register scratch);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+  // Exits with 'result' holding the answer and all other registers clobbered.
+  void EmitECMATruncate(Register result,
+                        FPURegister double_input,
+                        FPURegister single_scratch,
+                        Register scratch,
+                        Register scratch2,
+                        Register scratch3);
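For reference, ECMA-262 9.5 ToInt32, which EmitECMATruncate implements in generated code, truncates toward zero and then wraps modulo 2^32. A hedged host-side C++ sketch of those semantics (not the generated code path):

#include <cmath>
#include <cstdint>

// Sketch only: ToInt32 semantics (truncate toward zero, wrap modulo 2^32).
int32_t ToInt32Sketch(double value) {
  if (!std::isfinite(value)) return 0;      // NaN and +/-Infinity map to 0.
  double truncated = std::trunc(value);     // Round toward zero.
  double modulo = std::fmod(truncated, 4294967296.0);
  if (modulo < 0) modulo += 4294967296.0;   // Bring into [0, 2^32).
  uint32_t bits = static_cast<uint32_t>(modulo);
  return static_cast<int32_t>(bits);        // Two's-complement reinterpretation.
}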
+
   // -------------------------------------------------------------------------
-  // Activation frames
+  // Activation frames.
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
   void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
@@ -558,23 +590,21 @@
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
   // Enter exit frame.
-  // Expects the number of arguments in register a0 and
-  // the builtin function to call in register a1.
-  // On output hold_argc, hold_function, and hold_argv are setup.
-  void EnterExitFrame(Register hold_argc,
-                      Register hold_argv,
-                      Register hold_function,
-                      bool save_doubles);
+  // argc - argument count to be dropped by LeaveExitFrame.
+  // save_doubles - saves FPU registers on stack, currently disabled.
+  // stack_space - extra stack space.
+  void EnterExitFrame(bool save_doubles,
+                      int stack_space = 0);
 
-  // Leave the current exit frame. Expects the return value in v0.
-  void LeaveExitFrame(bool save_doubles);
-
-  // Align the stack by optionally pushing a Smi zero.
-  void AlignStack(int offset);    // TODO(mips) : remove this function.
+  // Leave the current exit frame.
+  void LeaveExitFrame(bool save_doubles, Register arg_count);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
 
+  // Make sure the stack is aligned. Only emits code in debug mode.
+  void AssertStackIsAligned();
+
   void LoadContext(Register dst, int context_chain_length);
 
   void LoadGlobalFunction(int index, Register function);
@@ -586,31 +616,40 @@
                                     Register scratch);
 
   // -------------------------------------------------------------------------
-  // JavaScript invokes
+  // JavaScript invokes.
+
+  // Setup call kind marking in t1. The method takes t1 as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  void SetCallKind(Register dst, CallKind kind);
 
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  PostCallGenerator* post_call_generator = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  CallKind call_kind);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      CallKind call_kind);
 
 
   void IsObjectJSObjectType(Register heap_object,
@@ -628,14 +667,19 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // -------------------------------------------------------------------------
-  // Debugger Support
+  // Debugger Support.
 
   void DebugBreak();
 #endif
 
+  void InitializeRootRegister() {
+    ExternalReference roots_address =
+        ExternalReference::roots_address(isolate());
+    li(kRootRegister, Operand(roots_address));
+  }
 
   // -------------------------------------------------------------------------
-  // Exception handling
+  // Exception handling.
 
   // Push a new try handler and link into try handler chain.
   // The return address must be passed in register ra.
@@ -646,9 +690,24 @@
   // Must preserve the result register.
   void PopTryHandler();
 
+  // Passes thrown value (in v0) to the handler of top of the try handler chain.
+  void Throw(Register value);
+
+  // Propagates an uncatchable exception to the top of the current JS stack's
+  // handler chain.
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);
 
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written and length will be zero.
+  void CopyBytes(Register src,
+                 Register dst,
+                 Register length,
+                 Register scratch);
+
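The post-conditions of CopyBytes (both pointers end just past the last byte transferred and length ends at zero) correspond to this plain C++ model; CopyBytesModel is only an illustration, not part of the API:

    // Byte-wise copy with the same exit state as MacroAssembler::CopyBytes:
    // src and dst point one past the last byte copied, length is zero.
    static inline void CopyBytesModel(const char*& src, char*& dst, int& length) {
      while (length > 0) {
        *dst++ = *src++;
        --length;
      }
    }
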
   // -------------------------------------------------------------------------
   // Support functions.
 
@@ -666,21 +725,36 @@
                      Register map,
                      Register type_reg);
 
+  // Check if a map for a JSObject indicates that the object has fast elements.
+  // Jump to the specified label if it does not.
+  void CheckFastElements(Register map,
+                         Register scratch,
+                         Label* fail);
+
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
-  // to be a heap object)
+  // to be a heap object).
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
 
   void CheckMap(Register obj,
                 Register scratch,
                 Heap::RootListIndex index,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object).
+  void DispatchMap(Register obj,
+                   Register scratch,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -692,6 +766,10 @@
   //   index - holds the overwritten index on exit.
   void IndexFromHash(Register hash, Register index);
 
+  // Get the number of least significant bits from a register.
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
   // Load the value of a number object into a FPU double register. If the
   // object is not a number a jump to the label not_number is performed
   // and the FPU double register is unchanged.
@@ -712,15 +790,70 @@
                               Register scratch1);
 
   // -------------------------------------------------------------------------
-  // Runtime calls
+  // Overflow handling functions.
+  // Usage: first call the appropriate arithmetic function, then call one of the
+  // jump functions with the overflow_dst register as the second parameter.
+
+  void AdduAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void SubuAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void BranchOnOverflow(Label* label,
+                        Register overflow_check,
+                        BranchDelaySlot bd = PROTECT) {
+    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void BranchOnNoOverflow(Label* label,
+                          Register overflow_check,
+                          BranchDelaySlot bd = PROTECT) {
+    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(ge, overflow_check, Operand(zero_reg), bd);
+  }
+
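A minimal usage sketch of the overflow protocol described above; the wrapper name and register choices are hypothetical, while AdduAndCheckForOverflow and BranchOnOverflow are the helpers declared in this header:

    // First the arithmetic helper, then a jump helper taking overflow_dst.
    void EmitCheckedAdd(MacroAssembler* masm, Register dst, Register left,
                        Register right, Register overflow, Label* on_overflow) {
      masm->AdduAndCheckForOverflow(dst, left, right, overflow);
      masm->BranchOnOverflow(on_overflow, overflow);  // overflow < 0 signals wrap-around.
    }
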
+  // -------------------------------------------------------------------------
+  // Runtime calls.
 
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = cc_always,
                 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
+                                           Condition cond = cc_always,
+                                           Register r1 = zero_reg,
+                                           const Operand& r2 =
+                                               Operand(zero_reg));
+
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+                                               Condition cond = cc_always,
+                                               Register r1 = zero_reg,
+                                               const Operand& r2 =
+                                                   Operand(zero_reg));
+
   void CallJSExitStub(CodeStub* stub);
 
   // Call a runtime routine.
@@ -741,6 +874,12 @@
                                  int num_arguments,
                                  int result_size);
 
+  // Tail call of a runtime routine (jump). Try to generate the code if
+  // necessary. Do not perform a GC but instead return a retry after GC
+  // failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -768,15 +907,31 @@
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, Register scratch, int num_arguments);
+  void GetCFunctionDoubleResult(const DoubleRegister dst);
+
+  // There are two ways of passing double arguments on MIPS, depending on
+  // whether the soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three argument combinations we use
+  // when calling C functions from generated code.
+  void SetCallCDoubleArguments(DoubleRegister dreg);
+  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+
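A hedged sketch of how these helpers combine with CallCFunction for a double(double) routine; dreg, dres, scratch and func are placeholders, and the argument count shown is only illustrative:

    masm->PrepareCallCFunction(1, scratch);   // Reserve argument slots / align sp.
    masm->SetCallCDoubleArguments(dreg);      // Soft- or hard-float ABI dependent move.
    masm->CallCFunction(func, 1);             // func is some ExternalReference.
    masm->GetCFunctionDoubleResult(dres);     // Fetch the double return value.
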
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context.
+  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+                                           int stack_space);
 
   // Jump to the builtin routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flags,
-                     PostCallGenerator* post_call_generator = NULL);
+                     InvokeFlag flag,
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the code object for the given builtin in the target register and
   // set up the function in a1.
@@ -787,14 +942,17 @@
 
   struct Unresolved {
     int pc;
-    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
     const char* name;
   };
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
   // -------------------------------------------------------------------------
-  // StatsCounter support
+  // StatsCounter support.
 
   void SetCounter(StatsCounter* counter, int value,
                   Register scratch1, Register scratch2);
@@ -805,7 +963,7 @@
 
 
   // -------------------------------------------------------------------------
-  // Debugging
+  // Debugging.
 
   // Calls Abort(msg) if the condition cc is not satisfied.
   // Use --debug_code to enable.
@@ -826,7 +984,7 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
   // ---------------------------------------------------------------------------
-  // Number utilities
+  // Number utilities.
 
   // Check whether the value of reg is a power of two and not zero. If not
   // control continues at the label not_power_of_two. If reg is a power of two
@@ -837,7 +995,7 @@
                                  Label* not_power_of_two_or_zero);
 
   // -------------------------------------------------------------------------
-  // Smi utilities
+  // Smi utilities.
 
   // Try to convert int32 to smi. If the value is too large, preserve
   // the original value and jump to not_a_smi. Destroys scratch and
@@ -888,13 +1046,16 @@
   void AbortIfSmi(Register object);
   void AbortIfNotSmi(Register object);
 
+  // Abort execution if argument is not a string. Used in debug code.
+  void AbortIfNotString(Register object);
+
   // Abort execution if argument is not the root value with the given index.
   void AbortIfNotRootValue(Register src,
                            Heap::RootListIndex root_value_index,
                            const char* message);
 
   // ---------------------------------------------------------------------------
-  // HeapNumber utilities
+  // HeapNumber utilities.
 
   void JumpIfNotHeapNumber(Register object,
                            Register heap_number_map,
@@ -902,7 +1063,7 @@
                            Label* on_not_heap_number);
 
   // -------------------------------------------------------------------------
-  // String utilities
+  // String utilities.
 
   // Checks if both instance types are sequential ASCII strings and jumps to
   // label if either is not.
@@ -935,22 +1096,33 @@
                                            Register scratch2,
                                            Label* failure);
 
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+
  private:
   void CallCFunctionHelper(Register function,
                            ExternalReference function_reference,
                            Register scratch,
                            int num_arguments);
 
-  void Jump(intptr_t target, RelocInfo::Mode rmode,
-            BranchDelaySlot bd = PROTECT);
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
-            BranchDelaySlot bd = PROTECT);
-  void Call(intptr_t target, RelocInfo::Mode rmode,
-            BranchDelaySlot bd = PROTECT);
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
-            BranchDelaySlot bd = PROTECT);
+  void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+  void BranchShort(int16_t offset, Condition cond, Register rs,
+                   const Operand& rt,
+                   BranchDelaySlot bdslot = PROTECT);
+  void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+  void BranchShort(Label* L, Condition cond, Register rs,
+                   const Operand& rt,
+                   BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bdslot = PROTECT);
+  void J(Label* L, BranchDelaySlot bdslot);
+  void Jr(Label* L, BranchDelaySlot bdslot);
+  void Jalr(Label* L, BranchDelaySlot bdslot);
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -959,7 +1131,8 @@
                       Register code_reg,
                       Label* done,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   // Get the code for the given builtin. Returns if able to resolve
   // the function in the 'resolved' flag.
@@ -975,15 +1148,24 @@
                            Register scratch1,
                            Register scratch2);
 
+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+  bool UseAbsoluteCodePointers();
 
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // traversal.
+  friend class OptimizedFrame;
 };
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -998,29 +1180,21 @@
   MacroAssembler* masm() { return &masm_; }
 
   // Emit an instruction directly.
-  void Emit(Instr x);
+  void Emit(Instr instr);
 
   // Emit an address directly.
   void Emit(Address addr);
 
+  // Change the condition part of an instruction leaving the rest of the current
+  // instruction unchanged.
+  void ChangeBranchCondition(Condition cond);
+
  private:
   byte* address_;  // The address of the code being patched.
   int instructions_;  // Number of instructions of the expected patch size.
   int size_;  // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
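
A possible usage sketch for CodePatcher; the constructor arguments (patch address and instruction count) are assumed from the member fields above, and the patched branch is hypothetical:

    {
      CodePatcher patcher(branch_address, 1);   // branch_address: byte* to one instruction.
      patcher.ChangeBranchCondition(ne);        // Flip the branch sense in place.
    }  // Going out of scope is assumed to finalize the patched region.
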
-#endif  // ENABLE_DEBUGGER_SUPPORT
-
-
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
-  PostCallGenerator() { }
-  virtual ~PostCallGenerator() { }
-  virtual void Generate() = 0;
-};
 
 
 // -----------------------------------------------------------------------------
@@ -1042,6 +1216,15 @@
 }
 
 
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+  ASSERT(index > kCArgSlotCount);
+  // Argument 5 takes the slot just past the four Arg-slots.
+  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
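
A short sketch of passing a fifth argument through the stack slot computed above; the register choice and extra_arg are illustrative, and function stands for some ExternalReference:

    masm->li(t0, Operand(extra_arg));           // extra_arg: hypothetical 5th argument.
    masm->sw(t0, CFunctionArgumentOperand(5));  // First slot past the four arg slots.
    masm->CallCFunction(function, 5);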
+
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
@@ -1055,4 +1238,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 9f9e976..63e836f 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -56,49 +56,58 @@
  * - sp : points to tip of C stack.
  *
  * The remaining registers are free for computations.
- *
  * Each call to a public method should retain this convention.
+ *
  * The stack will have the following structure:
- *       - direct_call        (if 1, direct call from JavaScript code, if 0 call
- *                             through the runtime system)
- *       - stack_area_base    (High end of the memory area to use as
- *                             backtracking stack)
- *       - int* capture_array (int[num_saved_registers_], for output).
- *       - stack frame header (16 bytes in size)
- *       --- sp when called ---
- *       - link address
- *       - backup of registers s0..s7
- *       - end of input       (Address of end of string)
- *       - start of input     (Address of first character in string)
- *       - start index        (character index of start)
- *       --- frame pointer ----
- *       - void* input_string (location of a handle containing the string)
- *       - Offset of location before start of input (effectively character
- *         position -1). Used to initialize capture registers to a non-position.
- *       - At start (if 1, we are starting at the start of the
- *         string, otherwise 0)
- *       - register 0         (Only positions must be stored in the first
- *       - register 1          num_saved_registers_ registers)
- *       - ...
- *       - register num_registers-1
- *       --- sp ---
+ *
+ *  - fp[56]  direct_call  (if 1, direct call from JavaScript code,
+ *                          if 0, call through the runtime system).
+ *  - fp[52]  stack_area_base (High end of the memory area to use as
+ *                             backtracking stack).
+ *  - fp[48]  int* capture_array (int[num_saved_registers_], for output).
+ *  - fp[44]  secondary link/return address used by native call.
+ *  --- sp when called ---
+ *  - fp[40]  return address (ra).
+ *  - fp[36]  old frame pointer (fp).
+ *  - fp[0..32]  backup of registers s0..s7.
+ *  --- frame pointer ----
+ *  - fp[-4]  end of input       (Address of end of string).
+ *  - fp[-8]  start of input     (Address of first character in string).
+ *  - fp[-12] start index        (character index of start).
+ *  - fp[-16] void* input_string (location of a handle containing the string).
+ *  - fp[-20] Offset of location before start of input (effectively character
+ *            position -1). Used to initialize capture registers to a
+ *            non-position.
+ *  - fp[-24] At start (if 1, we are starting at the start of the
+ *    string, otherwise 0)
+ *  - fp[-28] register 0         (Only positions must be stored in the first
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
  *
  * The first num_saved_registers_ registers are initialized to point to
  * "character -1" in the string (i.e., char_size() bytes before the first
  * character of the string). The remaining registers start out as garbage.
  *
  * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
  * int (*match)(String* input_string,
  *              int start_index,
  *              Address start,
  *              Address end,
+ *              Address secondary_return_address,  // Only used by native call.
  *              int* capture_output_array,
- *              bool at_start,
  *              byte* stack_area_base,
- *              bool direct_call)
+ *              bool direct_call = false)
  * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc).
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in mips/simulator-mips.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the ra register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
  */
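
For reference, the entry point documented above corresponds to a function pointer of roughly this shape (a sketch only; the real invocation goes through NativeRegExpMacroAssembler::Execute and the CALL_GENERATED_REGEXP_CODE macro):

    typedef int (*RegExpMatcher)(String* input_string,
                                 int start_index,
                                 Address start,
                                 Address end,
                                 Address secondary_return_address,
                                 int* capture_output_array,
                                 byte* stack_area_base,
                                 bool direct_call);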
 
 #define __ ACCESS_MASM(masm_)
@@ -106,7 +115,7 @@
 RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -114,9 +123,15 @@
       start_label_(),
       success_label_(),
       backtrack_label_(),
-      exit_label_() {
+      exit_label_(),
+      internal_failure_label_() {
   ASSERT_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
+  // If the code gets too big or corrupted, an internal exception will be
+  // raised, and we will exit right away.
+  __ bind(&internal_failure_label_);
+  __ li(v0, Operand(FAILURE));
+  __ Ret();
   __ bind(&start_label_);  // And then continue from here.
 }
 
@@ -131,6 +146,7 @@
   exit_label_.Unuse();
   check_preempt_label_.Unuse();
   stack_overflow_label_.Unuse();
+  internal_failure_label_.Unuse();
 }
 
 
@@ -140,47 +156,75 @@
 
 
 void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
-  UNIMPLEMENTED_MIPS();
+  if (by != 0) {
+    __ Addu(current_input_offset(),
+           current_input_offset(), Operand(by * char_size()));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(reg >= 0);
+  ASSERT(reg < num_registers_);
+  if (by != 0) {
+    __ lw(a0, register_location(reg));
+    __ Addu(a0, a0, Operand(by));
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::Backtrack() {
-  UNIMPLEMENTED_MIPS();
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(a0);
+  __ Addu(a0, a0, code_pointer());
+  __ Jump(a0);
 }
 
 
 void RegExpMacroAssemblerMIPS::Bind(Label* label) {
-  UNIMPLEMENTED_MIPS();
+  __ bind(label);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
-  UNIMPLEMENTED_MIPS();
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
+
+  // If we did, are we still at the start of the input?
+  __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+  __ bind(&not_at_start);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
-  UNIMPLEMENTED_MIPS();
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
+  // If we did, are we still at the start of the input?
+  __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
 }
 
 
@@ -188,26 +232,212 @@
                                               int cp_offset,
                                               Label* on_failure,
                                               bool check_end_of_string) {
-  UNIMPLEMENTED_MIPS();
+  if (on_failure == NULL) {
+    // Instead of inlining a backtrack for each test, (re)use the global
+    // backtrack target.
+    on_failure = &backtrack_label_;
+  }
+
+  if (check_end_of_string) {
+    // Is the last character of the required match inside the string?
+    CheckPosition(cp_offset + str.length() - 1, on_failure);
+  }
+
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  if (cp_offset != 0) {
+    int byte_offset = cp_offset * char_size();
+    __ Addu(a0, a0, Operand(byte_offset));
+  }
+
+  // a0 : Address of characters to match against str.
+  int stored_high_byte = 0;
+  for (int i = 0; i < str.length(); i++) {
+    if (mode_ == ASCII) {
+      __ lbu(a1, MemOperand(a0, 0));
+      __ addiu(a0, a0, char_size());
+      ASSERT(str[i] <= String::kMaxAsciiCharCode);
+      BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+    } else {
+      __ lhu(a1, MemOperand(a0, 0));
+      __ addiu(a0, a0, char_size());
+      uc16 match_char = str[i];
+      int match_high_byte = (match_char >> 8);
+      if (match_high_byte == 0) {
+        BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+      } else {
+        if (match_high_byte != stored_high_byte) {
+          __ li(a2, Operand(match_high_byte));
+          stored_high_byte = match_high_byte;
+        }
+        __ Addu(a3, a2, Operand(match_char & 0xff));
+        BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
+      }
+    }
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  Label backtrack_non_equal;
+  __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+  __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+  __ Addu(backtrack_stackpointer(),
+          backtrack_stackpointer(),
+          Operand(kPointerSize));
+  __ bind(&backtrack_non_equal);
+  BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
     int start_reg,
     Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
+  Label fallthrough;
+  __ lw(a0, register_location(start_reg));  // Index of start of capture.
+  __ lw(a1, register_location(start_reg + 1));  // Index of end of capture.
+  __ Subu(a1, a1, a0);  // Length of capture.
+
+  // If length is zero, either the capture is empty or it is not participating.
+  // In either case succeed immediately.
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Addu(t5, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+  if (mode_ == ASCII) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+    __ Addu(a0, a0, Operand(end_of_input_address()));
+    __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+    __ Addu(a1, a0, Operand(a1));
+
+    // a0 - Address of start of capture.
+    // a1 - Address of end of capture.
+    // a2 - Address of current input position.
+
+    Label loop;
+    __ bind(&loop);
+    __ lbu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lbu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+
+    __ Branch(&loop_check, eq, t0, Operand(a3));
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ Or(a3, a3, Operand(0x20));  // Convert capture character to lower-case.
+    __ Or(t0, t0, Operand(0x20));  // Also convert input character.
+    __ Branch(&fail, ne, t0, Operand(a3));
+    __ Subu(a3, a3, Operand('a'));
+    __ Branch(&fail, hi, a3, Operand('z' - 'a'));  // Is a3 a lowercase letter?
+
+    __ bind(&loop_check);
+    __ Branch(&loop, lt, a0, Operand(a1));
+    __ jmp(&success);
+
+    __ bind(&fail);
+    GoTo(on_no_match);
+
+    __ bind(&success);
+    // Compute new value of character position after the matched part.
+    __ Subu(current_input_offset(), a2, end_of_input_address());
+  } else {
+    ASSERT(mode_ == UC16);
+    // Put regexp engine registers on stack.
+    RegList regexp_registers_to_retain = current_input_offset().bit() |
+        current_character().bit() | backtrack_stackpointer().bit();
+    __ MultiPush(regexp_registers_to_retain);
+
+    int argument_count = 4;
+    __ PrepareCallCFunction(argument_count, a2);
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   a0: Address byte_offset1 - Address captured substring's start.
+    //   a1: Address byte_offset2 - Address of current character position.
+    //   a2: size_t byte_length - length of capture in bytes(!).
+    //   a3: Isolate* isolate.
+
+    // Address of start of capture.
+    __ Addu(a0, a0, Operand(end_of_input_address()));
+    // Length of capture.
+    __ mov(a2, a1);
+    // Save length in callee-save register for use on return.
+    __ mov(s3, a1);
+    // Address of current input position.
+    __ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
+    // Isolate.
+    __ li(a3, Operand(ExternalReference::isolate_address()));
+
+    ExternalReference function =
+        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+    __ CallCFunction(function, argument_count);
+
+    // Restore regexp engine registers.
+    __ MultiPop(regexp_registers_to_retain);
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+    // Check if function returned non-zero for success or zero for failure.
+    BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
+    // On success, increment position by length of capture.
+    __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+  }
+
+  __ bind(&fallthrough);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReference(
     int start_reg,
     Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
+  Label fallthrough;
+  Label success;
+
+  // Find length of back-referenced capture.
+  __ lw(a0, register_location(start_reg));
+  __ lw(a1, register_location(start_reg + 1));
+  __ Subu(a1, a1, a0);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Addu(t5, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+  // Compute pointers to match string and capture string.
+  __ Addu(a0, a0, Operand(end_of_input_address()));
+  __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+  __ Addu(a1, a1, Operand(a0));
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == ASCII) {
+    __ lbu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lbu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+  } else {
+    ASSERT(mode_ == UC16);
+    __ lhu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lhu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+  }
+  BranchOrBacktrack(on_no_match, ne, a3, Operand(t0));
+  __ Branch(&loop, lt, a0, Operand(a1));
+
+  // Move current character position to position after match.
+  __ Subu(current_input_offset(), a2, end_of_input_address());
+  __ bind(&fallthrough);
 }
 
 
@@ -220,21 +450,23 @@
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
                                                 Label* on_not_equal) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
                                                      uint32_t mask,
                                                      Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  __ And(a0, current_character(), Operand(mask));
+  BranchOrBacktrack(on_equal, eq, a0, Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
                                                         uint32_t mask,
                                                         Label* on_not_equal) {
-  UNIMPLEMENTED_MIPS();
+  __ And(a0, current_character(), Operand(mask));
+  BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
 }
 
 
@@ -249,24 +481,360 @@
 
 bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check.
+  switch (type) {
+  case 's':
+    // Match space-characters.
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      Label success;
+      __ Branch(&success, eq, current_character(), Operand(' '));
+      // Check range 0x09..0x0d.
+      __ Subu(a0, current_character(), Operand('\t'));
+      BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // Match non-space characters.
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
+      __ Subu(a0, current_character(), Operand('\t'));
+      BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
+      return true;
+    }
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9').
+    __ Subu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+    return true;
+  case 'D':
+    // Match non ASCII-digits.
+    __ Subu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+    __ Subu(a0, a0, Operand(0x0b));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+    }
+    return true;
+  }
+  case 'n': {
+    // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+    __ Subu(a0, a0, Operand(0x0b));
+    if (mode_ == ASCII) {
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+    } else {
+      Label done;
+      BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+      __ bind(&done);
+    }
+    return true;
+  }
+  case 'w': {
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Addu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+    return true;
+  }
+  case 'W': {
+    Label done;
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      __ Branch(&done, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Addu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+    if (mode_ != ASCII) {
+      __ bind(&done);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    return true;
+  // No custom implementation (yet): s(UC16), S(UC16).
+  default:
+    return false;
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::Fail() {
-  UNIMPLEMENTED_MIPS();
+  __ li(v0, Operand(FAILURE));
+  __ jmp(&exit_label_);
 }
 
 
 Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
-  UNIMPLEMENTED_MIPS();
-  return Handle<HeapObject>::null();
+  if (masm_->has_exception()) {
+    // If the code gets corrupted due to long regular expressions and lack of
+    // space on trampolines, an internal exception flag is set. If this case
+    // is detected, we will jump to the exit sequence right away.
+    __ bind_to(&entry_label_, internal_failure_label_.pos());
+  } else {
+    // Finalize code: write the entry point code now that we know how many
+    // registers we need.
+
+    // Entry code:
+    __ bind(&entry_label_);
+    // Push arguments.
+    // Save callee-save registers.
+    // Start new stack frame.
+    // Store link register in existing stack-cell.
+    // Order here should correspond to order of offset constants in header file.
+    RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
+        s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
+    RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+    __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+    // Set frame pointer in space for it if this is not a direct call
+    // from generated code.
+    __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
+    __ push(a0);  // Make room for "position - 1" constant (value irrelevant).
+    __ push(a0);  // Make room for "at start" constant (value irrelevant).
+
+    // Check if we have space on the stack for registers.
+    Label stack_limit_hit;
+    Label stack_ok;
+
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(masm_->isolate());
+    __ li(a0, Operand(stack_limit));
+    __ lw(a0, MemOperand(a0));
+    __ Subu(a0, sp, a0);
+    // Handle it if the stack pointer is already below the stack limit.
+    __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+    // Check if there is room for the variable number of registers above
+    // the stack limit.
+    __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+    // Exit with OutOfMemory exception. There is not enough space on the stack
+    // for our working registers.
+    __ li(v0, Operand(EXCEPTION));
+    __ jmp(&exit_label_);
+
+    __ bind(&stack_limit_hit);
+    CallCheckStackGuardState(a0);
+    // If returned value is non-zero, we exit with the returned value as result.
+    __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+    __ bind(&stack_ok);
+    // Allocate space on stack for registers.
+    __ Subu(sp, sp, Operand(num_registers_ * kPointerSize));
+    // Load string end.
+    __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+    // Load input start.
+    __ lw(a0, MemOperand(frame_pointer(), kInputStart));
+    // Find negative length (offset of start relative to end).
+    __ Subu(current_input_offset(), a0, end_of_input_address());
+    // Set a0 to address of char before start of the input string
+    // (effectively string position -1).
+    __ lw(a1, MemOperand(frame_pointer(), kStartIndex));
+    __ Subu(a0, current_input_offset(), Operand(char_size()));
+    __ sll(t5, a1, (mode_ == UC16) ? 1 : 0);
+    __ Subu(a0, a0, t5);
+    // Store this value in a local variable, for use when clearing
+    // position registers.
+    __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+    // Determine whether the start index is zero (that is, at the start of
+    // the string) and store that value in a local variable.
+    __ mov(t5, a1);
+    __ li(a1, Operand(1));
+    __ movn(a1, zero_reg, t5);
+    __ sw(a1, MemOperand(frame_pointer(), kAtStart));
+
+    if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+      // Fill saved registers with initial value = start offset - 1.
+
+      // Address of register 0.
+      __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
+      __ li(a2, Operand(num_saved_registers_));
+      Label init_loop;
+      __ bind(&init_loop);
+      __ sw(a0, MemOperand(a1));
+      __ Addu(a1, a1, Operand(-kPointerSize));
+      __ Subu(a2, a2, Operand(1));
+      __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+    }
+
+    // Initialize backtrack stack pointer.
+    __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+    // Initialize code pointer register.
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    // Load previous char as initial value of current character register.
+    Label at_start;
+    __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+    __ Branch(&at_start, ne, a0, Operand(zero_reg));
+    LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+    __ jmp(&start_label_);
+    __ bind(&at_start);
+    __ li(current_character(), Operand('\n'));
+    __ jmp(&start_label_);
+
+
+    // Exit code:
+    if (success_label_.is_linked()) {
+      // Save captures when successful.
+      __ bind(&success_label_);
+      if (num_saved_registers_ > 0) {
+        // Copy captures to output.
+        __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+        __ lw(a0, MemOperand(frame_pointer(), kRegisterOutput));
+        __ lw(a2, MemOperand(frame_pointer(), kStartIndex));
+        __ Subu(a1, end_of_input_address(), a1);
+        // a1 is length of input in bytes.
+        if (mode_ == UC16) {
+          __ srl(a1, a1, 1);
+        }
+        // a1 is length of input in characters.
+        __ Addu(a1, a1, Operand(a2));
+        // a1 is length of string in characters.
+
+        ASSERT_EQ(0, num_saved_registers_ % 2);
+        // Always an even number of capture registers. This allows us to
+        // unroll the loop once to add an operation between a load of a register
+        // and the following use of that register.
+        for (int i = 0; i < num_saved_registers_; i += 2) {
+          __ lw(a2, register_location(i));
+          __ lw(a3, register_location(i + 1));
+          if (mode_ == UC16) {
+            __ sra(a2, a2, 1);
+            __ Addu(a2, a2, a1);
+            __ sra(a3, a3, 1);
+            __ Addu(a3, a3, a1);
+          } else {
+            __ Addu(a2, a1, Operand(a2));
+            __ Addu(a3, a1, Operand(a3));
+          }
+          __ sw(a2, MemOperand(a0));
+          __ Addu(a0, a0, kPointerSize);
+          __ sw(a3, MemOperand(a0));
+          __ Addu(a0, a0, kPointerSize);
+        }
+      }
+      __ li(v0, Operand(SUCCESS));
+    }
+    // Exit and return v0.
+    __ bind(&exit_label_);
+    // Skip sp past regexp registers and local variables.
+    __ mov(sp, frame_pointer());
+    // Restore registers s0..s7 and return (restoring ra to pc).
+    __ MultiPop(registers_to_retain | ra.bit());
+    __ Ret();
+
+    // Backtrack code (branch target for conditional backtracks).
+    if (backtrack_label_.is_linked()) {
+      __ bind(&backtrack_label_);
+      Backtrack();
+    }
+
+    Label exit_with_exception;
+
+    // Preempt-code.
+    if (check_preempt_label_.is_linked()) {
+      SafeCallTarget(&check_preempt_label_);
+      // Put regexp engine registers on stack.
+      RegList regexp_registers_to_retain = current_input_offset().bit() |
+          current_character().bit() | backtrack_stackpointer().bit();
+      __ MultiPush(regexp_registers_to_retain);
+      CallCheckStackGuardState(a0);
+      __ MultiPop(regexp_registers_to_retain);
+      // If returning non-zero, we should end execution with the given
+      // result as return value.
+      __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+      // String might have moved: Reload end of string from frame.
+      __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      SafeReturn();
+    }
+
+    // Backtrack stack overflow code.
+    if (stack_overflow_label_.is_linked()) {
+      SafeCallTarget(&stack_overflow_label_);
+      // Reached if the backtrack-stack limit has been hit.
+      // Put regexp engine registers on stack first.
+      RegList regexp_registers = current_input_offset().bit() |
+          current_character().bit();
+      __ MultiPush(regexp_registers);
+      Label grow_failed;
+      // Call GrowStack(backtrack_stackpointer(), &stack_base)
+      static const int num_arguments = 3;
+      __ PrepareCallCFunction(num_arguments, a0);
+      __ mov(a0, backtrack_stackpointer());
+      __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
+      __ li(a2, Operand(ExternalReference::isolate_address()));
+      ExternalReference grow_stack =
+          ExternalReference::re_grow_stack(masm_->isolate());
+      __ CallCFunction(grow_stack, num_arguments);
+      // Restore regexp registers.
+      __ MultiPop(regexp_registers);
+      // If it returned NULL, we have failed to grow the stack, and
+      // must exit with a stack-overflow exception.
+      __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
+      // Otherwise use return value as new stack pointer.
+      __ mov(backtrack_stackpointer(), v0);
+      // Restore saved registers and continue.
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      SafeReturn();
+    }
+
+    if (exit_with_exception.is_linked()) {
+      // Reached if any of the code above needed to exit with an exception.
+      __ bind(&exit_with_exception);
+      // Exit with Result EXCEPTION(-1) to signal thrown exception.
+      __ li(v0, Operand(EXCEPTION));
+      __ jmp(&exit_label_);
+    }
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = FACTORY->NewCode(code_desc,
+                                       Code::ComputeFlags(Code::REGEXP),
+                                       masm_->CodeObject());
+  LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+  return Handle<HeapObject>::cast(code);
 }
 
 
 void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
-  UNIMPLEMENTED_MIPS();
+  if (to == NULL) {
+    Backtrack();
+    return;
+  }
+  __ jmp(to);
+  return;
 }
 
 
@@ -281,13 +849,15 @@
 void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
                                            int comparand,
                                            Label* if_lt) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(reg));
+  BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
 }
 
 
 void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
                                               Label* if_eq) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(reg));
+  BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
 }
 
 
@@ -301,23 +871,47 @@
                                                    Label* on_end_of_input,
                                                    bool check_bounds,
                                                    int characters) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
 
 void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
-  UNIMPLEMENTED_MIPS();
+  Pop(current_input_offset());
 }
 
 
 void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
-  UNIMPLEMENTED_MIPS();
+  Pop(a0);
+  __ sw(a0, register_location(register_index));
 }
 
 
-
 void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
-  UNIMPLEMENTED_MIPS();
+  if (label->is_bound()) {
+    int target = label->pos();
+    __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    Label after_constant;
+    __ Branch(&after_constant);
+    int offset = masm_->pc_offset();
+    int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+    __ emit(0);
+    masm_->label_at_put(label, offset);
+    __ bind(&after_constant);
+    if (is_int16(cp_offset)) {
+      __ lw(a0, MemOperand(code_pointer(), cp_offset));
+    } else {
+      __ Addu(a0, code_pointer(), cp_offset);
+      __ lw(a0, MemOperand(a0, 0));
+    }
+  }
+  Push(a0);
+  CheckStackLimit();
 }
 
 
@@ -328,55 +922,90 @@
 
 void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
                                            StackCheckFlag check_stack_limit) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(register_index));
+  Push(a0);
+  if (check_stack_limit) CheckStackLimit();
 }
 
 
 void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(current_input_offset(), register_location(reg));
 }
 
 
 void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(backtrack_stackpointer(), register_location(reg));
+  __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
-  UNIMPLEMENTED_MIPS();
+  Label after_position;
+  __ Branch(&after_position,
+            ge,
+            current_input_offset(),
+            Operand(-by * char_size()));
+  __ li(current_input_offset(), -by * char_size());
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
 }
 
 
 void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ li(a0, Operand(to));
+  __ sw(a0, register_location(register_index));
 }
 
 
 void RegExpMacroAssemblerMIPS::Succeed() {
-  UNIMPLEMENTED_MIPS();
+  __ jmp(&success_label_);
 }
 
 
 void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
                                                              int cp_offset) {
-  UNIMPLEMENTED_MIPS();
+  if (cp_offset == 0) {
+    __ sw(current_input_offset(), register_location(reg));
+  } else {
+    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(reg_from <= reg_to);
+  __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Subu(a0, backtrack_stackpointer(), a1);
+  __ sw(a0, register_location(reg));
 }
 
 
 // Private methods:
 
 void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
-  UNIMPLEMENTED_MIPS();
+  static const int num_arguments = 3;
+  __ PrepareCallCFunction(num_arguments, scratch);
+  __ mov(a2, frame_pointer());
+  // Code* of self.
+  __ li(a1, Operand(masm_->CodeObject()));
+  // a0 becomes return address pointer.
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+  CallCFunctionUsingStub(stack_guard_check, num_arguments);
 }
 
 
@@ -388,22 +1017,114 @@
 
 
 int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
-                                                  Code* re_code,
-                                                  Address re_frame) {
-  UNIMPLEMENTED_MIPS();
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+  ASSERT(isolate == Isolate::Current());
+  if (isolate->stack_guard()->IsStackOverflow()) {
+    isolate->StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If this is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+  // If this is a direct call from JavaScript retry the RegExp forcing the call
+  // through the runtime system. Currently the direct call cannot handle a GC.
+  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+    return RETRY;
+  }
+
+  // Prepare for possible GC.
+  HandleScope handles(isolate);
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  MaybeObject* result = Execution::HandleStackGuardInterrupt();
+
+  if (*code_handle != re_code) {  // Return address no longer valid.
+    int delta = code_handle->address() - re_code->address();
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
+  // String might have changed.
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+    int byte_length = static_cast<int>(end_address - start_address);
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  }
+
   return 0;
 }
 
 
 MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+  ASSERT(register_index < (1<<30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return MemOperand(frame_pointer(),
+                    kRegisterZero - register_index * kPointerSize);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
                                             Label* on_outside_input) {
-  UNIMPLEMENTED_MIPS();
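+  // current_input_offset() is a negative offset from the end of the input;
+  // the position cp_offset characters ahead falls outside the input when
+  // current_input_offset() >= -cp_offset * char_size().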
+  BranchOrBacktrack(on_outside_input,
+                    ge,
+                    current_input_offset(),
+                    Operand(-cp_offset * char_size()));
 }
 
 
@@ -411,61 +1132,126 @@
                                                  Condition condition,
                                                  Register rs,
                                                  const Operand& rt) {
-  UNIMPLEMENTED_MIPS();
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ Branch(&backtrack_label_, condition, rs, rt);
+    return;
+  }
+  __ Branch(to, condition, rs, rt);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
                                            const Operand& rt) {
-  UNIMPLEMENTED_MIPS();
+  __ BranchAndLink(to, cond, rs, rt);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeReturn() {
-  UNIMPLEMENTED_MIPS();
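+  // The return address was pushed as an offset from the code object (see
+  // SafeCallTarget) so that it survives code relocation by the GC; rebuild
+  // the absolute address before jumping.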
+  __ pop(ra);
+  __ Addu(t5, ra, Operand(masm_->CodeObject()));
+  __ Jump(t5);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
-  UNIMPLEMENTED_MIPS();
+  __ bind(name);
+  __ Subu(ra, ra, Operand(masm_->CodeObject()));
+  __ push(ra);
 }
 
 
 void RegExpMacroAssemblerMIPS::Push(Register source) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!source.is(backtrack_stackpointer()));
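+  // The backtrack stack grows downwards: pre-decrement, then store.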
+  __ Addu(backtrack_stackpointer(),
+          backtrack_stackpointer(),
+          Operand(-kPointerSize));
+  __ sw(source, MemOperand(backtrack_stackpointer()));
 }
 
 
 void RegExpMacroAssemblerMIPS::Pop(Register target) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ lw(target, MemOperand(backtrack_stackpointer()));
+  __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckPreemption() {
-  UNIMPLEMENTED_MIPS();
+  // Check for preemption.
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(masm_->isolate());
+  __ li(a0, Operand(stack_limit));
+  __ lw(a0, MemOperand(a0));
+  SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckStackLimit() {
-  UNIMPLEMENTED_MIPS();
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+
+  __ li(a0, Operand(stack_limit));
+  __ lw(a0, MemOperand(a0));
+  SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
     ExternalReference function,
     int num_arguments) {
-  UNIMPLEMENTED_MIPS();
+  // Must pass all arguments in registers. The stub pushes on the stack.
+  ASSERT(num_arguments <= 4);
+  __ li(code_pointer(), Operand(function));
+  RegExpCEntryStub stub;
+  __ CallStub(&stub);
+  if (OS::ActivationFrameAlignment() != 0) {
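+    // Restore sp from the word above the four reserved argument slots, where
+    // PrepareCallCFunction saved the pre-alignment stack pointer.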
+    __ lw(sp, MemOperand(sp, 16));
+  }
+  __ li(code_pointer(), Operand(masm_->CodeObject()));
 }
 
 
 void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
-                                                             int characters) {
-  UNIMPLEMENTED_MIPS();
+                                                            int characters) {
+  Register offset = current_input_offset();
+  if (cp_offset != 0) {
+    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = a0;
+  }
+  // We assume that we cannot do unaligned loads on MIPS, so this function
+  // must only be used to load a single character at a time.
+  ASSERT(characters == 1);
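+  // The input offset is negative, relative to the end of the input string;
+  // adding it to end_of_input_address() gives the character's address.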
+  __ Addu(t5, end_of_input_address(), Operand(offset));
+  if (mode_ == ASCII) {
+    __ lbu(current_character(), MemOperand(t5, 0));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ lhu(current_character(), MemOperand(t5, 0));
+  }
 }
 
 
 void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
-  UNIMPLEMENTED_MIPS();
+  int stack_alignment = OS::ActivationFrameAlignment();
+  if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
+  // Stack is already aligned for call, so decrement by alignment
+  // to make room for storing the return address.
+  __ Subu(sp, sp, Operand(stack_alignment));
+  __ sw(ra, MemOperand(sp, 0));
+  __ mov(a0, sp);
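+  // The C function address was loaded into t1 (code_pointer()) by
+  // CallCFunctionUsingStub; MIPS calls through t9 by convention.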
+  __ mov(t9, t1);
+  __ Call(t9);
+  __ lw(ra, MemOperand(sp, 0));
+  __ Addu(sp, sp, Operand(stack_alignment));
+  __ Jump(ra);
 }
 
 
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index 7310c9d..d42d4cf 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,12 @@
 #ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
 #define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
 
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "macro-assembler.h"
+#include "code.h"
+#include "mips/macro-assembler-mips.h"
+
 namespace v8 {
 namespace internal {
 
@@ -112,6 +118,7 @@
   static int CheckStackGuardState(Address* return_address,
                                   Code* re_code,
                                   Address re_frame);
+
  private:
   // Offsets from frame_pointer() of function parameters and stored registers.
   static const int kFramePointer = 0;
@@ -121,10 +128,11 @@
   static const int kStoredRegisters = kFramePointer;
   // Return address (stored from link register, read into pc on return).
   static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
   // Stack frame header.
   static const int kStackFrameHeader = kReturnAddress + kPointerSize;
   // Stack parameters placed by caller.
-  static const int kRegisterOutput = kStackFrameHeader + 16;
+  static const int kRegisterOutput = kStackFrameHeader + 20;
   static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;
@@ -183,7 +191,7 @@
   // Register holding pointer to the current code object.
   inline Register code_pointer() { return t1; }
 
-  // Byte size of chars in the string to match (decided by the Mode argument)
+  // Byte size of chars in the string to match (decided by the Mode argument).
   inline int char_size() { return static_cast<int>(mode_); }
 
   // Equivalent to a conditional branch to the label, unless the label
@@ -228,7 +236,7 @@
   int num_registers_;
 
   // Number of registers to output at the end (the saved registers
-  // are always 0..num_saved_registers_-1)
+  // are always 0..num_saved_registers_-1).
   int num_saved_registers_;
 
   // Labels used internally.
@@ -239,6 +247,7 @@
   Label exit_label_;
   Label check_preempt_label_;
   Label stack_overflow_label_;
+  Label internal_failure_label_;
 };
 
 #endif  // V8_INTERPRETED_REGEXP
@@ -247,4 +256,3 @@
 }}  // namespace v8::internal
 
 #endif  // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/src/mips/register-allocator-mips-inl.h b/src/mips/register-allocator-mips-inl.h
deleted file mode 100644
index bbfb31d..0000000
--- a/src/mips/register-allocator-mips-inl.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
-#include "v8.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  // The code for this test relies on the order of register codes.
-  return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
-}
-
-
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,    // zero_reg
-    1,    // at
-    2,    // v0
-    3,    // v1
-    4,    // a0
-    5,    // a1
-    6,    // a2
-    7,    // a3
-    8,    // t0
-    9,    // t1
-    10,   // t2
-    11,   // t3
-    12,   // t4
-    13,   // t5
-    14,   // t
-    15,   // t7
-    16,   // t8
-    17,   // t9
-    18,   // s0
-    19,   // s1
-    20,   // s2
-    21,   // s3
-    22,   // s4
-    23,   // s5
-    24,   // s6
-    25,   // s7
-    26,   // k0
-    27,   // k1
-    28,   // gp
-    29,   // sp
-    30,   // s8_fp
-    31,   // ra
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] = {
-    zero_reg,
-    at,
-    v0,
-    v1,
-    a0,
-    a1,
-    a2,
-    a3,
-    t0,
-    t1,
-    t2,
-    t3,
-    t4,
-    t5,
-    t6,
-    t7,
-    s0,
-    s1,
-    s2,
-    s3,
-    s4,
-    s5,
-    s6,
-    s7,
-    t8,
-    t9,
-    k0,
-    k1,
-    gp,
-    sp,
-    s8_fp,
-    ra
-  };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
diff --git a/src/mips/register-allocator-mips.cc b/src/mips/register-allocator-mips.cc
deleted file mode 100644
index 2c5d61b..0000000
--- a/src/mips/register-allocator-mips.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Result::ToRegister(Register target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // No byte registers on MIPS.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/register-allocator-mips.h b/src/mips/register-allocator-mips.h
deleted file mode 100644
index c448923..0000000
--- a/src/mips/register-allocator-mips.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
-#include "mips/constants-mips.h"
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  // No registers are currently managed by the register allocator on MIPS.
-  static const int kNumRegisters = 0;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 50ad7a1..17c1897 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,9 +33,10 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
+#include "cpu.h"
 #include "disasm.h"
 #include "assembler.h"
-#include "globals.h"    // Need the BitCast
+#include "globals.h"    // Need the BitCast.
 #include "mips/constants-mips.h"
 #include "mips/simulator-mips.h"
 
@@ -46,7 +47,7 @@
 namespace v8 {
 namespace internal {
 
-// Utils functions
+// Utils functions.
 bool HaveSameSign(int32_t a, int32_t b) {
   return ((a ^ b) >= 0);
 }
@@ -126,20 +127,33 @@
 
 
 void MipsDebugger::Stop(Instruction* instr) {
-  UNIMPLEMENTED_MIPS();
-  char* str = reinterpret_cast<char*>(instr->InstructionBits());
-  if (strlen(str) > 0) {
+  // Get the stop code.
+  uint32_t code = instr->Bits(25, 6);
+  // Retrieve the encoded address, which comes just after this stop.
+  char** msg_address =
+      reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+  char* msg = *msg_address;
+  ASSERT(msg != NULL);
+
+  // Update this stop description.
+  if (!watched_stops[code].desc) {
+    watched_stops[code].desc = msg;
+  }
+
+  if (strlen(msg) > 0) {
     if (coverage_log != NULL) {
-      fprintf(coverage_log, "%s\n", str);
+      fprintf(coverage_log, "%s\n", msg);
       fflush(coverage_log);
     }
-    instr->SetInstructionBits(0x0);  // Overwrite with nop.
+    // Overwrite the instruction and address with nops.
+    instr->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
   }
-  sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+  sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
 }
 
 
-#else  // ndef GENERATED_CODE_COVERAGE
+#else  // GENERATED_CODE_COVERAGE
 
 #define UNSUPPORTED() printf("Unsupported instruction.\n");
 
@@ -147,9 +161,17 @@
 
 
 void MipsDebugger::Stop(Instruction* instr) {
-  const char* str = reinterpret_cast<char*>(instr->InstructionBits());
-  PrintF("Simulator hit %s\n", str);
-  sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+  // Get the stop code.
+  uint32_t code = instr->Bits(25, 6);
+  // Retrieve the encoded address, which comes just after this stop.
+  char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+      Instruction::kInstrSize);
+  // Update this stop description.
+  if (!sim_->watched_stops[code].desc) {
+    sim_->watched_stops[code].desc = msg;
+  }
+  PrintF("Simulator hit %s (%u)\n", msg, code);
+  sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
   Debug();
 }
 #endif  // GENERATED_CODE_COVERAGE
@@ -263,15 +285,15 @@
 #define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
 
   PrintF("\n");
-  // at, v0, a0
+  // at, v0, a0.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(1), REG_INFO(2), REG_INFO(4));
-  // v1, a1
+  // v1, a1.
   PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          "", REG_INFO(3), REG_INFO(5));
-  // a2
+  // a2.
   PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
-  // a3
+  // a3.
   PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
   PrintF("\n");
   // t0-t7, s0-s7
@@ -280,16 +302,16 @@
            REG_INFO(8+i), REG_INFO(16+i));
   }
   PrintF("\n");
-  // t8, k0, LO
+  // t8, k0, LO.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(24), REG_INFO(26), REG_INFO(32));
-  // t9, k1, HI
+  // t9, k1, HI.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(25), REG_INFO(27), REG_INFO(33));
-  // sp, fp, gp
+  // sp, fp, gp.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(29), REG_INFO(30), REG_INFO(28));
-  // pc
+  // pc.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(31), REG_INFO(34));
 
@@ -307,7 +329,7 @@
   PrintAllRegs();
 
   PrintF("\n\n");
-  // f0, f1, f2, ... f31
+  // f0, f1, f2, ... f31.
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
@@ -345,7 +367,7 @@
   char arg2[ARG_SIZE + 1];
   char* argv[3] = { cmd, arg1, arg2 };
 
-  // make sure to have a proper terminating character if reaching the limit
+  // Make sure to have a proper terminating character if reaching the limit.
   cmd[COMMAND_SIZE] = 0;
   arg1[ARG_SIZE] = 0;
   arg2[ARG_SIZE] = 0;
@@ -358,10 +380,10 @@
     if (last_pc != sim_->get_pc()) {
       disasm::NameConverter converter;
       disasm::Disassembler dasm(converter);
-      // use a reasonably large buffer
+      // Use a reasonably large buffer.
       v8::internal::EmbeddedVector<char, 256> buffer;
       dasm.InstructionDecode(buffer,
-                             reinterpret_cast<byte_*>(sim_->get_pc()));
+                             reinterpret_cast<byte*>(sim_->get_pc()));
       PrintF("  0x%08x  %s\n", sim_->get_pc(), buffer.start());
       last_pc = sim_->get_pc();
     }
@@ -475,7 +497,7 @@
 
         if (strcmp(cmd, "stack") == 0) {
           cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
-        } else {  // "mem"
+        } else {  // Command "mem".
           int32_t value;
           if (!GetValue(arg1, &value)) {
             PrintF("%s unrecognized\n", arg1);
@@ -496,35 +518,62 @@
         end = cur + words;
 
         while (cur < end) {
-          PrintF("  0x%08x:  0x%08x %10d\n",
+          PrintF("  0x%08x:  0x%08x %10d",
                  reinterpret_cast<intptr_t>(cur), *cur, *cur);
+          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+          int value = *cur;
+          Heap* current_heap = v8::internal::Isolate::Current()->heap();
+          if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+            PrintF(" (");
+            if ((value & 1) == 0) {
+              PrintF("smi %d", value / 2);
+            } else {
+              obj->ShortPrint();
+            }
+            PrintF(")");
+          }
+          PrintF("\n");
           cur++;
         }
 
-      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+      } else if ((strcmp(cmd, "disasm") == 0) ||
+                 (strcmp(cmd, "dpc") == 0) ||
+                 (strcmp(cmd, "di") == 0)) {
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
-        // use a reasonably large buffer
+        // Use a reasonably large buffer.
         v8::internal::EmbeddedVector<char, 256> buffer;
 
-        byte_* cur = NULL;
-        byte_* end = NULL;
+        byte* cur = NULL;
+        byte* end = NULL;
 
         if (argc == 1) {
-          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
-          int32_t value;
-          if (GetValue(arg1, &value)) {
-            cur = reinterpret_cast<byte_*>(value);
-            // no length parameter passed, assume 10 instructions
-            end = cur + (10 * Instruction::kInstrSize);
+          int regnum = Registers::Number(arg1);
+          if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+            // The argument is an address or a register name.
+            int32_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(value);
+              // Disassemble 10 instructions at <arg1>.
+              end = cur + (10 * Instruction::kInstrSize);
+            }
+          } else {
+            // The argument is the number of instructions.
+            int32_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(sim_->get_pc());
+              // Disassemble <arg1> instructions.
+              end = cur + (value * Instruction::kInstrSize);
+            }
           }
         } else {
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
-            cur = reinterpret_cast<byte_*>(value1);
+            cur = reinterpret_cast<byte*>(value1);
             end = cur + (value2 * Instruction::kInstrSize);
           }
         }
@@ -558,28 +607,87 @@
         }
       } else if (strcmp(cmd, "flags") == 0) {
         PrintF("No flags on MIPS !\n");
-      } else if (strcmp(cmd, "unstop") == 0) {
-          PrintF("Unstop command not implemented on MIPS.");
+      } else if (strcmp(cmd, "stop") == 0) {
+        int32_t value;
+        intptr_t stop_pc = sim_->get_pc() -
+            2 * Instruction::kInstrSize;
+        Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+        Instruction* msg_address =
+          reinterpret_cast<Instruction*>(stop_pc +
+              Instruction::kInstrSize);
+        if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+          // Remove the current stop.
+          if (sim_->IsStopInstruction(stop_instr)) {
+            stop_instr->SetInstructionBits(kNopInstr);
+            msg_address->SetInstructionBits(kNopInstr);
+          } else {
+            PrintF("Not at debugger stop.\n");
+          }
+        } else if (argc == 3) {
+          // Print information about all/the specified breakpoint(s).
+          if (strcmp(arg1, "info") == 0) {
+            if (strcmp(arg2, "all") == 0) {
+              PrintF("Stop information:\n");
+              for (uint32_t i = kMaxWatchpointCode + 1;
+                   i <= kMaxStopCode;
+                   i++) {
+                sim_->PrintStopInfo(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->PrintStopInfo(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "enable") == 0) {
+            // Enable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = kMaxWatchpointCode + 1;
+                   i <= kMaxStopCode;
+                   i++) {
+                sim_->EnableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->EnableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "disable") == 0) {
+            // Disable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = kMaxWatchpointCode + 1;
+                   i <= kMaxStopCode;
+                   i++) {
+                sim_->DisableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->DisableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          }
+        } else {
+          PrintF("Wrong usage. Use help command for more information.\n");
+        }
       } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
-        // Print registers and disassemble
+        // Print registers and disassemble.
         PrintAllRegs();
         PrintF("\n");
 
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
-        // use a reasonably large buffer
+        // Use a reasonably large buffer.
         v8::internal::EmbeddedVector<char, 256> buffer;
 
-        byte_* cur = NULL;
-        byte_* end = NULL;
+        byte* cur = NULL;
+        byte* end = NULL;
 
         if (argc == 1) {
-          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
-            cur = reinterpret_cast<byte_*>(value);
+            cur = reinterpret_cast<byte*>(value);
             // no length parameter passed, assume 10 instructions
             end = cur + (10 * Instruction::kInstrSize);
           }
@@ -587,7 +695,7 @@
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
-            cur = reinterpret_cast<byte_*>(value1);
+            cur = reinterpret_cast<byte*>(value1);
             end = cur + (value2 * Instruction::kInstrSize);
           }
         }
@@ -615,17 +723,36 @@
         PrintF("flags\n");
         PrintF("  print flags\n");
         PrintF("disasm [<instructions>]\n");
-        PrintF("disasm [[<address>] <instructions>]\n");
-        PrintF("  disassemble code, default is 10 instructions from pc\n");
+        PrintF("disasm [<address/register>]\n");
+        PrintF("disasm [[<address/register>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions\n");
+        PrintF("  from pc (alias 'di')\n");
         PrintF("gdb\n");
         PrintF("  enter gdb\n");
         PrintF("break <address>\n");
         PrintF("  set a break point on the address\n");
         PrintF("del\n");
         PrintF("  delete the breakpoint\n");
-        PrintF("unstop\n");
-        PrintF("  ignore the stop instruction at the current location");
-        PrintF(" from now on\n");
+        PrintF("stop feature:\n");
+        PrintF("  Description:\n");
+        PrintF("    Stops are debug instructions inserted by\n");
+        PrintF("    the Assembler::stop() function.\n");
+        PrintF("    When hitting a stop, the Simulator will\n");
+        PrintF("    stop and and give control to the Debugger.\n");
+        PrintF("    All stop codes are watched:\n");
+        PrintF("    - They can be enabled / disabled: the Simulator\n");
+        PrintF("       will / won't stop when hitting them.\n");
+        PrintF("    - The Simulator keeps track of how many times they \n");
+        PrintF("      are met. (See the info command.) Going over a\n");
+        PrintF("      disabled stop still increases its counter. \n");
+        PrintF("  Commands:\n");
+        PrintF("    stop info all/<code> : print infos about number <code>\n");
+        PrintF("      or all stop(s).\n");
+        PrintF("    stop enable/disable all/<code> : enables / disables\n");
+        PrintF("      all or number <code> stop(s)\n");
+        PrintF("    stop unstop\n");
+        PrintF("      ignore the stop instruction at the current location\n");
+        PrintF("      from now on\n");
       } else {
         PrintF("Unknown command: %s\n", cmd);
       }
@@ -689,8 +816,8 @@
 
 CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
   v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
-                                                         ICacheHash(page),
-                                                         true);
+                                                        ICacheHash(page),
+                                                        true);
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -738,23 +865,23 @@
 }
 
 
-void Simulator::Initialize() {
-  if (Isolate::Current()->simulator_initialized()) return;
-  Isolate::Current()->set_simulator_initialized(true);
-  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
 }
 
 
-Simulator::Simulator() : isolate_(Isolate::Current()) {
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
     i_cache_ = new v8::internal::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
-  Initialize();
+  Initialize(isolate);
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
-  stack_size_ = 1 * 1024*1024;  // allocate 1MB for stack
   stack_ = reinterpret_cast<char*>(malloc(stack_size_));
   pc_modified_ = false;
   icount_ = 0;
@@ -852,17 +979,14 @@
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current(Isolate* isolate) {
   v8::internal::Isolate::PerIsolateThreadData* isolate_data =
-      Isolate::CurrentPerIsolateThreadData();
-  if (isolate_data == NULL) {
-    Isolate::EnterDefaultIsolate();
-    isolate_data = Isolate::CurrentPerIsolateThreadData();
-  }
+       isolate->FindOrAllocatePerThreadDataForThisThread();
   ASSERT(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
     // TODO(146): delete the simulator object when a thread/isolate goes away.
-    sim = new Simulator();
+    sim = new Simulator(isolate);
     isolate_data->set_simulator(sim);
   }
   return sim;
@@ -877,7 +1001,7 @@
     pc_modified_ = true;
   }
 
-  // zero register always hold 0.
+  // Zero register always holds 0.
   registers_[reg] = (reg == 0) ? 0 : value;
 }
 
@@ -937,6 +1061,87 @@
 }
 
 
+// For use in calls that take two double values, constructed either
+// from a0-a3 or f12 and f14.
+void Simulator::GetFpArgs(double* x, double* y) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+    *y = get_fpu_register_double(14);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+
+    // Registers a2 and a3 -> y.
+    reg_buffer[0] = get_register(a2);
+    reg_buffer[1] = get_register(a3);
+    memcpy(y, buffer, sizeof(buffer));
+  }
+}
+
+
+// For use in calls that take one double value, constructed either
+// from a0 and a1 or f12.
+void Simulator::GetFpArgs(double* x) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+  }
+}
+
+
+// For use in calls that take one double value constructed either
+// from a0 and a1 or f12 and one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+    *y = get_register(a2);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    // Registers 0 and 1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+
+    // Register 2 -> y.
+    reg_buffer[0] = get_register(a2);
+    memcpy(y, buffer, sizeof(*y));
+  }
+}
+
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+  if (!IsMipsSoftFloatABI) {
+    set_fpu_register_double(0, result);
+  } else {
+    char buffer[2 * sizeof(registers_[0])];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to v0 and v1.
+    set_register(v0, reg_buffer[0]);
+    set_register(v1, reg_buffer[1]);
+  }
+}
+
+
 // Helper functions for setting and testing the FCSR register's bits.
 void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
   if (value) {
@@ -955,15 +1160,30 @@
 // Sets the rounding error codes in FCSR based on the result of the rounding.
 // Returns true if the operation was invalid.
 bool Simulator::set_fcsr_round_error(double original, double rounded) {
-  if (!isfinite(original) ||
-      rounded > LONG_MAX ||
-      rounded < LONG_MIN) {
-    set_fcsr_bit(6, true);  // Invalid operation.
-    return true;
-  } else if (original != static_cast<double>(rounded)) {
-    set_fcsr_bit(2, true);  // Inexact.
+  bool ret = false;
+
+  if (!isfinite(original) || !isfinite(rounded)) {
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
   }
-  return false;
+
+  if (original != rounded) {
+    set_fcsr_bit(kFCSRInexactFlagBit, true);
+  }
+
+  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+    ret = true;
+  }
+
+  if (rounded > INT_MAX || rounded < INT_MIN) {
+    set_fcsr_bit(kFCSROverflowFlagBit, true);
+    // The reference manual is not entirely clear, but this seems required:
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  return ret;
 }
 
 
@@ -995,7 +1215,9 @@
 
 int Simulator::ReadW(int32_t addr, Instruction* instr) {
   if (addr >=0 && addr < 0x400) {
-    // this has to be a NULL-dereference
+    // This has to be a NULL-dereference, drop into debugger.
+    PrintF("Memory read from bad address: 0x%08x, pc=0x%08x\n",
+           addr, reinterpret_cast<intptr_t>(instr));
     MipsDebugger dbg(this);
     dbg.Debug();
   }
@@ -1003,8 +1225,9 @@
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   MipsDebugger dbg(this);
   dbg.Debug();
   return 0;
@@ -1013,7 +1236,9 @@
 
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
   if (addr >= 0 && addr < 0x400) {
-    // this has to be a NULL-dereference
+    // This has to be a NULL-dereference, drop into debugger.
+    PrintF("Memory write to bad address: 0x%08x, pc=0x%08x\n",
+           addr, reinterpret_cast<intptr_t>(instr));
     MipsDebugger dbg(this);
     dbg.Debug();
   }
@@ -1022,8 +1247,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   MipsDebugger dbg(this);
   dbg.Debug();
 }
@@ -1034,8 +1260,9 @@
     double* ptr = reinterpret_cast<double*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned (double) read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1047,8 +1274,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned (double) write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1058,8 +1286,9 @@
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1070,8 +1299,9 @@
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1083,8 +1313,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1095,8 +1326,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1158,6 +1390,14 @@
                                          int32_t arg2,
                                          int32_t arg3);
 
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+
+// This signature supports direct call to accessor getter callback.
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
+                                                                  int32_t arg1);
+
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime. They are also used for debugging with simulator.
 void Simulator::SoftwareInterrupt(Instruction* instr) {
@@ -1165,81 +1405,131 @@
   // the break_ instruction, or several variants of traps. All
   // Are "SPECIAL" class opcode, and are distinuished by function.
   int32_t func = instr->FunctionFieldRaw();
-  int32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+  uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
 
   // We first check if we met a call_rt_redirected.
   if (instr->InstructionBits() == rtCallRedirInstr) {
-    // Check if stack is aligned. Error if not aligned is reported below to
-    // include information on the function called.
-    bool stack_aligned =
-        (get_register(sp)
-         & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
     Redirection* redirection = Redirection::FromSwiInstruction(instr);
     int32_t arg0 = get_register(a0);
     int32_t arg1 = get_register(a1);
     int32_t arg2 = get_register(a2);
     int32_t arg3 = get_register(a3);
-    int32_t arg4 = 0;
-    int32_t arg5 = 0;
 
-    // Need to check if sp is valid before assigning arg4, arg5.
-    // This is a fix for cctest test-api/CatchStackOverflow which causes
-    // the stack to overflow. For some reason arm doesn't need this
-    // stack check here.
     int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
-    int32_t* stack = reinterpret_cast<int32_t*>(stack_);
-    if (stack_pointer >= stack && stack_pointer < stack + stack_size_) {
-      arg4 = stack_pointer[0];
-      arg5 = stack_pointer[1];
+    // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+    int32_t arg4 = stack_pointer[4];
+    int32_t arg5 = stack_pointer[5];
+
+    bool fp_call =
+         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+    if (!IsMipsSoftFloatABI) {
+      // With the hard floating point calling convention, double
+      // arguments are passed in FPU registers. Fetch the arguments
+      // from there and call the builtin using soft floating point
+      // convention.
+      switch (redirection->type()) {
+      case ExternalReference::BUILTIN_FP_FP_CALL:
+      case ExternalReference::BUILTIN_COMPARE_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_fpu_register(f14);
+        arg3 = get_fpu_register(f15);
+        break;
+      case ExternalReference::BUILTIN_FP_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        break;
+      case ExternalReference::BUILTIN_FP_INT_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_register(a2);
+        break;
+      default:
+        break;
+      }
     }
+
     // This is dodgy but it works because the C entry stubs are never moved.
     // See comment in codegen-arm.cc and bug 1242173.
     int32_t saved_ra = get_register(ra);
 
     intptr_t external =
-        reinterpret_cast<int32_t>(redirection->external_function());
+          reinterpret_cast<intptr_t>(redirection->external_function());
 
     // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
     // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
     // simulator. Soft-float has additional abstraction of ExternalReference,
-    // to support serialization. Finally, when simulated on x86 host, the
-    // x86 softfloat routines are used, and this Redirection infrastructure
-    // lets simulated-mips make calls into x86 C code.
-    // When doing that, the 'double' return type must be handled differently
-    // than the usual int64_t return. The data is returned in different
-    // registers and cannot be cast from one type to the other. However, the
-    // calling arguments are passed the same way in both cases.
-    if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
+    // to support serialization.
+    if (fp_call) {
       SimulatorRuntimeFPCall target =
-          reinterpret_cast<SimulatorRuntimeFPCall>(external);
-      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-        PrintF("Call to host function at %p with args %08x:%08x %08x:%08x",
-               FUNCTION_ADDR(target), arg0, arg1, arg2, arg3);
-        if (!stack_aligned) {
-          PrintF(" with unaligned stack %08x\n", get_register(sp));
+                  reinterpret_cast<SimulatorRuntimeFPCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        double dval0, dval1;
+        int32_t ival;
+        switch (redirection->type()) {
+          case ExternalReference::BUILTIN_FP_FP_CALL:
+          case ExternalReference::BUILTIN_COMPARE_CALL:
+            GetFpArgs(&dval0, &dval1);
+            PrintF("Call to host function at %p with args %f, %f",
+                FUNCTION_ADDR(target), dval0, dval1);
+            break;
+          case ExternalReference::BUILTIN_FP_CALL:
+            GetFpArgs(&dval0);
+            PrintF("Call to host function at %p with arg %f",
+                FUNCTION_ADDR(target), dval0);
+            break;
+          case ExternalReference::BUILTIN_FP_INT_CALL:
+            GetFpArgs(&dval0, &ival);
+            PrintF("Call to host function at %p with args %f, %d",
+                FUNCTION_ADDR(target), dval0, ival);
+            break;
+          default:
+            UNREACHABLE();
+            break;
         }
-        PrintF("\n");
       }
       double result = target(arg0, arg1, arg2, arg3);
-      // fp result -> registers v0 and v1.
-      int32_t gpreg_pair[2];
-      memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
-      set_register(v0, gpreg_pair[0]);
-      set_register(v1, gpreg_pair[1]);
+      if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+        SetFpResult(result);
+      } else {
+        int32_t gpreg_pair[2];
+        memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
+        set_register(v0, gpreg_pair[0]);
+        set_register(v1, gpreg_pair[1]);
+      }
     } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
-      PrintF("Mips does not yet support ExternalReference::DIRECT_API_CALL\n");
-      ASSERT(redirection->type() != ExternalReference::DIRECT_API_CALL);
+      // See DirectCEntryStub::GenerateCall for explanation of register usage.
+      SimulatorRuntimeDirectApiCall target =
+                  reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08x\n",
+               FUNCTION_ADDR(target), arg1);
+      }
+      v8::Handle<v8::Value> result = target(arg1);
+      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      set_register(v0, arg0);
     } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
-      PrintF("Mips does not support ExternalReference::DIRECT_GETTER_CALL\n");
-      ASSERT(redirection->type() != ExternalReference::DIRECT_GETTER_CALL);
+      // See DirectCEntryStub::GenerateCall for explanation of register usage.
+      SimulatorRuntimeDirectGetterCall target =
+                  reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08x %08x\n",
+               FUNCTION_ADDR(target), arg1, arg2);
+      }
+      v8::Handle<v8::Value> result = target(arg1, arg2);
+      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      set_register(v0, arg0);
     } else {
-      // Builtin call.
-      ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
       SimulatorRuntimeCall target =
-          reinterpret_cast<SimulatorRuntimeCall>(external);
-      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+                  reinterpret_cast<SimulatorRuntimeCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
         PrintF(
-            "Call to host function at %p: %08x, %08x, %08x, %08x, %08x, %08x",
+            "Call to host function at %p "
+            "args %08x, %08x, %08x, %08x, %08x, %08x\n",
             FUNCTION_ADDR(target),
             arg0,
             arg1,
@@ -1247,12 +1537,7 @@
             arg3,
             arg4,
             arg5);
-        if (!stack_aligned) {
-          PrintF(" with unaligned stack %08x\n", get_register(sp));
-        }
-        PrintF("\n");
       }
-
       int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
       set_register(v0, static_cast<int32_t>(result));
       set_register(v1, static_cast<int32_t>(result >> 32));
@@ -1263,14 +1548,13 @@
     set_register(ra, saved_ra);
     set_pc(get_register(ra));
 
-  } else if (func == BREAK && code >= 0 && code < 16) {
-    // First 16 break_ codes interpreted as debug markers.
-    MipsDebugger dbg(this);
-    ++break_count_;
-    PrintF("\n---- break %d marker: %3d  (instr count: %8d) ----------"
-           "----------------------------------",
-           code, break_count_, icount_);
-    dbg.PrintAllRegs();  // Print registers and continue running.
+  } else if (func == BREAK && code <= kMaxStopCode) {
+    if (IsWatchpoint(code)) {
+      PrintWatchpoint(code);
+    } else {
+      IncreaseStopCounter(code);
+      HandleStop(code, instr);
+    }
   } else {
     // All remaining break_ codes, and all traps are handled here.
     MipsDebugger dbg(this);
@@ -1279,6 +1563,99 @@
 }
 
 
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint32_t code) {
+  return (code <= kMaxWatchpointCode);
+}
+
+
+void Simulator::PrintWatchpoint(uint32_t code) {
+  MipsDebugger dbg(this);
+  ++break_count_;
+  PrintF("\n---- break %d marker: %3d  (instr count: %8d) ----------"
+         "----------------------------------",
+         code, break_count_, icount_);
+  dbg.PrintAllRegs();  // Print registers and continue running.
+}
+
+
+void Simulator::HandleStop(uint32_t code, Instruction* instr) {
+  // Stop if it is enabled, otherwise go on jumping over the stop
+  // and the message address.
+  if (IsEnabledStop(code)) {
+    MipsDebugger dbg(this);
+    dbg.Stop(instr);
+  } else {
+    set_pc(get_pc() + 2 * Instruction::kInstrSize);
+  }
+}
+
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+  int32_t func = instr->FunctionFieldRaw();
+  uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
+  return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+
+bool Simulator::IsEnabledStop(uint32_t code) {
+  ASSERT(code <= kMaxStopCode);
+  ASSERT(code > kMaxWatchpointCode);
+  return !(watched_stops[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint32_t code) {
+  if (!IsEnabledStop(code)) {
+    watched_stops[code].count &= ~kStopDisabledBit;
+  }
+}
+
+
+void Simulator::DisableStop(uint32_t code) {
+  if (IsEnabledStop(code)) {
+    watched_stops[code].count |= kStopDisabledBit;
+  }
+}
+
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+  ASSERT(code <= kMaxStopCode);
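+  // Bit 31 of count is the disabled flag; the low 31 bits hold the counter.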
+  if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+    PrintF("Stop counter for code %i has overflowed.\n"
+           "Enabling this code and reseting the counter to 0.\n", code);
+    watched_stops[code].count = 0;
+    EnableStop(code);
+  } else {
+    watched_stops[code].count++;
+  }
+}
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+  if (code <= kMaxWatchpointCode) {
+    PrintF("That is a watchpoint, not a stop.\n");
+    return;
+  } else if (code > kMaxStopCode) {
+    PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+    return;
+  }
+  const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+  int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+  // Don't print the state of unused breakpoints.
+  if (count != 0) {
+    if (watched_stops[code].desc) {
+      PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+             code, code, state, count, watched_stops[code].desc);
+    } else {
+      PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+             code, code, state, count);
+    }
+  }
+}
+
+
 void Simulator::SignalExceptions() {
   for (int i = 1; i < kNumExceptions; i++) {
     if (exceptions[i] != 0) {
@@ -1314,9 +1691,9 @@
   const int32_t  fs_reg = instr->FsValue();
 
 
-  // ---------- Configuration
+  // ---------- Configuration.
   switch (op) {
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (instr->RsFieldRaw()) {
         case BC1:   // Handled in DecodeTypeImmed, should never come here.
           UNREACHABLE();
@@ -1365,7 +1742,7 @@
           } else {
             // Logical right-rotate of a word by a fixed number of bits. This
             // is special case of SRL instruction, added in MIPS32 Release 2.
-            // RS field is equal to 00001
+            // RS field is equal to 00001.
             alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
           }
           break;
@@ -1383,7 +1760,7 @@
           } else {
             // Logical right-rotate of a word by a variable number of bits.
             // This is special case od SRLV instruction, added in MIPS32
-            // Release 2. SA field is equal to 00001
+            // Release 2. SA field is equal to 00001.
             alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
           }
           break;
@@ -1402,10 +1779,6 @@
         case MULTU:
           u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
           break;
-        case DIV:
-        case DIVU:
-            exceptions[kDivideByZero] = rt == 0;
-          break;
         case ADD:
           if (HaveSameSign(rs, rt)) {
             if (rs > 0) {
@@ -1450,7 +1823,7 @@
         case SLTU:
           alu_out = rs_u < rt_u ? 1 : 0;
           break;
-        // Break and trap instructions
+        // Break and trap instructions.
         case BREAK:
 
           do_interrupt = true;
@@ -1478,6 +1851,10 @@
         case MOVCI:
           // No action taken on decode.
           break;
+        case DIV:
+        case DIVU:
+          // div and divu never raise exceptions.
+          break;
         default:
           UNREACHABLE();
       };
@@ -1497,7 +1874,7 @@
     case SPECIAL3:
       switch (instr->FunctionFieldRaw()) {
         case INS: {   // Mips32r2 instruction.
-          // Interpret Rd field as 5-bit msb of insert.
+          // Interpret rd field as 5-bit msb of insert.
           uint16_t msb = rd_reg;
           // Interpret sa field as 5-bit lsb of insert.
           uint16_t lsb = sa;
@@ -1507,7 +1884,7 @@
           break;
         }
         case EXT: {   // Mips32r2 instruction.
-          // Interpret Rd field as 5-bit msb of extract.
+          // Interpret rd field as 5-bit msb of extract.
           uint16_t msb = rd_reg;
           // Interpret sa field as 5-bit lsb of extract.
           uint16_t lsb = sa;
@@ -1543,7 +1920,7 @@
   int64_t  i64hilo = 0;
   uint64_t u64hilo = 0;
 
-  // ALU output
+  // ALU output.
   // It should not be used as is. Instructions using it should always
   // initialize it first.
   int32_t alu_out = 0x12345678;
@@ -1551,7 +1928,7 @@
   // For break and trap instructions.
   bool do_interrupt = false;
 
-  // For jr and jalr
+  // For jr and jalr.
   // Get current pc.
   int32_t current_pc = get_pc();
   // Next pc
@@ -1568,11 +1945,11 @@
   // ---------- Raise exceptions triggered.
   SignalExceptions();
 
-  // ---------- Execution
+  // ---------- Execution.
   switch (op) {
     case COP1:
       switch (instr->RsFieldRaw()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // Branch on coprocessor condition.
           UNREACHABLE();
           break;
         case CFC1:
@@ -1688,9 +2065,10 @@
               break;
             case TRUNC_W_D:  // Truncate double to word (round towards 0).
               {
-                int32_t result = static_cast<int32_t>(fs);
+                double rounded = trunc(fs);
+                int32_t result = static_cast<int32_t>(rounded);
                 set_fpu_register(fd_reg, result);
-                if (set_fcsr_round_error(fs, static_cast<double>(result))) {
+                if (set_fcsr_round_error(fs, rounded)) {
                   set_fpu_register(fd_reg, kFPUInvalidResult);
                 }
               }
@@ -1718,16 +2096,20 @@
             case CVT_S_D:  // Convert double to float (single).
               set_fpu_register_float(fd_reg, static_cast<float>(fs));
               break;
-            case CVT_L_D:  // Mips32r2: Truncate double to 64-bit long-word.
-              i64 = static_cast<int64_t>(fs);
+            case CVT_L_D: {  // Mips32r2: Truncate double to 64-bit long-word.
+              double rounded = trunc(fs);
+              i64 = static_cast<int64_t>(rounded);
               set_fpu_register(fd_reg, i64 & 0xffffffff);
               set_fpu_register(fd_reg + 1, i64 >> 32);
               break;
-            case TRUNC_L_D:  // Mips32r2 instruction.
-              i64 = static_cast<int64_t>(fs);
+            }
+            case TRUNC_L_D: {  // Mips32r2 instruction.
+              double rounded = trunc(fs);
+              i64 = static_cast<int64_t>(rounded);
               set_fpu_register(fd_reg, i64 & 0xffffffff);
               set_fpu_register(fd_reg + 1, i64 >> 32);
               break;
+            }
             case ROUND_L_D: {  // Mips32r2 instruction.
               double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
               i64 = static_cast<int64_t>(rounded);
@@ -1802,7 +2184,7 @@
           Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
               current_pc+Instruction::kInstrSize);
           BranchDelayInstructionDecode(branch_delay_instr);
-          set_register(31, current_pc + 2* Instruction::kInstrSize);
+          set_register(31, current_pc + 2 * Instruction::kInstrSize);
           set_pc(next_pc);
           pc_modified_ = true;
           break;
@@ -1817,13 +2199,19 @@
           set_register(HI, static_cast<int32_t>(u64hilo >> 32));
           break;
         case DIV:
-          // Divide by zero was checked in the configuration step.
-          set_register(LO, rs / rt);
-          set_register(HI, rs % rt);
+          // Divide by zero was not checked in the configuration step - div and
+          // divu do not raise exceptions. On division by 0, the result will
+          // be UNPREDICTABLE.
+          if (rt != 0) {
+            set_register(LO, rs / rt);
+            set_register(HI, rs % rt);
+          }
           break;
         case DIVU:
-          set_register(LO, rs_u / rt_u);
-          set_register(HI, rs_u % rt_u);
+          if (rt_u != 0) {
+            set_register(LO, rs_u / rt_u);
+            set_register(HI, rs_u % rt_u);
+          }
           break;
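
In the execution step the same reasoning applies: division by zero must not trap, so LO and HI are simply left with whatever they held before. A short illustrative sketch of the guarded update (not part of the patch):

```cpp
#include <cstdint>

struct HiLo {
  int32_t hi;  // Remainder.
  int32_t lo;  // Quotient.
};

void SimulateDiv(int32_t rs, int32_t rt, HiLo* state) {
  if (rt != 0) {
    state->lo = rs / rt;
    state->hi = rs % rt;
  }
  // rt == 0: HI and LO keep their previous (unpredictable) contents.
}
```
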
         // Break and trap instructions.
         case BREAK:
@@ -1842,9 +2230,9 @@
           if (rt) set_register(rd_reg, rs);
           break;
         case MOVCI: {
-          uint32_t cc = instr->FCccValue();
+          uint32_t cc = instr->FBccValue();
           uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
-          if (instr->Bit(16)) {  // Read Tf bit
+          if (instr->Bit(16)) {  // Read Tf bit.
             if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
           } else {
             if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
@@ -1893,17 +2281,17 @@
 }
 
 
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq)
+// Type 2: instructions using a 16-bit immediate. (e.g. addi, beq).
 void Simulator::DecodeTypeImmediate(Instruction* instr) {
   // Instruction fields.
   Opcode   op     = instr->OpcodeFieldRaw();
   int32_t  rs     = get_register(instr->RsValue());
   uint32_t rs_u   = static_cast<uint32_t>(rs);
-  int32_t  rt_reg = instr->RtValue();  // destination register
+  int32_t  rt_reg = instr->RtValue();  // Destination register.
   int32_t  rt     = get_register(rt_reg);
   int16_t  imm16  = instr->Imm16Value();
 
-  int32_t  ft_reg = instr->FtValue();  // destination register
+  int32_t  ft_reg = instr->FtValue();  // Destination register.
 
   // Zero extended immediate.
   uint32_t  oe_imm16 = 0xffff & imm16;
@@ -1927,10 +2315,10 @@
 
   // Used for memory instructions.
   int32_t addr = 0x0;
-  // Value to be written in memory
+  // Value to be written in memory.
   uint32_t mem_value = 0x0;
 
-  // ---------- Configuration (and execution for REGIMM)
+  // ---------- Configuration (and execution for REGIMM).
   switch (op) {
     // ------------- COP1. Coprocessor instructions.
     case COP1:
@@ -1941,7 +2329,7 @@
           cc_value = test_fcsr_bit(fcsr_cc);
           do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
           execute_branch_delay_instruction = true;
-          // Set next_pc
+          // Set next_pc.
           if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
           } else {
@@ -1952,7 +2340,7 @@
           UNREACHABLE();
       };
       break;
-    // ------------- REGIMM class
+    // ------------- REGIMM class.
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
@@ -1977,7 +2365,7 @@
         case BGEZAL:
           // Branch instructions common part.
           execute_branch_delay_instruction = true;
-          // Set next_pc
+          // Set next_pc.
           if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
             if (instr->IsLinkingInstruction()) {
@@ -1989,8 +2377,8 @@
         default:
           break;
         };
-    break;  // case REGIMM
-    // ------------- Branch instructions
+    break;  // case REGIMM.
+    // ------------- Branch instructions.
     // When comparing to zero, the encoding of rt field is always 0, so we don't
     // need to replace rt with zero.
     case BEQ:
@@ -2005,7 +2393,7 @@
     case BGTZ:
       do_branch = rs  > 0;
       break;
-    // ------------- Arithmetic instructions
+    // ------------- Arithmetic instructions.
     case ADDI:
       if (HaveSameSign(rs, se_imm16)) {
         if (rs > 0) {
@@ -2038,7 +2426,7 @@
     case LUI:
         alu_out = (oe_imm16 << 16);
       break;
-    // ------------- Memory instructions
+    // ------------- Memory instructions.
     case LB:
       addr = rs + se_imm16;
       alu_out = ReadB(addr);
@@ -2048,7 +2436,7 @@
       alu_out = ReadH(addr, instr);
       break;
     case LWL: {
-      // al_offset is an offset of the effective address within an aligned word
+      // al_offset is offset of the effective address within an aligned word.
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = (1 << byte_shift * 8) - 1;
@@ -2071,7 +2459,7 @@
       alu_out = ReadHU(addr, instr);
       break;
     case LWR: {
-      // al_offset is an offset of the effective address within an aligned word
+      // al_offset is offset of the effective address within an aligned word.
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
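
Both unaligned-load cases derive their shift and mask purely from the address's position inside an aligned word. The sketch below just prints the values those two expressions produce for each alignment offset (4-byte words, i.e. kPointerAlignmentMask == 3, is an assumption here):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kPointerAlignmentMask = 3;  // Assumed 4-byte aligned words.
  for (uint32_t al_offset = 0; al_offset <= kPointerAlignmentMask;
       al_offset++) {
    uint32_t byte_shift = kPointerAlignmentMask - al_offset;
    // Mask computed by the LWL case above.
    uint32_t lwl_mask = (1u << byte_shift * 8) - 1;
    // Mask computed by the LWR case above (0 for an aligned access).
    uint32_t lwr_mask = al_offset ? (~0u << (byte_shift + 1) * 8) : 0u;
    std::printf("al_offset=%u byte_shift=%u LWL mask=0x%08x LWR mask=0x%08x\n",
                al_offset, byte_shift, lwl_mask, lwr_mask);
  }
}
```
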
@@ -2126,16 +2514,16 @@
   // ---------- Raise exceptions triggered.
   SignalExceptions();
 
-  // ---------- Execution
+  // ---------- Execution.
   switch (op) {
-    // ------------- Branch instructions
+    // ------------- Branch instructions.
     case BEQ:
     case BNE:
     case BLEZ:
     case BGTZ:
       // Branch instructions common part.
       execute_branch_delay_instruction = true;
-      // Set next_pc
+      // Set next_pc.
       if (do_branch) {
         next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
         if (instr->IsLinkingInstruction()) {
@@ -2145,7 +2533,7 @@
         next_pc = current_pc + 2 * Instruction::kInstrSize;
       }
       break;
-    // ------------- Arithmetic instructions
+    // ------------- Arithmetic instructions.
     case ADDI:
     case ADDIU:
     case SLTI:
@@ -2156,7 +2544,7 @@
     case LUI:
       set_register(rt_reg, alu_out);
       break;
-    // ------------- Memory instructions
+    // ------------- Memory instructions.
     case LB:
     case LH:
     case LWL:
@@ -2216,26 +2604,26 @@
 }
 
 
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal)
+// Type 3: instructions using a 26-bit immediate. (e.g. j, jal).
 void Simulator::DecodeTypeJump(Instruction* instr) {
   // Get current pc.
   int32_t current_pc = get_pc();
   // Get unchanged bits of pc.
   int32_t pc_high_bits = current_pc & 0xf0000000;
-  // Next pc
+  // Next pc.
   int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
 
-  // Execute branch delay slot
+  // Execute branch delay slot.
   // We don't check for end_sim_pc. First it should not be met as the current pc
   // is valid. Secondly a jump should always execute its branch delay slot.
   Instruction* branch_delay_instr =
-    reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+      reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
   BranchDelayInstructionDecode(branch_delay_instr);
 
   // Update pc and ra if necessary.
   // Do this after the branch delay execution.
   if (instr->IsLinkingInstruction()) {
-    set_register(31, current_pc + 2* Instruction::kInstrSize);
+    set_register(31, current_pc + 2 * Instruction::kInstrSize);
   }
   set_pc(next_pc);
   pc_modified_ = true;
@@ -2251,11 +2639,11 @@
   if (::v8::internal::FLAG_trace_sim) {
     disasm::NameConverter converter;
     disasm::Disassembler dasm(converter);
-    // use a reasonably large buffer
+    // Use a reasonably large buffer.
     v8::internal::EmbeddedVector<char, 256> buffer;
-    dasm.InstructionDecode(buffer, reinterpret_cast<byte_*>(instr));
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
     PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr),
-           buffer.start());
+        buffer.start());
   }
 
   switch (instr->InstructionType()) {
@@ -2310,10 +2698,10 @@
 }
 
 
-int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
-  // Setup arguments
+  // Setup arguments.
 
   // First four arguments passed in registers.
   ASSERT(argument_count >= 4);
@@ -2333,12 +2721,12 @@
   // Store remaining arguments on stack, from low to high memory.
   intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
   for (int i = 4; i < argument_count; i++) {
-    stack_argument[i - 4 + kArgsSlotsNum] = va_arg(parameters, int32_t);
+    stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
   }
   va_end(parameters);
   set_register(sp, entry_stack);
 
-  // Prepare to execute the code at entry
+  // Prepare to execute the code at entry.
   set_register(pc, reinterpret_cast<int32_t>(entry));
   // Put down marker for end of simulation. The simulator will stop simulation
   // when the PC reaches this value. By saving the "end simulation" value into
@@ -2374,7 +2762,7 @@
   set_register(gp, callee_saved_value);
   set_register(fp, callee_saved_value);
 
-  // Start the simulation
+  // Start the simulation.
   Execute();
 
   // Check that the callee-saved registers have been preserved.
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 0cd9bbe..69dddfa 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,14 +50,15 @@
   entry(p0, p1, p2, p3, p4)
 
 typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
-                                  void*, int*, Address, int, Isolate*);
+                                   void*, int*, Address, int, Isolate*);
+
 
 // Call the generated regexp code directly. The code at the entry address
 // should act as a function matching the type mips_regexp_matcher.
 // The fifth argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  (FUNCTION_CAST<mips_regexp_matcher>(entry)(                             \
+  (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
       p0, p1, p2, p3, NULL, p4, p5, p6, p7))
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -68,7 +69,8 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
     return c_limit;
   }
 
@@ -95,6 +97,7 @@
 // Running with a simulator.
 
 #include "hashmap.h"
+#include "assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -151,7 +154,7 @@
     sp,
     s8,
     ra,
-    // LO, HI, and pc
+    // LO, HI, and pc.
     LO,
     HI,
     pc,   // pc must be the last register.
@@ -164,13 +167,13 @@
   // Generated code will always use doubles. So we will only use even registers.
   enum FPURegister {
     f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
-    f12, f13, f14, f15,   // f12 and f14 are arguments FPURegisters
+    f12, f13, f14, f15,   // f12 and f14 are arguments FPURegisters.
     f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
     f26, f27, f28, f29, f30, f31,
     kNumFPURegisters
   };
 
-  Simulator();
+  explicit Simulator(Isolate* isolate);
   ~Simulator();
 
   // The currently executing Simulator instance. Potentially there can be one
@@ -182,7 +185,7 @@
   // instruction.
   void set_register(int reg, int32_t value);
   int32_t get_register(int reg) const;
-  // Same for FPURegisters
+  // Same for FPURegisters.
   void set_fpu_register(int fpureg, int32_t value);
   void set_fpu_register_float(int fpureg, float value);
   void set_fpu_register_double(int fpureg, double value);
@@ -205,7 +208,7 @@
   void Execute();
 
   // Call on program start.
-  static void Initialize();
+  static void Initialize(Isolate* isolate);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -286,6 +289,18 @@
   // Used for breakpoints and traps.
   void SoftwareInterrupt(Instruction* instr);
 
+  // Stop helper functions.
+  bool IsWatchpoint(uint32_t code);
+  void PrintWatchpoint(uint32_t code);
+  void HandleStop(uint32_t code, Instruction* instr);
+  bool IsStopInstruction(Instruction* instr);
+  bool IsEnabledStop(uint32_t code);
+  void EnableStop(uint32_t code);
+  void DisableStop(uint32_t code);
+  void IncreaseStopCounter(uint32_t code);
+  void PrintStopInfo(uint32_t code);
+
+
   // Executes one instruction.
   void InstructionDecode(Instruction* instr);
   // Execute one instruction placed in a branch delay slot.
@@ -304,7 +319,6 @@
                            int size);
   static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
 
-
   enum Exception {
     none,
     kIntegerOverflow,
@@ -321,9 +335,12 @@
   static void* RedirectExternalReference(void* external_function,
                                          ExternalReference::Type type);
 
-  // Used for real time calls that takes two double values as arguments and
-  // returns a double.
-  void SetFpResult(double result);
+  // For use in calls that take double value arguments.
+  void GetFpArgs(double* x, double* y);
+  void GetFpArgs(double* x);
+  void GetFpArgs(double* x, int32_t* y);
+  void SetFpResult(const double& result);
+
 
   // Architecture state.
   // Registers.
@@ -334,35 +351,49 @@
   uint32_t FCSR_;
 
   // Simulator support.
+  // Allocate 1MB for stack.
+  static const size_t stack_size_ = 1 * 1024 * 1024;
   char* stack_;
-  size_t stack_size_;
   bool pc_modified_;
   int icount_;
   int break_count_;
 
-  // Icache simulation
+  // Icache simulation.
   v8::internal::HashMap* i_cache_;
 
+  v8::internal::Isolate* isolate_;
+
   // Registered breakpoints.
   Instruction* break_pc_;
   Instr break_instr_;
 
-  v8::internal::Isolate* isolate_;
+  // Stop is disabled if bit 31 is set.
+  static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled, meaning the simulator will stop when it reaches the
+  // instruction, if bit 31 of watched_stops[code].count is unset.
+  // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint was hit or gone through.
+  struct StopCountAndDesc {
+    uint32_t count;
+    char* desc;
+  };
+  StopCountAndDesc watched_stops[kMaxStopCode + 1];
 };
 
 
 // When running with the simulator transition into simulated execution at this
 // point.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+    reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
 
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  Simulator::current(Isolate::Current())->Call( \
-      entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+    Simulator::current(Isolate::Current())->Call( \
+        entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
 
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
-  try_catch_address == NULL ? \
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address)                              \
+  try_catch_address == NULL ?                                                  \
       NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
 
 
@@ -373,8 +404,9 @@
 // trouble down the line.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return Simulator::current(Isolate::Current())->StackLimit();
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
   }
 
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
@@ -391,4 +423,3 @@
 
 #endif  // !defined(USE_SIMULATOR)
 #endif  // V8_MIPS_SIMULATOR_MIPS_H_
-
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 1a49558..5b94973 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -39,6 +39,124 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void ProbeTable(Isolate* isolate,
+                       MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset,
+                       Register scratch,
+                       Register scratch2) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+
+  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+
+  // Check the relative positions of the address fields.
+  ASSERT(value_off_addr > key_off_addr);
+  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register offsets_base_addr = scratch;
+
+  // Check that the key in the entry matches the name.
+  __ li(offsets_base_addr, Operand(key_offset));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
+  __ Branch(&miss, ne, name, Operand(scratch2));
+
+  // Get the code entry from the cache.
+  __ Addu(offsets_base_addr, offsets_base_addr,
+         Operand(value_off_addr - key_off_addr));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
+
+  // Check that the flags match what we're looking for.
+  __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+  __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
+  __ Branch(&miss, ne, scratch2, Operand(flags));
+
+  // Re-load code entry from cache.
+  __ sll(offset, offset, 1);
+  __ addu(offset, offset, offsets_base_addr);
+  __ lw(offset, MemOperand(offset));
+
+  // Jump to the first instruction in the code stub.
+  __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(offset);
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
+
+
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register scratch0,
+    Register scratch1) {
+  ASSERT(name->IsSymbol());
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, at, Operand(zero_reg));
+
+
+  // Check that receiver is a JSObject.
+  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  Register tmp = properties;
+  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+  __ Branch(miss_label, ne, map, Operand(tmp));
+
+  // Restore the temporarily used register.
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      receiver,
+      properties,
+      name,
+      scratch1);
+  if (result->IsFailure()) return result;
+
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  return result;
+}
+
+
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -46,20 +164,96 @@
                               Register scratch,
                               Register extra,
                               Register extra2) {
-  UNIMPLEMENTED_MIPS();
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
+
+  // Make sure the flags does not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+  ASSERT(!extra.is(receiver));
+  ASSERT(!extra.is(name));
+  ASSERT(!extra.is(scratch));
+  ASSERT(!extra2.is(receiver));
+  ASSERT(!extra2.is(name));
+  ASSERT(!extra2.is(scratch));
+  ASSERT(!extra2.is(extra));
+
+  // Check scratch, extra and extra2 registers are valid.
+  ASSERT(!scratch.is(no_reg));
+  ASSERT(!extra.is(no_reg));
+  ASSERT(!extra2.is(no_reg));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss, t0);
+
+  // Get the map of the receiver and compute the hash.
+  __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+  __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Addu(scratch, scratch, Operand(t8));
+  __ Xor(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ Subu(scratch, scratch, Operand(name));
+  __ Addu(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
 }
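
GenerateProbe boils down to two hash computations: a primary offset from the name's hash field, the receiver's map and the code flags, and a secondary offset derived from the primary one on a miss. The sketch below models only those formulas on the host; the table sizes and kHeapObjectTagSize used here are illustrative assumptions, not values taken from this patch:

```cpp
#include <cstdint>
#include <cstdio>

// Assumed values, for illustration only.
static const int kHeapObjectTagSize = 2;
static const int kPrimaryTableSize = 2048;
static const int kSecondaryTableSize = 512;

uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map, uint32_t flags) {
  // (hash field + map) ^ flags, masked to the primary table.
  return ((name_hash_field + map) ^ flags) &
         ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}

uint32_t SecondaryOffset(uint32_t primary, uint32_t name, uint32_t flags) {
  // Primary miss: subtract the name, add the flags, mask to the secondary
  // table.
  return ((primary - name) + flags) &
         ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}

int main() {
  uint32_t primary = PrimaryOffset(0x00001234u, 0xbeef0000u, 0x42u);
  uint32_t secondary = SecondaryOffset(primary, 0xcafe0000u, 0x42u);
  std::printf("primary=0x%x secondary=0x%x\n", primary, secondary);
}
```
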
 
 
 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                        int index,
                                                        Register prototype) {
-  UNIMPLEMENTED_MIPS();
+  // Load the global or builtins object from the current context.
+  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ lw(prototype,
+         FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ lw(prototype,
+         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
     MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  Isolate* isolate = masm->isolate();
+  // Check we're still in the same context.
+  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ASSERT(!prototype.is(at));
+  __ li(at, isolate->global());
+  __ Branch(miss, ne, prototype, Operand(at));
+  // Get the global function with the given index.
+  JSFunction* function =
+      JSFunction::cast(isolate->global_context()->get(index));
+  // Load its initial map. The global functions all have initial maps.
+  __ li(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }
 
 
@@ -69,7 +263,18 @@
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                             Register dst, Register src,
                                             JSObject* holder, int index) {
-  UNIMPLEMENTED_MIPS();
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ lw(dst, FieldMemOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+    __ lw(dst, FieldMemOperand(dst, offset));
+  }
 }
 
 
@@ -77,7 +282,41 @@
                                            Register receiver,
                                            Register scratch,
                                            Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ And(scratch, receiver, Operand(kSmiTagMask));
+  __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+
+  // Check that the object is a JS array.
+  __ GetObjectType(receiver, scratch, scratch);
+  __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Load length directly from the JS array.
+  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Ret();
+}
+
+
+// Generate code to check if an object is a string.  If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, smi, t0);
+
+  // Check that the object is a string.
+  __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ And(scratch2, scratch1, Operand(kIsNotStringMask));
+  // The cast is to resolve the overload for the argument of 0x0.
+  __ Branch(non_string_object,
+            ne,
+            scratch2,
+            Operand(static_cast<int32_t>(kStringTag)));
 }
 
 
@@ -91,7 +330,28 @@
                                             Register scratch2,
                                             Label* miss,
                                             bool support_wrappers) {
-  UNIMPLEMENTED_MIPS();
+  Label check_wrapper;
+
+  // Check if the object is a string leaving the instance type in the
+  // scratch1 register.
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+                      support_wrappers ? &check_wrapper : miss);
+
+  // Load length directly from the string.
+  __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
+  __ Ret();
+
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+
+    // Unwrap the value and check if the wrapped value is a string.
+    __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
+    __ Ret();
+  }
 }
 
 
@@ -100,7 +360,9 @@
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(v0, scratch1);
+  __ Ret();
 }
 
 
@@ -115,50 +377,416 @@
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  // a0 : value.
+  Label exit;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label, scratch);
+
+  // Check that the map of the receiver hasn't changed.
+  __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
+    __ li(a2, Operand(Handle<Map>(transition)));
+    __ Push(a2, a0);
+    __ TailCallExternalReference(
+           ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                             masm->isolate()),
+           3, 1);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ li(t0, Operand(Handle<Map>(transition)));
+    __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ sw(a0, FieldMemOperand(receiver_reg, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ JumpIfSmi(a0, &exit, scratch);
+
+    // Update the write barrier for the array address.
+    // Pass the now unused name_reg as a scratch register.
+    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ sw(a0, FieldMemOperand(scratch, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ JumpIfSmi(a0, &exit);
+
+    // Update the write barrier for the array address.
+    // Ok to clobber receiver_reg and name_reg, since we return.
+    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+  }
+
+  // Return the value (register v0).
+  __ bind(&exit);
+  __ mov(v0, a0);
+  __ Ret();
 }
 
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+  } else {
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
+static void GenerateCallFunction(MacroAssembler* masm,
+                                 Object* object,
+                                 const ParameterCount& arguments,
+                                 Label* miss,
+                                 Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a0: receiver
+  //  -- a1: function to call
+  // -----------------------------------
+  // Check that the function really is a function.
+  __ JumpIfSmi(a1, miss);
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
+  }
+
+  // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Register name,
+                                     JSObject* holder_obj) {
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Register scratch = name;
+  __ li(scratch, Operand(Handle<Object>(interceptor)));
+  __ Push(scratch, receiver, holder);
+  __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+  __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Register name,
+                                                   JSObject* holder_obj) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+          masm->isolate());
+  __ li(a0, Operand(5));
+  __ li(a1, Operand(ref));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+}
+
+
+static const int kFastApiCallArguments = 3;
+
+
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
+                                       Register scratch) {
+  ASSERT(Smi::FromInt(0) == 0);
+  for (int i = 0; i < kFastApiCallArguments; i++) {
+    __ push(zero_reg);
+  }
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
+  __ Drop(kFastApiCallArguments);
+}
+
+
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+                                      const CallOptimization& optimization,
+                                      int argc) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]              : holder (set by CheckPrototypes)
+  //  -- sp[4]              : callee js function
+  //  -- sp[8]              : call data
+  //  -- sp[12]             : last js argument
+  //  -- ...
+  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 4) * 4] : receiver
+  // -----------------------------------
+  // Get the function and setup the context.
+  JSFunction* function = optimization.constant_function();
+  __ li(t1, Operand(Handle<JSFunction>(function)));
+  __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+
+  // Pass the additional arguments FastHandleApiCall expects.
+  Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+  if (masm->isolate()->heap()->InNewSpace(call_data)) {
+    __ li(a0, api_call_info_handle);
+    __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+  } else {
+    __ li(t2, Operand(Handle<Object>(call_data)));
+  }
+
+  // Store js function and call data.
+  __ sw(t1, MemOperand(sp, 1 * kPointerSize));
+  __ sw(t2, MemOperand(sp, 2 * kPointerSize));
+
+  // a2 points to call data as expected by Arguments
+  // (refer to layout above).
+  __ Addu(a2, sp, Operand(2 * kPointerSize));
+
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
+
+  const int kApiStackSpace = 4;
+
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+  // struct from the function (which is currently the case). This means we pass
+  // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
+  // will handle setting up a0.
+
+  // a1 = v8::Arguments&
+  // Arguments is built at sp + 1 (sp is a reserved spot for ra).
+  __ Addu(a1, sp, kPointerSize);
+
+  // v8::Arguments::implicit_args = data
+  __ sw(a2, MemOperand(a1, 0 * kPointerSize));
+  // v8::Arguments::values = last argument
+  __ Addu(t0, a2, Operand(argc * kPointerSize));
+  __ sw(t0, MemOperand(a1, 1 * kPointerSize));
+  // v8::Arguments::length_ = argc
+  __ li(t0, Operand(argc));
+  __ sw(t0, MemOperand(a1, 2 * kPointerSize));
+  // v8::Arguments::is_construct_call = 0
+  __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated). Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  ExternalReference ref =
+      ExternalReference(&fun,
+                        ExternalReference::DIRECT_API_CALL,
+                        masm->isolate());
+  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+}
+
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name)
+                          Register name,
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name) {}
+        name_(name),
+        extra_ic_state_(extra_ic_state) {}
 
-  void Compile(MacroAssembler* masm,
-               JSObject* object,
-               JSObject* holder,
-               String* name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  void CompileCacheable(MacroAssembler* masm,
+  MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
-                       JSObject* interceptor_holder,
-                       LookupResult* lookup,
-                       String* name,
-                       const CallOptimization& optimization,
-                       Label* miss_label) {
-    UNIMPLEMENTED_MIPS();
+                       Label* miss) {
+    ASSERT(holder->HasNamedInterceptor());
+    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(receiver, miss);
+
+    CallOptimization optimization(lookup);
+
+    if (optimization.is_constant_call()) {
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
+    } else {
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     scratch3,
+                     name,
+                     holder,
+                     miss);
+      return masm->isolate()->heap()->undefined_value();
+    }
+  }
+
+ private:
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
+    ASSERT(optimization.is_constant_call());
+    ASSERT(!lookup->holder()->IsGlobalObject());
+
+    Counters* counters = masm->isolate()->counters();
+
+    int depth1 = kInvalidProtoDepth;
+    int depth2 = kInvalidProtoDepth;
+    bool can_do_fast_api_call = false;
+    if (optimization.is_simple_api_call() &&
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                      interceptor_holder);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                        lookup->holder());
+      }
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
+    }
+
+    __ IncrementCounter(counters->call_const_interceptor(), 1,
+                      scratch1, scratch2);
+
+    if (can_do_fast_api_call) {
+      __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
+                          scratch1, scratch2);
+      ReserveSpaceForFastApiCall(masm, scratch1);
+    }
+
+    // Check that the maps from receiver to interceptor's holder
+    // haven't changed and thus we can invoke interceptor.
+    Label miss_cleanup;
+    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+    Register holder =
+      stub_compiler_->CheckPrototypes(object, receiver,
+                                      interceptor_holder, scratch1,
+                                      scratch2, scratch3, name, depth1, miss);
+
+    // Invoke an interceptor and if it provides a value,
+    // branch to |regular_invoke|.
+    Label regular_invoke;
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
+                        &regular_invoke);
+
+    // Interceptor returned nothing for this property.  Try to use cached
+    // constant function.
+
+    // Check that the maps from interceptor's holder to constant function's
+    // holder haven't changed and thus we can use cached constant function.
+    if (interceptor_holder != lookup->holder()) {
+      stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+                                      lookup->holder(), scratch1,
+                                      scratch2, scratch3, name, depth2, miss);
+    } else {
+      // CheckPrototypes has a side effect of fetching a 'holder'
+      // for API (object which is instanceof for the signature).  It's
+      // safe to omit it here, as if present, it should be fetched
+      // by the previous CheckPrototypes.
+      ASSERT(depth2 == kInvalidProtoDepth);
+    }
+
+    // Invoke function.
+    if (can_do_fast_api_call) {
+      MaybeObject* result = GenerateFastApiDirectCall(masm,
+                                                      optimization,
+                                                      arguments_.immediate());
+      if (result->IsFailure()) return result;
+    } else {
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+          ? CALL_AS_FUNCTION
+          : CALL_AS_METHOD;
+      __ InvokeFunction(optimization.constant_function(), arguments_,
+                        JUMP_FUNCTION, call_kind);
+    }
+
+    // Deferred code for the fast API call case: clean preallocated space.
+    if (can_do_fast_api_call) {
+      __ bind(&miss_cleanup);
+      FreeSpaceForFastApiCall(masm);
+      __ Branch(miss_label);
+    }
+
+    // Invoke a regular function.
+    __ bind(&regular_invoke);
+    if (can_do_fast_api_call) {
+      FreeSpaceForFastApiCall(masm);
+    }
+
+    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
@@ -170,7 +798,31 @@
                       String* name,
                       JSObject* interceptor_holder,
                       Label* miss_label) {
-    UNIMPLEMENTED_MIPS();
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3, name,
+                                        miss_label);
+
+    // Call a runtime function to load the interceptor property.
+    __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             interceptor_holder);
+
+    __ CallExternalReference(
+          ExternalReference(
+              IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+              masm->isolate()),
+          5);
+
+    // Restore the name_ register.
+    __ pop(name_);
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -179,15 +831,201 @@
                            JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    UNIMPLEMENTED_MIPS();
+    __ EnterInternalFrame();
+
+    __ Push(holder, name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
+    __ LeaveInternalFrame();
+
+    // If interceptor returns no-result sentinel, call the constant function.
+    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
   }
 
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+    MacroAssembler* masm,
+    GlobalObject* global,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  Object* probe;
+  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+  ASSERT(cell->value()->IsTheHole());
+  __ li(scratch, Operand(Handle<Object>(cell)));
+  __ lw(scratch,
+        FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  __ Branch(miss, ne, scratch, Operand(at));
+  return cell;
+}
+
+
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
+    if (current->IsGlobalObject()) {
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
+    }
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
+  }
+  return NULL;
+}
+
+
+// Convert and store int passed in register ival to IEEE 754 single precision
+// floating point value at memory location (dst + 4 * wordoffset)
+// If FPU is available use it for conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+                            Register dst,
+                            Register wordoffset,
+                            Register ival,
+                            Register fval,
+                            Register scratch1,
+                            Register scratch2) {
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(ival, f0);
+    __ cvt_s_w(f0, f0);
+    __ sll(scratch1, wordoffset, 2);
+    __ addu(scratch1, dst, scratch1);
+    __ swc1(f0, MemOperand(scratch1, 0));
+  } else {
+    // FPU is not available, do manual conversions.
+
+    Label not_special, done;
+    // Move sign bit from source to destination.  This works because the sign
+    // bit of a binary32 value has the same position and polarity as the
+    // 2's complement sign bit of a 32-bit integer.
+    ASSERT(kBinary32SignMask == 0x80000000u);
+
+    __ And(fval, ival, Operand(kBinary32SignMask));
+    // Negate value if it is negative.
+    __ subu(scratch1, zero_reg, ival);
+    __ movn(ival, scratch1, fval);
+
+    // We have -1, 0 or 1, which we treat specially. Register ival contains
+    // absolute value: it is either equal to 1 (special case of -1 and 1),
+    // greater than 1 (not a special case) or less than 1 (special case of 0).
+    __ Branch(&not_special, gt, ival, Operand(1));
+
+    // For 1 or -1 we need to or in the 0 exponent (biased).
+    static const uint32_t exponent_word_for_1 =
+        kBinary32ExponentBias << kBinary32ExponentShift;
+
+    __ Xor(scratch1, ival, Operand(1));
+    __ li(scratch2, exponent_word_for_1);
+    __ or_(scratch2, fval, scratch2);
+    __ movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
+    __ Branch(&done);
+
+    __ bind(&not_special);
+    // Count leading zeros.
+    // Gets the wrong answer for 0, but we already checked for that case above.
+    Register zeros = scratch2;
+    __ clz(zeros, ival);
+
+    // Compute exponent and or it into the exponent register.
+    __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
+    __ subu(scratch1, scratch1, zeros);
+
+    __ sll(scratch1, scratch1, kBinary32ExponentShift);
+    __ or_(fval, fval, scratch1);
+
+    // Shift up the source chopping the top bit off.
+    __ Addu(zeros, zeros, Operand(1));
+    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+    __ sllv(ival, ival, zeros);
+    // Or in the top bits of ival (the 23-bit binary32 mantissa).
+    __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
+    __ or_(fval, fval, scratch1);
+
+    __ bind(&done);
+
+    __ sll(scratch1, wordoffset, 2);
+    __ addu(scratch1, dst, scratch1);
+    __ sw(fval, MemOperand(scratch1, 0));
+  }
+}
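
For reference, here is a host-side sketch (not part of the patch) of the same int-to-binary32 encoding the non-FPU path emits: extract the sign, take the magnitude, special-case +/-1, count leading zeros, then assemble the biased exponent and mantissa. `IntToBinary32Bits` is a hypothetical helper, `__builtin_clz` stands in for the MIPS clz instruction, and IEEE 754 binary32 is assumed.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t IntToBinary32Bits(int32_t value) {
  uint32_t sign = static_cast<uint32_t>(value) & 0x80000000u;  // kBinary32SignMask
  uint32_t magnitude =
      sign ? 0u - static_cast<uint32_t>(value) : static_cast<uint32_t>(value);
  if (magnitude == 0) return sign;                     // +/-0
  if (magnitude == 1) return sign | (127u << 23);      // the +/-1 special case
  int zeros = __builtin_clz(magnitude);                // like the clz instruction
  uint32_t exponent = static_cast<uint32_t>(31 - zeros) + 127u;  // bias 127
  magnitude <<= zeros + 1;                             // chop off the implicit 1 bit
  uint32_t mantissa = magnitude >> (32 - 23);          // keep the top 23 bits
  return sign | (exponent << 23) | mantissa;
}

int main() {
  // Values whose magnitudes fit in 24 bits convert exactly, so truncation and
  // round-to-nearest agree and we can compare against the compiler's cast.
  for (int32_t v : {0, 1, -1, 7, -7, 12345, -12345, 1 << 20}) {
    float f = static_cast<float>(v);
    uint32_t expected;
    std::memcpy(&expected, &f, sizeof expected);
    assert(IntToBinary32Bits(v) == expected);
  }
  return 0;
}
```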
+
+
+// Convert an unsigned integer with the specified number of leading zeroes in
+// its binary representation to an IEEE 754 double.
+// The integer to convert is passed in register hiword.
+// The resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+
+  __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
+  if (mantissa_shift_for_hi_word > 0) {
+    __ sll(loword, hiword, mantissa_shift_for_lo_word);
+    __ srl(hiword, hiword, mantissa_shift_for_hi_word);
+    __ or_(hiword, scratch, hiword);
+  } else {
+    __ mov(loword, zero_reg);
+    __ sll(hiword, hiword, mantissa_shift_for_hi_word);
+    __ or_(hiword, scratch, hiword);
+  }
+
+  // If the least significant bit of the biased exponent was not 1, it has
+  // been corrupted by the most significant bit of the mantissa, so fix it up.
+  if (!(biased_exponent & 1)) {
+    __ li(scratch, 1 << HeapNumber::kExponentShift);
+    __ nor(scratch, scratch, scratch);
+    __ and_(hiword, hiword, scratch);
+  }
+}
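
A host-side sketch (not part of the patch) of the hiword/loword arithmetic above, covering only the shape where the mantissa shift is positive (e.g. a full 32-bit value with no leading zeros). `UInt2DoubleWords` is a hypothetical helper; the constants mirror HeapNumber's layout, and IEEE 754 doubles are assumed.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

static void UInt2DoubleWords(uint32_t value, int leading_zeroes,
                             uint32_t* hiword, uint32_t* loword) {
  const int kMantissaBitsInTopWord = 20;  // HeapNumber::kMantissaBitsInTopWord
  const int kExponentShift = 20;          // HeapNumber::kExponentShift
  const int kExponentBias = 1023;         // HeapNumber::kExponentBias
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const int biased_exponent = kExponentBias + meaningful_bits;
  const int shift_hi = meaningful_bits - kMantissaBitsInTopWord;
  assert(shift_hi > 0);                   // only this shape is sketched here
  *loword = value << (32 - shift_hi);
  *hiword = (static_cast<uint32_t>(biased_exponent) << kExponentShift) |
            (value >> shift_hi);
  // The implicit leading 1 of the mantissa lands on bit 20, i.e. on the least
  // significant exponent bit; clear it again when that exponent bit is 0.
  if (!(biased_exponent & 1)) *hiword &= ~(1u << kExponentShift);
}

int main() {
  const uint32_t value = 0x80000001u;     // leading_zeroes == 0
  uint32_t hi = 0, lo = 0;
  UInt2DoubleWords(value, 0, &hi, &lo);
  const double expected = static_cast<double>(value);
  uint64_t bits;
  std::memcpy(&bits, &expected, sizeof bits);
  assert(hi == static_cast<uint32_t>(bits >> 32));
  assert(lo == static_cast<uint32_t>(bits));
  return 0;
}
```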
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -201,8 +1039,132 @@
                                        String* name,
                                        int save_at_depth,
                                        Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
+  // Make sure there's no overlap between holder and object registers.
+  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+         && !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  if (save_at_depth == depth) {
+    __ sw(reg, MemOperand(sp));
+  }
+
+  // Traverse the prototype chain from the object and check the map at each
+  // step.
+  JSObject* current = object;
+  while (current != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+    ASSERT(current->GetPrototype()->IsJSObject());
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    if (!current->HasFastProperties() &&
+        !current->IsJSGlobalObject() &&
+        !current->IsJSGlobalProxy()) {
+      if (!name->IsSymbol()) {
+        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
+          return reg;
+        }
+        name = String::cast(lookup_result);
+      }
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
+             StringDictionary::kNotFound);
+
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now the object is in holder_reg.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else if (heap()->InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        // Restore scratch register to be the map of the object.  In the
+        // new space case below, we load the prototype from the map in
+        // the scratch register.
+        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      reg = holder_reg;  // From now the object is in holder_reg.
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // From now the object is in holder_reg.
+      __ li(reg, Operand(Handle<JSObject>(prototype)));
+    }
+
+    if (save_at_depth == depth) {
+      __ sw(reg, MemOperand(sp));
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+  }
+
+  // Check the holder map.
+  __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+  // Log the check depth.
+  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+  // Perform security check for access to the global object.
+  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+  if (holder->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
+
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
+
+  // Return the register containing the holder.
+  return reg;
 }
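
Conceptually, the loop above bakes the compile-time prototype chain into the stub as a series of map checks; if any map along the chain has changed, control transfers to the miss label. A minimal sketch of that invariant with hypothetical stand-in types (`Map`, `ChainedObject`), not V8's:

```cpp
#include <cassert>
#include <vector>

struct Map {};

struct ChainedObject {
  const Map* map;
  const ChainedObject* prototype;
};

// Returns true (the "hit" path) if the recorded maps still match; returning
// false corresponds to jumping to the miss label.
static bool ChainUnchanged(const ChainedObject* receiver,
                           const ChainedObject* holder,
                           const std::vector<const Map*>& expected_maps) {
  const ChainedObject* current = receiver;
  size_t depth = 0;
  while (true) {
    if (depth >= expected_maps.size() ||
        current->map != expected_maps[depth]) {
      return false;  // miss
    }
    if (current == holder) return true;
    current = current->prototype;
    ++depth;
  }
}

int main() {
  Map receiver_map, proto_map;
  ChainedObject holder = {&proto_map, nullptr};
  ChainedObject receiver = {&receiver_map, &holder};
  std::vector<const Map*> maps = {&receiver_map, &proto_map};
  assert(ChainUnchanged(&receiver, &holder, maps));
  Map new_map;
  holder.map = &new_map;  // e.g. a property was added to the prototype
  assert(!ChainUnchanged(&receiver, &holder, maps));
  return 0;
}
```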
 
 
@@ -215,7 +1177,16 @@
                                      int index,
                                      String* name,
                                      Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ And(scratch1, receiver, Operand(kSmiTagMask));
+  __ Branch(miss, eq, scratch1, Operand(zero_reg));
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                      name, miss);
+  GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
+  __ Ret();
 }
 
 
@@ -228,7 +1199,17 @@
                                         Object* value,
                                         String* name,
                                         Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss, scratch1);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, scratch3, name, miss);
+
+  // Return the constant value.
+  __ li(v0, Operand(Handle<Object>(value)));
+  __ Ret();
 }
 
 
@@ -242,8 +1223,56 @@
                                                 AccessorInfo* callback,
                                                 String* name,
                                                 Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss, scratch1);
+
+  // Check that the maps haven't changed.
+  Register reg =
+    CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                    name, miss);
+
+  // Build the AccessorInfo::args_ list on the stack and push the property
+  // name below the exit frame so the GC is aware of them, and store pointers
+  // to them.
+  __ push(receiver);
+  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
+  Handle<AccessorInfo> callback_handle(callback);
+  if (heap()->InNewSpace(callback_handle->data())) {
+    __ li(scratch3, callback_handle);
+    __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+  } else {
+    __ li(scratch3, Handle<Object>(callback_handle->data()));
+  }
+  __ Push(reg, scratch3, name_reg);
+  __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
+  __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+
+  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+  // struct from the function (which is currently the case). This means we pass
+  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
+  // will handle setting up a0.
+
+  const int kApiStackSpace = 1;
+
+  __ EnterExitFrame(false, kApiStackSpace);
+  // Create AccessorInfo instance on the stack above the exit frame with
+  // scratch2 (internal::Object **args_) as the data.
+  __ sw(a2, MemOperand(sp, kPointerSize));
+  // a2 (second argument - see note above) = AccessorInfo&
+  __ Addu(a2, sp, kPointerSize);
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  ExternalReference ref =
+      ExternalReference(&fun,
+                        ExternalReference::DIRECT_GETTER_CALL,
+                        masm()->isolate());
+  // 4 args - will be freed later by LeaveExitFrame.
+  return masm()->TryCallApiFunctionAndReturn(ref, 4);
 }
 
 
@@ -257,12 +1286,143 @@
                                            Register scratch3,
                                            String* name,
                                            Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(interceptor_holder->HasNamedInterceptor());
+  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // So far the most popular follow-ups for interceptor loads are FIELD and
+  // CALLBACKS, so inline only those; other cases may be added later.
+  bool compile_followup_inline = false;
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
+    if (lookup->type() == FIELD) {
+      compile_followup_inline = true;
+    } else if (lookup->type() == CALLBACKS &&
+        lookup->GetCallbackObject()->IsAccessorInfo() &&
+        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+      compile_followup_inline = true;
+    }
+  }
+
+  if (compile_followup_inline) {
+    // Compile the interceptor call, followed by inline code to load the
+    // property from further up the prototype chain if the call fails.
+    // Check that the maps haven't changed.
+    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+                                          scratch1, scratch2, scratch3,
+                                          name, miss);
+    ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+    // Save necessary data before invoking an interceptor.
+    // Requires a frame to make GC aware of pushed pointers.
+    __ EnterInternalFrame();
+
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ Push(receiver, holder_reg, name_reg);
+    } else {
+      __ Push(holder_reg, name_reg);
+    }
+
+    // Invoke an interceptor.  Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see a caller of this
+    // method).
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+    __ LeaveInternalFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
+    // Check that the maps from the interceptor's holder to the lookup's
+    // holder haven't changed, and load the lookup's holder into the
+    // |holder_reg| register.
+    if (interceptor_holder != lookup->holder()) {
+      holder_reg = CheckPrototypes(interceptor_holder,
+                                   holder_reg,
+                                   lookup->holder(),
+                                   scratch1,
+                                   scratch2,
+                                   scratch3,
+                                   name,
+                                   miss);
+    }
+
+    if (lookup->type() == FIELD) {
+      // We found a FIELD property in the prototype chain of the interceptor's
+      // holder.  Retrieve the field from the field's holder.
+      GenerateFastPropertyLoad(masm(), v0, holder_reg,
+                               lookup->holder(), lookup->GetFieldIndex());
+      __ Ret();
+    } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
+      ASSERT(lookup->type() == CALLBACKS);
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      ASSERT(callback != NULL);
+      ASSERT(callback->getter() != NULL);
+
+      // Tail call to runtime.
+      // Important invariant in CALLBACKS case: the code above must be
+      // structured to never clobber |receiver| register.
+      __ li(scratch2, Handle<AccessorInfo>(callback));
+      // holder_reg is either receiver or scratch1.
+      if (!receiver.is(holder_reg)) {
+        ASSERT(scratch1.is(holder_reg));
+        __ Push(receiver, holder_reg);
+        __ lw(scratch3,
+              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+        __ Push(scratch3, scratch2, name_reg);
+      } else {
+        __ push(receiver);
+        __ lw(scratch3,
+              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+        __ Push(holder_reg, scratch3, scratch2, name_reg);
+      }
+
+      ExternalReference ref =
+          ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+                            masm()->isolate());
+      __ TailCallExternalReference(ref, 5, 1);
+    }
+  } else {  // !compile_followup_inline
+    // Call the runtime system to load the interceptor.
+    // Check that the maps haven't changed.
+    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+                                          scratch1, scratch2, scratch3,
+                                          name, miss);
+    PushInterceptorArguments(masm(), receiver, holder_reg,
+                             name_reg, interceptor_holder);
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
+    __ TailCallExternalReference(ref, 5, 1);
+  }
 }
 
 
 void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  if (kind_ == Code::KEYED_CALL_IC) {
+    __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+  }
 }
 
 
@@ -270,20 +1430,63 @@
                                                    JSObject* holder,
                                                    String* name,
                                                    Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(holder->IsGlobalObject());
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(a0, miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
 }
 
 
 void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Get the value from the cell.
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  if (heap()->InNewSpace(function)) {
+    // We can't embed a pointer to a function in new space so we have
+    // to verify that the shared function info is unchanged. This has
+    // the nice side effect that multiple closures based on the same
+    // function can all use this call IC. Before we load through the
+    // function, we have to verify that it still is a function.
+    __ JumpIfSmi(a1, miss);
+    __ GetObjectType(a1, a3, a3);
+    __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+    // Check the shared function info. Make sure it hasn't changed.
+    __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
+    __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ Branch(miss, ne, t0, Operand(a3));
+  } else {
+    __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+  }
 }
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  MaybeObject* maybe_obj =
+      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+                                               kind_,
+                                               extra_ic_state_);
+  Object* obj;
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+  return obj;
 }
 
 
@@ -291,8 +1494,34 @@
                                                 JSObject* holder,
                                                 int index,
                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  const int argc = arguments().immediate();
+
+  // Get the receiver of the function from the stack into a0.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a0, &miss, t0);
+
+  // Do the right check and compute the holder register.
+  Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
+  GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
@@ -301,8 +1530,160 @@
                                                     JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  Register receiver = a1;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object), receiver,
+                  holder, a3, v0, t0, name, &miss);
+
+  if (argc == 0) {
+    // Nothing to do, just return the length.
+    __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ Drop(argc + 1);
+    __ Ret();
+  } else {
+    Label call_builtin;
+
+    Register elements = a3;
+    Register end_elements = t1;
+
+    // Get the elements array of the object.
+    __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode and writable.
+    __ CheckMap(elements,
+                v0,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
+
+    if (argc == 1) {  // Otherwise fall through to call the builtin.
+      Label exit, with_write_barrier, attempt_to_grow_elements;
+
+      // Get the array's length into v0 and calculate new length.
+      __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      STATIC_ASSERT(kSmiTagSize == 1);
+      STATIC_ASSERT(kSmiTag == 0);
+      __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+
+      // Get the elements array's length.
+      __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+      // Check if we could survive without allocation.
+      __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+
+      // Save new length.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Push the element.
+      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+      // We may need the address of the end of the elements below (for the
+      // write barrier), so compute it now in end_elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      const int kEndElementsOffset =
+          FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+      __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+      __ Addu(end_elements, end_elements, kPointerSize);
+
+      // Check for a smi.
+      __ JumpIfNotSmi(t0, &with_write_barrier);
+      __ bind(&exit);
+      __ Drop(argc + 1);
+      __ Ret();
+
+      __ bind(&with_write_barrier);
+      __ InNewSpace(elements, t0, eq, &exit);
+      __ RecordWriteHelper(elements, end_elements, t0);
+      __ Drop(argc + 1);
+      __ Ret();
+
+      __ bind(&attempt_to_grow_elements);
+      // v0: array's length + 1.
+      // t0: elements' length.
+
+      if (!FLAG_inline_new) {
+        __ Branch(&call_builtin);
+      }
+
+      ExternalReference new_space_allocation_top =
+          ExternalReference::new_space_allocation_top_address(
+              masm()->isolate());
+      ExternalReference new_space_allocation_limit =
+          ExternalReference::new_space_allocation_limit_address(
+              masm()->isolate());
+
+      const int kAllocationDelta = 4;
+      // Load top and check if it is the end of elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
+      __ li(t3, Operand(new_space_allocation_top));
+      __ lw(t2, MemOperand(t3));
+      __ Branch(&call_builtin, ne, end_elements, Operand(t2));
+
+      __ li(t5, Operand(new_space_allocation_limit));
+      __ lw(t5, MemOperand(t5));
+      __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
+      __ Branch(&call_builtin, hi, t2, Operand(t5));
+
+      // There is room in new space, so grow the elements array in place.
+      // Update new_space_allocation_top.
+      __ sw(t2, MemOperand(t3));
+      // Push the argument.
+      __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ sw(t2, MemOperand(end_elements));
+      // Fill the rest with holes.
+      __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+      for (int i = 1; i < kAllocationDelta; i++) {
+        __ sw(t2, MemOperand(end_elements, i * kPointerSize));
+      }
+
+      // Update elements' and array's sizes.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
+      __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+      // Elements are in new space, so write barrier is not required.
+      __ Drop(argc + 1);
+      __ Ret();
+    }
+    __ bind(&call_builtin);
+    __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+                                                   masm()->isolate()),
+                                 argc + 1,
+                                 1);
+  }
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
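
The fast path above grows the elements store in place only when it is the most recent new-space allocation: its end must equal new_space_allocation_top, and the bumped top must stay under the limit. A standalone sketch of that idea with a hypothetical bump allocator (`BumpAllocator`), not V8's heap:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

struct BumpAllocator {
  uint8_t* top;
  uint8_t* limit;

  void* Allocate(size_t bytes) {
    if (top + bytes > limit) return nullptr;
    void* result = top;
    top += bytes;
    return result;
  }

  // Extend `block` (currently `old_bytes` long) in place by `extra` bytes,
  // but only if it ends exactly at the current allocation top.
  bool TryExtend(void* block, size_t old_bytes, size_t extra) {
    uint8_t* end = static_cast<uint8_t*>(block) + old_bytes;
    if (end != top || top + extra > limit) return false;
    top += extra;
    return true;
  }
};

int main() {
  uint8_t arena[256];
  BumpAllocator heap{arena, arena + sizeof arena};
  void* elements = heap.Allocate(64);
  assert(heap.TryExtend(elements, 64, 16));   // last allocation: can grow
  void* other = heap.Allocate(32);
  (void)other;
  assert(!heap.TryExtend(elements, 80, 16));  // no longer at the top
  return 0;
}
```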
 
 
@@ -311,8 +1692,87 @@
                                                    JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+  Label miss, return_undefined, call_builtin;
+
+  Register receiver = a1;
+  Register elements = a3;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object),
+                  receiver, holder, elements, t0, v0, name, &miss);
+
+  // Get the elements array of the object.
+  __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+  // Check that the elements are in fast mode and writable.
+  __ CheckMap(elements,
+              v0,
+              Heap::kFixedArrayMapRootIndex,
+              &call_builtin,
+              DONT_DO_SMI_CHECK);
+
+  // Get the array's length into t0 and calculate new length.
+  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+  __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
+
+  // Get the last element.
+  __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  // We can't address the last element in one operation. Compute the more
+  // expensive shift first, and use an offset later on.
+  __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(elements, elements, t1);
+  __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Branch(&call_builtin, eq, v0, Operand(t2));
+
+  // Set the array's length.
+  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+  // Fill with the hole.
+  __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&return_undefined);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&call_builtin);
+  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
+                                                 masm()->isolate()),
+                               argc + 1,
+                               1);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
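
The shift by kPointerSizeLog2 - kSmiTagSize above turns a smi-tagged index directly into a byte offset. A quick self-contained check of that arithmetic (not part of the patch), with constants mirroring the 32-bit layout:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;
  const int kPointerSizeLog2 = 2;
  const int kPointerSize = 4;
  for (int32_t n = 0; n < 1000; ++n) {
    int32_t smi = n << kSmiTagSize;                       // Smi::FromInt(n)
    int32_t offset = smi << (kPointerSizeLog2 - kSmiTagSize);
    assert(offset == n * kPointerSize);                   // byte offset of element n
  }
  return 0;
}
```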
 
 
@@ -322,8 +1782,84 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            v0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
+
+  Register receiver = a1;
+  Register index = t1;
+  Register scratch = a3;
+  Register result = v0;
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharCodeAtGenerator char_code_at_generator(receiver,
+                                                   index,
+                                                   scratch,
+                                                   result,
+                                                   &miss,  // When not a string.
+                                                   &miss,  // When not a number.
+                                                   index_out_of_range_label,
+                                                   STRING_INDEX_IS_NUMBER);
+  char_code_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(v0, Heap::kNanValueRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in a2.
+  __ li(a2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -333,8 +1869,85 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            v0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
+
+  Register receiver = v0;
+  Register index = t1;
+  Register scratch1 = a1;
+  Register scratch2 = a3;
+  Register result = v0;
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          index_out_of_range_label,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in a2.
+  __ li(a2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -344,8 +1957,69 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the char code argument.
+  Register code = a1;
+  __ lw(code, MemOperand(sp, 0 * kPointerSize));
+
+  // Check the code is a smi.
+  Label slow;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(code, &slow);
+
+  // Convert the smi code to uint16.
+  __ And(code, code, Operand(Smi::FromInt(0xffff)));
+
+  StringCharFromCodeGenerator char_from_code_generator(code, v0);
+  char_from_code_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_from_code_generator.GenerateSlow(masm(), call_helper);
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
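
The And above clamps the char code while both operands are still smi-tagged. A small check (not part of the patch) of the identity it relies on, with smi tagging modelled as a left shift by one, as on 32-bit targets:

```cpp
#include <cassert>
#include <cstdint>

static int32_t Tag(int32_t n) { return n << 1; }  // Smi::FromInt on 32-bit

int main() {
  for (int32_t n : {0, 65, 0xffff, 0x10000, 0x12345, (1 << 30) - 1}) {
    // Masking the tagged value with the tagged 0xffff equals tagging the
    // masked untagged value, so the And clamps the char code to uint16 while
    // leaving the smi tag intact.
    assert((Tag(n) & Tag(0xffff)) == Tag(n & 0xffff));
  }
  return 0;
}
```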
 
 
@@ -354,8 +2028,132 @@
                                                     JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If the FPU is not available, bail out to the regular call.
+  if (!CpuFeatures::IsSupported(FPU)) return heap()->undefined_value();
+  CpuFeatures::Scope scope_fpu(FPU);
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss, slow;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into v0.
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  // If the argument is a smi, just return.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ And(t0, v0, Operand(kSmiTagMask));
+  __ Drop(argc + 1, eq, t0, Operand(zero_reg));
+  __ Ret(eq, t0, Operand(zero_reg));
+
+  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+
+  Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
+
+  // The FPU is available (checked above), so use the floor instruction.
+
+  // Load the HeapNumber value.
+  __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+
+  // Back up FCSR.
+  __ cfc1(a3, FCSR);
+  // Clearing FCSR clears the exception mask with no side-effects.
+  __ ctc1(zero_reg, FCSR);
+  // Convert the argument to an integer.
+  __ floor_w_d(f0, f0);
+
+  // Start checking for special cases.
+  // Get the argument exponent and clear the sign bit.
+  __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
+  __ And(t2, t1, Operand(~HeapNumber::kSignMask));
+  __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
+
+  // Retrieve FCSR and check for fpu errors.
+  __ cfc1(t5, FCSR);
+  __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
+  __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
+
+  // Check for NaN, Infinity, and -Infinity.
+  // They are invariant through a Math.Floor call, so just
+  // return the original argument.
+  __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
+        >> HeapNumber::kMantissaBitsInTopWord));
+  __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
+  // We had an overflow or underflow in the conversion. Check if we
+  // have a big exponent.
+  // If greater or equal, the argument is already an integer and is in v0.
+  __ Branch(&restore_fcsr_and_return, ge, t3,
+      Operand(HeapNumber::kMantissaBits));
+  __ Branch(&wont_fit_smi);
+
+  __ bind(&no_fpu_error);
+  // Move the result back to v0.
+  __ mfc1(v0, f0);
+  // Check if the result fits into a smi.
+  __ Addu(a1, v0, Operand(0x40000000));
+  __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
+  // Tag the result.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ sll(v0, v0, kSmiTagSize);
+
+  // Check for -0.
+  __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
+  // t1 already holds the HeapNumber exponent.
+  __ And(t0, t1, Operand(HeapNumber::kSignMask));
+  // If our HeapNumber is negative it was -0, so load its address and return.
+  // Else v0 is loaded with 0, so we can also just return.
+  __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  __ bind(&restore_fcsr_and_return);
+  // Restore FCSR and return.
+  __ ctc1(a3, FCSR);
+
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&wont_fit_smi);
+  // Restore FCSR and fall through to the slow case.
+  __ ctc1(a3, FCSR);
+
+  __ bind(&slow);
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* obj = GenerateMissBranch();
+  if (obj->IsFailure()) return obj;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
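
The Addu/branch pair above tests whether the floored value fits in a 31-bit smi by adding 0x40000000 and checking the sign of the result. A host-side check of that trick (not part of the patch); `FitsInSmi` is a hypothetical helper:

```cpp
#include <cassert>
#include <cstdint>

static bool FitsInSmi(int32_t value) {
  uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
  return (biased & 0x80000000u) == 0;  // sign clear <=> in [-2^30, 2^30 - 1]
}

int main() {
  assert(FitsInSmi(0));
  assert(FitsInSmi((1 << 30) - 1));
  assert(!FitsInSmi(1 << 30));
  assert(FitsInSmi(-(1 << 30)));
  assert(!FitsInSmi(-(1 << 30) - 1));
  return 0;
}
```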
 
 
@@ -364,8 +2162,100 @@
                                                   JSGlobalPropertyCell* cell,
                                                   JSFunction* function,
                                                   String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into v0.
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  // Check if the argument is a smi.
+  Label not_smi;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(v0, &not_smi);
+
+  // Do bitwise not or do nothing depending on the sign of the
+  // argument.
+  __ sra(t0, v0, kBitsPerInt - 1);
+  __ Xor(a1, v0, t0);
+
+  // Add 1 or do nothing depending on the sign of the argument.
+  __ Subu(v0, a1, t0);
+
+  // If the result is still negative, go to the slow case.
+  // This only happens for the most negative smi.
+  Label slow;
+  __ Branch(&slow, lt, v0, Operand(zero_reg));
+
+  // Smi case done.
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // Check if the argument is a heap number and load its exponent and
+  // sign.
+  __ bind(&not_smi);
+  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+  __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+
+  // Check the sign of the argument. If the argument is positive,
+  // just return it.
+  Label negative_sign;
+  __ And(t0, a1, Operand(HeapNumber::kSignMask));
+  __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // If the argument is negative, clear the sign, and return a new
+  // number.
+  __ bind(&negative_sign);
+  __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
+  __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+  __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
+  __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+  __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
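
The smi fast path above computes the absolute value without branching: an arithmetic shift builds a sign mask, XOR flips the bits of negative values, and subtracting the mask adds one (two's-complement negation). A minimal host-side sketch of the same trick (not part of the patch), assuming arithmetic right shift of negative values as on V8's supported targets; `BranchlessAbs` is a hypothetical helper:

```cpp
#include <cassert>
#include <cstdint>

static int32_t BranchlessAbs(int32_t x) {
  uint32_t mask = static_cast<uint32_t>(x >> 31);  // 0 or 0xffffffff (like sra)
  uint32_t ux = static_cast<uint32_t>(x);
  return static_cast<int32_t>((ux ^ mask) - mask);
}

int main() {
  assert(BranchlessAbs(5) == 5);
  assert(BranchlessAbs(-5) == 5);
  assert(BranchlessAbs(0) == 0);
  // The most negative value has no positive counterpart, which is why the
  // generated code falls through to the slow case when the result is still
  // negative.
  return 0;
}
```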
 
 
@@ -376,8 +2266,51 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+
+  Counters* counters = isolate()->counters();
+
+  ASSERT(optimization.is_simple_api_call());
+  // Bail out if the object is a global object, as we don't want to repatch
+  // it to the global receiver.
+  if (object->IsGlobalObject()) return heap()->undefined_value();
+  if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
+  int depth = optimization.GetPrototypeDepthOfExpectedType(
+            JSObject::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+  Label miss, miss_before_stack_reserved;
+
+  GenerateNameCheck(name, &miss_before_stack_reserved);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a1, &miss_before_stack_reserved);
+
+  __ IncrementCounter(counters->call_const(), 1, a0, a3);
+  __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
+
+  ReserveSpaceForFastApiCall(masm(), a0);
+
+  // Check that the maps haven't changed and find the holder as a side effect.
+  CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                  depth, &miss);
+
+  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+  if (result->IsFailure()) return result;
+
+  __ bind(&miss);
+  FreeSpaceForFastApiCall(masm());
+
+  __ bind(&miss_before_stack_reserved);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -386,16 +2319,186 @@
                                                    JSFunction* function,
                                                    String* name,
                                                    CheckType check) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  if (HasCustomCallGenerator(function)) {
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, NULL, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
+  }
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ And(t1, a1, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t1, Operand(zero_reg));
+  }
+
+  // Make sure that it's okay not to patch the on stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  SharedFunctionInfo* function_info = function->shared();
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      __ IncrementCounter(masm()->isolate()->counters()->call_const(),
+          1, a0, a3);
+
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                      &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+        __ sw(a3, MemOperand(sp, argc * kPointerSize));
+      }
+      break;
+
+    case STRING_CHECK:
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        // Check that the object is a string or a symbol.
+        __ GetObjectType(a1, a3, a3);
+        __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+
+    case NUMBER_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        Label fast;
+        // Check that the object is a smi or a heap number.
+        __ And(t1, a1, Operand(kSmiTagMask));
+        __ Branch(&fast, eq, t1, Operand(zero_reg));
+        __ GetObjectType(a1, a0, a0);
+        __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+        __ bind(&fast);
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        Label fast;
+        // Check that the object is a boolean.
+        __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+        __ Branch(&fast, eq, a1, Operand(t0));
+        __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+        __ Branch(&miss, ne, a1, Operand(t0));
+        __ bind(&fast);
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
 MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                       JSObject* holder,
                                                       String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  // Get the receiver from the stack.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         a1,
+                                         a3,
+                                         t0,
+                                         a0,
+                                         &miss);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // Move returned value, the function to call, to a1.
+  __ mov(a1, v0);
+  // Restore receiver.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -404,8 +2507,64 @@
                                                  JSGlobalPropertyCell* cell,
                                                  JSFunction* function,
                                                  String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  if (HasCustomCallGenerator(function)) {
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, cell, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
+  }
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  GenerateGlobalReceiverCheck(object, holder, name, &miss);
+  GenerateLoadFunctionFromCell(cell, function, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a3, MemOperand(sp, argc * kPointerSize));
+  }
+
+  // Set up the context (the function is already in a1).
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  if (V8::UseCrankshaft()) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+                  JUMP_FUNCTION, call_kind);
+  }
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -413,39 +2572,205 @@
                                                   int index,
                                                   Map* transition,
                                                   String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Name register might be clobbered.
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a1, a2, a3,
+                     &miss);
+  __ bind(&miss);
+  __ li(a2, Operand(Handle<String>(name)));  // Restore name.
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                      AccessorInfo* callback,
                                                      String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(a1, a3, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ push(a1);  // Receiver.
+  __ li(a3, Operand(Handle<AccessorInfo>(callback)));  // Callback info.
+  __ Push(a3, a2, a0);
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
+          masm()->isolate());
+  __ TailCallExternalReference(store_callback_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
                                                         String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(a1, a3, &miss);
+  }
+
+  // Stub is never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ Push(a1, a2, a0);  // Receiver, name, value.
+
+  __ li(a0, Operand(Smi::FromInt(strict_mode_)));
+  __ push(a0);  // Strict mode.
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
+          masm()->isolate());
+  __ TailCallExternalReference(store_ic_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
                                                    JSGlobalPropertyCell* cell,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map of the global has not changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+  // Check that the value in the cell is not the hole. If it is, this
+  // cell could have been deleted and reintroducing the global needs
+  // to update the property details in the property dictionary of the
+  // global object. We bail out to the runtime system to do that.
+  __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+  __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+  __ Branch(&miss, eq, t1, Operand(t2));
+
+  // Store the value in the cell.
+  __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+  __ mov(v0, a0);  // Stored value must be returned in v0.
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
+  __ Ret();
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
 MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                       JSObject* object,
                                                       JSObject* last) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver is not a smi.
+  __ JumpIfSmi(a0, &miss);
+
+  // Check the maps of the full prototype chain.
+  CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+
+  // If the last object in the prototype chain is a global object,
+  // check that the global property cell is empty.
+  if (last->IsGlobalObject()) {
+    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                  GlobalObject::cast(last),
+                                                  name,
+                                                  a1,
+                                                  &miss);
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
+  }
+
+  // Return undefined if the maps of the full prototype chain are unchanged.
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NONEXISTENT, heap()->empty_string());
 }
 
 
@@ -453,8 +2778,21 @@
                                                 JSObject* holder,
                                                 int index,
                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  __ mov(v0, a0);
+
+  GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
@@ -462,8 +2800,25 @@
                                                    JSObject* object,
                                                    JSObject* holder,
                                                    AccessorInfo* callback) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
+                                             callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -471,16 +2826,50 @@
                                                    JSObject* holder,
                                                    Object* value,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
 MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
                                                       JSObject* holder,
                                                       String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(object,
+                          holder,
+                          &lookup,
+                          a0,
+                          a2,
+                          a3,
+                          a1,
+                          t0,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -489,8 +2878,45 @@
                                                  JSGlobalPropertyCell* cell,
                                                  String* name,
                                                  bool is_dont_delete) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ And(t0, a0, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t0, Operand(zero_reg));
+  }
+
+  // Check that the map of the global has not changed.
+  CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+
+  // Get the value from the cell.
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&miss, eq, t0, Operand(at));
+  }
+
+  __ mov(v0, t0);
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -498,8 +2924,21 @@
                                                      JSObject* receiver,
                                                      JSObject* holder,
                                                      int index) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(FIELD, name);
 }
 
 
@@ -508,8 +2947,27 @@
     JSObject* receiver,
     JSObject* holder,
     AccessorInfo* callback) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
+                                             t0, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -517,40 +2975,172 @@
                                                         JSObject* receiver,
                                                         JSObject* holder,
                                                         Object* value) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
                                                            JSObject* holder,
                                                            String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          a1,
+                          a0,
+                          a2,
+                          a3,
+                          t0,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(INTERCEPTOR, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadArrayLength(masm(), a1, a2, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
+  __ bind(&miss);
+  __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+
+  // Check the name hasn't changed.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
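+  // DispatchMap tail-calls the element-kind stub when the receiver map
+  // matches receiver_map; a smi receiver or any other map falls through
+  // to the generic miss handler below.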
+  __ DispatchMap(a1,
+                 a2,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(a1, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
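+  // Compare the receiver map against each map handled by this stub and
+  // tail-call the matching handler IC; unhandled maps fall through to the
+  // miss handler.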
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+  }
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -558,39 +3148,1345 @@
                                                        int index,
                                                        Map* transition,
                                                        String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
+
+  // Check that the name has not changed.
+  __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+
+  // a3 is used as scratch register. a1 and a2 keep their values if a jump to
+  // the miss label is generated.
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a2, a1, a3,
+                     &miss);
+  __ bind(&miss);
+
+  __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
+  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  // -----------------------------------
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a2,
+                 a3,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(a2, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
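+  // Same dispatch as the keyed load case: tail-call the handler whose map
+  // matches the receiver, otherwise fall through to the miss handler.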
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+  }
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // a0    : argc
+  // a1    : constructor
+  // ra    : return address
+  // [sp]  : last argument
+  Label generic_stub_call;
+
+  // Use t7 for holding undefined which is used in several places below.
+  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function, thereby hitting the break points.
+  __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
+  __ Branch(&generic_stub_call, ne, a2, Operand(t7));
+#endif
+
+  // Load the initial map and verify that it is in fact a map.
+  // a1: constructor function
+  // t7: undefined
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+  __ And(t0, a2, Operand(kSmiTagMask));
+  __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+  __ GetObjectType(a2, a3, t0);
+  __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
+
+#ifdef DEBUG
+  // Cannot construct functions this way.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // t7: undefined
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  __ Check(ne, "Function constructed by construct stub.",
+      a3, Operand(JS_FUNCTION_TYPE));
+#endif
+
+  // Now allocate the JSObject in new space.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // t7: undefined
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ AllocateInNewSpace(a3,
+                        t4,
+                        t5,
+                        t6,
+                        &generic_stub_call,
+                        SIZE_IN_WORDS);
+
+  // Allocated the JSObject, now initialize the fields. Map is set to initial
+  // map and properties and elements are set to empty fixed array.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // a3: object size (in words)
+  // t4: JSObject (not tagged)
+  // t7: undefined
+  __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+  __ mov(t5, t4);
+  __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+  __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+  __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+  __ Addu(t5, t5, Operand(3 * kPointerSize));
+  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
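+  // t5 now points at the first in-object property of the new object.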
+
+  // Calculate the location of the first argument. The stack contains only the
+  // argc arguments.
+  __ sll(a1, a0, kPointerSizeLog2);
+  __ Addu(a1, a1, sp);
+
+  // Fill all the in-object properties with undefined.
+  // a0: argc
+  // a1: first argument
+  // a3: object size (in words)
+  // t4: JSObject (not tagged)
+  // t5: First in-object property of JSObject (not tagged)
+  // t7: undefined
+  // Fill the initialized properties with a constant value or a passed argument
+  // depending on the this.x = ...; assignment in the function.
+  SharedFunctionInfo* shared = function->shared();
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed, next;
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
+      // Argument passed - find it on the stack.
+      __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
+      __ sw(a2, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+      __ jmp(&next);
+      __ bind(&not_passed);
+      // Set the property to undefined.
+      __ sw(t7, MemOperand(t5));
+      __ Addu(t5, t5, Operand(kPointerSize));
+      __ bind(&next);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ li(a2, Operand(constant));
+      __ sw(a2, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  ASSERT(function->has_initial_map());
+  for (int i = shared->this_property_assignments_count();
+       i < function->initial_map()->inobject_properties();
+       i++) {
+      __ sw(t7, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+  }
+
+  // a0: argc
+  // t4: JSObject (not tagged)
+  // Move argc to a1 and the JSObject to return to v0 and tag it.
+  __ mov(a1, a0);
+  __ mov(v0, t4);
+  __ Or(v0, v0, Operand(kHeapObjectTag));
+
+  // v0: JSObject
+  // a1: argc
+  // Remove caller arguments and receiver from the stack and return.
+  __ sll(t0, a1, kPointerSizeLog2);
+  __ Addu(sp, sp, t0);
+  __ Addu(sp, sp, Operand(kPointerSize));
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
+  __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
+  __ Ret();
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
+  Handle<Code> generic_construct_stub =
+      masm()->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow, miss_force_generic;
+
+  Register key = a0;
+  Register receiver = a1;
+
+  __ JumpIfNotSmi(key, &miss_force_generic);
+  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ sra(a2, a0, kSmiTagSize);
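+  // Probe the number dictionary with the untagged key in a2; the value is
+  // left in v0 and a failed lookup jumps to the slow case.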
+  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
+  __ Ret();
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, a2, a3);
+  // Entry registers are intact.
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case, call the runtime.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+     masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+static bool IsElementTypeSigned(ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+      return true;
+
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      return false;
+
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      return false;
+  }
+  return false;
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow, failed_allocation;
+
+  Register key = a0;
+  Register receiver = a1;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &miss_force_generic);
+
+  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // a3: elements array
+
+  // Check that the index is in range.
+  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+  __ sra(t2, key, kSmiTagSize);
+  // Unsigned comparison catches both negative and too-large values.
+  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
+
+  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+  // a3: base pointer of external storage
+
+  // We do not untag the smi key; instead we work with it as if it were
+  // premultiplied by 2.
+  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+
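+  // Each case below turns the premultiplied key into a byte offset for its
+  // element size: bytes shift it right by one, 16-bit elements use it as is,
+  // and wider elements shift it (or its untagged copy in t2) further left.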
+  Register value = a2;
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+      __ srl(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lb(value, MemOperand(t3, 0));
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      __ srl(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lbu(value, MemOperand(t3, 0));
+      break;
+    case EXTERNAL_SHORT_ELEMENTS:
+      __ addu(t3, a3, key);
+      __ lh(value, MemOperand(t3, 0));
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ addu(t3, a3, key);
+      __ lhu(value, MemOperand(t3, 0));
+      break;
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      __ sll(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lw(value, MemOperand(t3, 0));
+      break;
+    case EXTERNAL_FLOAT_ELEMENTS:
+      __ sll(t3, t2, 2);
+      __ addu(t3, a3, t3);
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+        __ lwc1(f0, MemOperand(t3, 0));
+      } else {
+        __ lw(value, MemOperand(t3, 0));
+      }
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      __ sll(t2, key, 2);
+      __ addu(t3, a3, t2);
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+        __ ldc1(f0, MemOperand(t3, 0));
+      } else {
+        // t3: pointer to the beginning of the double we want to load.
+        __ lw(a2, MemOperand(t3, 0));
+        __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
+      }
+      break;
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // a2: value
+  // For float array type:
+  // f0: value (if FPU is supported)
+  // a2: value (if FPU is not supported)
+  // For double array type:
+  // f0: value (if FPU is supported)
+  // a2/a3: value (if FPU is not supported)
+
+  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
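+    // Subtracting 0xC0000000 is the same as adding 0x40000000, which sets
+    // the sign bit exactly when the value is outside the 31-bit smi range.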
+    __ Subu(t3, value, Operand(0xC0000000));  // Non-smi value gives neg result.
+    __ Branch(&box_int, lt, t3, Operand(zero_reg));
+    // Tag integer as smi and return it.
+    __ sll(v0, value, kSmiTagSize);
+    __ Ret();
+
+    __ bind(&box_int);
+    // Allocate a HeapNumber for the result and perform int-to-double
+    // conversion.
+    // The arm version uses a temporary here to save r0, but we don't need to
+    // (a0 is not modified).
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
+
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      __ mtc1(value, f0);
+      __ cvt_d_w(f0, f0);
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+      __ Ret();
+    } else {
+      Register dst1 = t2;
+      Register dst2 = t3;
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm,
+                                              value,
+                                              dest,
+                                              f0,
+                                              dst1,
+                                              dst2,
+                                              t1,
+                                              f2);
+      __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ Ret();
+    }
+  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle either of the top two bits being set in the value.
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      Label pl_box_int;
+      __ And(t2, value, Operand(0xC0000000));
+      __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
+
+      // It can fit in an Smi.
+      // Tag integer as smi and return it.
+      __ sll(v0, value, kSmiTagSize);
+      __ Ret();
+
+      __ bind(&pl_box_int);
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
+      // registers - also when jumping due to exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
+
+      // This is replaced by a macro:
+      // __ mtc1(value, f0);     // LS 32-bits.
+      // __ mtc1(zero_reg, f1);  // MS 32-bits are all zero.
+      // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
+
+      __ Cvt_d_uw(f0, value, f22);
+
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+
+      __ Ret();
+    } else {
+      // Check whether unsigned integer fits into smi.
+      Label box_int_0, box_int_1, done;
+      __ And(t2, value, Operand(0x80000000));
+      __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
+      __ And(t2, value, Operand(0x40000000));
+      __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
+
+      // Tag integer as smi and return it.
+      __ sll(v0, value, kSmiTagSize);
+      __ Ret();
+
+      Register hiword = value;  // a2.
+      Register loword = a3;
+
+      __ bind(&box_int_0);
+      // Integer does not have leading zeros.
+      GenerateUInt2Double(masm, hiword, loword, t0, 0);
+      __ Branch(&done);
+
+      __ bind(&box_int_1);
+      // Integer has one leading zero.
+      GenerateUInt2Double(masm, hiword, loword, t0, 1);
+
+      __ bind(&done);
+      // Integer was converted to double in registers hiword:loword.
+      // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
+      // clobbers all registers - also when jumping due to exhausted young
+      // space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
+
+      __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
+      __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
+
+      __ mov(v0, t2);
+      __ Ret();
+    }
+  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // The float (single) value is already in fpu reg f0 (if we use float).
+      __ cvt_d_s(f0, f0);
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // FPU is not available, do manual single to double conversion.
+
+      // a2: floating point value (binary32).
+      // v0: heap number for result
+
+      // Extract mantissa to t4.
+      __ And(t4, value, Operand(kBinary32MantissaMask));
+
+      // Extract exponent to t5.
+      __ srl(t5, value, kBinary32MantissaBits);
+      __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+      Label exponent_rebiased;
+      __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
+
+      __ li(t0, 0x7ff);
+      __ Xor(t1, t5, Operand(0xFF));
+      __ movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
+      __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));  // t5 was 0xff.
+
+      // Rebias exponent.
+      __ Addu(t5,
+              t5,
+              Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+      __ bind(&exponent_rebiased);
+      __ And(a2, value, Operand(kBinary32SignMask));
+      value = no_reg;
+      __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
+      __ or_(a2, a2, t0);
+
+      // Shift mantissa.
+      static const int kMantissaShiftForHiWord =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaShiftForLoWord =
+          kBitsPerInt - kMantissaShiftForHiWord;
+
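+      // The binary32 mantissa has more bits than fit in the HeapNumber's
+      // top word, so the low-order bits spill over into the low word.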
+      __ srl(t0, t4, kMantissaShiftForHiWord);
+      __ or_(a2, a2, t0);
+      __ sll(a0, t4, kMantissaShiftForLoWord);
+
+      __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ Ret();
+    }
+
+  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // The double value is already in f0
+      __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+
+      __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ Ret();
+    }
+
+  } else {
+    // Tag integer as smi and return it.
+    __ sll(v0, value, kSmiTagSize);
+    __ Ret();
+  }
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, a2, a3);
+
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label slow, check_heap_number, miss_force_generic;
+
+  // Register usage.
+  Register value = a0;
+  Register key = a1;
+  Register receiver = a2;
+  // a3 mostly holds the elements array or the destination external array.
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &miss_force_generic);
+
+  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check that the index is in range.
+  __ SmiUntag(t0, key);
+  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // a3: external array.
+  // t0: key (integer).
+
+  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
+    // Double to pixel conversion is only implemented in the runtime for now.
+    __ JumpIfNotSmi(value, &slow);
+  } else {
+    __ JumpIfNotSmi(value, &check_heap_number);
+  }
+  __ SmiUntag(t1, value);
+  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+  // a3: base pointer of external storage.
+  // t0: key (integer).
+  // t1: value (integer).
+
+  switch (elements_kind) {
+    case EXTERNAL_PIXEL_ELEMENTS: {
+      // Clamp the value to [0..255].
+      // v0 is used as a scratch register here.
+      Label done;
+      __ li(v0, Operand(255));
+      // Normal branch: nop in delay slot.
+      __ Branch(&done, gt, t1, Operand(v0));
+      // Use delay slot in this branch.
+      __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
+      __ mov(v0, zero_reg);  // In delay slot.
+      __ mov(v0, t1);  // Value is in range 0..255.
+      __ bind(&done);
+      __ mov(t1, v0);
+      __ addu(t8, a3, t0);
+      __ sb(t1, MemOperand(t8, 0));
+      }
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      __ addu(t8, a3, t0);
+      __ sb(t1, MemOperand(t8, 0));
+      break;
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ sll(t8, t0, 1);
+      __ addu(t8, a3, t8);
+      __ sh(t1, MemOperand(t8, 0));
+      break;
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      __ sll(t8, t0, 2);
+      __ addu(t8, a3, t8);
+      __ sw(t1, MemOperand(t8, 0));
+      break;
+    case EXTERNAL_FLOAT_ELEMENTS:
+      // Perform int-to-float conversion and store to memory.
+      StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      __ sll(t8, t0, 3);
+      __ addu(a3, a3, t8);
+      // a3: effective address of the double element
+      FloatingPointHelper::Destination destination;
+      if (CpuFeatures::IsSupported(FPU)) {
+        destination = FloatingPointHelper::kFPURegisters;
+      } else {
+        destination = FloatingPointHelper::kCoreRegisters;
+      }
+      FloatingPointHelper::ConvertIntToDouble(
+          masm, t1, destination,
+          f0, t2, t3,  // These are: double_dst, dst1, dst2.
+          t0, f2);  // These are: scratch2, single_scratch.
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        CpuFeatures::Scope scope(FPU);
+        __ sdc1(f0, MemOperand(a3, 0));
+      } else {
+        __ sw(t2, MemOperand(a3, 0));
+        __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
+      }
+      break;
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+
+  // Entry registers are intact, a0 holds the value which is the return value.
+  __ mov(v0, value);
+  __ Ret();
+
+  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
+    // a3: external array.
+    // t0: index (integer).
+    __ bind(&check_heap_number);
+    __ GetObjectType(value, t1, t2);
+    __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
+
+    __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+    // a3: base pointer of external storage.
+    // t0: key (integer).
+
+    // The WebGL specification leaves the behavior of storing NaN and
+    // +/-Infinity into integer arrays basically undefined. For more
+    // reproducible behavior, convert these to zero.
+
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+
+      __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        __ cvt_s_d(f0, f0);
+        __ sll(t8, t0, 2);
+        __ addu(t8, a3, t8);
+        __ swc1(f0, MemOperand(t8, 0));
+      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+        __ sll(t8, t0, 3);
+        __ addu(t8, a3, t8);
+        __ sdc1(f0, MemOperand(t8, 0));
+      } else {
+        __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
+
+        switch (elements_kind) {
+          case EXTERNAL_BYTE_ELEMENTS:
+          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+            __ addu(t8, a3, t0);
+            __ sb(t3, MemOperand(t8, 0));
+            break;
+          case EXTERNAL_SHORT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
+            __ sh(t3, MemOperand(t8, 0));
+            break;
+          case EXTERNAL_INT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+            __ sll(t8, t0, 2);
+            __ addu(t8, a3, t8);
+            __ sw(t3, MemOperand(t8, 0));
+            break;
+          case EXTERNAL_PIXEL_ELEMENTS:
+          case EXTERNAL_FLOAT_ELEMENTS:
+          case EXTERNAL_DOUBLE_ELEMENTS:
+          case FAST_ELEMENTS:
+          case FAST_DOUBLE_ELEMENTS:
+          case DICTIONARY_ELEMENTS:
+          case NON_STRICT_ARGUMENTS_ELEMENTS:
+            UNREACHABLE();
+            break;
+        }
+      }
+
+      // Entry registers are intact, a0 holds the value
+      // which is the return value.
+      __ mov(v0, value);
+      __ Ret();
+    } else {
+      // FPU is not available, do manual conversions.
+
+      __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
+      __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        Label done, nan_or_infinity_or_zero;
+        static const int kMantissaInHiWordShift =
+            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+        static const int kMantissaInLoWordShift =
+            kBitsPerInt - kMantissaInHiWordShift;
+
+        // Test for all special exponent values: zeros, subnormal numbers, NaNs
+        // and infinities. All these should be converted to 0.
+        __ li(t5, HeapNumber::kExponentMask);
+        __ and_(t6, t3, t5);
+        __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
+
+        __ xor_(t1, t6, t5);
+        __ li(t2, kBinary32ExponentMask);
+        __ movz(t6, t2, t1);  // Only if t6 is equal to t5.
+        __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg));
+
+        // Rebias exponent.
+        __ srl(t6, t6, HeapNumber::kExponentShift);
+        __ Addu(t6,
+                t6,
+                Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+        __ li(t1, Operand(kBinary32MaxExponent));
+        __ Slt(t1, t1, t6);
+        __ And(t2, t3, Operand(HeapNumber::kSignMask));
+        __ Or(t2, t2, Operand(kBinary32ExponentMask));
+        __ movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
+        __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
+
+        __ Slt(t1, t6, Operand(kBinary32MinExponent));
+        __ And(t2, t3, Operand(HeapNumber::kSignMask));
+        __ movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
+        __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
+
+        __ And(t7, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ sll(t3, t3, kMantissaInHiWordShift);
+        __ or_(t7, t7, t3);
+        __ srl(t4, t4, kMantissaInLoWordShift);
+        __ or_(t7, t7, t4);
+        __ sll(t6, t6, kBinary32ExponentShift);
+        __ or_(t3, t7, t6);
+
+        __ bind(&done);
+        __ sll(t9, a1, 2);
+        __ addu(t9, a2, t9);
+        __ sw(t3, MemOperand(t9, 0));
+
+        // Entry registers are intact, a0 holds the value which is the return
+        // value.
+        __ mov(v0, value);
+        __ Ret();
+
+        __ bind(&nan_or_infinity_or_zero);
+        __ And(t7, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ or_(t6, t6, t7);
+        __ sll(t3, t3, kMantissaInHiWordShift);
+        __ or_(t6, t6, t3);
+        __ srl(t4, t4, kMantissaInLoWordShift);
+        __ or_(t3, t6, t4);
+        __ Branch(&done);
+      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+        __ sll(t8, t0, 3);
+        __ addu(t8, a3, t8);
+        // t8: effective address of destination element.
+        __ sw(t4, MemOperand(t8, 0));
+        __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ Ret();
+      } else {
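+        // No FPU: truncate the double by hand by decoding the exponent and
+        // shifting the mantissa into a 32-bit integer. Zeros, NaNs,
+        // infinities and too-small magnitudes become zero; too-large
+        // magnitudes become min_value.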
+        bool is_signed_type = IsElementTypeSigned(elements_kind);
+        int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+        int32_t min_value    = is_signed_type ? 0x80000000 : 0x00000000;
+
+        Label done, sign;
+
+        // Test for all special exponent values: zeros, subnormal numbers, NaNs
+        // and infinities. All these should be converted to 0.
+        __ li(t5, HeapNumber::kExponentMask);
+        __ and_(t6, t3, t5);
+        __ movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
+        __ Branch(&done, eq, t6, Operand(zero_reg));
+
+        __ xor_(t2, t6, t5);
+        __ movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
+        __ Branch(&done, eq, t6, Operand(t5));
+
+        // Unbias exponent.
+        __ srl(t6, t6, HeapNumber::kExponentShift);
+        __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
+        // If exponent is negative then result is 0.
+        __ slt(t2, t6, zero_reg);
+        __ movn(t3, zero_reg, t2);  // Only if exponent is negative.
+        __ Branch(&done, lt, t6, Operand(zero_reg));
+
+        // If exponent is too big then result is minimal value.
+        __ slti(t1, t6, meaningfull_bits - 1);
+        __ li(t2, min_value);
+        __ movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
+        __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
+
+        __ And(t5, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+        __ li(t9, HeapNumber::kMantissaBitsInTopWord);
+        __ subu(t6, t9, t6);
+        __ slt(t1, t6, zero_reg);
+        __ srlv(t2, t3, t6);
+        __ movz(t3, t2, t1);  // Only if t6 is positive.
+        __ Branch(&sign, ge, t6, Operand(zero_reg));
+
+        __ subu(t6, zero_reg, t6);
+        __ sllv(t3, t3, t6);
+        __ li(t9, meaningfull_bits);
+        __ subu(t6, t9, t6);
+        __ srlv(t4, t4, t6);
+        __ or_(t3, t3, t4);
+
+        __ bind(&sign);
+        __ subu(t2, t3, zero_reg);
+        __ movz(t3, t2, t5);  // Only if t5 is zero.
+
+        __ bind(&done);
+
+        // Result is in t3.
+        // This switch block should be exactly the same as above (FPU mode).
+        switch (elements_kind) {
+          case EXTERNAL_BYTE_ELEMENTS:
+          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+            __ addu(t8, a3, t0);
+            __ sb(t3, MemOperand(t8, 0));
+            break;
+          case EXTERNAL_SHORT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
+            __ sh(t3, MemOperand(t8, 0));
+            break;
+          case EXTERNAL_INT_ELEMENTS:
+          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+            __ sll(t8, t0, 2);
+            __ addu(t8, a3, t8);
+            __ sw(t3, MemOperand(t8, 0));
+            break;
+          case EXTERNAL_PIXEL_ELEMENTS:
+          case EXTERNAL_FLOAT_ELEMENTS:
+          case EXTERNAL_DOUBLE_ELEMENTS:
+          case FAST_ELEMENTS:
+          case FAST_DOUBLE_ELEMENTS:
+          case DICTIONARY_ELEMENTS:
+          case NON_STRICT_ARGUMENTS_ELEMENTS:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, a2, a3);
+  // Entry registers are intact.
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case, call the runtime.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+     masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
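
The non-FPU branch above decodes the IEEE 754 bit pattern of the incoming double by hand before storing it into the external array. As a rough orientation only, the same conversion can be written in portable C++; the helper name, the simplified edge-case handling, and folding the sign in by plain negation are illustrative assumptions, not the stub's actual code.

#include <stdint.h>

// Sketch of the manual double-to-int conversion used when no FPU is present.
// 'hi'/'lo' are the upper and lower 32 bits of the IEEE 754 double as loaded
// from the HeapNumber; INT_MIN-style edge cases are glossed over.
static int32_t SketchDoubleToInt32(uint32_t hi, uint32_t lo, bool is_signed) {
  const uint32_t kExponentMask = 0x7FF00000;
  const uint32_t kMantissaMask = 0x000FFFFF;
  const int kExponentShift = 20;
  const int kExponentBias = 1023;
  const int kMantissaBitsInTopWord = 20;
  const int meaningful_bits = is_signed ? 31 : 32;

  uint32_t biased = (hi & kExponentMask) >> kExponentShift;
  if (biased == 0) return 0;       // Zero or subnormal: converted to 0.
  if (biased == 0x7FF) return 0;   // NaN or infinity: converted to 0.
  int exponent = static_cast<int>(biased) - kExponentBias;
  if (exponent < 0) return 0;      // |value| < 1: converted to 0.
  if (exponent >= meaningful_bits - 1) {
    // Too large: clamp to the minimal value, as the stub does.
    return is_signed ? static_cast<int32_t>(0x80000000u) : 0;
  }

  // Reconstruct the mantissa with its implicit leading 1 and shift the
  // integer part into place, pulling in bits from the low word if needed.
  uint32_t mantissa = (hi & kMantissaMask) | (1u << kMantissaBitsInTopWord);
  int shift = kMantissaBitsInTopWord - exponent;
  uint32_t magnitude = (shift >= 0)
      ? (mantissa >> shift)
      : ((mantissa << -shift) | (lo >> (32 + shift)));
  return (hi & 0x80000000u) ? -static_cast<int32_t>(magnitude)
                            : static_cast<int32_t>(magnitude);
}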
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(a0, &miss_force_generic);
+
+  // Get the elements array.
+  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ AssertFastElements(a2);
+
+  // Check that the key is within bounds.
+  __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+
+  // Load the result and make sure it's not the hole.
+  __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, t0, a3);
+  __ lw(t0, MemOperand(t0));
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+  __ Branch(&miss_force_generic, eq, t0, Operand(t1));
+  __ mov(v0, t0);
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
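
GenerateLoadFastElement reduces to three guards before the element is returned: the key must be a smi, it must be below the elements array's length, and the slot must not hold the hole. A minimal stand-alone sketch of that control flow, with plain C++ types standing in for V8's tagged values (the struct and the hole sentinel are assumptions for illustration):

#include <cstddef>
#include <cstdint>

// Stand-in types only; the real stub works on tagged pointers and tail-jumps
// to the generic KeyedLoadIC on a miss instead of returning false.
struct FastElementsSketch {
  static const intptr_t kTheHoleSentinel = -1;  // Placeholder for the hole.
  const intptr_t* elements;
  size_t length;

  bool TryLoad(bool key_is_smi, size_t index, intptr_t* out) const {
    if (!key_is_smi) return false;      // JumpIfNotSmi -> miss_force_generic.
    if (index >= length) return false;  // Unsigned bounds check ('hs').
    intptr_t value = elements[index];
    if (value == kTheHoleSentinel) return false;  // Hole -> miss.
    *out = value;
    return true;
  }
};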
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  Register key_reg = a0;
+  Register receiver_reg = a1;
+  Register elements_reg = a2;
+  Register heap_number_reg = a2;
+  Register indexed_double_offset = a3;
+  Register scratch = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
+  Register heap_number_map = t3;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array.
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  // Load the upper word of the double in the fixed array and test for NaN.
+  __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
+  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+  __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
+
+  // Non-NaN. Allocate a new heap number and copy the double value into it.
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+                        heap_number_map, &slow_allocate_heapnumber);
+
+  // Don't need to reload the upper 32 bits of the double, it's already in
+  // scratch.
+  __ sw(scratch, FieldMemOperand(heap_number_reg,
+                                 HeapNumber::kExponentOffset));
+  __ lw(scratch, FieldMemOperand(indexed_double_offset,
+                                 FixedArray::kHeaderSize));
+  __ sw(scratch, FieldMemOperand(heap_number_reg,
+                                 HeapNumber::kMantissaOffset));
+
+  __ mov(v0, heap_number_reg);
+  __ Ret();
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
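
The hole test above works because fast double arrays mark missing entries with one dedicated NaN bit pattern, so comparing only the upper 32 bits of the slot is enough before the value is boxed into a freshly allocated HeapNumber. A hedged sketch of that check; the constant below is a placeholder, not V8's actual kHoleNanUpper32:

#include <cstdint>
#include <cstring>

const uint32_t kSketchHoleNanUpper32 = 0x7FF7FFFF;  // Placeholder value.

// True if the slot holds the array's hole marker rather than a real double.
inline bool IsHoleSlot(double slot) {
  uint64_t bits;
  std::memcpy(&bits, &slot, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kSketchHoleNanUpper32;
}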
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  //  -- a4    : scratch (elements)
+  // -----------------------------------
+  Label miss_force_generic;
+
+  Register value_reg = a0;
+  Register key_reg = a1;
+  Register receiver_reg = a2;
+  Register scratch = a3;
+  Register elements_reg = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not a
+  // copy-on-write ('cow') array.
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis.
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  __ Addu(scratch,
+          elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(scratch3, scratch2, scratch);
+  __ sw(value_reg, MemOperand(scratch3));
+  __ RecordWrite(scratch, Operand(scratch2), receiver_reg, elements_reg);
+
+  // value_reg (a0) is preserved.
+  // Done.
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+    MacroAssembler* masm,
+    bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  //  -- t0    : scratch (elements_reg)
+  //  -- t1    : scratch (mantissa_reg)
+  //  -- t2    : scratch (exponent_reg)
+  //  -- t3    : scratch4
+  // -----------------------------------
+  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+
+  Register value_reg = a0;
+  Register key_reg = a1;
+  Register receiver_reg = a2;
+  Register scratch = a3;
+  Register elements_reg = t0;
+  Register mantissa_reg = t1;
+  Register exponent_reg = t2;
+  Register scratch4 = t3;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  __ lw(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ lw(scratch,
+          FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis; an unsigned compare catches both negative and
+  // out-of-bounds indexes.
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have an exponent word greater (signed) than
+  // 0x7ff00000.
+  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
+
+  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(scratch, elements_reg, Operand(scratch4));
+  __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ sw(exponent_reg, FieldMemOperand(scratch, offset));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value_reg);  // In delay slot.
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
+  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+
+  __ bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
+  __ Addu(scratch, elements_reg,
+          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(scratch, scratch, scratch4);
+  // scratch is now the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(FPU)) {
+    destination = FloatingPointHelper::kFPURegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+
+  Register untagged_value = receiver_reg;
+  __ SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm,
+      untagged_value,
+      destination,
+      f0,
+      mantissa_reg,
+      exponent_reg,
+      scratch4,
+      f2);
+  if (destination == FloatingPointHelper::kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    __ sdc1(f0, MemOperand(scratch, 0));
+  } else {
+    __ sw(mantissa_reg, MemOperand(scratch, 0));
+    __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+  }
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value_reg);  // In delay slot.
+
+  // Handle store cache miss, replacing the ic with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
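
Conversely, the store stub never writes an arbitrary NaN into the array: any NaN value is replaced by one canonical pattern before being stored, which keeps stored values from ever aliasing the hole marker the load stub checks for. A minimal sketch of that canonicalization; the bit pattern here is an assumption, not the value returned by canonical_not_the_hole_nan_as_double():

#include <cmath>
#include <cstdint>
#include <cstring>

// Sketch: replace any NaN with a single canonical NaN before storing it into
// a fast double array, so no stored value can look like the hole.
inline double CanonicalizeForDoubleArray(double value) {
  if (std::isnan(value)) {
    const uint64_t kSketchCanonicalNan = 0x7FF8000000000000ull;  // Assumed.
    double canonical;
    std::memcpy(&canonical, &kSketchCanonicalNan, sizeof(canonical));
    return canonical;
  }
  return value;
}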
 
 
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
deleted file mode 100644
index 22fe9f0..0000000
--- a/src/mips/virtual-frame-mips.cc
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToA1A0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA1() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected,
-                           Condition cond,
-                           Register r1,
-                           const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected,
-                           Condition cond,
-                           Register r1,
-                           const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTOSTo(
-    VirtualFrame::TopOfStack expected_top_of_stack_state,
-    Condition cond,
-    Register r1,
-    const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Enter() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Exit() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  UNIMPLEMENTED_MIPS();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                 InvokeJSFlags flags,
-                                 int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedStoreIC() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
-                                  RelocInfo::Mode rmode,
-                                  int dropped_args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-//    NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
-const bool VirtualFrame::kA0InUse[TOS_STATES] =
-    { false,            true,   false,  true,      true };
-const bool VirtualFrame::kA1InUse[TOS_STATES] =
-    { false,            false,  true,   true,      true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
-    { 0,                1,      1,      2,         2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
-    { a0,               a0,     a1,     a1,        a0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
-    { a0,               a0,     a1,     a0,        a1 };
-const Register VirtualFrame::kAllocatedRegisters[
-    VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
-// Popping is done by the transition implied by kStateAfterPop.  Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
-    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush.  Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
-    { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Pop() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1A0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::Peek() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-Register VirtualFrame::Peek2() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::Dup() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Dup2() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPop(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPush(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPushReversed(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAll() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
deleted file mode 100644
index cf30b09..0000000
--- a/src/mips/virtual-frame-mips.h
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  class RegisterAllocationScope;
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled. The constructor spills the code
-  // generator's current frame, and keeps it spilled.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    explicit SpilledScope(VirtualFrame* frame)
-      : old_is_spilled_(
-          Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
-      if (frame != NULL) {
-        if (!old_is_spilled_) {
-          frame->SpillAll();
-        } else {
-          frame->AssertIsSpilled();
-        }
-      }
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
-    }
-    ~SpilledScope() {
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
-          old_is_spilled_);
-    }
-    static bool is_spilled() {
-      return Isolate::Current()->is_virtual_frame_in_spilled_scope();
-    }
-
-   private:
-    int old_is_spilled_;
-
-    SpilledScope() {}
-
-    friend class RegisterAllocationScope;
-  };
-
-  class RegisterAllocationScope BASE_EMBEDDED {
-   public:
-    // A utility class to introduce a scope where the virtual frame
-    // is not spilled, ie. where register allocation occurs.  Eventually
-    // when RegisterAllocationScope is ubiquitous it can be removed
-    // along with the (by then unused) SpilledScope class.
-    inline explicit RegisterAllocationScope(CodeGenerator* cgen);
-    inline ~RegisterAllocationScope();
-
-   private:
-    CodeGenerator* cgen_;
-    bool old_is_spilled_;
-
-    RegisterAllocationScope() {}
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct an invalid virtual frame, used by JumpTargets.
-  explicit inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  inline CodeGenerator* cgen() const;
-  inline MacroAssembler* masm();
-
-  // The number of elements on the virtual frame.
-  int element_count() const { return element_count_; }
-
-  // The height of the virtual expression stack.
-  inline int height() const;
-
-  bool is_used(int num) {
-    switch (num) {
-      case 0: {  // a0.
-        return kA0InUse[top_of_stack_state_];
-      }
-      case 1: {  // a1.
-        return kA1InUse[top_of_stack_state_];
-      }
-      case 2:
-      case 3:
-      case 4:
-      case 5:
-      case 6: {  // a2 to a3, t0 to t2.
-        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
-        ASSERT(num >= kFirstAllocatedRegister);
-        if ((register_allocation_map_ &
-             (1 << (num - kFirstAllocatedRegister))) == 0) {
-          return false;
-        } else {
-          return true;
-        }
-      }
-      default: {
-        ASSERT(num < kFirstAllocatedRegister ||
-               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
-        return false;
-      }
-    }
-  }
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed). No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call). No code is emitted except to bring the
-  // frame to a spilled state.
-  void Forget(int count);
-
-
-  // Spill all values from the frame to memory.
-  void SpillAll();
-
-  void AssertIsSpilled() const {
-    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-    ASSERT(register_allocation_map_ == 0);
-  }
-
-  void AssertIsNotSpilled() {
-    ASSERT(!SpilledScope::is_spilled());
-  }
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    UNIMPLEMENTED();
-  }
-
-  // Spill all occurrences of an arbitrary register if possible. Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references). Unimplemented.
-  Register SpillAnyRegister();
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame. As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(const VirtualFrame* expected,
-               Condition cond = al,
-               Register r1 = no_reg,
-               const Operand& r2 = Operand(no_reg));
-
-  void MergeTo(VirtualFrame* expected,
-               Condition cond = al,
-               Register r1 = no_reg,
-               const Operand& r2 = Operand(no_reg));
-
-  // Checks whether this frame can be branched to by the other frame.
-  bool IsCompatibleWith(const VirtualFrame* other) const {
-    return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
-  }
-
-  inline void ForgetTypeInfo() {
-    tos_known_smi_map_ = 0;
-  }
-
-  // Detach a frame from its code generator, perhaps temporarily. This
-  // tells the register allocator that it is free to use frame-internal
-  // registers. Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-  }
-
-  // (Re)attach a frame to its code generator. This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences. After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used. Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by elements in the virtual frame.
-  // This avoids generating unnecessary merge code when jumping to the shared
-  // return site. No spill code emitted. Value to return should be in v0.
-  inline void PrepareForReturn();
-
-  // Number of local variables after when we use a loop for allocating.
-  static const int kLocalVarBound = 5;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // The current top of the expression stack as an assembly operand.
-  MemOperand Top() {
-    AssertIsSpilled();
-    return MemOperand(sp, 0);
-  }
-
-  // An element of the expression stack as an assembly operand.
-  MemOperand ElementAt(int index) {
-    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
-    ASSERT(adjusted_index >= 0);
-    return MemOperand(sp, adjusted_index * kPointerSize);
-  }
-
-  bool KnownSmiAt(int index) {
-    if (index >= kTOSKnownSmiMapSize) return false;
-    return (tos_known_smi_map_ & (1 << index)) != 0;
-  }
-  // A frame-allocated local as an assembly operand.
-  inline MemOperand LocalAt(int index);
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // The function frame slot.
-  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
-  // The context frame slot.
-  MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
-  // A parameter as an assembly operand.
-  inline MemOperand ParameterAt(int index);
-
-  // The receiver frame slot.
-  inline MemOperand Receiver();
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline void CallStub(CodeStub* stub, int arg_count);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  void CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  void CallRuntime(const Runtime::Function* f, int arg_count);
-  void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flag,
-                     int arg_count);
-
-  // Call load IC. Receiver is on the stack and is consumed. Result is returned
-  // in v0.
-  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
-  // Call store IC. If the load is contextual, value is found on top of the
-  // frame. If not, value and receiver are on the frame. Both are consumed.
-  // Result is returned in v0.
-  void CallStoreIC(Handle<String> name, bool is_contextual);
-
-  // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
-  // Result is returned in v0.
-  void CallKeyedLoadIC();
-
-  // Call keyed store IC. Value, key and receiver are on the stack. All three
-  // are consumed. Result is returned in v0 (and a0).
-  void CallKeyedStoreIC();
-
-  // Call into an IC stub given the number of arguments it removes
-  // from the stack. Register arguments to the IC stub are implicit,
-  // and depend on the type of IC stub.
-  void CallCodeObject(Handle<Code> ic,
-                      RelocInfo::Mode rmode,
-                      int dropped_args);
-
-  // Drop a number of elements from the top of the expression stack. May
-  // emit code to affect the physical frame. Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Pop an element from the top of the expression stack. Discards
-  // the result.
-  void Pop();
-
-  // Pop an element from the top of the expression stack.  The register
-  // will be one normally used for the top of stack register allocation
-  // so you can't hold on to it if you push on the stack.
-  Register PopToRegister(Register but_not_to_this_one = no_reg);
-
-  // Look at the top of the stack.  The register returned is aliased and
-  // must be copied to a scratch register before modification.
-  Register Peek();
-
-  // Look at the value beneath the top of the stack. The register returned is
-  // aliased and must be copied to a scratch register before modification.
-  Register Peek2();
-
-  // Duplicate the top of stack.
-  void Dup();
-
-  // Duplicate the two elements on top of stack.
-  void Dup2();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a0.
-  void SpillAllButCopyTOSToA0();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a1.
-  void SpillAllButCopyTOSToA1();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a1
-  // and the next value on the stack in a0.
-  void SpillAllButCopyTOSToA1A0();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  // Same but for multiple registers
-  void EmitMultiPop(RegList regs);
-  void EmitMultiPopReversed(RegList regs);
-
-
-  // Takes the top two elements and puts them in a0 (top element) and a1
-  // (second element).
-  void PopToA1A0();
-
-  // Takes the top element and puts it in a1.
-  void PopToA1();
-
-  // Takes the top element and puts it in a0.
-  void PopToA0();
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPushRoot(Heap::RootListIndex index);
-
-  // Overwrite the nth thing on the stack.  If the nth position is in a
-  // register then this turns into a Move, otherwise an sw.  Afterwards
-  // you can still use the register even if it is a register that can be
-  // used for TOS (a0 or a1).
-  void SetElementAt(Register reg, int this_far_down);
-
-  // Get a register which is free and which must be immediately used to
-  // push on the top of the stack.
-  Register GetTOSRegister();
-
-  // Same but for multiple registers.
-  void EmitMultiPush(RegList regs);
-  void EmitMultiPushReversed(RegList regs);
-
-  static Register scratch0() { return t4; }
-  static Register scratch1() { return t5; }
-  static Register scratch2() { return t6; }
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  // 5 states for the top of stack, which can be in memory or in a0 and a1.
-  enum TopOfStack { NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS,
-                    TOS_STATES};
-  static const int kMaxTOSRegisters = 2;
-
-  static const bool kA0InUse[TOS_STATES];
-  static const bool kA1InUse[TOS_STATES];
-  static const int kVirtualElements[TOS_STATES];
-  static const TopOfStack kStateAfterPop[TOS_STATES];
-  static const TopOfStack kStateAfterPush[TOS_STATES];
-  static const Register kTopRegister[TOS_STATES];
-  static const Register kBottomRegister[TOS_STATES];
-
-  // We allocate up to 5 locals in registers.
-  static const int kNumberOfAllocatedRegisters = 5;
-  // r2 to r6 are allocated to locals.
-  static const int kFirstAllocatedRegister = 2;
-
-  static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
-  static Register AllocatedRegister(int r) {
-    ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
-    return kAllocatedRegisters[r];
-  }
-
-  // The number of elements on the stack frame.
-  int element_count_;
-  TopOfStack top_of_stack_state_:3;
-  int register_allocation_map_:kNumberOfAllocatedRegisters;
-  static const int kTOSKnownSmiMapSize = 4;
-  unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the sp register).  For now since everything is in memory it is given
-  // by the number of elements on the not-very-virtual stack frame.
-  int stack_pointer() { return element_count_ - 1; }
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count() const;
-  inline int local_count() const;
-
-  // The index of the element that is at the processor's frame pointer
-  // (the fp register). The parameters, receiver, function, and context
-  // are below the frame pointer.
-  inline int frame_pointer() const;
-
-  // The index of the first parameter. The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame. It is immediately
-  // below the frame pointer.
-  inline int context_index();
-
-  // The index of the function slot in the frame. It is below the frame
-  // pointer and context slot.
-  inline int function_index();
-
-  // The index of the first local. Between the frame pointer and the
-  // locals lies the return address.
-  inline int local0_index() const;
-
-  // The index of the base of the expression stack.
-  inline int expression_base_index() const;
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  inline int fp_relative(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame. Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // If all top-of-stack registers are in use then the lowest one is pushed
-  // onto the physical stack and made free.
-  void EnsureOneFreeTOSRegister();
-
-  // Emit instructions to get the top of stack state from where we are to where
-  // we want to be.
-  void MergeTOSTo(TopOfStack expected_state,
-                  Condition cond = al,
-                  Register r1 = no_reg,
-                  const Operand& r2 = Operand(no_reg));
-
-  inline bool Equals(const VirtualFrame* other);
-
-  inline void LowerHeight(int count) {
-    element_count_ -= count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = 0;
-    } else {
-      tos_known_smi_map_ >>= count;
-    }
-  }
-
-  inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
-    ASSERT(known_smi_map < (1u << count));
-    element_count_ += count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = known_smi_map;
-    } else {
-      tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
-    }
-  }
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 99e9819..e3f3c48 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -174,11 +174,12 @@
 PropertyType.Field                   = 1;
 PropertyType.ConstantFunction        = 2;
 PropertyType.Callbacks               = 3;
-PropertyType.Interceptor             = 4;
-PropertyType.MapTransition           = 5;
-PropertyType.ExternalArrayTransition = 6;
-PropertyType.ConstantTransition      = 7;
-PropertyType.NullDescriptor          = 8;
+PropertyType.Handler                 = 4;
+PropertyType.Interceptor             = 5;
+PropertyType.MapTransition           = 6;
+PropertyType.ExternalArrayTransition = 7;
+PropertyType.ConstantTransition      = 8;
+PropertyType.NullDescriptor          = 9;
 
 
 // Different attributes for a property.
@@ -194,7 +195,8 @@
               Local: 1,
               With: 2,
               Closure: 3,
-              Catch: 4 };
+              Catch: 4,
+              Block: 5 };
 
 
 // Mirror hierarchy:
@@ -1242,13 +1244,17 @@
 const kFrameDetailsSourcePositionIndex = 5;
 const kFrameDetailsConstructCallIndex = 6;
 const kFrameDetailsAtReturnIndex = 7;
-const kFrameDetailsDebuggerFrameIndex = 8;
+const kFrameDetailsFlagsIndex = 8;
 const kFrameDetailsFirstDynamicIndex = 9;
 
 const kFrameDetailsNameIndex = 0;
 const kFrameDetailsValueIndex = 1;
 const kFrameDetailsNameValueSize = 2;
 
+const kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
+const kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
+const kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
+
 /**
  * Wrapper for the frame details information retrieved from the VM. The frame
  * details from the VM is an array with the following content. See runtime.cc
@@ -1261,7 +1267,7 @@
  *     5: Source position
  *     6: Construct call
  *     7: Is at return
- *     8: Debugger frame
+ *     8: Flags (debugger frame, optimized frame, inlined frame index)
  *     Arguments name, value
  *     Locals name, value
  *     Return value if any
@@ -1307,7 +1313,27 @@
 
 FrameDetails.prototype.isDebuggerFrame = function() {
   %CheckExecutionState(this.break_id_);
-  return this.details_[kFrameDetailsDebuggerFrameIndex];
+  var f = kFrameDetailsFlagDebuggerFrameMask;
+  return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
+}
+
+
+FrameDetails.prototype.isOptimizedFrame = function() {
+  %CheckExecutionState(this.break_id_);
+  var f = kFrameDetailsFlagOptimizedFrameMask;
+  return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
+}
+
+
+FrameDetails.prototype.isInlinedFrame = function() {
+  return this.inlinedFrameIndex() > 0;
+}
+
+
+FrameDetails.prototype.inlinedFrameIndex = function() {
+  %CheckExecutionState(this.break_id_);
+  var f = kFrameDetailsFlagInlinedFrameIndexMask;
+  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2
 }
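
The packed flags word replaces the old boolean debugger-frame slot: bit 0 marks a debugger frame, bit 1 an optimized frame, and bits 2 through 4 carry the inlined frame index, matching the masks 1 << 0, 1 << 1 and 7 << 2 above. A small stand-alone decoding example (the sample value is arbitrary):

#include <cstdio>

int main() {
  const int kDebuggerFrameMask = 1 << 0;
  const int kOptimizedFrameMask = 1 << 1;
  const int kInlinedFrameIndexMask = 7 << 2;

  int flags = 0xD;  // 0b1101: debugger frame, not optimized, inlined index 3.
  std::printf("debugger=%d optimized=%d inlined_index=%d\n",
              (flags & kDebuggerFrameMask) != 0,
              (flags & kOptimizedFrameMask) != 0,
              (flags & kInlinedFrameIndexMask) >> 2);
  return 0;
}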
 
 
@@ -1446,6 +1472,21 @@
 };
 
 
+FrameMirror.prototype.isOptimizedFrame = function() {
+  return this.details_.isOptimizedFrame();
+};
+
+
+FrameMirror.prototype.isInlinedFrame = function() {
+  return this.details_.isInlinedFrame();
+};
+
+
+FrameMirror.prototype.inlinedFrameIndex = function() {
+  return this.details_.inlinedFrameIndex();
+};
+
+
 FrameMirror.prototype.argumentCount = function() {
   return this.details_.argumentCount();
 };
@@ -1535,8 +1576,12 @@
 
 
 FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
-  var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
-                              source, Boolean(disable_break), opt_context_object);
+  var result = %DebugEvaluate(this.break_id_,
+                              this.details_.frameId(),
+                              this.details_.inlinedFrameIndex(),
+                              source,
+                              Boolean(disable_break),
+                              opt_context_object);
   return MakeMirror(result);
 };
 
@@ -1561,8 +1606,10 @@
     // Try to find the function as a property in the receiver. Include the
     // prototype chain in the lookup.
     var property = GetUndefinedMirror();
-    if (!receiver.isUndefined()) {
-      for (var r = receiver; !r.isNull() && property.isUndefined(); r = r.protoObject()) {
+    if (receiver.isObject()) {
+      for (var r = receiver;
+           !r.isNull() && property.isUndefined();
+           r = r.protoObject()) {
         property = r.lookupProperty(func);
       }
     }
@@ -1689,6 +1736,7 @@
   this.break_id_ = frame.break_id_;
   this.details_ = %GetScopeDetails(frame.break_id_,
                                    frame.details_.frameId(),
+                                   frame.details_.inlinedFrameIndex(),
                                    index);
 }
 
diff --git a/src/misc-intrinsics.h b/src/misc-intrinsics.h
new file mode 100644
index 0000000..5393de2
--- /dev/null
+++ b/src/misc-intrinsics.h
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MISC_INTRINSICS_H_
+#define V8_MISC_INTRINSICS_H_
+
+#include "../include/v8.h"
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Returns the index of the leading 1 bit, counting the least significant bit at
+// index 0.  (1 << IntegerLog2(x)) is a mask for the most significant bit of x.
+// Result is undefined if input is zero.
+int IntegerLog2(uint32_t value);
+
+#if defined(__GNUC__)
+
+inline int IntegerLog2(uint32_t value) {
+  return 31 - __builtin_clz(value);
+}
+
+#elif defined(_MSC_VER)
+
+#pragma intrinsic(_BitScanReverse)
+
+inline int IntegerLog2(uint32_t value) {
+  unsigned long result;             // NOLINT: MSVC intrinsic demands this type.
+  _BitScanReverse(&result, value);
+  return result;
+}
+
+#else
+
+// Default version using regular operations. Code taken from:
+// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
+inline int IntegerLog2(uint32_t value) {
+  int result, shift;
+
+  shift = (value > 0xFFFF) << 4;
+  value >>= shift;
+  result = shift;
+
+  shift = (value > 0xFF) << 3;
+  value >>= shift;
+  result |= shift;
+
+  shift = (value > 0xF) << 2;
+  value >>= shift;
+  result |= shift;
+
+  shift = (value > 0x3) << 1;
+  value >>= shift;
+  result |= shift;
+
+  result |= (value >> 1);
+
+  return result;
+}
+#endif
+
+} }  // namespace v8::internal
+
+#endif  // V8_MISC_INTRINSICS_H_
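
The portable fallback is the classic branch-light binary search over bit groups from the Stanford bit-twiddling page. A small self-contained check of the same sequence against a naive shift loop (the function names here are illustrative and not part of the header):

#include <cassert>
#include <cstdint>

// Same bit-twiddling sequence as the fallback above; (1 << IntegerLog2(x))
// isolates the most significant set bit of x.
static int BranchlessLog2(uint32_t value) {
  int result, shift;
  shift = (value > 0xFFFF) << 4;  value >>= shift;  result = shift;
  shift = (value > 0xFF) << 3;    value >>= shift;  result |= shift;
  shift = (value > 0xF) << 2;     value >>= shift;  result |= shift;
  shift = (value > 0x3) << 1;     value >>= shift;  result |= shift;
  result |= (value >> 1);
  return result;
}

// Naive reference: position of the highest set bit.
static int NaiveLog2(uint32_t value) {
  int result = -1;
  while (value != 0) {
    value >>= 1;
    ++result;
  }
  return result;
}

int main() {
  const uint32_t samples[] = {1u, 2u, 3u, 0x12345u, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t v : samples) {
    assert(BranchlessLog2(v) == NaiveLog2(v));  // e.g. 0x12345 -> 16.
  }
  return 0;
}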
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 6ecbc8c..a791dbb 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -25,9 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#include <bzlib.h>
+#endif
 #include <signal.h>
-#include <string>
-#include <map>
 
 #include "v8.h"
 
@@ -37,8 +38,6 @@
 #include "serialize.h"
 #include "list.h"
 
-// use explicit namespace to avoid clashing with types in namespace v8
-namespace i = v8::internal;
 using namespace v8;
 
 static const unsigned int kMaxCounters = 256;
@@ -85,21 +84,54 @@
 };
 
 
-// We statically allocate a set of local counters to be used if we
-// don't want to store the stats in a memory-mapped file
-static CounterCollection local_counters;
-
-
-typedef std::map<std::string, int*> CounterMap;
-typedef std::map<std::string, int*>::iterator CounterMapIterator;
-static CounterMap counter_table_;
-
-
-class CppByteSink : public i::SnapshotByteSink {
+class Compressor {
  public:
-  explicit CppByteSink(const char* snapshot_file)
-      : bytes_written_(0),
-        partial_sink_(this) {
+  virtual ~Compressor() {}
+  virtual bool Compress(i::Vector<char> input) = 0;
+  virtual i::Vector<char>* output() = 0;
+};
+
+
+class PartialSnapshotSink : public i::SnapshotByteSink {
+ public:
+  PartialSnapshotSink() : data_(), raw_size_(-1) { }
+  virtual ~PartialSnapshotSink() { data_.Free(); }
+  virtual void Put(int byte, const char* description) {
+    data_.Add(byte);
+  }
+  virtual int Position() { return data_.length(); }
+  void Print(FILE* fp) {
+    int length = Position();
+    for (int j = 0; j < length; j++) {
+      if ((j & 0x1f) == 0x1f) {
+        fprintf(fp, "\n");
+      }
+      if (j != 0) {
+        fprintf(fp, ",");
+      }
+      fprintf(fp, "%d", at(j));
+    }
+  }
+  char at(int i) { return data_[i]; }
+  bool Compress(Compressor* compressor) {
+    ASSERT_EQ(-1, raw_size_);
+    raw_size_ = data_.length();
+    if (!compressor->Compress(data_.ToVector())) return false;
+    data_.Clear();
+    data_.AddAll(*compressor->output());
+    return true;
+  }
+  int raw_size() { return raw_size_; }
+
+ private:
+  i::List<char> data_;
+  int raw_size_;
+};
+
+
+class CppByteSink : public PartialSnapshotSink {
+ public:
+  explicit CppByteSink(const char* snapshot_file) {
     fp_ = i::OS::FOpen(snapshot_file, "wb");
     if (fp_ == NULL) {
       i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
@@ -114,7 +146,18 @@
   }
 
   virtual ~CppByteSink() {
-    fprintf(fp_, "const int Snapshot::size_ = %d;\n\n", bytes_written_);
+    fprintf(fp_, "const int Snapshot::size_ = %d;\n", Position());
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+    fprintf(fp_, "const byte* Snapshot::raw_data_ = NULL;\n");
+    fprintf(fp_,
+            "const int Snapshot::raw_size_ = %d;\n\n",
+            raw_size());
+#else
+    fprintf(fp_,
+            "const byte* Snapshot::raw_data_ = Snapshot::data_;\n");
+    fprintf(fp_,
+            "const int Snapshot::raw_size_ = Snapshot::size_;\n\n");
+#endif
     fprintf(fp_, "} }  // namespace v8::internal\n");
     fclose(fp_);
   }
@@ -127,7 +170,6 @@
       int map_space_used,
       int cell_space_used,
       int large_space_used) {
-    fprintf(fp_, "};\n\n");
     fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
     fprintf(fp_,
             "const int Snapshot::pointer_space_used_ = %d;\n",
@@ -151,64 +193,98 @@
     int length = partial_sink_.Position();
     fprintf(fp_, "};\n\n");
     fprintf(fp_, "const int Snapshot::context_size_ = %d;\n",  length);
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+    fprintf(fp_,
+            "const int Snapshot::context_raw_size_ = %d;\n",
+            partial_sink_.raw_size());
+#else
+    fprintf(fp_,
+            "const int Snapshot::context_raw_size_ = "
+            "Snapshot::context_size_;\n");
+#endif
     fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
-    for (int j = 0; j < length; j++) {
-      if ((j & 0x1f) == 0x1f) {
-        fprintf(fp_, "\n");
-      }
-      char byte = partial_sink_.at(j);
-      if (j != 0) {
-        fprintf(fp_, ",");
-      }
-      fprintf(fp_, "%d", byte);
-    }
+    partial_sink_.Print(fp_);
+    fprintf(fp_, "};\n\n");
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+    fprintf(fp_, "const byte* Snapshot::context_raw_data_ = NULL;\n");
+#else
+    fprintf(fp_, "const byte* Snapshot::context_raw_data_ ="
+            " Snapshot::context_data_;\n");
+#endif
   }
 
-  virtual void Put(int byte, const char* description) {
-    if (bytes_written_ != 0) {
-      fprintf(fp_, ",");
-    }
-    fprintf(fp_, "%d", byte);
-    bytes_written_++;
-    if ((bytes_written_ & 0x1f) == 0) {
-      fprintf(fp_, "\n");
-    }
+  void WriteSnapshot() {
+    Print(fp_);
   }
 
-  virtual int Position() {
-    return bytes_written_;
-  }
-
-  i::SnapshotByteSink* partial_sink() { return &partial_sink_; }
-
-  class PartialSnapshotSink : public i::SnapshotByteSink {
-   public:
-    explicit PartialSnapshotSink(CppByteSink* parent)
-        : parent_(parent),
-          data_() { }
-    virtual ~PartialSnapshotSink() { data_.Free(); }
-    virtual void Put(int byte, const char* description) {
-      data_.Add(byte);
-    }
-    virtual int Position() { return data_.length(); }
-    char at(int i) { return data_[i]; }
-   private:
-    CppByteSink* parent_;
-    i::List<char> data_;
-  };
+  PartialSnapshotSink* partial_sink() { return &partial_sink_; }
 
  private:
   FILE* fp_;
-  int bytes_written_;
   PartialSnapshotSink partial_sink_;
 };
 
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+class BZip2Compressor : public Compressor {
+ public:
+  BZip2Compressor() : output_(NULL) {}
+  virtual ~BZip2Compressor() {
+    delete output_;
+  }
+  virtual bool Compress(i::Vector<char> input) {
+    delete output_;
+    output_ = new i::ScopedVector<char>((input.length() * 101) / 100 + 1000);
+    unsigned int output_length_ = output_->length();
+    int result = BZ2_bzBuffToBuffCompress(output_->start(), &output_length_,
+                                          input.start(), input.length(),
+                                          9, 1, 0);
+    if (result == BZ_OK) {
+      output_->Truncate(output_length_);
+      return true;
+    } else {
+      fprintf(stderr, "bzlib error code: %d\n", result);
+      return false;
+    }
+  }
+  virtual i::Vector<char>* output() { return output_; }
+
+ private:
+  i::ScopedVector<char>* output_;
+};
+
+
+class BZip2Decompressor : public StartupDataDecompressor {
+ public:
+  virtual ~BZip2Decompressor() { }
+
+ protected:
+  virtual int DecompressData(char* raw_data,
+                             int* raw_data_size,
+                             const char* compressed_data,
+                             int compressed_data_size) {
+    ASSERT_EQ(StartupData::kBZip2,
+              V8::GetCompressedStartupDataAlgorithm());
+    unsigned int decompressed_size = *raw_data_size;
+    int result =
+        BZ2_bzBuffToBuffDecompress(raw_data,
+                                   &decompressed_size,
+                                   const_cast<char*>(compressed_data),
+                                   compressed_data_size,
+                                   0, 1);
+    if (result == BZ_OK) {
+      *raw_data_size = decompressed_size;
+    }
+    return result;
+  }
+};
+#endif
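
When COMPRESS_STARTUP_DATA_BZ2 is defined, the raw snapshot bytes are squeezed with bzlib and the uncompressed size is kept so the embedder (via the BZip2Decompressor above) can restore them at startup. A minimal sketch of the compression side using the same BZ2_bzBuffToBuffCompress call; the helper and its buffer sizing are assumptions, not mksnapshot's actual code:

#include <bzlib.h>
#include <vector>

// Compress 'raw' into 'out' with bzip2, mirroring BZip2Compressor above
// (block size 9, verbosity 1, default work factor).
static bool CompressSnapshot(const std::vector<char>& raw,
                             std::vector<char>* out) {
  // bzlib wants a worst-case destination: input size plus ~1% plus a few
  // hundred bytes of slack.
  out->resize(raw.size() + raw.size() / 100 + 600);
  unsigned int out_len = static_cast<unsigned int>(out->size());
  int rc = BZ2_bzBuffToBuffCompress(out->data(), &out_len,
                                    const_cast<char*>(raw.data()),
                                    static_cast<unsigned int>(raw.size()),
                                    9, 1, 0);
  if (rc != BZ_OK) return false;
  out->resize(out_len);  // Keep only the compressed bytes.
  return true;
}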
+
+
 int main(int argc, char** argv) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // By default, log code create information in the snapshot.
   i::FLAG_log_code = true;
-#endif
+
   // Print the usage if an error occurs when parsing the command line
   // flags or if the help flag is set.
   int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
@@ -217,6 +293,14 @@
     i::FlagList::PrintHelp();
     return !i::FLAG_help;
   }
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  BZip2Decompressor natives_decompressor;
+  int bz2_result = natives_decompressor.Decompress();
+  if (bz2_result != BZ_OK) {
+    fprintf(stderr, "bzip error code: %d\n", bz2_result);
+    exit(1);
+  }
+#endif
   i::Serializer::Enable();
   Persistent<Context> context = v8::Context::New();
   ASSERT(!context.IsEmpty());
@@ -242,6 +326,14 @@
 
   ser.SerializeWeakReferences();
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  BZip2Compressor compressor;
+  if (!sink.Compress(&compressor))
+    return 1;
+  if (!sink.partial_sink()->Compress(&compressor))
+    return 1;
+#endif
+  sink.WriteSnapshot();
   sink.WritePartialSnapshot();
 
   sink.WriteSpaceUsed(
diff --git a/src/natives.h b/src/natives.h
index 1df94b0..5f34420 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,7 @@
                                      int index);
 
 enum NativeType {
-  CORE, D8, I18N
+  CORE, EXPERIMENTAL, D8, TEST
 };
 
 template <NativeType type>
@@ -52,11 +52,15 @@
   // non-debugger scripts have an index in the interval [GetDebuggerCount(),
   // GetNativesCount()).
   static int GetIndex(const char* name);
-  static Vector<const char> GetScriptSource(int index);
+  static int GetRawScriptsSize();
+  static Vector<const char> GetRawScriptSource(int index);
   static Vector<const char> GetScriptName(int index);
+  static Vector<const byte> GetScriptsSource();
+  static void SetRawScriptsSource(Vector<const char> raw_source);
 };
 
 typedef NativesCollection<CORE> Natives;
+typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
 
 } }  // namespace v8::internal
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index dd606dc..e9ca6c0 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -88,6 +88,9 @@
     case FIXED_ARRAY_TYPE:
       FixedArray::cast(this)->FixedArrayVerify();
       break;
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
+      break;
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayVerify();
       break;
@@ -116,6 +119,9 @@
     case EXTERNAL_FLOAT_ARRAY_TYPE:
       ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
       break;
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      ExternalDoubleArray::cast(this)->ExternalDoubleArrayVerify();
+      break;
     case CODE_TYPE:
       Code::cast(this)->CodeVerify();
       break;
@@ -147,13 +153,22 @@
     case JS_ARRAY_TYPE:
       JSArray::cast(this)->JSArrayVerify();
       break;
+    case JS_WEAK_MAP_TYPE:
+      JSWeakMap::cast(this)->JSWeakMapVerify();
+      break;
     case JS_REGEXP_TYPE:
       JSRegExp::cast(this)->JSRegExpVerify();
       break;
     case FILLER_TYPE:
       break;
-    case PROXY_TYPE:
-      Proxy::cast(this)->ProxyVerify();
+    case JS_PROXY_TYPE:
+      JSProxy::cast(this)->JSProxyVerify();
+      break;
+    case JS_FUNCTION_PROXY_TYPE:
+      JSFunctionProxy::cast(this)->JSFunctionProxyVerify();
+      break;
+    case FOREIGN_TYPE:
+      Foreign::cast(this)->ForeignVerify();
       break;
     case SHARED_FUNCTION_INFO_TYPE:
       SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
@@ -232,6 +247,11 @@
 }
 
 
+void ExternalDoubleArray::ExternalDoubleArrayVerify() {
+  ASSERT(IsExternalDoubleArray());
+}
+
+
 void JSObject::JSObjectVerify() {
   VerifyHeapPointer(properties());
   VerifyHeapPointer(elements());
@@ -240,9 +260,9 @@
              (map()->inobject_properties() + properties()->length() -
               map()->NextFreePropertyIndex()));
   }
-  ASSERT(map()->has_fast_elements() ==
-         (elements()->map() == GetHeap()->fixed_array_map() ||
-          elements()->map() == GetHeap()->fixed_cow_array_map()));
+  ASSERT_EQ(map()->has_fast_elements(),
+            (elements()->map() == GetHeap()->fixed_array_map() ||
+             elements()->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_elements() == HasFastElements());
 }
 
@@ -261,7 +281,7 @@
 void Map::SharedMapVerify() {
   MapVerify();
   ASSERT(is_shared());
-  ASSERT_EQ(GetHeap()->empty_descriptor_array(), instance_descriptors());
+  ASSERT(instance_descriptors()->IsEmpty());
   ASSERT_EQ(0, pre_allocated_property_fields());
   ASSERT_EQ(0, unused_property_fields());
   ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
@@ -278,6 +298,12 @@
 }
 
 
+void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
+  VerifyHeapPointer(cache());
+  ASSERT(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
+}
+
+
 void FixedArray::FixedArrayVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);
@@ -290,6 +316,18 @@
 }
 
 
+void FixedDoubleArray::FixedDoubleArrayVerify() {
+  for (int i = 0; i < length(); i++) {
+    if (!is_the_hole(i)) {
+      double value = get_scalar(i);
+      ASSERT(!isnan(value) ||
+             (BitCast<uint64_t>(value) ==
+              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
+    }
+  }
+}
+
+
 void JSValue::JSValueVerify() {
   Object* v = value();
   if (v->IsHeapObject()) {
@@ -317,6 +355,31 @@
   if (IsSymbol()) {
     CHECK(!HEAP->InNewSpace(this));
   }
+  if (IsConsString()) {
+    ConsString::cast(this)->ConsStringVerify();
+  } else if (IsSlicedString()) {
+    SlicedString::cast(this)->SlicedStringVerify();
+  }
+}
+
+
+void ConsString::ConsStringVerify() {
+  CHECK(this->first()->IsString());
+  CHECK(this->second() == GetHeap()->empty_string() ||
+        this->second()->IsString());
+  CHECK(this->length() >= String::kMinNonFlatLength);
+  if (this->IsFlat()) {
+    // A flat cons can only be created by String::SlowTryFlatten.
+    // Afterwards, the first part may be externalized.
+    CHECK(this->first()->IsSeqString() || this->first()->IsExternalString());
+  }
+}
+
+
+void SlicedString::SlicedStringVerify() {
+  CHECK(!this->parent()->IsConsString());
+  CHECK(!this->parent()->IsSlicedString());
+  CHECK(this->length() >= SlicedString::kMinLength);
 }
 
 
@@ -415,7 +478,17 @@
 void JSArray::JSArrayVerify() {
   JSObjectVerify();
   ASSERT(length()->IsNumber() || length()->IsUndefined());
-  ASSERT(elements()->IsUndefined() || elements()->IsFixedArray());
+  ASSERT(elements()->IsUndefined() ||
+         elements()->IsFixedArray() ||
+         elements()->IsFixedDoubleArray());
+}
+
+
+void JSWeakMap::JSWeakMapVerify() {
+  CHECK(IsJSWeakMap());
+  JSObjectVerify();
+  VerifyHeapPointer(table());
+  ASSERT(table()->IsHashTable());
 }
 
 
@@ -433,14 +506,22 @@
 
       FixedArray* arr = FixedArray::cast(data());
       Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
-      // TheHole : Not compiled yet.
+      // Smi : Not compiled yet (-1) or code prepared for flushing.
       // JSObject: Compilation error.
       // Code/ByteArray: Compiled code.
-      ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() ||
-          (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
+      ASSERT(ascii_data->IsSmi() ||
+             (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
       Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
-      ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() ||
-          (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
+      ASSERT(uc16_data->IsSmi() ||
+             (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
+
+      Object* ascii_saved = arr->get(JSRegExp::kIrregexpASCIICodeSavedIndex);
+      ASSERT(ascii_saved->IsSmi() || ascii_saved->IsString() ||
+             ascii_saved->IsCode());
+      Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
+      ASSERT(uc16_saved->IsSmi() || uc16_saved->IsString() ||
+             uc16_saved->IsCode());
+
       ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
       ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
       break;
@@ -453,8 +534,22 @@
 }
 
 
-void Proxy::ProxyVerify() {
-  ASSERT(IsProxy());
+void JSProxy::JSProxyVerify() {
+  ASSERT(IsJSProxy());
+  VerifyPointer(handler());
+}
+
+
+void JSFunctionProxy::JSFunctionProxyVerify() {
+  ASSERT(IsJSFunctionProxy());
+  JSProxyVerify();
+  VerifyPointer(call_trap());
+  VerifyPointer(construct_trap());
+}
+
+
+void Foreign::ForeignVerify() {
+  ASSERT(IsForeign());
 }
 
 
@@ -605,7 +700,7 @@
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      NumberDictionary* dict = element_dictionary();
+      SeededNumberDictionary* dict = element_dictionary();
       info->number_of_slow_used_elements_ += dict->NumberOfElements();
       info->number_of_slow_unused_elements_ +=
           dict->Capacity() - dict->NumberOfElements();
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 65aec5d..e7b6a34 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -35,6 +35,7 @@
 #ifndef V8_OBJECTS_INL_H_
 #define V8_OBJECTS_INL_H_
 
+#include "elements.h"
 #include "objects.h"
 #include "contexts.h"
 #include "conversions-inl.h"
@@ -158,23 +159,33 @@
 }
 
 
+bool Object::IsSpecObject() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
+}
+
+
 bool Object::IsSymbol() {
   if (!this->IsHeapObject()) return false;
   uint32_t type = HeapObject::cast(this)->map()->instance_type();
   // Because the symbol tag is non-zero and no non-string types have the
   // symbol bit set we can test for symbols with a very simple test
   // operation.
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(kSymbolTag != 0);
   ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
   return (type & kIsSymbolMask) != 0;
 }
 
 
 bool Object::IsConsString() {
-  if (!this->IsHeapObject()) return false;
-  uint32_t type = HeapObject::cast(this)->map()->instance_type();
-  return (type & (kIsNotStringMask | kStringRepresentationMask)) ==
-         (kStringTag | kConsStringTag);
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsCons();
+}
+
+
+bool Object::IsSlicedString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsSliced();
 }
 
 
@@ -217,6 +228,10 @@
          String::cast(this)->IsTwoByteRepresentation();
 }
 
+bool Object::HasValidElements() {
+  // Dictionary is covered under FixedArray.
+  return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
+}
 
 StringShape::StringShape(String* str)
   : type_(str->map()->instance_type()) {
@@ -241,7 +256,7 @@
 
 bool StringShape::IsSymbol() {
   ASSERT(valid());
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(kSymbolTag != 0);
   return (type_ & kIsSymbolMask) != 0;
 }
 
@@ -258,6 +273,38 @@
 }
 
 
+bool String::IsAsciiRepresentationUnderneath() {
+  uint32_t type = map()->instance_type();
+  STATIC_ASSERT(kIsIndirectStringTag != 0);
+  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+  ASSERT(IsFlat());
+  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+    case kAsciiStringTag:
+      return true;
+    case kTwoByteStringTag:
+      return false;
+    default:  // Cons or sliced string.  Need to go deeper.
+      return GetUnderlying()->IsAsciiRepresentation();
+  }
+}
+
+
+bool String::IsTwoByteRepresentationUnderneath() {
+  uint32_t type = map()->instance_type();
+  STATIC_ASSERT(kIsIndirectStringTag != 0);
+  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+  ASSERT(IsFlat());
+  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+    case kAsciiStringTag:
+      return false;
+    case kTwoByteStringTag:
+      return true;
+    default:  // Cons or sliced string.  Need to go deeper.
+      return GetUnderlying()->IsTwoByteRepresentation();
+  }
+}
+
+
 bool String::HasOnlyAsciiChars() {
   uint32_t type = map()->instance_type();
   return (type & kStringEncodingMask) == kAsciiStringTag ||
@@ -270,6 +317,16 @@
 }
 
 
+bool StringShape::IsSliced() {
+  return (type_ & kStringRepresentationMask) == kSlicedStringTag;
+}
+
+
+bool StringShape::IsIndirect() {
+  return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
+}
+
+
 bool StringShape::IsExternal() {
   return (type_ & kStringRepresentationMask) == kExternalStringTag;
 }
@@ -286,6 +343,11 @@
 }
 
 
+uint32_t StringShape::encoding_tag() {
+  return type_ & kStringEncodingMask;
+}
+
+
 uint32_t StringShape::full_representation_tag() {
   return (type_ & (kStringRepresentationMask | kStringEncodingMask));
 }
@@ -406,6 +468,13 @@
 }
 
 
+bool Object::IsExternalDoubleArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_DOUBLE_ARRAY_TYPE;
+}
+
+
 bool MaybeObject::IsFailure() {
   return HAS_FAILURE_TAG(this);
 }
@@ -439,9 +508,33 @@
 }
 
 
+bool Object::IsJSReceiver() {
+  return IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
+
+
 bool Object::IsJSObject() {
-  return IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+  return IsJSReceiver() && !IsJSProxy();
+}
+
+
+bool Object::IsJSProxy() {
+  return Object::IsHeapObject() &&
+     (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
+      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
+}
+
+
+bool Object::IsJSFunctionProxy() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE;
+}
+
+
+bool Object::IsJSWeakMap() {
+  return Object::IsJSObject() &&
+      HeapObject::cast(this)->map()->instance_type() == JS_WEAK_MAP_TYPE;
 }
 
 
@@ -464,6 +557,13 @@
 }
 
 
+bool Object::IsFixedDoubleArray() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() ==
+          FIXED_DOUBLE_ARRAY_TYPE;
+}
+
+
 bool Object::IsDescriptorArray() {
   return IsFixedArray();
 }
@@ -498,22 +598,18 @@
 
 bool Object::IsContext() {
   if (Object::IsHeapObject()) {
-    Heap* heap = HeapObject::cast(this)->GetHeap();
-    return (HeapObject::cast(this)->map() == heap->context_map() ||
-            HeapObject::cast(this)->map() == heap->catch_context_map() ||
-            HeapObject::cast(this)->map() == heap->global_context_map());
+    Map* map = HeapObject::cast(this)->map();
+    Heap* heap = map->GetHeap();
+    return (map == heap->function_context_map() ||
+            map == heap->catch_context_map() ||
+            map == heap->with_context_map() ||
+            map == heap->global_context_map() ||
+            map == heap->block_context_map());
   }
   return false;
 }
 
 
-bool Object::IsCatchContext() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->catch_context_map();
-}
-
-
 bool Object::IsGlobalContext() {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
@@ -521,6 +617,13 @@
 }
 
 
+bool Object::IsSerializedScopeInfo() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map() ==
+      HeapObject::cast(this)->GetHeap()->serialized_scope_info_map();
+}
+
+
 bool Object::IsJSFunction() {
   return Object::IsHeapObject()
       && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
@@ -577,9 +680,9 @@
 }
 
 
-bool Object::IsProxy() {
+bool Object::IsForeign() {
   return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
+      && HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
 }
 
 
@@ -614,8 +717,8 @@
 
 
 bool Object::IsDictionary() {
-  return IsHashTable() && this !=
-         HeapObject::cast(this)->GetHeap()->symbol_table();
+  return IsHashTable() &&
+      this != HeapObject::cast(this)->GetHeap()->symbol_table();
 }
 
 
@@ -663,6 +766,11 @@
 }
 
 
+bool Object::IsPolymorphicCodeCacheHashTable() {
+  return IsHashTable();
+}
+
+
 bool Object::IsMapCache() {
   return IsHashTable();
 }
@@ -854,7 +962,7 @@
 #else  // V8_TARGET_ARCH_MIPS
   // Prevent gcc from using load-double (mips ldc1) on (possibly)
   // non-64-bit aligned HeapNumber::value.
-  static inline double read_double_field(HeapNumber* p, int offset) {
+  static inline double read_double_field(void* p, int offset) {
     union conversion {
       double d;
       uint32_t u[2];
@@ -873,7 +981,7 @@
 #else  // V8_TARGET_ARCH_MIPS
   // Prevent gcc from using store-double (mips sdc1) on (possibly)
   // non-64-bit aligned HeapNumber::value.
-  static inline void write_double_field(HeapNumber* p, int offset,
+  static inline void write_double_field(void* p, int offset,
                                         double value) {
     union conversion {
       double d;
@@ -1286,20 +1394,20 @@
 ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
 
 
-HeapObject* JSObject::elements() {
+FixedArrayBase* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
-  // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(array->IsFixedArray() || array->IsExternalArray());
-  return reinterpret_cast<HeapObject*>(array);
+  ASSERT(array->HasValidElements());
+  return static_cast<FixedArrayBase*>(array);
 }
 
 
-void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
+void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
   ASSERT(map()->has_fast_elements() ==
          (value->map() == GetHeap()->fixed_array_map() ||
           value->map() == GetHeap()->fixed_cow_array_map()));
-  // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(value->IsFixedArray() || value->IsExternalArray());
+  ASSERT(map()->has_fast_double_elements() ==
+         value->IsFixedDoubleArray());
+  ASSERT(value->HasValidElements());
   WRITE_FIELD(this, kElementsOffset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
 }
@@ -1374,6 +1482,8 @@
       return JSValue::kSize;
     case JS_ARRAY_TYPE:
       return JSValue::kSize;
+    case JS_WEAK_MAP_TYPE:
+      return JSWeakMap::kSize;
     case JS_REGEXP_TYPE:
       return JSValue::kSize;
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -1547,6 +1657,12 @@
 }
 
 
+FixedArrayBase* FixedArrayBase::cast(Object* object) {
+  ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
+  return reinterpret_cast<FixedArrayBase*>(object);
+}
+
+
 Object* FixedArray::get(int index) {
   ASSERT(index >= 0 && index < this->length());
   return READ_FIELD(this, kHeaderSize + index * kPointerSize);
@@ -1555,6 +1671,7 @@
 
 void FixedArray::set(int index, Smi* value) {
   ASSERT(map() != HEAP->fixed_cow_array_map());
+  ASSERT(index >= 0 && index < this->length());
   ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
@@ -1570,6 +1687,124 @@
 }
 
 
+inline bool FixedDoubleArray::is_the_hole_nan(double value) {
+  return BitCast<uint64_t, double>(value) == kHoleNanInt64;
+}
+
+
+inline double FixedDoubleArray::hole_nan_as_double() {
+  return BitCast<double, uint64_t>(kHoleNanInt64);
+}
+
+
+inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
+  ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
+  ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
+  return OS::nan_value();
+}
+
+
+double FixedDoubleArray::get_scalar(int index) {
+  ASSERT(map() != HEAP->fixed_cow_array_map() &&
+         map() != HEAP->fixed_array_map());
+  ASSERT(index >= 0 && index < this->length());
+  double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
+  ASSERT(!is_the_hole_nan(result));
+  return result;
+}
+
+
+MaybeObject* FixedDoubleArray::get(int index) {
+  if (is_the_hole(index)) {
+    return GetHeap()->the_hole_value();
+  } else {
+    return GetHeap()->NumberFromDouble(get_scalar(index));
+  }
+}
+
+
+void FixedDoubleArray::set(int index, double value) {
+  ASSERT(map() != HEAP->fixed_cow_array_map() &&
+         map() != HEAP->fixed_array_map());
+  int offset = kHeaderSize + index * kDoubleSize;
+  if (isnan(value)) value = canonical_not_the_hole_nan_as_double();
+  WRITE_DOUBLE_FIELD(this, offset, value);
+}
+
+
+void FixedDoubleArray::set_the_hole(int index) {
+  ASSERT(map() != HEAP->fixed_cow_array_map() &&
+         map() != HEAP->fixed_array_map());
+  int offset = kHeaderSize + index * kDoubleSize;
+  WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+}
+
+
+bool FixedDoubleArray::is_the_hole(int index) {
+  int offset = kHeaderSize + index * kDoubleSize;
+  return is_the_hole_nan(READ_DOUBLE_FIELD(this, offset));
+}
+
+
+void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
+  int old_length = from->length();
+  ASSERT(old_length < length());
+  if (old_length * kDoubleSize >= OS::kMinComplexMemCopy) {
+    OS::MemCopy(FIELD_ADDR(this, kHeaderSize),
+                FIELD_ADDR(from, kHeaderSize),
+                old_length * kDoubleSize);
+  } else {
+    for (int i = 0; i < old_length; ++i) {
+      if (from->is_the_hole(i)) {
+        set_the_hole(i);
+      } else {
+        set(i, from->get_scalar(i));
+      }
+    }
+  }
+  int offset = kHeaderSize + old_length * kDoubleSize;
+  for (int current = from->length(); current < length(); ++current) {
+    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+    offset += kDoubleSize;
+  }
+}
+
+
+void FixedDoubleArray::Initialize(FixedArray* from) {
+  int old_length = from->length();
+  ASSERT(old_length < length());
+  for (int i = 0; i < old_length; i++) {
+    Object* hole_or_object = from->get(i);
+    if (hole_or_object->IsTheHole()) {
+      set_the_hole(i);
+    } else {
+      set(i, hole_or_object->Number());
+    }
+  }
+  int offset = kHeaderSize + old_length * kDoubleSize;
+  for (int current = from->length(); current < length(); ++current) {
+    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+    offset += kDoubleSize;
+  }
+}
+
+
+void FixedDoubleArray::Initialize(SeededNumberDictionary* from) {
+  int offset = kHeaderSize;
+  for (int current = 0; current < length(); ++current) {
+    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+    offset += kDoubleSize;
+  }
+  for (int i = 0; i < from->Capacity(); i++) {
+    Object* key = from->KeyAt(i);
+    if (key->IsNumber()) {
+      uint32_t entry = static_cast<uint32_t>(key->Number());
+      set(entry, from->ValueAt(i)->Number());
+    }
+  }
+}
+
+
 WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
   if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
   return UPDATE_WRITE_BARRIER;
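// [Editor's note: illustrative sketch, not part of the patch.]
// FixedDoubleArray above marks missing elements ("holes") with one reserved
// NaN bit pattern and canonicalizes every other NaN on write, so a genuine
// NaN value can never be mistaken for the hole.  kHoleNanBits below is a
// stand-in pattern invented for this sketch, not V8's actual kHoleNanInt64.
#include <stdint.h>
#include <string.h>
#include <cstdio>
#include <limits>

static const uint64_t kHoleNanBits = 0x7FF8DEADBEEFCAFEULL;  // A quiet NaN.

static uint64_t BitsOf(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return bits;
}

static double HoleAsDouble() {
  double d;
  memcpy(&d, &kHoleNanBits, sizeof(d));
  return d;
}

static bool IsHole(double d) { return BitsOf(d) == kHoleNanBits; }

// What FixedDoubleArray::set does for NaNs: store the canonical NaN instead,
// keeping the hole bit pattern exclusively for holes.
static double CanonicalizeOnWrite(double value) {
  if (value != value) return std::numeric_limits<double>::quiet_NaN();
  return value;
}

int main() {
  double slots[3];
  slots[0] = CanonicalizeOnWrite(1.5);
  slots[1] = HoleAsDouble();                  // like set_the_hole(1)
  slots[2] = CanonicalizeOnWrite(0.0 / 0.0);  // a real NaN, not a hole
  for (int i = 0; i < 3; ++i) {
    printf("slot %d: %s\n", i, IsHole(slots[i]) ? "the hole" : "a value");
  }
  return 0;
}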
@@ -1661,9 +1896,21 @@
 
 
 bool DescriptorArray::IsEmpty() {
-  ASSERT(this->length() > kFirstIndex ||
+  ASSERT(this->IsSmi() ||
+         this->length() > kFirstIndex ||
          this == HEAP->empty_descriptor_array());
-  return length() <= kFirstIndex;
+  return this->IsSmi() || length() <= kFirstIndex;
+}
+
+
+int DescriptorArray::bit_field3_storage() {
+  Object* storage = READ_FIELD(this, kBitField3StorageOffset);
+  return Smi::cast(storage)->value();
+}
+
+void DescriptorArray::set_bit_field3_storage(int value) {
+  ASSERT(!IsEmpty());
+  WRITE_FIELD(this, kBitField3StorageOffset, Smi::FromInt(value));
 }
 
 
@@ -1744,8 +1991,8 @@
 
 AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
   ASSERT(GetType(descriptor_number) == CALLBACKS);
-  Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number));
-  return reinterpret_cast<AccessorDescriptor*>(p->proxy());
+  Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
+  return reinterpret_cast<AccessorDescriptor*>(p->address());
 }
 
 
@@ -1757,7 +2004,7 @@
 bool DescriptorArray::IsTransition(int descriptor_number) {
   PropertyType t = GetType(descriptor_number);
   return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
-      t == EXTERNAL_ARRAY_TRANSITION;
+      t == ELEMENTS_TRANSITION;
 }
 
 
@@ -1810,6 +2057,17 @@
 
 
 template<typename Shape, typename Key>
+int HashTable<Shape, Key>::ComputeCapacity(int at_least_space_for) {
+  const int kMinCapacity = 32;
+  int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
+  if (capacity < kMinCapacity) {
+    capacity = kMinCapacity;  // Guarantee min capacity.
+  }
+  return capacity;
+}
+
+
+template<typename Shape, typename Key>
 int HashTable<Shape, Key>::FindEntry(Key key) {
   return FindEntry(GetIsolate(), key);
 }
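// [Editor's note: illustrative sketch, not part of the patch.]
// ComputeCapacity above doubles the requested element count and rounds up to
// a power of two (with a floor of 32), which keeps the table at most about
// half full and lets the probe sequence wrap with a mask instead of a modulo.
// RoundUpToPowerOf2 here is a local stand-in for V8's utility of that name.
#include <stdint.h>
#include <cstdio>

static uint32_t RoundUpToPowerOf2(uint32_t x) {
  x -= 1;  // Smear the highest set bit downwards, then add one back.
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

static int ComputeCapacity(int at_least_space_for) {
  const int kMinCapacity = 32;
  int capacity = static_cast<int>(RoundUpToPowerOf2(at_least_space_for * 2));
  return capacity < kMinCapacity ? kMinCapacity : capacity;
}

int main() {
  for (int n = 1; n <= 1000; n *= 10) {
    printf("space for %4d -> capacity %4d\n", n, ComputeCapacity(n));
  }
  // With a power-of-two capacity, probing wraps cheaply:
  //   entry = (entry + count) & (capacity - 1);
  return 0;
}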
@@ -1819,13 +2077,14 @@
 template<typename Shape, typename Key>
 int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
   uint32_t capacity = Capacity();
-  uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
+  uint32_t entry = FirstProbe(HashTable<Shape, Key>::Hash(key), capacity);
   uint32_t count = 1;
   // EnsureCapacity will guarantee the hash table is never full.
   while (true) {
     Object* element = KeyAt(entry);
-    if (element == isolate->heap()->undefined_value()) break;  // Empty entry.
-    if (element != isolate->heap()->null_value() &&
+    // Empty entry.
+    if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
+    if (element != isolate->heap()->raw_unchecked_null_value() &&
         Shape::IsMatch(key, element)) return entry;
     entry = NextProbe(entry, count++, capacity);
   }
@@ -1833,14 +2092,14 @@
 }
 
 
-bool NumberDictionary::requires_slow_elements() {
+bool SeededNumberDictionary::requires_slow_elements() {
   Object* max_index_object = get(kMaxNumberKeyIndex);
   if (!max_index_object->IsSmi()) return false;
   return 0 !=
       (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
 }
 
-uint32_t NumberDictionary::max_number_key() {
+uint32_t SeededNumberDictionary::max_number_key() {
   ASSERT(!requires_slow_elements());
   Object* max_index_object = get(kMaxNumberKeyIndex);
   if (!max_index_object->IsSmi()) return 0;
@@ -1848,7 +2107,7 @@
   return value >> kRequiresSlowElementsTagSize;
 }
 
-void NumberDictionary::set_requires_slow_elements() {
+void SeededNumberDictionary::set_requires_slow_elements() {
   set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
 }
 
@@ -1858,6 +2117,7 @@
 
 
 CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedDoubleArray)
 CAST_ACCESSOR(DescriptorArray)
 CAST_ACCESSOR(DeoptimizationInputData)
 CAST_ACCESSOR(DeoptimizationOutputData)
@@ -1866,15 +2126,18 @@
 CAST_ACCESSOR(NormalizedMapCache)
 CAST_ACCESSOR(CompilationCacheTable)
 CAST_ACCESSOR(CodeCacheHashTable)
+CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
 CAST_ACCESSOR(MapCache)
 CAST_ACCESSOR(String)
 CAST_ACCESSOR(SeqString)
 CAST_ACCESSOR(SeqAsciiString)
 CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(SlicedString)
 CAST_ACCESSOR(ConsString)
 CAST_ACCESSOR(ExternalString)
 CAST_ACCESSOR(ExternalAsciiString)
 CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(JSReceiver)
 CAST_ACCESSOR(JSObject)
 CAST_ACCESSOR(Smi)
 CAST_ACCESSOR(HeapObject)
@@ -1891,7 +2154,10 @@
 CAST_ACCESSOR(Code)
 CAST_ACCESSOR(JSArray)
 CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(Proxy)
+CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSFunctionProxy)
+CAST_ACCESSOR(JSWeakMap)
+CAST_ACCESSOR(Foreign)
 CAST_ACCESSOR(ByteArray)
 CAST_ACCESSOR(ExternalArray)
 CAST_ACCESSOR(ExternalByteArray)
@@ -1901,6 +2167,7 @@
 CAST_ACCESSOR(ExternalIntArray)
 CAST_ACCESSOR(ExternalUnsignedIntArray)
 CAST_ACCESSOR(ExternalFloatArray)
+CAST_ACCESSOR(ExternalDoubleArray)
 CAST_ACCESSOR(ExternalPixelArray)
 CAST_ACCESSOR(Struct)
 
@@ -1917,11 +2184,7 @@
 }
 
 
-SMI_ACCESSORS(FixedArray, length, kLengthOffset)
-SMI_ACCESSORS(ByteArray, length, kLengthOffset)
-
-INT_ACCESSORS(ExternalArray, length, kLengthOffset)
-
+SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
 
 SMI_ACCESSORS(String, length, kLengthOffset)
 
@@ -1951,7 +2214,7 @@
 MaybeObject* String::TryFlatten(PretenureFlag pretenure) {
   if (!StringShape(this).IsCons()) return this;
   ConsString* cons = ConsString::cast(this);
-  if (cons->second()->length() == 0) return cons->first();
+  if (cons->IsFlat()) return cons->first();
   return SlowTryFlatten(pretenure);
 }
 
@@ -1959,10 +2222,8 @@
 String* String::TryFlattenGetString(PretenureFlag pretenure) {
   MaybeObject* flat = TryFlatten(pretenure);
   Object* successfully_flattened;
-  if (flat->ToObject(&successfully_flattened)) {
-    return String::cast(successfully_flattened);
-  }
-  return this;
+  if (!flat->ToObject(&successfully_flattened)) return this;
+  return String::cast(successfully_flattened);
 }
 
 
@@ -1980,6 +2241,9 @@
       return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
     case kExternalStringTag | kTwoByteStringTag:
       return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
+    case kSlicedStringTag | kAsciiStringTag:
+    case kSlicedStringTag | kTwoByteStringTag:
+      return SlicedString::cast(this)->SlicedStringGet(index);
     default:
       break;
   }
@@ -2000,15 +2264,19 @@
 
 
 bool String::IsFlat() {
-  switch (StringShape(this).representation_tag()) {
-    case kConsStringTag: {
-      String* second = ConsString::cast(this)->second();
-      // Only flattened strings have second part empty.
-      return second->length() == 0;
-    }
-    default:
-      return true;
-  }
+  if (!StringShape(this).IsCons()) return true;
+  return ConsString::cast(this)->second()->length() == 0;
+}
+
+
+String* String::GetUnderlying() {
+  // Giving direct access to the underlying string only makes sense if the
+  // wrapping string is already flattened.
+  ASSERT(this->IsFlat());
+  ASSERT(StringShape(this).IsIndirect());
+  STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
+  const int kUnderlyingOffset = SlicedString::kParentOffset;
+  return String::cast(READ_FIELD(this, kUnderlyingOffset));
 }
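// [Editor's note: illustrative sketch, not part of the patch.]
// GetUnderlying above works for both indirect string kinds because ConsString
// and SlicedString keep their target at the same field offset.  A SlicedString
// itself is just a (parent, offset, length) view over an already flat string,
// so substrings can share the parent's characters instead of copying them.
// A plain C++ analogue of that view:
#include <cstdio>
#include <string>

struct SlicedStringView {
  const std::string* parent;  // Must already be flat; std::string always is.
  int offset;
  int length;
  char At(int i) const { return (*parent)[offset + i]; }
};

int main() {
  std::string parent = "the quick brown fox";
  SlicedStringView slice = { &parent, 4, 5 };  // "quick", no characters copied.
  for (int i = 0; i < slice.length; ++i) putchar(slice.At(i));
  putchar('\n');
  return 0;
}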
 
 
@@ -2067,6 +2335,20 @@
 }
 
 
+String* SlicedString::parent() {
+  return String::cast(READ_FIELD(this, kParentOffset));
+}
+
+
+void SlicedString::set_parent(String* parent) {
+  ASSERT(parent->IsSeqString());
+  WRITE_FIELD(this, kParentOffset, parent);
+}
+
+
+SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
+
+
 String* ConsString::first() {
   return String::cast(READ_FIELD(this, kFirstOffset));
 }
@@ -2191,13 +2473,18 @@
 }
 
 
-uint8_t ExternalPixelArray::get(int index) {
+uint8_t ExternalPixelArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   uint8_t* ptr = external_pixel_pointer();
   return ptr[index];
 }
 
 
+MaybeObject* ExternalPixelArray::get(int index) {
+  return Smi::FromInt(static_cast<int>(get_scalar(index)));
+}
+
+
 void ExternalPixelArray::set(int index, uint8_t value) {
   ASSERT((index >= 0) && (index < this->length()));
   uint8_t* ptr = external_pixel_pointer();
@@ -2217,13 +2504,18 @@
 }
 
 
-int8_t ExternalByteArray::get(int index) {
+int8_t ExternalByteArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   int8_t* ptr = static_cast<int8_t*>(external_pointer());
   return ptr[index];
 }
 
 
+MaybeObject* ExternalByteArray::get(int index) {
+  return Smi::FromInt(static_cast<int>(get_scalar(index)));
+}
+
+
 void ExternalByteArray::set(int index, int8_t value) {
   ASSERT((index >= 0) && (index < this->length()));
   int8_t* ptr = static_cast<int8_t*>(external_pointer());
@@ -2231,13 +2523,18 @@
 }
 
 
-uint8_t ExternalUnsignedByteArray::get(int index) {
+uint8_t ExternalUnsignedByteArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
   return ptr[index];
 }
 
 
+MaybeObject* ExternalUnsignedByteArray::get(int index) {
+  return Smi::FromInt(static_cast<int>(get_scalar(index)));
+}
+
+
 void ExternalUnsignedByteArray::set(int index, uint8_t value) {
   ASSERT((index >= 0) && (index < this->length()));
   uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
@@ -2245,13 +2542,18 @@
 }
 
 
-int16_t ExternalShortArray::get(int index) {
+int16_t ExternalShortArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   int16_t* ptr = static_cast<int16_t*>(external_pointer());
   return ptr[index];
 }
 
 
+MaybeObject* ExternalShortArray::get(int index) {
+  return Smi::FromInt(static_cast<int>(get_scalar(index)));
+}
+
+
 void ExternalShortArray::set(int index, int16_t value) {
   ASSERT((index >= 0) && (index < this->length()));
   int16_t* ptr = static_cast<int16_t*>(external_pointer());
@@ -2259,13 +2561,18 @@
 }
 
 
-uint16_t ExternalUnsignedShortArray::get(int index) {
+uint16_t ExternalUnsignedShortArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
   return ptr[index];
 }
 
 
+MaybeObject* ExternalUnsignedShortArray::get(int index) {
+  return Smi::FromInt(static_cast<int>(get_scalar(index)));
+}
+
+
 void ExternalUnsignedShortArray::set(int index, uint16_t value) {
   ASSERT((index >= 0) && (index < this->length()));
   uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
@@ -2273,13 +2580,18 @@
 }
 
 
-int32_t ExternalIntArray::get(int index) {
+int32_t ExternalIntArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   int32_t* ptr = static_cast<int32_t*>(external_pointer());
   return ptr[index];
 }
 
 
+MaybeObject* ExternalIntArray::get(int index) {
+  return GetHeap()->NumberFromInt32(get_scalar(index));
+}
+
+
 void ExternalIntArray::set(int index, int32_t value) {
   ASSERT((index >= 0) && (index < this->length()));
   int32_t* ptr = static_cast<int32_t*>(external_pointer());
@@ -2287,13 +2599,18 @@
 }
 
 
-uint32_t ExternalUnsignedIntArray::get(int index) {
+uint32_t ExternalUnsignedIntArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
   return ptr[index];
 }
 
 
+MaybeObject* ExternalUnsignedIntArray::get(int index) {
+  return GetHeap()->NumberFromUint32(get_scalar(index));
+}
+
+
 void ExternalUnsignedIntArray::set(int index, uint32_t value) {
   ASSERT((index >= 0) && (index < this->length()));
   uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
@@ -2301,13 +2618,18 @@
 }
 
 
-float ExternalFloatArray::get(int index) {
+float ExternalFloatArray::get_scalar(int index) {
   ASSERT((index >= 0) && (index < this->length()));
   float* ptr = static_cast<float*>(external_pointer());
   return ptr[index];
 }
 
 
+MaybeObject* ExternalFloatArray::get(int index) {
+  return GetHeap()->NumberFromDouble(get_scalar(index));
+}
+
+
 void ExternalFloatArray::set(int index, float value) {
   ASSERT((index >= 0) && (index < this->length()));
   float* ptr = static_cast<float*>(external_pointer());
@@ -2315,6 +2637,25 @@
 }
 
 
+double ExternalDoubleArray::get_scalar(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  double* ptr = static_cast<double*>(external_pointer());
+  return ptr[index];
+}
+
+
+MaybeObject* ExternalDoubleArray::get(int index) {
+  return GetHeap()->NumberFromDouble(get_scalar(index));
+}
+
+
+void ExternalDoubleArray::set(int index, double value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  double* ptr = static_cast<double*>(external_pointer());
+  ptr[index] = value;
+}
+
+
 int Map::visitor_id() {
   return READ_BYTE_FIELD(this, kVisitorIdOffset);
 }
@@ -2362,6 +2703,10 @@
     return SeqTwoByteString::SizeFor(
         reinterpret_cast<SeqTwoByteString*>(this)->length());
   }
+  if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
+    return FixedDoubleArray::SizeFor(
+        reinterpret_cast<FixedDoubleArray*>(this)->length());
+  }
   ASSERT(instance_type == CODE_TYPE);
   return reinterpret_cast<Code*>(this)->CodeSize();
 }
@@ -2499,14 +2844,14 @@
 
 void Map::set_is_shared(bool value) {
   if (value) {
-    set_bit_field2(bit_field2() | (1 << kIsShared));
+    set_bit_field3(bit_field3() | (1 << kIsShared));
   } else {
-    set_bit_field2(bit_field2() & ~(1 << kIsShared));
+    set_bit_field3(bit_field3() & ~(1 << kIsShared));
   }
 }
 
 bool Map::is_shared() {
-  return ((1 << kIsShared) & bit_field2()) != 0;
+  return ((1 << kIsShared) & bit_field3()) != 0;
 }
 
 
@@ -2527,7 +2872,7 @@
 
 
 void Code::set_flags(Code::Flags flags) {
-  STATIC_ASSERT(Code::NUMBER_OF_KINDS <= (kFlagsKindMask >> kFlagsKindShift)+1);
+  STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
   // Make sure that all call stubs have an arguments count.
   ASSERT((ExtractKindFromFlags(flags) != CALL_IC &&
           ExtractKindFromFlags(flags) != KEYED_CALL_IC) ||
@@ -2541,11 +2886,6 @@
 }
 
 
-InLoopFlag Code::ic_in_loop() {
-  return ExtractICInLoopFromFlags(flags());
-}
-
-
 InlineCacheState Code::ic_state() {
   InlineCacheState result = ExtractICStateFromFlags(flags());
   // Only allow uninitialized or debugger states for non-IC code
@@ -2566,7 +2906,6 @@
 
 
 PropertyType Code::type() {
-  ASSERT(ic_state() == MONOMORPHIC);
   return ExtractTypeFromFlags(flags());
 }
 
@@ -2579,16 +2918,20 @@
 
 int Code::major_key() {
   ASSERT(kind() == STUB ||
-         kind() == TYPE_RECORDING_BINARY_OP_IC ||
-         kind() == COMPARE_IC);
+         kind() == UNARY_OP_IC ||
+         kind() == BINARY_OP_IC ||
+         kind() == COMPARE_IC ||
+         kind() == TO_BOOLEAN_IC);
   return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
 }
 
 
 void Code::set_major_key(int major) {
   ASSERT(kind() == STUB ||
-         kind() == TYPE_RECORDING_BINARY_OP_IC ||
-         kind() == COMPARE_IC);
+         kind() == UNARY_OP_IC ||
+         kind() == BINARY_OP_IC ||
+         kind() == COMPARE_IC ||
+         kind() == TO_BOOLEAN_IC);
   ASSERT(0 <= major && major < 256);
   WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
 }
@@ -2608,13 +2951,31 @@
 
 bool Code::has_deoptimization_support() {
   ASSERT(kind() == FUNCTION);
-  return READ_BYTE_FIELD(this, kHasDeoptimizationSupportOffset) == 1;
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  return FullCodeFlagsHasDeoptimizationSupportField::decode(flags);
 }
 
 
 void Code::set_has_deoptimization_support(bool value) {
   ASSERT(kind() == FUNCTION);
-  WRITE_BYTE_FIELD(this, kHasDeoptimizationSupportOffset, value ? 1 : 0);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value);
+  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+}
+
+
+bool Code::has_debug_break_slots() {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  return FullCodeFlagsHasDebugBreakSlotsField::decode(flags);
+}
+
+
+void Code::set_has_debug_break_slots(bool value) {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  flags = FullCodeFlagsHasDebugBreakSlotsField::update(flags, value);
+  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
 }
 
 
@@ -2682,39 +3043,38 @@
 }
 
 
-ExternalArrayType Code::external_array_type() {
-  ASSERT(is_external_array_load_stub() || is_external_array_store_stub());
-  byte type = READ_BYTE_FIELD(this, kExternalArrayTypeOffset);
-  return static_cast<ExternalArrayType>(type);
+byte Code::unary_op_type() {
+  ASSERT(is_unary_op_stub());
+  return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
 }
 
 
-void Code::set_external_array_type(ExternalArrayType value) {
-  ASSERT(is_external_array_load_stub() || is_external_array_store_stub());
-  WRITE_BYTE_FIELD(this, kExternalArrayTypeOffset, value);
+void Code::set_unary_op_type(byte value) {
+  ASSERT(is_unary_op_stub());
+  WRITE_BYTE_FIELD(this, kUnaryOpTypeOffset, value);
 }
 
 
-byte Code::type_recording_binary_op_type() {
-  ASSERT(is_type_recording_binary_op_stub());
+byte Code::binary_op_type() {
+  ASSERT(is_binary_op_stub());
   return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
 }
 
 
-void Code::set_type_recording_binary_op_type(byte value) {
-  ASSERT(is_type_recording_binary_op_stub());
+void Code::set_binary_op_type(byte value) {
+  ASSERT(is_binary_op_stub());
   WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
 }
 
 
-byte Code::type_recording_binary_op_result_type() {
-  ASSERT(is_type_recording_binary_op_stub());
+byte Code::binary_op_result_type() {
+  ASSERT(is_binary_op_stub());
   return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
 }
 
 
-void Code::set_type_recording_binary_op_result_type(byte value) {
-  ASSERT(is_type_recording_binary_op_stub());
+void Code::set_binary_op_result_type(byte value) {
+  ASSERT(is_binary_op_stub());
   WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
 }
 
@@ -2731,6 +3091,17 @@
 }
 
 
+byte Code::to_boolean_state() {
+  ASSERT(is_to_boolean_ic_stub());
+  return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
+}
+
+
+void Code::set_to_boolean_state(byte value) {
+  ASSERT(is_to_boolean_ic_stub());
+  WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
+}
+
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -2738,36 +3109,25 @@
 
 
 Code::Flags Code::ComputeFlags(Kind kind,
-                               InLoopFlag in_loop,
                                InlineCacheState ic_state,
                                ExtraICState extra_ic_state,
                                PropertyType type,
                                int argc,
                                InlineCacheHolderFlag holder) {
-  // Extra IC state is only allowed for monomorphic call IC stubs
-  // or for store IC stubs.
+  // Extra IC state is only allowed for call IC stubs or for store IC
+  // stubs.
   ASSERT(extra_ic_state == kNoExtraICState ||
-         (kind == CALL_IC && (ic_state == MONOMORPHIC ||
-                              ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
-         (kind == STORE_IC) ||
-         (kind == KEYED_STORE_IC));
+         kind == CALL_IC ||
+         kind == STORE_IC ||
+         kind == KEYED_STORE_IC);
   // Compute the bit mask.
-  int bits = kind << kFlagsKindShift;
-  if (in_loop) bits |= kFlagsICInLoopMask;
-  bits |= ic_state << kFlagsICStateShift;
-  bits |= type << kFlagsTypeShift;
-  bits |= extra_ic_state << kFlagsExtraICStateShift;
-  bits |= argc << kFlagsArgumentsCountShift;
-  if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
-  // Cast to flags and validate result before returning it.
-  Flags result = static_cast<Flags>(bits);
-  ASSERT(ExtractKindFromFlags(result) == kind);
-  ASSERT(ExtractICStateFromFlags(result) == ic_state);
-  ASSERT(ExtractICInLoopFromFlags(result) == in_loop);
-  ASSERT(ExtractTypeFromFlags(result) == type);
-  ASSERT(ExtractExtraICStateFromFlags(result) == extra_ic_state);
-  ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
-  return result;
+  int bits = KindField::encode(kind)
+      | ICStateField::encode(ic_state)
+      | TypeField::encode(type)
+      | ExtraICStateField::encode(extra_ic_state)
+      | (argc << kArgumentsCountShift)
+      | CacheHolderField::encode(holder);
+  return static_cast<Flags>(bits);
 }
 
 
@@ -2775,56 +3135,43 @@
                                           PropertyType type,
                                           ExtraICState extra_ic_state,
                                           InlineCacheHolderFlag holder,
-                                          InLoopFlag in_loop,
                                           int argc) {
-  return ComputeFlags(
-      kind, in_loop, MONOMORPHIC, extra_ic_state, type, argc, holder);
+  return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
 }
 
 
 Code::Kind Code::ExtractKindFromFlags(Flags flags) {
-  int bits = (flags & kFlagsKindMask) >> kFlagsKindShift;
-  return static_cast<Kind>(bits);
+  return KindField::decode(flags);
 }
 
 
 InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
-  int bits = (flags & kFlagsICStateMask) >> kFlagsICStateShift;
-  return static_cast<InlineCacheState>(bits);
+  return ICStateField::decode(flags);
 }
 
 
 Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
-  int bits = (flags & kFlagsExtraICStateMask) >> kFlagsExtraICStateShift;
-  return static_cast<ExtraICState>(bits);
-}
-
-
-InLoopFlag Code::ExtractICInLoopFromFlags(Flags flags) {
-  int bits = (flags & kFlagsICInLoopMask);
-  return bits != 0 ? IN_LOOP : NOT_IN_LOOP;
+  return ExtraICStateField::decode(flags);
 }
 
 
 PropertyType Code::ExtractTypeFromFlags(Flags flags) {
-  int bits = (flags & kFlagsTypeMask) >> kFlagsTypeShift;
-  return static_cast<PropertyType>(bits);
+  return TypeField::decode(flags);
 }
 
 
 int Code::ExtractArgumentsCountFromFlags(Flags flags) {
-  return (flags & kFlagsArgumentsCountMask) >> kFlagsArgumentsCountShift;
+  return (flags & kArgumentsCountMask) >> kArgumentsCountShift;
 }
 
 
 InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
-  int bits = (flags & kFlagsCacheInPrototypeMapMask);
-  return bits != 0 ? PROTOTYPE_MAP : OWN_MAP;
+  return CacheHolderField::decode(flags);
 }
 
 
 Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
-  int bits = flags & ~kFlagsTypeMask;
+  int bits = flags & ~TypeField::kMask;
   return static_cast<Flags>(bits);
 }
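// [Editor's note: illustrative sketch, not part of the patch.]
// The rewritten flag code above composes and takes apart Code::Flags with
// typed BitField helpers (KindField, ICStateField, ...) instead of
// hand-maintained shift and mask constants.  V8 has its own BitField template;
// the minimal stand-in below only shows the shape of that helper.
#include <stdint.h>
#include <cstdio>

template <class T, int shift, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1u) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & mask()) >> shift);
  }
  static uint32_t update(uint32_t word, T value) {
    return (word & ~mask()) | encode(value);
  }
};

// Invented kinds and states for the demo; the real enums live elsewhere.
enum DemoKind { DEMO_STUB, DEMO_CALL_IC, DEMO_STORE_IC };
enum DemoState { DEMO_UNINITIALIZED, DEMO_MONOMORPHIC, DEMO_MEGAMORPHIC };

typedef BitField<DemoKind, 0, 4> DemoKindField;
typedef BitField<DemoState, 4, 3> DemoStateField;

int main() {
  uint32_t flags = DemoKindField::encode(DEMO_CALL_IC) |
                   DemoStateField::encode(DEMO_MONOMORPHIC);
  printf("kind=%d state=%d\n",
         DemoKindField::decode(flags), DemoStateField::decode(flags));
  flags = DemoStateField::update(flags, DEMO_MEGAMORPHIC);
  printf("after update: state=%d\n", DemoStateField::decode(flags));
  return 0;
}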
 
@@ -2894,7 +3241,7 @@
 
 
 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
-  ASSERT(value->IsNull() || value->IsJSObject());
+  ASSERT(value->IsNull() || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
 }
@@ -2907,27 +3254,114 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   Map* new_map = Map::cast(obj);
-  new_map->set_has_fast_elements(true);
-  isolate()->counters()->map_slow_to_fast_elements()->Increment();
+  new_map->set_elements_kind(FAST_ELEMENTS);
+  isolate()->counters()->map_to_fast_elements()->Increment();
   return new_map;
 }
 
 
-MaybeObject* Map::GetSlowElementsMap() {
-  if (!has_fast_elements()) return this;
+MaybeObject* Map::GetFastDoubleElementsMap() {
+  if (has_fast_double_elements()) return this;
   Object* obj;
   { MaybeObject* maybe_obj = CopyDropTransitions();
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   Map* new_map = Map::cast(obj);
-  new_map->set_has_fast_elements(false);
-  isolate()->counters()->map_fast_to_slow_elements()->Increment();
+  new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+  isolate()->counters()->map_to_fast_double_elements()->Increment();
   return new_map;
 }
 
 
-ACCESSORS(Map, instance_descriptors, DescriptorArray,
-          kInstanceDescriptorsOffset)
+MaybeObject* Map::GetSlowElementsMap() {
+  if (!has_fast_elements() && !has_fast_double_elements()) return this;
+  Object* obj;
+  { MaybeObject* maybe_obj = CopyDropTransitions();
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  Map* new_map = Map::cast(obj);
+  new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+  isolate()->counters()->map_to_slow_elements()->Increment();
+  return new_map;
+}
+
+
+DescriptorArray* Map::instance_descriptors() {
+  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
+  if (object->IsSmi()) {
+    return HEAP->empty_descriptor_array();
+  } else {
+    return DescriptorArray::cast(object);
+  }
+}
+
+
+void Map::init_instance_descriptors() {
+  WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, Smi::FromInt(0));
+}
+
+
+void Map::clear_instance_descriptors() {
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (!object->IsSmi()) {
+    WRITE_FIELD(
+        this,
+        kInstanceDescriptorsOrBitField3Offset,
+        Smi::FromInt(DescriptorArray::cast(object)->bit_field3_storage()));
+  }
+}
+
+
+void Map::set_instance_descriptors(DescriptorArray* value,
+                                   WriteBarrierMode mode) {
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (value == isolate()->heap()->empty_descriptor_array()) {
+    clear_instance_descriptors();
+    return;
+  } else {
+    if (object->IsSmi()) {
+      value->set_bit_field3_storage(Smi::cast(object)->value());
+    } else {
+      value->set_bit_field3_storage(
+          DescriptorArray::cast(object)->bit_field3_storage());
+    }
+  }
+  ASSERT(!is_shared());
+  WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(),
+                            this,
+                            kInstanceDescriptorsOrBitField3Offset,
+                            mode);
+}
+
+
+int Map::bit_field3() {
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (object->IsSmi()) {
+    return Smi::cast(object)->value();
+  } else {
+    return DescriptorArray::cast(object)->bit_field3_storage();
+  }
+}
+
+
+void Map::set_bit_field3(int value) {
+  ASSERT(Smi::IsValid(value));
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (object->IsSmi()) {
+    WRITE_FIELD(this,
+                kInstanceDescriptorsOrBitField3Offset,
+                Smi::FromInt(value));
+  } else {
+    DescriptorArray::cast(object)->set_bit_field3_storage(value);
+  }
+}
+
+
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
 ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
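// [Editor's note: illustrative sketch, not part of the patch.]
// The Map change above overlays two fields in one slot: while a map has no
// real descriptor array the slot holds bit_field3 directly as a Smi, and once
// a descriptor array is installed the integer migrates into a storage slot
// inside that array.  The discriminator is simply "is the word a tagged small
// integer or a pointer".  The stand-alone imitation below tags the integer
// with a low bit; V8's real Smi encoding differs in detail.
#include <stdint.h>
#include <cassert>
#include <cstdio>

struct Descriptors {
  int bit_field3_storage;
  // ... real descriptor payload would follow ...
};

class OverlaidSlot {
 public:
  OverlaidSlot() : raw_(EncodeInt(0)) {}  // Starts out holding the integer 0.

  bool holds_int() const { return (raw_ & 1) != 0; }

  int bit_field3() const {
    return holds_int() ? DecodeInt(raw_) : descriptors()->bit_field3_storage;
  }

  void set_bit_field3(int value) {
    if (holds_int()) raw_ = EncodeInt(value);
    else descriptors()->bit_field3_storage = value;
  }

  void set_descriptors(Descriptors* d) {
    d->bit_field3_storage = bit_field3();   // Preserve the integer on migration.
    raw_ = reinterpret_cast<uintptr_t>(d);  // Aligned pointers have bit 0 clear.
  }

 private:
  static uintptr_t EncodeInt(int v) {
    return (static_cast<uintptr_t>(static_cast<uint32_t>(v)) << 1) | 1u;
  }
  static int DecodeInt(uintptr_t raw) {
    return static_cast<int>(static_cast<uint32_t>(raw >> 1));
  }
  Descriptors* descriptors() const {
    assert(!holds_int());
    return reinterpret_cast<Descriptors*>(raw_);
  }
  uintptr_t raw_;
};

int main() {
  OverlaidSlot slot;
  slot.set_bit_field3(42);
  Descriptors descs;
  slot.set_descriptors(&descs);
  printf("bit_field3 survives the switch: %d\n", slot.bit_field3());  // 42
  return 0;
}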
@@ -3003,7 +3437,7 @@
 ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
 ACCESSORS(Script, data, Object, kDataOffset)
 ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
+ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
 ACCESSORS(Script, type, Smi, kTypeOffset)
 ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
 ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
@@ -3040,17 +3474,28 @@
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
                kNeedsAccessCheckBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
+               kReadOnlyPrototypeBit)
 BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
                kIsExpressionBit)
 BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
                kIsTopLevelBit)
-BOOL_GETTER(SharedFunctionInfo, compiler_hints,
+BOOL_GETTER(SharedFunctionInfo,
+            compiler_hints,
             has_only_simple_this_property_assignments,
             kHasOnlySimpleThisPropertyAssignments)
 BOOL_ACCESSORS(SharedFunctionInfo,
                compiler_hints,
                allows_lazy_compilation,
                kAllowLazyCompilation)
+BOOL_ACCESSORS(SharedFunctionInfo,
+               compiler_hints,
+               uses_arguments,
+               kUsesArguments)
+BOOL_ACCESSORS(SharedFunctionInfo,
+               compiler_hints,
+               has_duplicate_parameters,
+               kHasDuplicateParameters)
 
 
 #if V8_HOST_ARCH_32_BIT
@@ -3134,18 +3579,10 @@
 }
 
 
-bool SharedFunctionInfo::live_objects_may_exist() {
-  return (compiler_hints() & (1 << kLiveObjectsMayExist)) != 0;
-}
-
-
-void SharedFunctionInfo::set_live_objects_may_exist(bool value) {
-  if (value) {
-    set_compiler_hints(compiler_hints() | (1 << kLiveObjectsMayExist));
-  } else {
-    set_compiler_hints(compiler_hints() & ~(1 << kLiveObjectsMayExist));
-  }
-}
+BOOL_ACCESSORS(SharedFunctionInfo,
+               compiler_hints,
+               live_objects_may_exist,
+               kLiveObjectsMayExist)
 
 
 bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
@@ -3153,9 +3590,10 @@
 }
 
 
-bool SharedFunctionInfo::optimization_disabled() {
-  return BooleanBit::get(compiler_hints(), kOptimizationDisabled);
-}
+BOOL_GETTER(SharedFunctionInfo,
+            compiler_hints,
+            optimization_disabled,
+            kOptimizationDisabled)
 
 
 void SharedFunctionInfo::set_optimization_disabled(bool disable) {
@@ -3170,21 +3608,20 @@
 }
 
 
-bool SharedFunctionInfo::strict_mode() {
-  return BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-
-void SharedFunctionInfo::set_strict_mode(bool value) {
-  set_compiler_hints(BooleanBit::set(compiler_hints(),
-                                     kStrictModeFunction,
-                                     value));
-}
-
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, strict_mode,
+               kStrictModeFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
+               name_should_print_as_anonymous,
+               kNameShouldPrintAsAnonymous)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
 
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
 
+ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
+
 bool Script::HasValidSource() {
   Object* src = this->source();
   if (!src->IsString()) return true;
@@ -3473,13 +3910,35 @@
 }
 
 
-Address Proxy::proxy() {
-  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
+ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
+ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
+
+
+void JSProxy::InitializeBody(int object_size, Object* value) {
+  ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
+  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
+    WRITE_FIELD(this, offset, value);
+  }
 }
 
 
-void Proxy::set_proxy(Address value) {
-  WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
+ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset)
+ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset)
+
+
+ObjectHashTable* JSWeakMap::unchecked_table() {
+  return reinterpret_cast<ObjectHashTable*>(READ_FIELD(this, kTableOffset));
+}
+
+
+Address Foreign::address() {
+  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
+}
+
+
+void Foreign::set_address(Address value) {
+  WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
 }
 
 
@@ -3512,6 +3971,8 @@
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
 ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+ACCESSORS(Code, next_code_flushing_candidate,
+          Object, kNextCodeFlushingCandidateOffset)
 
 
 byte* Code::instruction_start()  {
@@ -3575,6 +4036,12 @@
 }
 
 
+JSRegExp::Type JSRegExp::TypeTagUnchecked() {
+  Smi* smi = Smi::cast(DataAtUnchecked(kTagIndex));
+  return static_cast<JSRegExp::Type>(smi->value());
+}
+
+
 int JSRegExp::CaptureCount() {
   switch (TypeTag()) {
     case ATOM:
@@ -3610,6 +4077,13 @@
 }
 
 
+Object* JSRegExp::DataAtUnchecked(int index) {
+  FixedArray* fa = reinterpret_cast<FixedArray*>(data());
+  int offset = FixedArray::kHeaderSize + index * kPointerSize;
+  return READ_FIELD(fa, offset);
+}
+
+
 void JSRegExp::SetDataAt(int index, Object* value) {
   ASSERT(TypeTag() != NOT_COMPILED);
   ASSERT(index >= kDataIndex);  // Only implementation data can be set this way.
@@ -3617,42 +4091,34 @@
 }
 
 
-JSObject::ElementsKind JSObject::GetElementsKind() {
-  if (map()->has_fast_elements()) {
-    ASSERT(elements()->map() == GetHeap()->fixed_array_map() ||
-           elements()->map() == GetHeap()->fixed_cow_array_map());
-    return FAST_ELEMENTS;
+void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
+  ASSERT(index >= kDataIndex);  // Only implementation data can be set this way.
+  FixedArray* fa = reinterpret_cast<FixedArray*>(data());
+  if (value->IsSmi()) {
+    fa->set_unchecked(index, Smi::cast(value));
+  } else {
+    fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
   }
-  HeapObject* array = elements();
-  if (array->IsFixedArray()) {
-    // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a
-    // FixedArray, but FAST_ELEMENTS is already handled above.
-    ASSERT(array->IsDictionary());
-    return DICTIONARY_ELEMENTS;
-  }
-  ASSERT(!map()->has_fast_elements());
-  if (array->IsExternalArray()) {
-    switch (array->map()->instance_type()) {
-      case EXTERNAL_BYTE_ARRAY_TYPE:
-        return EXTERNAL_BYTE_ELEMENTS;
-      case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
-        return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
-      case EXTERNAL_SHORT_ARRAY_TYPE:
-        return EXTERNAL_SHORT_ELEMENTS;
-      case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
-        return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
-      case EXTERNAL_INT_ARRAY_TYPE:
-        return EXTERNAL_INT_ELEMENTS;
-      case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
-        return EXTERNAL_UNSIGNED_INT_ELEMENTS;
-      case EXTERNAL_PIXEL_ARRAY_TYPE:
-        return EXTERNAL_PIXEL_ELEMENTS;
-      default:
-        break;
-    }
-  }
-  ASSERT(array->map()->instance_type() == EXTERNAL_FLOAT_ARRAY_TYPE);
-  return EXTERNAL_FLOAT_ELEMENTS;
+}
+
+
+ElementsKind JSObject::GetElementsKind() {
+  ElementsKind kind = map()->elements_kind();
+  ASSERT((kind == FAST_ELEMENTS &&
+          (elements()->map() == GetHeap()->fixed_array_map() ||
+           elements()->map() == GetHeap()->fixed_cow_array_map())) ||
+         (kind == FAST_DOUBLE_ELEMENTS &&
+          elements()->IsFixedDoubleArray()) ||
+         (kind == DICTIONARY_ELEMENTS &&
+          elements()->IsFixedArray() &&
+          elements()->IsDictionary()) ||
+         (kind > DICTIONARY_ELEMENTS));
+  return kind;
+}
+
+
+ElementsAccessor* JSObject::GetElementsAccessor() {
+  return ElementsAccessor::ForKind(GetElementsKind());
 }
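
The new GetElementsAccessor() hook routes element operations through a per-kind accessor object chosen by ElementsKind instead of switching on the backing store at every call site. As a rough illustration of that dispatch pattern (not V8's actual ElementsAccessor interface; the type and function names below are invented), a kind-indexed table of accessor singletons might look like this:

#include <cassert>
#include <cstdio>

enum ElementsKindSketch {
  FAST_ELEMENTS_SKETCH,
  FAST_DOUBLE_ELEMENTS_SKETCH,
  DICTIONARY_ELEMENTS_SKETCH,
  NUM_KINDS_SKETCH
};

// Simplified stand-in for an elements accessor: one shared interface,
// one singleton per kind, selected by ForKind().
struct ElementsAccessorSketch {
  virtual ~ElementsAccessorSketch() {}
  virtual const char* Name() const = 0;
  static ElementsAccessorSketch* ForKind(ElementsKindSketch kind);
};

struct FastAccessor : ElementsAccessorSketch {
  const char* Name() const { return "fast"; }
};
struct DoubleAccessor : ElementsAccessorSketch {
  const char* Name() const { return "fast double"; }
};
struct DictionaryAccessor : ElementsAccessorSketch {
  const char* Name() const { return "dictionary"; }
};

ElementsAccessorSketch* ElementsAccessorSketch::ForKind(ElementsKindSketch kind) {
  static FastAccessor fast;
  static DoubleAccessor fast_double;
  static DictionaryAccessor dictionary;
  static ElementsAccessorSketch* table[NUM_KINDS_SKETCH] = {
    &fast, &fast_double, &dictionary
  };
  assert(kind < NUM_KINDS_SKETCH);
  return table[kind];
}

int main() {
  // A holder only records its kind; callers pick the accessor once and
  // then go through the shared interface.
  std::printf("%s\n",
              ElementsAccessorSketch::ForKind(FAST_DOUBLE_ELEMENTS_SKETCH)->Name());
  return 0;
}
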
 
 
@@ -3661,6 +4127,11 @@
 }
 
 
+bool JSObject::HasFastDoubleElements() {
+  return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
+}
+
+
 bool JSObject::HasDictionaryElements() {
   return GetElementsKind() == DICTIONARY_ELEMENTS;
 }
@@ -3693,6 +4164,8 @@
                         EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
 EXTERNAL_ELEMENTS_CHECK(Float,
                         EXTERNAL_FLOAT_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(Double,
+                        EXTERNAL_DOUBLE_ARRAY_TYPE)
 EXTERNAL_ELEMENTS_CHECK(Pixel, EXTERNAL_PIXEL_ARRAY_TYPE)
 
 
@@ -3707,7 +4180,8 @@
 
 
 bool JSObject::AllowsSetElementsLength() {
-  bool result = elements()->IsFixedArray();
+  bool result = elements()->IsFixedArray() ||
+      elements()->IsFixedDoubleArray();
   ASSERT(result == !HasExternalArrayElements());
   return result;
 }
@@ -3737,9 +4211,9 @@
 }
 
 
-NumberDictionary* JSObject::element_dictionary() {
+SeededNumberDictionary* JSObject::element_dictionary() {
   ASSERT(HasDictionaryElements());
-  return NumberDictionary::cast(elements());
+  return SeededNumberDictionary::cast(elements());
 }
 
 
@@ -3762,13 +4236,15 @@
 }
 
 
-StringHasher::StringHasher(int length)
+StringHasher::StringHasher(int length, uint32_t seed)
   : length_(length),
-    raw_running_hash_(0),
+    raw_running_hash_(seed),
     array_index_(0),
     is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
     is_first_char_(true),
-    is_valid_(true) { }
+    is_valid_(true) {
+  ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0);
+}
 
 
 bool StringHasher::has_trivial_hash() {
@@ -3820,7 +4296,7 @@
   result += (result << 3);
   result ^= (result >> 11);
   result += (result << 15);
-  if (result == 0) {
+  if ((result & String::kHashBitMask) == 0) {
     result = 27;
   }
   return result;
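
The running hash is now seeded and the zero check masks with String::kHashBitMask, so the stored hash bits can never end up as zero. A minimal stand-alone sketch of this add-shift-mix scheme with a seed follows; the mask width and the Jenkins-style per-character mixing are illustrative and not claimed to match V8's exact constants:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative 30-bit mask standing in for String::kHashBitMask.
static const uint32_t kHashMaskSketch = (1u << 30) - 1;

// One-at-a-time style string hash, seeded so hash values can be
// randomized per heap.
uint32_t SeededStringHash(const char* chars, size_t length, uint32_t seed) {
  uint32_t hash = seed;
  for (size_t i = 0; i < length; i++) {
    hash += static_cast<unsigned char>(chars[i]);
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  // Finalization, mirroring the shift/add/xor sequence above.
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  // Reserve 0 (commonly used as "hash not computed yet") by remapping it.
  if ((hash & kHashMaskSketch) == 0) hash = 27;
  return hash & kHashMaskSketch;
}

int main() {
  const char* s = "example";
  std::printf("%u\n", SeededStringHash(s, std::strlen(s), 0x5eed1234u));
  return 0;
}
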
@@ -3828,8 +4304,8 @@
 
 
 template <typename schar>
-uint32_t HashSequentialString(const schar* chars, int length) {
-  StringHasher hasher(length);
+uint32_t HashSequentialString(const schar* chars, int length, uint32_t seed) {
+  StringHasher hasher(length, seed);
   if (!hasher.has_trivial_hash()) {
     int i;
     for (i = 0; hasher.is_array_index() && (i < length); i++) {
@@ -3852,12 +4328,28 @@
 }
 
 
-Object* JSObject::GetPrototype() {
-  return JSObject::cast(this)->map()->prototype();
+Object* JSReceiver::GetPrototype() {
+  return HeapObject::cast(this)->map()->prototype();
 }
 
 
-PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
+bool JSReceiver::HasProperty(String* name) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->HasPropertyWithHandler(name);
+  }
+  return GetPropertyAttribute(name) != ABSENT;
+}
+
+
+bool JSReceiver::HasLocalProperty(String* name) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->HasPropertyWithHandler(name);
+  }
+  return GetLocalPropertyAttribute(name) != ABSENT;
+}
+
+
+PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
   return GetPropertyAttributeWithReceiver(this, key);
 }
 
@@ -3905,6 +4397,11 @@
 }
 
 
+bool JSObject::HasHiddenProperties() {
+  return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
+}
+
+
 bool JSObject::HasElement(uint32_t index) {
   return HasElementWithReceiver(this, index);
 }
@@ -3946,9 +4443,7 @@
 
 
 void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
-  ASSERT(AttributesField::is_valid(attributes));
-  int rest_value = flag()->value() & ~AttributesField::mask();
-  set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
+  set_flag(Smi::FromInt(AttributesField::update(flag()->value(), attributes)));
 }
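
The rewrite relies on the bit-field helper's update() doing the mask-and-or in one step, replacing the manual "clear the field, then OR in the new encoding" sequence that was removed above. A stand-alone sketch of that helper shape (names and field layout are illustrative, not V8's BitField declaration):

#include <cassert>
#include <cstdio>
#include <stdint.h>

// Minimal BitField-style helper: pack a small value into bits
// [shift, shift + size) of a word, with encode/decode/update.
template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1) << shift;

  static bool is_valid(T value) {
    return (static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0;
  }
  static uint32_t encode(T value) {
    assert(is_valid(value));
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
  // update(): clear the field, then OR in the new encoding.
  static uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

typedef BitFieldSketch<int, 3, 3> AttributesFieldSketch;

int main() {
  uint32_t flags = 0xffffffffu;
  flags = AttributesFieldSketch::update(flags, 5);
  std::printf("%d\n", AttributesFieldSketch::decode(flags));  // prints 5
  return 0;
}
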
 
 
@@ -3981,16 +4476,27 @@
 }
 
 
-uint32_t NumberDictionaryShape::Hash(uint32_t key) {
-  return ComputeIntegerHash(key);
+uint32_t UnseededNumberDictionaryShape::Hash(uint32_t key) {
+  return ComputeIntegerHash(key, 0);
 }
 
 
-uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
+uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
+                                                      Object* other) {
   ASSERT(other->IsNumber());
-  return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
+  return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
 }
 
+uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
+  return ComputeIntegerHash(key, seed);
+}
+
+uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
+                                                          uint32_t seed,
+                                                          Object* other) {
+  ASSERT(other->IsNumber());
+  return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
+}
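
The seeded dictionary shapes assume ComputeIntegerHash now takes a seed as well. The usual approach is an avalanche-style integer mix that folds the seed in up front; a stand-alone sketch (the constants below are a well-known 32-bit mix used for illustration, not claimed to be V8's exact ComputeIntegerHash):

#include <cstdint>
#include <cstdio>

// Seeded integer hash: XOR the seed in, then run a multiply/shift
// avalanche so nearby keys spread across buckets.
uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  // Different seeds give different distributions for the same key.
  std::printf("%u %u\n",
              SeededIntegerHash(42, 0),
              SeededIntegerHash(42, 0xdeadbeefu));
  return 0;
}
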
 
 MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
   return Isolate::Current()->heap()->NumberFromUint32(key);
@@ -4020,6 +4526,36 @@
 }
 
 
+bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
+  return key == JSObject::cast(other);
+}
+
+
+uint32_t ObjectHashTableShape::Hash(JSObject* key) {
+  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+  ASSERT(!maybe_hash->IsFailure());
+  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+}
+
+
+uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
+  MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
+      JSObject::OMIT_CREATION);
+  ASSERT(!maybe_hash->IsFailure());
+  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+}
+
+
+MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
+  return key;
+}
+
+
+void ObjectHashTable::RemoveEntry(int entry) {
+  RemoveEntry(entry, GetHeap());
+}
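
ObjectHashTableShape keys entries on a JSObject's identity hash (created on demand), which is what backs the new JSWeakMap table. The important property is that two structurally identical objects are still distinct keys; only the very same object maps back to the same entry. A rough analogue using object identity as the key (weakness of the keys is handled by the GC and is not modeled here):

#include <cstdio>
#include <unordered_map>

struct Obj { int payload; };

int main() {
  // Keyed by object identity (the pointer), not by contents -- the same
  // idea as hashing a per-object identity hash.
  std::unordered_map<const Obj*, int> table;

  Obj a = {1};
  Obj b = {1};  // structurally equal to a, but a different object
  table[&a] = 100;

  std::printf("a present: %d\n", table.count(&a) != 0);  // 1
  std::printf("b present: %d\n", table.count(&b) != 0);  // 0
  return 0;
}
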
+
+
 void Map::ClearCodeCache(Heap* heap) {
   // No write barrier is needed since empty_fixed_array is not in new space.
   // Please note this function is used during marking:
@@ -4084,16 +4620,16 @@
 }
 
 
-void Proxy::ProxyIterateBody(ObjectVisitor* v) {
+void Foreign::ForeignIterateBody(ObjectVisitor* v) {
   v->VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
 }
 
 
 template<typename StaticVisitor>
-void Proxy::ProxyIterateBody() {
+void Foreign::ForeignIterateBody() {
   StaticVisitor::VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
 }
 
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index b7e2fdd..0398572 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -114,6 +114,9 @@
     case EXTERNAL_FLOAT_ARRAY_TYPE:
       ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
       break;
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      ExternalDoubleArray::cast(this)->ExternalDoubleArrayPrint(out);
+      break;
     case FILLER_TYPE:
       PrintF(out, "filler");
       break;
@@ -145,8 +148,17 @@
     case CODE_TYPE:
       Code::cast(this)->CodePrint(out);
       break;
-    case PROXY_TYPE:
-      Proxy::cast(this)->ProxyPrint(out);
+    case JS_PROXY_TYPE:
+      JSProxy::cast(this)->JSProxyPrint(out);
+      break;
+    case JS_FUNCTION_PROXY_TYPE:
+      JSFunctionProxy::cast(this)->JSFunctionProxyPrint(out);
+      break;
+    case JS_WEAK_MAP_TYPE:
+      JSWeakMap::cast(this)->JSWeakMapPrint(out);
+      break;
+    case FOREIGN_TYPE:
+      Foreign::cast(this)->ForeignPrint(out);
       break;
     case SHARED_FUNCTION_INFO_TYPE:
       SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
@@ -217,6 +229,11 @@
 }
 
 
+void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
+  PrintF(out, "external double array");
+}
+
+
 void JSObject::PrintProperties(FILE* out) {
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
@@ -271,17 +288,30 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      // Print in array notation for non-sparse arrays.
+      FixedDoubleArray* p = FixedDoubleArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        if (p->is_the_hole(i)) {
+          PrintF(out, "   %d: <the hole>", i);
+        } else {
+          PrintF(out, "   %d: %g", i, p->get_scalar(i));
+        }
+        PrintF(out, "\n");
+      }
+      break;
+    }
     case EXTERNAL_PIXEL_ELEMENTS: {
       ExternalPixelArray* p = ExternalPixelArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %d\n", i, p->get(i));
+        PrintF(out, "   %d: %d\n", i, p->get_scalar(i));
       }
       break;
     }
     case EXTERNAL_BYTE_ELEMENTS: {
       ExternalByteArray* p = ExternalByteArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
       }
       break;
     }
@@ -289,14 +319,14 @@
       ExternalUnsignedByteArray* p =
           ExternalUnsignedByteArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
       }
       break;
     }
     case EXTERNAL_SHORT_ELEMENTS: {
       ExternalShortArray* p = ExternalShortArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
       }
       break;
     }
@@ -304,14 +334,14 @@
       ExternalUnsignedShortArray* p =
           ExternalUnsignedShortArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
       }
       break;
     }
     case EXTERNAL_INT_ELEMENTS: {
       ExternalIntArray* p = ExternalIntArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
       }
       break;
     }
@@ -319,23 +349,36 @@
       ExternalUnsignedIntArray* p =
           ExternalUnsignedIntArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
       }
       break;
     }
     case EXTERNAL_FLOAT_ELEMENTS: {
       ExternalFloatArray* p = ExternalFloatArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: %f\n", i, p->get(i));
+        PrintF(out, "   %d: %f\n", i, p->get_scalar(i));
+      }
+      break;
+    }
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF(out, "  %d: %f\n", i, p->get_scalar(i));
       }
       break;
     }
     case DICTIONARY_ELEMENTS:
       elements()->Print(out);
       break;
-    default:
-      UNREACHABLE();
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      FixedArray* p = FixedArray::cast(elements());
+      for (int i = 2; i < p->length(); i++) {
+        PrintF(out, "   %d: ", i);
+        p->get(i)->ShortPrint(out);
+        PrintF(out, "\n");
+      }
       break;
+    }
   }
 }
 
@@ -383,6 +426,7 @@
     case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
       return "EXTERNAL_UNSIGNED_INT_ARRAY";
     case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
+    case EXTERNAL_DOUBLE_ARRAY_TYPE: return "EXTERNAL_DOUBLE_ARRAY";
     case FILLER_TYPE: return "FILLER";
     case JS_OBJECT_TYPE: return "JS_OBJECT";
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
@@ -392,19 +436,20 @@
     case JS_FUNCTION_TYPE: return "JS_FUNCTION";
     case CODE_TYPE: return "CODE";
     case JS_ARRAY_TYPE: return "JS_ARRAY";
+    case JS_PROXY_TYPE: return "JS_PROXY";
+    case JS_WEAK_MAP_TYPE: return "JS_WEAK_MAP";
     case JS_REGEXP_TYPE: return "JS_REGEXP";
     case JS_VALUE_TYPE: return "JS_VALUE";
     case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
     case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
     case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
-    case PROXY_TYPE: return "PROXY";
-    case LAST_STRING_TYPE: return "LAST_STRING_TYPE";
+    case FOREIGN_TYPE: return "FOREIGN";
     case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
 #define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
   STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
+    default: return "UNKNOWN";
   }
-  return "UNKNOWN";
 }
 
 
@@ -453,6 +498,13 @@
 }
 
 
+void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
+  HeapObject::PrintHeader(out, "PolymorphicCodeCache");
+  PrintF(out, "\n - cache: ");
+  cache()->ShortPrint(out);
+}
+
+
 void FixedArray::FixedArrayPrint(FILE* out) {
   HeapObject::PrintHeader(out, "FixedArray");
   PrintF(out, " - length: %d", length());
@@ -515,6 +567,53 @@
 }
 
 
+// This method is only meant to be called from gdb for debugging purposes.
+// Since the string can also be in two-byte encoding, non-ascii characters
+// will be ignored in the output.
+char* String::ToAsciiArray() {
+  // Static so that subsequent calls free previously allocated space.
+  // This also means that previous results will be overwritten.
+  static char* buffer = NULL;
+  if (buffer != NULL) delete[] buffer;
+  buffer = new char[length()+1];
+  WriteToFlat(this, buffer, 0, length());
+  buffer[length()] = 0;
+  return buffer;
+}
+
+
+void JSProxy::JSProxyPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSProxy");
+  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - handler = ");
+  handler()->Print(out);
+  PrintF(out, "\n");
+}
+
+
+void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSFunctionProxy");
+  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - handler = ");
+  handler()->Print(out);
+  PrintF(out, " - call_trap = ");
+  call_trap()->Print(out);
+  PrintF(out, " - construct_trap = ");
+  construct_trap()->Print(out);
+  PrintF(out, "\n");
+}
+
+
+void JSWeakMap::JSWeakMapPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSWeakMap");
+  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - number of elements = %d\n", table()->NumberOfElements());
+  PrintF(out, " - table = ");
+  table()->ShortPrint(out);
+  PrintF(out, "\n");
+}
+
+
 void JSFunction::JSFunctionPrint(FILE* out) {
   HeapObject::PrintHeader(out, "Function");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
@@ -607,8 +706,8 @@
 }
 
 
-void Proxy::ProxyPrint(FILE* out) {
-  PrintF(out, "proxy to %p", proxy());
+void Foreign::ForeignPrint(FILE* out) {
+  PrintF(out, "foreign address : %p", address());
 }
 
 
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 5a23658..0aa21dd 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -58,6 +58,9 @@
           return kVisitConsString;
         }
 
+      case kSlicedStringTag:
+        return kVisitSlicedString;
+
       case kExternalStringTag:
         return GetVisitorIdForSize(kVisitDataObject,
                                    kVisitDataObjectGeneric,
@@ -73,6 +76,9 @@
     case FIXED_ARRAY_TYPE:
       return kVisitFixedArray;
 
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      return kVisitFixedDoubleArray;
+
     case ODDBALL_TYPE:
       return kVisitOddball;
 
@@ -85,13 +91,29 @@
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
       return kVisitPropertyCell;
 
+    case JS_WEAK_MAP_TYPE:
+      return kVisitJSWeakMap;
+
+    case JS_REGEXP_TYPE:
+      return kVisitJSRegExp;
+
     case SHARED_FUNCTION_INFO_TYPE:
       return kVisitSharedFunctionInfo;
 
-    case PROXY_TYPE:
+    case JS_PROXY_TYPE:
+      return GetVisitorIdForSize(kVisitStruct,
+                                 kVisitStructGeneric,
+                                 JSProxy::kSize);
+
+    case JS_FUNCTION_PROXY_TYPE:
+      return GetVisitorIdForSize(kVisitStruct,
+                                 kVisitStructGeneric,
+                                 JSFunctionProxy::kSize);
+
+    case FOREIGN_TYPE:
       return GetVisitorIdForSize(kVisitDataObject,
                                  kVisitDataObjectGeneric,
-                                 Proxy::kSize);
+                                 Foreign::kSize);
 
     case FILLER_TYPE:
       return kVisitDataObjectGeneric;
@@ -100,7 +122,6 @@
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
     case JS_ARRAY_TYPE:
-    case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
     case JS_BUILTINS_OBJECT_TYPE:
@@ -121,6 +142,7 @@
     case EXTERNAL_INT_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
     case EXTERNAL_FLOAT_ARRAY_TYPE:
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
       return GetVisitorIdForSize(kVisitDataObject,
                                  kVisitDataObjectGeneric,
                                  instance_size);
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index da955da..4ce1bd0 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,24 @@
 #ifndef V8_OBJECTS_VISITING_H_
 #define V8_OBJECTS_VISITING_H_
 
+#include "allocation.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
 // Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -50,6 +68,7 @@
     kVisitShortcutCandidate,
     kVisitByteArray,
     kVisitFixedArray,
+    kVisitFixedDoubleArray,
     kVisitGlobalContext,
 
     // For data objects, JS objects and structs along with generic visitor which
@@ -96,12 +115,15 @@
     kVisitStructGeneric,
 
     kVisitConsString,
+    kVisitSlicedString,
     kVisitOddball,
     kVisitCode,
     kVisitMap,
     kVisitPropertyCell,
     kVisitSharedFunctionInfo,
     kVisitJSFunction,
+    kVisitJSWeakMap,
+    kVisitJSRegExp,
 
     kVisitorIdCount,
     kMinObjectSizeInWords = 2
@@ -278,11 +300,18 @@
                                       ConsString::BodyDescriptor,
                                       int>::Visit);
 
+    table_.Register(kVisitSlicedString,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      SlicedString::BodyDescriptor,
+                                      int>::Visit);
+
     table_.Register(kVisitFixedArray,
                     &FlexibleBodyVisitor<StaticVisitor,
                                          FixedArray::BodyDescriptor,
                                          int>::Visit);
 
+    table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
     table_.Register(kVisitGlobalContext,
                     &FixedBodyVisitor<StaticVisitor,
                                       Context::ScavengeBodyDescriptor,
@@ -295,6 +324,10 @@
                                       SharedFunctionInfo::BodyDescriptor,
                                       int>::Visit);
 
+    table_.Register(kVisitJSWeakMap, &VisitJSObject);
+
+    table_.Register(kVisitJSRegExp, &VisitJSObject);
+
     table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
 
     table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
@@ -327,6 +360,15 @@
     return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
   }
 
+  static inline int VisitFixedDoubleArray(Map* map, HeapObject* object) {
+    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+    return FixedDoubleArray::SizeFor(length);
+  }
+
+  static inline int VisitJSObject(Map* map, HeapObject* object) {
+    return JSObjectVisitor::Visit(map, object);
+  }
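
The new registrations above plug into the same scheme as the rest of the static visitor: a table of plain function pointers indexed by visitor id, filled in once and then dispatched through without virtual calls. A minimal sketch of that table-driven dispatch (all names below are invented for illustration):

#include <cstdio>

enum VisitorIdSketch {
  kVisitFixedArraySketch,
  kVisitFixedDoubleArraySketch,
  kVisitJSObjectSketch,
  kVisitorIdCountSketch
};

struct HeapObjectSketch { VisitorIdSketch id; int size; };

// Table of function pointers, indexed by visitor id.
typedef int (*VisitCallback)(HeapObjectSketch* object);

static VisitCallback visit_table[kVisitorIdCountSketch];

static int VisitFixedArray(HeapObjectSketch* object) { return object->size; }
static int VisitFixedDoubleArray(HeapObjectSketch* object) { return object->size; }
static int VisitJSObject(HeapObjectSketch* object) { return object->size; }

static void RegisterVisitors() {
  visit_table[kVisitFixedArraySketch] = &VisitFixedArray;
  visit_table[kVisitFixedDoubleArraySketch] = &VisitFixedDoubleArray;
  visit_table[kVisitJSObjectSketch] = &VisitJSObject;
}

int main() {
  RegisterVisitors();
  HeapObjectSketch weak_map = { kVisitJSObjectSketch, 32 };
  // Dispatch: one array index instead of a switch or a virtual call.
  std::printf("visited %d bytes\n", visit_table[weak_map.id](&weak_map));
  return 0;
}
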
+
   static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
     return SeqAsciiString::cast(object)->
         SeqAsciiStringSize(map->instance_type());
diff --git a/src/objects.cc b/src/objects.cc
index fac83f1..88ebbf4 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -33,6 +33,7 @@
 #include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
+#include "elements.h"
 #include "execution.h"
 #include "full-codegen.h"
 #include "hydrogen.h"
@@ -40,8 +41,6 @@
 #include "objects-visiting.h"
 #include "macro-assembler.h"
 #include "safepoint-table.h"
-#include "scanner-base.h"
-#include "scopeinfo.h"
 #include "string-stream.h"
 #include "utils.h"
 #include "vm-state-inl.h"
@@ -59,7 +58,6 @@
 const int kGetterIndex = 0;
 const int kSetterIndex = 1;
 
-
 MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
                                                   Object* value) {
   Object* result;
@@ -86,7 +84,7 @@
 
 
 MaybeObject* Object::ToObject() {
-  if (IsJSObject()) {
+  if (IsJSReceiver()) {
     return this;
   } else if (IsNumber()) {
     Isolate* isolate = Isolate::Current();
@@ -135,23 +133,21 @@
 void Object::Lookup(String* name, LookupResult* result) {
   Object* holder = NULL;
   if (IsSmi()) {
-    Heap* heap = Isolate::Current()->heap();
-    Context* global_context = heap->isolate()->context()->global_context();
+    Context* global_context = Isolate::Current()->context()->global_context();
     holder = global_context->number_function()->instance_prototype();
   } else {
     HeapObject* heap_object = HeapObject::cast(this);
     if (heap_object->IsJSObject()) {
       return JSObject::cast(this)->Lookup(name, result);
+    } else if (heap_object->IsJSProxy()) {
+      return result->HandlerResult();
     }
-    Heap* heap = heap_object->GetHeap();
+    Context* global_context = Isolate::Current()->context()->global_context();
     if (heap_object->IsString()) {
-      Context* global_context = heap->isolate()->context()->global_context();
       holder = global_context->string_function()->instance_prototype();
     } else if (heap_object->IsHeapNumber()) {
-      Context* global_context = heap->isolate()->context()->global_context();
       holder = global_context->number_function()->instance_prototype();
     } else if (heap_object->IsBoolean()) {
-      Context* global_context = heap->isolate()->context()->global_context();
       holder = global_context->boolean_function()->instance_prototype();
     }
   }
@@ -177,11 +173,12 @@
                                              Object* holder) {
   Isolate* isolate = name->GetIsolate();
   // To accommodate both the old and the new api we switch on the
-  // data structure used to store the callbacks.  Eventually proxy
+  // data structure used to store the callbacks.  Eventually foreign
   // callbacks should be phased out.
-  if (structure->IsProxy()) {
+  if (structure->IsForeign()) {
     AccessorDescriptor* callback =
-        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+        reinterpret_cast<AccessorDescriptor*>(
+            Foreign::cast(structure)->address());
     MaybeObject* value = (callback->getter)(receiver, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return value;
@@ -192,7 +189,7 @@
     AccessorInfo* data = AccessorInfo::cast(structure);
     Object* fun_obj = data->getter();
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
-    HandleScope scope;
+    HandleScope scope(isolate);
     JSObject* self = JSObject::cast(receiver);
     JSObject* holder_handle = JSObject::cast(holder);
     Handle<String> key(name);
@@ -228,6 +225,35 @@
 }
 
 
+MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
+                                            String* name_raw,
+                                            Object* handler_raw) {
+  Isolate* isolate = name_raw->GetIsolate();
+  HandleScope scope(isolate);
+  Handle<Object> receiver(receiver_raw);
+  Handle<Object> name(name_raw);
+  Handle<Object> handler(handler_raw);
+
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  if (trap->IsUndefined()) {
+    // Get the derived `get' property.
+    trap = isolate->derived_get_trap();
+  }
+
+  // Call trap function.
+  Object** args[] = { receiver.location(), name.location() };
+  bool has_exception;
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
+
+  return *result;
+}
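
GetPropertyWithHandler follows the proxy trap protocol: look the named trap up on the handler, fall back to the derived trap when it is absent, then call it with the receiver and property name. The sketch below models only that lookup-and-fallback control flow, using a string-keyed map as a stand-in handler; it is an illustration, not V8's API:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

typedef std::function<std::string(const std::string& name)> Trap;

// Stand-in handler: a bag of named traps.
typedef std::map<std::string, Trap> Handler;

// Derived fallback used when the handler does not define "get" itself.
std::string DerivedGetTrap(const std::string& name) {
  return "<derived get of " + name + ">";
}

std::string GetWithHandler(const Handler& handler, const std::string& name) {
  // Extract the trap; fall back to the derived trap when undefined.
  Handler::const_iterator it = handler.find("get");
  Trap trap = (it == handler.end()) ? Trap(&DerivedGetTrap) : it->second;
  // Call the trap with the property name.
  return trap(name);
}

int main() {
  Handler with_trap;
  with_trap["get"] = [](const std::string& name) { return "custom:" + name; };
  Handler without_trap;

  std::printf("%s\n", GetWithHandler(with_trap, "foo").c_str());     // custom:foo
  std::printf("%s\n", GetWithHandler(without_trap, "foo").c_str());  // derived
  return 0;
}
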
+
+
 MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
                                                   JSFunction* getter) {
   HandleScope scope;
@@ -463,7 +489,16 @@
       cell->set_value(cell->heap()->the_hole_value());
       dictionary->DetailsAtPut(entry, details.AsDeleted());
     } else {
-      return dictionary->DeleteProperty(entry, mode);
+      Object* deleted = dictionary->DeleteProperty(entry, mode);
+      if (deleted == GetHeap()->true_value()) {
+        FixedArray* new_properties = NULL;
+        MaybeObject* maybe_properties = dictionary->Shrink(name);
+        if (!maybe_properties->To(&new_properties)) {
+          return maybe_properties;
+        }
+        set_properties(new_properties);
+      }
+      return deleted;
     }
   }
   return GetHeap()->true_value();
@@ -495,30 +530,34 @@
   Heap* heap = name->GetHeap();
 
   // Traverse the prototype chain from the current object (this) to
-  // the holder and check for access rights. This avoid traversing the
+  // the holder and check for access rights. This avoids traversing the
   // objects more than once in case of interceptors, because the
   // holder will always be the interceptor holder and the search may
   // only continue with a current object just after the interceptor
   // holder in the prototype chain.
-  Object* last = result->IsProperty() ? result->holder() : heap->null_value();
-  for (Object* current = this; true; current = current->GetPrototype()) {
-    if (current->IsAccessCheckNeeded()) {
-      // Check if we're allowed to read from the current object. Note
-      // that even though we may not actually end up loading the named
-      // property from the current object, we still check that we have
-      // access to it.
-      JSObject* checked = JSObject::cast(current);
-      if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
-        return checked->GetPropertyWithFailedAccessCheck(receiver,
-                                                         result,
-                                                         name,
-                                                         attributes);
+  // Proxy handlers do not use the proxy's prototype, so we can skip this.
+  if (!result->IsHandler()) {
+    Object* last = result->IsProperty() ? result->holder() : heap->null_value();
+    ASSERT(this != this->GetPrototype());
+    for (Object* current = this; true; current = current->GetPrototype()) {
+      if (current->IsAccessCheckNeeded()) {
+        // Check if we're allowed to read from the current object. Note
+        // that even though we may not actually end up loading the named
+        // property from the current object, we still check that we have
+        // access to it.
+        JSObject* checked = JSObject::cast(current);
+        if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+          return checked->GetPropertyWithFailedAccessCheck(receiver,
+                                                           result,
+                                                           name,
+                                                           attributes);
+        }
       }
+      // Stop traversing the chain once we reach the last object in the
+      // chain; either the holder of the result or null in case of an
+      // absent property.
+      if (current == last) break;
     }
-    // Stop traversing the chain once we reach the last object in the
-    // chain; either the holder of the result or null in case of an
-    // absent property.
-    if (current == last) break;
   }
 
   if (!result->IsProperty()) {
@@ -544,46 +583,90 @@
                                      result->GetCallbackObject(),
                                      name,
                                      holder);
+    case HANDLER: {
+      JSProxy* proxy = JSProxy::cast(this);
+      return GetPropertyWithHandler(receiver, name, proxy->handler());
+    }
     case INTERCEPTOR: {
       JSObject* recvr = JSObject::cast(receiver);
       return holder->GetPropertyWithInterceptor(recvr, name, attributes);
     }
-    default:
-      UNREACHABLE();
-      return NULL;
+    case MAP_TRANSITION:
+    case ELEMENTS_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case NULL_DESCRIPTOR:
+      break;
   }
+  UNREACHABLE();
+  return NULL;
 }
 
 
 MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
-  Object* holder = NULL;
-  if (IsSmi()) {
-    Context* global_context = Isolate::Current()->context()->global_context();
-    holder = global_context->number_function()->instance_prototype();
-  } else {
-    HeapObject* heap_object = HeapObject::cast(this);
+  Heap* heap = IsSmi()
+      ? Isolate::Current()->heap()
+      : HeapObject::cast(this)->GetHeap();
+  Object* holder = this;
 
-    if (heap_object->IsJSObject()) {
-      return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
-    }
-    Heap* heap = heap_object->GetHeap();
-    Isolate* isolate = heap->isolate();
-
-    Context* global_context = isolate->context()->global_context();
-    if (heap_object->IsString()) {
-      holder = global_context->string_function()->instance_prototype();
-    } else if (heap_object->IsHeapNumber()) {
+  // Iterate up the prototype chain until an element is found or the null
+  // prototype is encountered.
+  for (holder = this;
+       holder != heap->null_value();
+       holder = holder->GetPrototype()) {
+    if (holder->IsSmi()) {
+      Context* global_context = Isolate::Current()->context()->global_context();
       holder = global_context->number_function()->instance_prototype();
-    } else if (heap_object->IsBoolean()) {
-      holder = global_context->boolean_function()->instance_prototype();
     } else {
-      // Undefined and null have no indexed properties.
-      ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
-      return heap->undefined_value();
+      HeapObject* heap_object = HeapObject::cast(holder);
+      if (!heap_object->IsJSObject()) {
+        Isolate* isolate = heap->isolate();
+        Context* global_context = isolate->context()->global_context();
+        if (heap_object->IsString()) {
+          holder = global_context->string_function()->instance_prototype();
+        } else if (heap_object->IsHeapNumber()) {
+          holder = global_context->number_function()->instance_prototype();
+        } else if (heap_object->IsBoolean()) {
+          holder = global_context->boolean_function()->instance_prototype();
+        } else if (heap_object->IsJSProxy()) {
+          // TODO(rossberg): do something
+          return heap->undefined_value();  // For now...
+        } else {
+          // Undefined and null have no indexed properties.
+          ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
+          return heap->undefined_value();
+        }
+      }
+    }
+
+    // Inline the case for JSObjects. Doing so significantly improves the
+    // performance of fetching elements where checking the prototype chain is
+    // necessary.
+    JSObject* js_object = JSObject::cast(holder);
+
+    // Check access rights if needed.
+    if (js_object->IsAccessCheckNeeded()) {
+      Isolate* isolate = heap->isolate();
+      if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
+        isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
+        return heap->undefined_value();
+      }
+    }
+
+    if (js_object->HasIndexedInterceptor()) {
+      return js_object->GetElementWithInterceptor(receiver, index);
+    }
+
+    if (js_object->elements() != heap->empty_fixed_array()) {
+      MaybeObject* result = js_object->GetElementsAccessor()->Get(
+          js_object->elements(),
+          index,
+          js_object,
+          receiver);
+      if (result != heap->the_hole_value()) return result;
     }
   }
 
-  return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
+  return heap->undefined_value();
 }
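
The rewritten GetElementWithReceiver walks the prototype chain itself: for each holder it runs access checks and interceptors, asks the elements accessor, and only moves on to the next prototype when the element is a hole, returning undefined once it falls off the end of the chain. The loop below sketches that shape over a toy object model (invented types; access checks and interceptors omitted):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>

struct ToyObject {
  std::map<uint32_t, int> elements;
  ToyObject* prototype;  // NULL terminates the chain, like the null prototype.
};

// Walk the chain until some holder has the element or the chain ends.
bool GetElementWithReceiver(ToyObject* start, uint32_t index, int* out) {
  for (ToyObject* holder = start; holder != NULL; holder = holder->prototype) {
    std::map<uint32_t, int>::iterator it = holder->elements.find(index);
    if (it != holder->elements.end()) {
      *out = it->second;
      return true;   // found on this holder
    }
    // "Hole" on this holder: keep searching up the prototype chain.
  }
  return false;      // fell off the chain: undefined
}

int main() {
  ToyObject proto;
  proto.prototype = NULL;
  proto.elements[7] = 42;
  ToyObject obj;
  obj.prototype = &proto;
  int value = 0;
  if (GetElementWithReceiver(&obj, 7, &value)) std::printf("%d\n", value);  // 42
  return 0;
}
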
 
 
@@ -596,9 +679,10 @@
 
   HeapObject* heap_object = HeapObject::cast(this);
 
-  // The object is either a number, a string, a boolean, or a real JS object.
-  if (heap_object->IsJSObject()) {
-    return JSObject::cast(this)->map()->prototype();
+  // The object is either a number, a string, a boolean,
+  // a real JS object, or a Harmony proxy.
+  if (heap_object->IsJSReceiver()) {
+    return heap_object->map()->prototype();
   }
   Heap* heap = heap_object->GetHeap();
   Context* context = heap->isolate()->context()->global_context();
@@ -913,6 +997,11 @@
       accumulator->Add("<JS array[%u]>", static_cast<uint32_t>(length));
       break;
     }
+    case JS_WEAK_MAP_TYPE: {
+      int elements = JSWeakMap::cast(this)->table()->NumberOfElements();
+      accumulator->Add("<JS WeakMap[%d]>", elements);
+      break;
+    }
     case JS_REGEXP_TYPE: {
       accumulator->Add("<JS RegExp>");
       break;
@@ -1045,6 +1134,10 @@
       accumulator->Add("<ExternalFloatArray[%u]>",
                        ExternalFloatArray::cast(this)->length());
       break;
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      accumulator->Add("<ExternalDoubleArray[%u]>",
+                       ExternalDoubleArray::cast(this)->length());
+      break;
     case SHARED_FUNCTION_INFO_TYPE:
       accumulator->Add("<SharedFunctionInfo>");
       break;
@@ -1082,8 +1175,14 @@
       HeapNumber::cast(this)->HeapNumberPrint(accumulator);
       accumulator->Put('>');
       break;
-    case PROXY_TYPE:
-      accumulator->Add("<Proxy>");
+    case JS_PROXY_TYPE:
+      accumulator->Add("<JSProxy>");
+      break;
+    case JS_FUNCTION_PROXY_TYPE:
+      accumulator->Add("<JSFunctionProxy>");
+      break;
+    case FOREIGN_TYPE:
+      accumulator->Add("<Foreign>");
       break;
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
       accumulator->Add("Cell for ");
@@ -1116,6 +1215,9 @@
       case kConsStringTag:
         ConsString::BodyDescriptor::IterateBody(this, v);
         break;
+      case kSlicedStringTag:
+        SlicedString::BodyDescriptor::IterateBody(this, v);
+        break;
       case kExternalStringTag:
         if ((type & kStringEncodingMask) == kAsciiStringTag) {
           reinterpret_cast<ExternalAsciiString*>(this)->
@@ -1133,10 +1235,13 @@
     case FIXED_ARRAY_TYPE:
       FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
       break;
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      break;
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
     case JS_ARRAY_TYPE:
+    case JS_WEAK_MAP_TYPE:
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
@@ -1151,8 +1256,14 @@
     case ODDBALL_TYPE:
       Oddball::BodyDescriptor::IterateBody(this, v);
       break;
-    case PROXY_TYPE:
-      reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
+    case JS_PROXY_TYPE:
+      JSProxy::BodyDescriptor::IterateBody(this, v);
+      break;
+    case JS_FUNCTION_PROXY_TYPE:
+      JSFunctionProxy::BodyDescriptor::IterateBody(this, v);
+      break;
+    case FOREIGN_TYPE:
+      reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
       break;
     case MAP_TYPE:
       Map::BodyDescriptor::IterateBody(this, v);
@@ -1174,6 +1285,7 @@
     case EXTERNAL_INT_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
     case EXTERNAL_FLOAT_ARRAY_TYPE:
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
       break;
     case SHARED_FUNCTION_INFO_TYPE:
       SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
@@ -1232,8 +1344,8 @@
 }
 
 
-String* JSObject::class_name() {
-  if (IsJSFunction()) {
+String* JSReceiver::class_name() {
+  if (IsJSFunction() || IsJSFunctionProxy()) {
     return GetHeap()->function_class_symbol();
   }
   if (map()->constructor()->IsJSFunction()) {
@@ -1245,7 +1357,7 @@
 }
 
 
-String* JSObject::constructor_name() {
+String* JSReceiver::constructor_name() {
   if (map()->constructor()->IsJSFunction()) {
     JSFunction* constructor = JSFunction::cast(map()->constructor());
     String* name = String::cast(constructor->shared()->name());
@@ -1255,6 +1367,7 @@
     Object* proto = GetPrototype();
     if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
   }
+  // TODO(rossberg): what about proxies?
   // If the constructor is not present, return "Object".
   return GetHeap()->Object_symbol();
 }
@@ -1333,13 +1446,12 @@
   // it's unrelated to properties.
   int descriptor_index = old_descriptors->Search(name);
 
-  // External array transitions are stored in the descriptor for property "",
-  // which is not a identifier and should have forced a switch to slow
-  // properties above.
+  // Element transitions are stored in the descriptor for property "", which is
+  // not an identifier and should have forced a switch to slow properties above.
   ASSERT(descriptor_index == DescriptorArray::kNotFound ||
-      old_descriptors->GetType(descriptor_index) != EXTERNAL_ARRAY_TRANSITION);
+      old_descriptors->GetType(descriptor_index) != ELEMENTS_TRANSITION);
   bool can_insert_transition = descriptor_index == DescriptorArray::kNotFound ||
-      old_descriptors->GetType(descriptor_index) == EXTERNAL_ARRAY_TRANSITION;
+      old_descriptors->GetType(descriptor_index) == ELEMENTS_TRANSITION;
   bool allow_map_transition =
       can_insert_transition &&
       (isolate->context()->global_context()->object_function()->map() != map());
@@ -1709,10 +1821,10 @@
 }
 
 
-MaybeObject* JSObject::SetProperty(String* name,
-                                   Object* value,
-                                   PropertyAttributes attributes,
-                                   StrictModeFlag strict_mode) {
+MaybeObject* JSReceiver::SetProperty(String* name,
+                                     Object* value,
+                                     PropertyAttributes attributes,
+                                     StrictModeFlag strict_mode) {
   LookupResult result;
   LocalLookup(name, &result);
   return SetProperty(&result, name, value, attributes, strict_mode);
@@ -1722,7 +1834,8 @@
 MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
                                                String* name,
                                                Object* value,
-                                               JSObject* holder) {
+                                               JSObject* holder,
+                                               StrictModeFlag strict_mode) {
   Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
 
@@ -1732,11 +1845,12 @@
   Handle<Object> value_handle(value, isolate);
 
   // To accommodate both the old and the new api we switch on the
-  // data structure used to store the callbacks.  Eventually proxy
+  // data structure used to store the callbacks.  Eventually foreign
   // callbacks should be phased out.
-  if (structure->IsProxy()) {
+  if (structure->IsForeign()) {
     AccessorDescriptor* callback =
-        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+        reinterpret_cast<AccessorDescriptor*>(
+            Foreign::cast(structure)->address());
     MaybeObject* obj = (callback->setter)(this,  value, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (obj->IsFailure()) return obj;
@@ -1769,6 +1883,9 @@
     if (setter->IsJSFunction()) {
      return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
     } else {
+      if (strict_mode == kNonStrictMode) {
+        return value;
+      }
       Handle<String> key(name);
       Handle<Object> holder_handle(holder, isolate);
       Handle<Object> args[2] = { key, holder_handle };
@@ -1822,24 +1939,30 @@
 }
 
 
-MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
-                                                                Object* value,
-                                                                bool* found) {
+MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
+    uint32_t index,
+    Object* value,
+    bool* found,
+    StrictModeFlag strict_mode) {
   Heap* heap = GetHeap();
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
     if (!JSObject::cast(pt)->HasDictionaryElements()) {
-        continue;
+      continue;
     }
-    NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
+    SeededNumberDictionary* dictionary =
+        JSObject::cast(pt)->element_dictionary();
     int entry = dictionary->FindEntry(index);
-    if (entry != NumberDictionary::kNotFound) {
+    if (entry != SeededNumberDictionary::kNotFound) {
       PropertyDetails details = dictionary->DetailsAt(entry);
       if (details.type() == CALLBACKS) {
         *found = true;
-        return SetElementWithCallback(
-            dictionary->ValueAt(entry), index, value, JSObject::cast(pt));
+        return SetElementWithCallback(dictionary->ValueAt(entry),
+                                      index,
+                                      value,
+                                      JSObject::cast(pt),
+                                      strict_mode);
       }
     }
   }
@@ -1877,25 +2000,25 @@
 }
 
 
-MaybeObject* Map::GetExternalArrayElementsMap(ExternalArrayType array_type,
-                                              bool safe_to_add_transition) {
+MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind,
+                                           bool safe_to_add_transition) {
   Heap* current_heap = heap();
   DescriptorArray* descriptors = instance_descriptors();
-  String* external_array_sentinel_name = current_heap->empty_symbol();
+  String* elements_transition_sentinel_name = current_heap->empty_symbol();
 
   if (safe_to_add_transition) {
     // It's only safe to manipulate the descriptor array if it would be
     // safe to add a transition.
 
     ASSERT(!is_shared());  // no transitions can be added to shared maps.
-    // Check if the external array transition already exists.
+    // Check if the elements transition already exists.
     DescriptorLookupCache* cache =
         current_heap->isolate()->descriptor_lookup_cache();
-    int index = cache->Lookup(descriptors, external_array_sentinel_name);
+    int index = cache->Lookup(descriptors, elements_transition_sentinel_name);
     if (index == DescriptorLookupCache::kAbsent) {
-      index = descriptors->Search(external_array_sentinel_name);
+      index = descriptors->Search(elements_transition_sentinel_name);
       cache->Update(descriptors,
-                    external_array_sentinel_name,
+                    elements_transition_sentinel_name,
                     index);
     }
 
@@ -1903,8 +2026,8 @@
     // return it.
     if (index != DescriptorArray::kNotFound) {
       PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
-      if (details.type() == EXTERNAL_ARRAY_TRANSITION &&
-          details.array_type() == array_type) {
+      if (details.type() == ELEMENTS_TRANSITION &&
+          details.elements_kind() == elements_kind) {
         return descriptors->GetValue(index);
       } else {
         safe_to_add_transition = false;
@@ -1912,29 +2035,29 @@
     }
   }
 
-  // No transition to an existing external array map. Make a new one.
+  // No transition to an existing map for the given ElementsKind. Make a new
+  // one.
   Object* obj;
   { MaybeObject* maybe_map = CopyDropTransitions();
     if (!maybe_map->ToObject(&obj)) return maybe_map;
   }
   Map* new_map = Map::cast(obj);
 
-  new_map->set_has_fast_elements(false);
-  new_map->set_has_external_array_elements(true);
+  new_map->set_elements_kind(elements_kind);
   GetIsolate()->counters()->map_to_external_array_elements()->Increment();
 
   // Only remember the map transition if the object's map is NOT equal to the
   // global object_function's map and there is not an already existing
-  // non-matching external array transition.
+  // non-matching element transition.
   bool allow_map_transition =
       safe_to_add_transition &&
       (GetIsolate()->context()->global_context()->object_function()->map() !=
        map());
   if (allow_map_transition) {
     // Allocate new instance descriptors for the old map with map transition.
-    ExternalArrayTransitionDescriptor desc(external_array_sentinel_name,
-                                           Map::cast(new_map),
-                                           array_type);
+    ElementsTransitionDescriptor desc(elements_transition_sentinel_name,
+                                      Map::cast(new_map),
+                                      elements_kind);
     Object* new_descriptors;
     MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
         &desc,
@@ -2020,10 +2143,12 @@
 
 
 // We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
-                                                        String* name,
-                                                        Object* value,
-                                                        bool check_prototype) {
+MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
+    LookupResult* result,
+    String* name,
+    Object* value,
+    bool check_prototype,
+    StrictModeFlag strict_mode) {
   if (check_prototype && !result->IsProperty()) {
     LookupCallbackSetterInPrototypes(name, result);
   }
@@ -2039,7 +2164,8 @@
               return SetPropertyWithCallback(result->GetCallbackObject(),
                                              name,
                                              value,
-                                             result->holder());
+                                             result->holder(),
+                                             strict_mode);
             }
           }
           break;
@@ -2050,8 +2176,11 @@
           LookupResult r;
           LookupRealNamedProperty(name, &r);
           if (r.IsProperty()) {
-            return SetPropertyWithFailedAccessCheck(&r, name, value,
-                                                    check_prototype);
+            return SetPropertyWithFailedAccessCheck(&r,
+                                                    name,
+                                                    value,
+                                                    check_prototype,
+                                                    strict_mode);
           }
           break;
         }
@@ -2062,19 +2191,184 @@
     }
   }
 
-  HandleScope scope;
-  Handle<Object> value_handle(value);
   Heap* heap = GetHeap();
+  HandleScope scope(heap->isolate());
+  Handle<Object> value_handle(value);
   heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
   return *value_handle;
 }
 
 
-MaybeObject* JSObject::SetProperty(LookupResult* result,
-                                   String* name,
-                                   Object* value,
-                                   PropertyAttributes attributes,
-                                   StrictModeFlag strict_mode) {
+MaybeObject* JSReceiver::SetProperty(LookupResult* result,
+                                     String* key,
+                                     Object* value,
+                                     PropertyAttributes attributes,
+                                     StrictModeFlag strict_mode) {
+  if (result->IsFound() && result->type() == HANDLER) {
+    return JSProxy::cast(this)->SetPropertyWithHandler(
+        key, value, attributes, strict_mode);
+  } else {
+    return JSObject::cast(this)->SetPropertyForResult(
+        result, key, value, attributes, strict_mode);
+  }
+}
+
+
+bool JSProxy::HasPropertyWithHandler(String* name_raw) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<Object> receiver(this);
+  Handle<Object> name(name_raw);
+  Handle<Object> handler(this->handler());
+
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return false;
+  if (trap->IsUndefined()) {
+    trap = isolate->derived_has_trap();
+  }
+
+  // Call trap function.
+  Object** args[] = { name.location() };
+  bool has_exception;
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return false;
+
+  return result->ToBoolean()->IsTrue();
+}
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
+    String* name_raw,
+    Object* value_raw,
+    PropertyAttributes attributes,
+    StrictModeFlag strict_mode) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<Object> receiver(this);
+  Handle<Object> name(name_raw);
+  Handle<Object> value(value_raw);
+  Handle<Object> handler(this->handler());
+
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  if (trap->IsUndefined()) {
+    trap = isolate->derived_set_trap();
+  }
+
+  // Call trap function.
+  Object** args[] = {
+      receiver.location(), name.location(), value.location()
+  };
+  bool has_exception;
+  Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
+
+  return *value;
+}
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
+    String* name_raw, DeleteMode mode) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<Object> receiver(this);
+  Handle<Object> name(name_raw);
+  Handle<Object> handler(this->handler());
+
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  if (trap->IsUndefined()) {
+    Handle<Object> args[] = { handler, trap_name };
+    Handle<Object> error = isolate->factory()->NewTypeError(
+        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+    isolate->Throw(*error);
+    return Failure::Exception();
+  }
+
+  // Call trap function.
+  Object** args[] = { name.location() };
+  bool has_exception;
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
+
+  Object* bool_result = result->ToBoolean();
+  if (mode == STRICT_DELETION &&
+      bool_result == isolate->heap()->false_value()) {
+    Handle<Object> args[] = { handler, trap_name };
+    Handle<Object> error = isolate->factory()->NewTypeError(
+        "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
+    isolate->Throw(*error);
+    return Failure::Exception();
+  }
+  return bool_result;
+}
+
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+    JSReceiver* receiver_raw,
+    String* name_raw,
+    bool* has_exception) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<JSReceiver> receiver(receiver_raw);
+  Handle<Object> name(name_raw);
+  Handle<Object> handler(this->handler());
+
+  // Extract trap function.
+  Handle<String> trap_name =
+      isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return NONE;
+  if (trap->IsUndefined()) {
+    Handle<Object> args[] = { handler, trap_name };
+    Handle<Object> error = isolate->factory()->NewTypeError(
+        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+    isolate->Throw(*error);
+    *has_exception = true;
+    return NONE;
+  }
+
+  // Call trap function.
+  Object** args[] = { name.location() };
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
+  if (*has_exception) return NONE;
+
+  // TODO(rossberg): convert result to PropertyAttributes
+  USE(result);
+  return NONE;
+}
+
+
+void JSProxy::Fix() {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<JSProxy> self(this);
+
+  if (IsJSFunctionProxy()) {
+    isolate->factory()->BecomeJSFunction(self);
+    // Code will be set on the JavaScript side.
+  } else {
+    isolate->factory()->BecomeJSObject(self);
+  }
+  ASSERT(self->IsJSObject());
+}
+
+
+
+MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
+                                            String* name,
+                                            Object* value,
+                                            PropertyAttributes attributes,
+                                            StrictModeFlag strict_mode) {
   Heap* heap = GetHeap();
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
@@ -2095,7 +2389,11 @@
   // Check access rights if needed.
   if (IsAccessCheckNeeded()
       && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    return SetPropertyWithFailedAccessCheck(result, name, value, true);
+    return SetPropertyWithFailedAccessCheck(result,
+                                            name,
+                                            value,
+                                            true,
+                                            strict_mode);
   }
 
   if (IsJSGlobalProxy()) {
@@ -2115,7 +2413,8 @@
       return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
                                      name,
                                      value,
-                                     accessor_result.holder());
+                                     accessor_result.holder(),
+                                     strict_mode);
     }
   }
   if (!result->IsFound()) {
@@ -2124,7 +2423,7 @@
   }
   if (result->IsReadOnly() && result->IsProperty()) {
     if (strict_mode == kStrictMode) {
-      HandleScope scope;
+      HandleScope scope(heap->isolate());
       Handle<String> key(name);
       Handle<Object> holder(this);
       Handle<Object> args[2] = { key, holder };
@@ -2159,7 +2458,8 @@
       return SetPropertyWithCallback(result->GetCallbackObject(),
                                      name,
                                      value,
-                                     result->holder());
+                                     result->holder(),
+                                     strict_mode);
     case INTERCEPTOR:
       return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
     case CONSTANT_TRANSITION: {
@@ -2182,7 +2482,7 @@
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
     }
     case NULL_DESCRIPTOR:
-    case EXTERNAL_ARRAY_TRANSITION:
+    case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
     default:
       UNREACHABLE();
@@ -2198,6 +2498,9 @@
 // callback setter removed.  The two lines looking up the LookupResult
 // result are also added.  If one of the functions is changed, the other
 // should be.
+// Note that this method cannot be used to set the prototype of a function
+// because ConvertDescriptorToField(), which is called in "case CALLBACKS:",
+// doesn't handle function prototypes correctly.
 MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
     String* name,
     Object* value,
@@ -2212,7 +2515,11 @@
   if (IsAccessCheckNeeded()) {
     Heap* heap = GetHeap();
     if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-      return SetPropertyWithFailedAccessCheck(&result, name, value, false);
+      return SetPropertyWithFailedAccessCheck(&result,
+                                              name,
+                                              value,
+                                              false,
+                                              kNonStrictMode);
     }
   }
 
@@ -2263,7 +2570,7 @@
       // if the value is a function.
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
     case NULL_DESCRIPTOR:
-    case EXTERNAL_ARRAY_TRANSITION:
+    case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
     default:
       UNREACHABLE();
@@ -2345,12 +2652,13 @@
 }
 
 
-PropertyAttributes JSObject::GetPropertyAttributeWithReceiver(
-      JSObject* receiver,
+PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
+      JSReceiver* receiver,
       String* key) {
   uint32_t index = 0;
-  if (key->AsArrayIndex(&index)) {
-    if (HasElementWithReceiver(receiver, index)) return NONE;
+  if (IsJSObject() && key->AsArrayIndex(&index)) {
+    if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
+      return NONE;
     return ABSENT;
   }
   // Named property.
@@ -2360,18 +2668,17 @@
 }
 
 
-PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
-                                                  LookupResult* result,
-                                                  String* name,
-                                                  bool continue_search) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
+                                                    LookupResult* result,
+                                                    String* name,
+                                                    bool continue_search) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
+    JSObject* this_obj = JSObject::cast(this);
     Heap* heap = GetHeap();
-    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
-      return GetPropertyAttributeWithFailedAccessCheck(receiver,
-                                                       result,
-                                                       name,
-                                                       continue_search);
+    if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
+      return this_obj->GetPropertyAttributeWithFailedAccessCheck(
+          receiver, result, name, continue_search);
     }
   }
   if (result->IsProperty()) {
@@ -2381,9 +2688,15 @@
       case CONSTANT_FUNCTION:
       case CALLBACKS:
         return result->GetAttributes();
+      case HANDLER: {
+        // TODO(rossberg): propagate exceptions properly.
+        bool has_exception = false;
+        return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
+            receiver, name, &has_exception);
+      }
       case INTERCEPTOR:
-        return result->holder()->
-          GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
+        return result->holder()->GetPropertyAttributeWithInterceptor(
+            JSObject::cast(receiver), name, continue_search);
       default:
         UNREACHABLE();
     }
@@ -2392,11 +2705,11 @@
 }
 
 
-PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
+PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
   // Check whether the name is an array index.
   uint32_t index = 0;
-  if (name->AsArrayIndex(&index)) {
-    if (HasLocalElement(index)) return NONE;
+  if (IsJSObject() && name->AsArrayIndex(&index)) {
+    if (JSObject::cast(this)->HasLocalElement(index)) return NONE;
     return ABSENT;
   }
   // Named property.
@@ -2410,10 +2723,12 @@
                                      PropertyNormalizationMode mode) {
   Isolate* isolate = obj->GetIsolate();
   Map* fast = obj->map();
-  int index = Hash(fast) % kEntries;
+  int index = fast->Hash() % kEntries;
   Object* result = get(index);
-  if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+  if (result->IsMap() &&
+      Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
 #ifdef DEBUG
+    Map::cast(result)->SharedMapVerify();
     if (FLAG_enable_slow_asserts) {
       // The cached map should match newly created normalized map bit-by-bit.
       Object* fresh;
@@ -2449,42 +2764,6 @@
 }
 
 
-int NormalizedMapCache::Hash(Map* fast) {
-  // For performance reasons we only hash the 3 most variable fields of a map:
-  // constructor, prototype and bit_field2.
-
-  // Shift away the tag.
-  int hash = (static_cast<uint32_t>(
-        reinterpret_cast<uintptr_t>(fast->constructor())) >> 2);
-
-  // XOR-ing the prototype and constructor directly yields too many zero bits
-  // when the two pointers are close (which is fairly common).
-  // To avoid this we shift the prototype 4 bits relatively to the constructor.
-  hash ^= (static_cast<uint32_t>(
-        reinterpret_cast<uintptr_t>(fast->prototype())) << 2);
-
-  return hash ^ (hash >> 16) ^ fast->bit_field2();
-}
-
-
-bool NormalizedMapCache::CheckHit(Map* slow,
-                                  Map* fast,
-                                  PropertyNormalizationMode mode) {
-#ifdef DEBUG
-  slow->SharedMapVerify();
-#endif
-  return
-    slow->constructor() == fast->constructor() &&
-    slow->prototype() == fast->prototype() &&
-    slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
-                                    0 :
-                                    fast->inobject_properties()) &&
-    slow->instance_type() == fast->instance_type() &&
-    slow->bit_field() == fast->bit_field() &&
-    (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
-}
-
-
 MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
   if (map()->is_shared()) {
     // Fast case maps are never marked as shared.
@@ -2572,6 +2851,7 @@
       case CONSTANT_TRANSITION:
       case NULL_DESCRIPTOR:
       case INTERCEPTOR:
+      case ELEMENTS_TRANSITION:
         break;
       default:
         UNREACHABLE();
@@ -2602,7 +2882,7 @@
                                      instance_size_delta);
 
   set_map(new_map);
-  new_map->set_instance_descriptors(current_heap->empty_descriptor_array());
+  new_map->clear_instance_descriptors();
 
   set_properties(dictionary);
 
@@ -2628,48 +2908,79 @@
 
 MaybeObject* JSObject::NormalizeElements() {
   ASSERT(!HasExternalArrayElements());
-  if (HasDictionaryElements()) return this;
-  Map* old_map = map();
-  ASSERT(old_map->has_fast_elements());
 
-  Object* obj;
-  { MaybeObject* maybe_obj = old_map->GetSlowElementsMap();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  // Find the backing store.
+  FixedArrayBase* array = FixedArrayBase::cast(elements());
+  Map* old_map = array->map();
+  bool is_arguments =
+      (old_map == old_map->heap()->non_strict_arguments_elements_map());
+  if (is_arguments) {
+    array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
   }
-  Map* new_map = Map::cast(obj);
+  if (array->IsDictionary()) return array;
 
-  // Get number of entries.
-  FixedArray* array = FixedArray::cast(elements());
-
-  // Compute the effective length.
-  int length = IsJSArray() ?
-               Smi::cast(JSArray::cast(this)->length())->value() :
-               array->length();
-  { MaybeObject* maybe_obj = NumberDictionary::Allocate(length);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  ASSERT(HasFastElements() ||
+         HasFastDoubleElements() ||
+         HasFastArgumentsElements());
+  // Compute the effective length and allocate a new backing store.
+  int length = IsJSArray()
+      ? Smi::cast(JSArray::cast(this)->length())->value()
+      : array->length();
+  int old_capacity = 0;
+  int used_elements = 0;
+  GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+  SeededNumberDictionary* dictionary = NULL;
+  { Object* object;
+    MaybeObject* maybe = SeededNumberDictionary::Allocate(used_elements);
+    if (!maybe->ToObject(&object)) return maybe;
+    dictionary = SeededNumberDictionary::cast(object);
   }
-  NumberDictionary* dictionary = NumberDictionary::cast(obj);
-  // Copy entries.
+
+  // Copy the elements to the new backing store.
+  bool has_double_elements = array->IsFixedDoubleArray();
   for (int i = 0; i < length; i++) {
-    Object* value = array->get(i);
-    if (!value->IsTheHole()) {
-      PropertyDetails details = PropertyDetails(NONE, NORMAL);
-      Object* result;
-      { MaybeObject* maybe_result =
-            dictionary->AddNumberEntry(i, array->get(i), details);
-        if (!maybe_result->ToObject(&result)) return maybe_result;
+    Object* value = NULL;
+    if (has_double_elements) {
+      FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+      if (double_array->is_the_hole(i)) {
+        value = GetIsolate()->heap()->the_hole_value();
+      } else {
+        // Objects must be allocated in the old object space, since the
+        // overall number of HeapNumbers needed for the conversion might
+        // exceed the capacity of new space, and we would fail repeatedly
+        // trying to convert the FixedDoubleArray.
+        MaybeObject* maybe_value_object =
+            GetHeap()->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
+        if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
       }
-      dictionary = NumberDictionary::cast(result);
+    } else {
+      ASSERT(old_map->has_fast_elements());
+      value = FixedArray::cast(array)->get(i);
+    }
+    PropertyDetails details = PropertyDetails(NONE, NORMAL);
+    if (!value->IsTheHole()) {
+      Object* result;
+      MaybeObject* maybe_result =
+          dictionary->AddNumberEntry(i, value, details);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+      dictionary = SeededNumberDictionary::cast(result);
     }
   }
-  // Switch to using the dictionary as the backing storage for
-  // elements. Set the new map first to satify the elements type
-  // assert in set_elements().
-  set_map(new_map);
-  set_elements(dictionary);
 
-  new_map->heap()->isolate()->counters()->elements_to_dictionary()->
-      Increment();
+  // Switch to using the dictionary as the backing storage for elements.
+  if (is_arguments) {
+    FixedArray::cast(elements())->set(1, dictionary);
+  } else {
+    // Set the new map first to satisfy the elements type assert in
+    // set_elements().
+    Object* new_map;
+    MaybeObject* maybe = map()->GetSlowElementsMap();
+    if (!maybe->ToObject(&new_map)) return maybe;
+    set_map(Map::cast(new_map));
+    set_elements(dictionary);
+  }
+
+  old_map->isolate()->counters()->elements_to_dictionary()->Increment();
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
@@ -2678,7 +2989,100 @@
   }
 #endif
 
-  return this;
+  ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+  return dictionary;
+}
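+// Note: NormalizeElements() now returns the resulting SeededNumberDictionary
+// rather than the receiver, and for non-strict arguments objects it replaces
+// the backing store held in slot 1 of the parameter map instead of swapping
+// the object's map and elements pointer.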
+
+
+MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
+  Isolate* isolate = GetIsolate();
+  Heap* heap = isolate->heap();
+  Object* holder = BypassGlobalProxy();
+  if (holder->IsUndefined()) return heap->undefined_value();
+  JSObject* obj = JSObject::cast(holder);
+  if (obj->HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbol's hash code is zero (and no other string has hash
+    // code zero), it will always occupy the first entry if present.
+    DescriptorArray* descriptors = obj->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == heap->hidden_symbol()) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
+    }
+  }
+
+  // Only attempt to find the hidden properties in the local object and not
+  // in the prototype chain.
+  if (!obj->HasHiddenPropertiesObject()) {
+    // Hidden properties object not found. Allocate a new hidden properties
+    // object if requested. Otherwise return the undefined value.
+    if (flag == ALLOW_CREATION) {
+      Object* hidden_obj;
+      { MaybeObject* maybe_obj = heap->AllocateJSObject(
+            isolate->context()->global_context()->object_function());
+        if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
+      }
+      // Don't allow leakage of the hidden object through accessors
+      // on Object.prototype.
+      {
+        MaybeObject* maybe_obj =
+            JSObject::cast(hidden_obj)->SetPrototype(heap->null_value(), false);
+        if (maybe_obj->IsFailure()) return maybe_obj;
+      }
+      return obj->SetHiddenPropertiesObject(hidden_obj);
+    } else {
+      return heap->undefined_value();
+    }
+  }
+  return obj->GetHiddenPropertiesObject();
+}
+
+
+MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
+  Isolate* isolate = GetIsolate();
+  Object* hidden_props_obj;
+  { MaybeObject* maybe_obj = GetHiddenProperties(flag);
+    if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
+  }
+  if (!hidden_props_obj->IsJSObject()) {
+    // We failed to create hidden properties.  That's a detached
+    // global proxy.
+    ASSERT(hidden_props_obj->IsUndefined());
+    return Smi::FromInt(0);
+  }
+  JSObject* hidden_props = JSObject::cast(hidden_props_obj);
+  String* hash_symbol = isolate->heap()->identity_hash_symbol();
+  {
+    // Note that HasLocalProperty() can cause a GC in the general case in the
+    // presence of interceptors.
+    AssertNoAllocation no_alloc;
+    if (hidden_props->HasLocalProperty(hash_symbol)) {
+      MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
+      return Smi::cast(hash->ToObjectChecked());
+    }
+  }
+
+  int hash_value;
+  int attempts = 0;
+  do {
+    // Generate a random 32-bit hash value but limit range to fit
+    // within a smi.
+    hash_value = V8::Random(isolate) & Smi::kMaxValue;
+    attempts++;
+  } while (hash_value == 0 && attempts < 30);
+  hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
+
+  Smi* hash = Smi::FromInt(hash_value);
+  { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
+        hash_symbol,
+        hash,
+        static_cast<PropertyAttributes>(None));
+    if (result->IsFailure()) return result;
+  }
+  return hash;
 }
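+// Identity hash sketch: the value is a random number clamped to the Smi range
+// (V8::Random(isolate) & Smi::kMaxValue, retried a bounded number of times to
+// avoid 0) and is memoized on the hidden properties object under
+// identity_hash_symbol, so subsequent calls return the same Smi.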
 
 
@@ -2731,39 +3135,6 @@
 }
 
 
-MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
-                                                    DeleteMode mode) {
-  ASSERT(!HasExternalArrayElements());
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      Object* obj;
-      { MaybeObject* maybe_obj = EnsureWritableFastElements();
-        if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-      }
-      uint32_t length = IsJSArray() ?
-      static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
-      static_cast<uint32_t>(FixedArray::cast(elements())->length());
-      if (index < length) {
-        FixedArray::cast(elements())->set_the_hole(index);
-      }
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      NumberDictionary* dictionary = element_dictionary();
-      int entry = dictionary->FindEntry(index);
-      if (entry != NumberDictionary::kNotFound) {
-        return dictionary->DeleteProperty(entry, mode);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  return GetHeap()->true_value();
-}
-
-
 MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
   Isolate* isolate = GetIsolate();
   Heap* heap = isolate->heap();
@@ -2791,8 +3162,10 @@
     ASSERT(result->IsBoolean());
     return *v8::Utils::OpenHandle(*result);
   }
-  MaybeObject* raw_result =
-      this_handle->DeleteElementPostInterceptor(index, NORMAL_DELETION);
+  MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
+      *this_handle,
+      index,
+      NORMAL_DELETION);
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return raw_result;
 }
@@ -2816,61 +3189,22 @@
 
   if (HasIndexedInterceptor()) {
     // Skip interceptor if forcing deletion.
-    if (mode == FORCE_DELETION) {
-      return DeleteElementPostInterceptor(index, mode);
+    if (mode != FORCE_DELETION) {
+      return DeleteElementWithInterceptor(index);
     }
-    return DeleteElementWithInterceptor(index);
+    mode = JSReceiver::FORCE_DELETION;
   }
 
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      Object* obj;
-      { MaybeObject* maybe_obj = EnsureWritableFastElements();
-        if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-      }
-      uint32_t length = IsJSArray() ?
-      static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
-      static_cast<uint32_t>(FixedArray::cast(elements())->length());
-      if (index < length) {
-        FixedArray::cast(elements())->set_the_hole(index);
-      }
-      break;
-    }
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-      // Pixel and external array elements cannot be deleted. Just
-      // silently ignore here.
-      break;
-    case DICTIONARY_ELEMENTS: {
-      NumberDictionary* dictionary = element_dictionary();
-      int entry = dictionary->FindEntry(index);
-      if (entry != NumberDictionary::kNotFound) {
-        Object* result = dictionary->DeleteProperty(entry, mode);
-        if (mode == STRICT_DELETION && result ==
-            isolate->heap()->false_value()) {
-          // In strict mode, deleting a non-configurable property throws
-          // exception. dictionary->DeleteProperty will return false_value()
-          // if a non-configurable property is being deleted.
-          HandleScope scope;
-          Handle<Object> i = isolate->factory()->NewNumberFromUint(index);
-          Handle<Object> args[2] = { i, Handle<Object>(this) };
-          return isolate->Throw(*isolate->factory()->NewTypeError(
-              "strict_delete_property", HandleVector(args, 2)));
-        }
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
+  return GetElementsAccessor()->Delete(this, index, mode);
+}
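+// The per-elements-kind switch that used to live here (fast, external array
+// and dictionary cases) is now centralized in ElementsAccessor::Delete(),
+// selected via GetElementsAccessor() for the receiver's elements kind.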
+
+
+MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+  } else {
+    return JSObject::cast(this)->DeleteProperty(name, mode);
   }
-  return isolate->heap()->true_value();
 }
 
 
@@ -2931,6 +3265,27 @@
 }
 
 
+bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
+                                            ElementsKind kind,
+                                            Object* object) {
+  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+  if (kind == FAST_ELEMENTS) {
+    int length = IsJSArray()
+        ? Smi::cast(JSArray::cast(this)->length())->value()
+        : elements->length();
+    for (int i = 0; i < length; ++i) {
+      Object* element = elements->get(i);
+      if (!element->IsTheHole() && element == object) return true;
+    }
+  } else {
+    Object* key =
+        SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
+    if (!key->IsUndefined()) return true;
+  }
+  return false;
+}
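+// Helper for ReferencesObject() below: FAST_ELEMENTS backing stores are
+// scanned linearly up to the effective length, while DICTIONARY_ELEMENTS use
+// SlowReverseLookup() on the seeded number dictionary.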
+
+
 // Check whether this object references another object.
 bool JSObject::ReferencesObject(Object* obj) {
   Map* map_of_this = map();
@@ -2954,7 +3309,8 @@
   }
 
   // Check if the object is among the indexed properties.
-  switch (GetElementsKind()) {
+  ElementsKind kind = GetElementsKind();
+  switch (kind) {
     case EXTERNAL_PIXEL_ELEMENTS:
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -2963,31 +3319,31 @@
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
     case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
       // Raw pixels and external arrays do not reference other
       // objects.
       break;
-    case FAST_ELEMENTS: {
-      int length = IsJSArray() ?
-          Smi::cast(JSArray::cast(this)->length())->value() :
-          FixedArray::cast(elements())->length();
-      for (int i = 0; i < length; i++) {
-        Object* element = FixedArray::cast(elements())->get(i);
-        if (!element->IsTheHole() && element == obj) {
-          return true;
-        }
-      }
-      break;
-    }
+    case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS: {
-      key = element_dictionary()->SlowReverseLookup(obj);
-      if (!key->IsUndefined()) {
-        return true;
-      }
+      FixedArray* elements = FixedArray::cast(this->elements());
+      if (ReferencesObjectFromElements(elements, kind, obj)) return true;
       break;
     }
-    default:
-      UNREACHABLE();
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      // Check the mapped parameters.
+      int length = parameter_map->length();
+      for (int i = 2; i < length; ++i) {
+        Object* value = parameter_map->get(i);
+        if (!value->IsTheHole() && value == obj) return true;
+      }
+      // Check the arguments.
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+      if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
       break;
+    }
   }
 
   // For functions check the context.
@@ -3022,9 +3378,9 @@
       }
     }
 
-    // Check the context extension if any.
-    if (context->has_extension()) {
-      return context->extension()->ReferencesObject(obj);
+    // Check the context extension (if any), provided it can hold references.
+    if (context->has_extension() && !context->IsCatchContext()) {
+      return JSObject::cast(context->extension())->ReferencesObject(obj);
     }
   }
 
@@ -3050,24 +3406,34 @@
     return JSObject::cast(proto)->PreventExtensions();
   }
 
-  // If there are fast elements we normalize.
-  if (HasFastElements()) {
-    Object* ok;
-    { MaybeObject* maybe_ok = NormalizeElements();
-      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
-    }
+  // It is not possible to prevent extensions on objects with external array
+  // elements.
+  if (HasExternalArrayElements()) {
+    HandleScope scope(isolate);
+    Handle<Object> object(this);
+    Handle<Object> error =
+        isolate->factory()->NewTypeError(
+            "cant_prevent_ext_external_array_elements",
+            HandleVector(&object, 1));
+    return isolate->Throw(*error);
   }
+
+  // If there are fast elements we normalize.
+  SeededNumberDictionary* dictionary = NULL;
+  { MaybeObject* maybe = NormalizeElements();
+    if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
+  }
+  ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
   // Make sure that we never go back to fast case.
-  element_dictionary()->set_requires_slow_elements();
+  dictionary->set_requires_slow_elements();
 
   // Do a map transition, other objects with this map may still
   // be extensible.
-  Object* new_map;
-  { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
-    if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+  Map* new_map;
+  { MaybeObject* maybe = map()->CopyDropTransitions();
+    if (!maybe->To<Map>(&new_map)) return maybe;
   }
-  Map::cast(new_map)->set_is_extensible(false);
-  set_map(Map::cast(new_map));
+  new_map->set_is_extensible(false);
+  set_map(new_map);
   ASSERT(!map()->is_extensible());
   return new_map;
 }
@@ -3144,6 +3510,15 @@
 }
 
 
+void JSReceiver::LocalLookup(String* name, LookupResult* result) {
+  if (IsJSProxy()) {
+    result->HandlerResult();
+  } else {
+    JSObject::cast(this)->LocalLookup(name, result);
+  }
+}
+
+
 void JSObject::LocalLookup(String* name, LookupResult* result) {
   ASSERT(name->IsString());
 
@@ -3163,8 +3538,7 @@
   }
 
   // Check __proto__ before interceptor.
-  if (name->Equals(heap->Proto_symbol()) &&
-      !IsJSContextExtensionObject()) {
+  if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
     result->ConstantResult(this);
     return;
   }
@@ -3179,7 +3553,7 @@
 }
 
 
-void JSObject::Lookup(String* name, LookupResult* result) {
+void JSReceiver::Lookup(String* name, LookupResult* result) {
   // Ecma-262 3rd 8.6.2.4
   Heap* heap = GetHeap();
   for (Object* current = this;
@@ -3205,6 +3579,24 @@
 }
 
 
+// Search for a getter or setter in an elements dictionary.  Returns either
+// undefined if the element is read-only, or the getter/setter pair (fixed
+// array) if there is an existing one, or the hole value if the element does
+// not exist or is a normal non-getter/setter data element.
+static Object* FindGetterSetterInDictionary(SeededNumberDictionary* dictionary,
+                                            uint32_t index,
+                                            Heap* heap) {
+  int entry = dictionary->FindEntry(index);
+  if (entry != SeededNumberDictionary::kNotFound) {
+    Object* result = dictionary->ValueAt(entry);
+    PropertyDetails details = dictionary->DetailsAt(entry);
+    if (details.IsReadOnly()) return heap->undefined_value();
+    if (details.type() == CALLBACKS && result->IsFixedArray()) return result;
+  }
+  return heap->the_hole_value();
+}
+
+
 MaybeObject* JSObject::DefineGetterSetter(String* name,
                                           PropertyAttributes attributes) {
   Heap* heap = GetHeap();
@@ -3225,6 +3617,7 @@
   if (is_element) {
     switch (GetElementsKind()) {
       case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
         break;
       case EXTERNAL_PIXEL_ELEMENTS:
       case EXTERNAL_BYTE_ELEMENTS:
@@ -3234,29 +3627,36 @@
       case EXTERNAL_INT_ELEMENTS:
       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
         // Ignore getters and setters on pixel and external array
         // elements.
         return heap->undefined_value();
       case DICTIONARY_ELEMENTS: {
-        // Lookup the index.
-        NumberDictionary* dictionary = element_dictionary();
-        int entry = dictionary->FindEntry(index);
-        if (entry != NumberDictionary::kNotFound) {
-          Object* result = dictionary->ValueAt(entry);
-          PropertyDetails details = dictionary->DetailsAt(entry);
-          if (details.IsReadOnly()) return heap->undefined_value();
-          if (details.type() == CALLBACKS) {
-            if (result->IsFixedArray()) {
-              return result;
-            }
-            // Otherwise allow to override it.
+        Object* probe =
+            FindGetterSetterInDictionary(element_dictionary(), index, heap);
+        if (!probe->IsTheHole()) return probe;
+        // Otherwise allow to override it.
+        break;
+      }
+      case NON_STRICT_ARGUMENTS_ELEMENTS: {
+        // Ascertain whether we have read-only properties or an existing
+        // getter/setter pair in an arguments elements dictionary backing
+        // store.
+        FixedArray* parameter_map = FixedArray::cast(elements());
+        uint32_t length = parameter_map->length();
+        Object* probe =
+            index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+        if (probe == NULL || probe->IsTheHole()) {
+          FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+          if (arguments->IsDictionary()) {
+            SeededNumberDictionary* dictionary =
+                SeededNumberDictionary::cast(arguments);
+            probe = FindGetterSetterInDictionary(dictionary, index, heap);
+            if (!probe->IsTheHole()) return probe;
           }
         }
         break;
       }
-      default:
-        UNREACHABLE();
-        break;
     }
   } else {
     // Lookup the name.
@@ -3319,22 +3719,38 @@
   PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
 
   // Normalize elements to make this operation simple.
-  Object* ok;
-  { MaybeObject* maybe_ok = NormalizeElements();
-    if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+  SeededNumberDictionary* dictionary = NULL;
+  { Object* result;
+    MaybeObject* maybe = NormalizeElements();
+    if (!maybe->ToObject(&result)) return maybe;
+    dictionary = SeededNumberDictionary::cast(result);
   }
+  ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
 
   // Update the dictionary with the new CALLBACKS property.
-  Object* dict;
-  { MaybeObject* maybe_dict =
-        element_dictionary()->Set(index, structure, details);
-    if (!maybe_dict->ToObject(&dict)) return maybe_dict;
+  { Object* result;
+    MaybeObject* maybe = dictionary->Set(index, structure, details);
+    if (!maybe->ToObject(&result)) return maybe;
+    dictionary = SeededNumberDictionary::cast(result);
   }
 
-  NumberDictionary* elements = NumberDictionary::cast(dict);
-  elements->set_requires_slow_elements();
-  // Set the potential new dictionary on the object.
-  set_elements(elements);
+  dictionary->set_requires_slow_elements();
+  // Update the dictionary backing store on the object.
+  if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+    // Also delete any parameter alias.
+    //
+    // TODO(kmillikin): when deleting the last parameter alias we could
+    // switch to a direct backing store without the parameter map.  This
+    // would allow GC of the context.
+    FixedArray* parameter_map = FixedArray::cast(elements());
+    uint32_t length = parameter_map->length();
+    if (index < length - 2) {
+      parameter_map->set(index + 2, GetHeap()->the_hole_value());
+    }
+    parameter_map->set(1, dictionary);
+  } else {
+    set_elements(dictionary);
+  }
 
   return structure;
 }
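+// Parameter map layout relied on above for non-strict arguments elements:
+// slot 0 holds the context, slot 1 the arguments backing store (FixedArray or
+// dictionary) and slots 2.. the aliased parameter entries, which is why the
+// alias is cleared at index + 2 and the dictionary is installed at slot 1.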
@@ -3451,6 +3867,7 @@
     // Accessors overwrite previous callbacks (cf. with getters/setters).
     switch (GetElementsKind()) {
       case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
         break;
       case EXTERNAL_PIXEL_ELEMENTS:
       case EXTERNAL_BYTE_ELEMENTS:
@@ -3460,13 +3877,14 @@
       case EXTERNAL_INT_ELEMENTS:
       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
         // Ignore getters and setters on pixel and external array
         // elements.
         return isolate->heap()->undefined_value();
       case DICTIONARY_ELEMENTS:
         break;
-      default:
-        UNREACHABLE();
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNIMPLEMENTED();
         break;
     }
 
@@ -3518,9 +3936,9 @@
          obj = JSObject::cast(obj)->GetPrototype()) {
       JSObject* js_object = JSObject::cast(obj);
       if (js_object->HasDictionaryElements()) {
-        NumberDictionary* dictionary = js_object->element_dictionary();
+        SeededNumberDictionary* dictionary = js_object->element_dictionary();
         int entry = dictionary->FindEntry(index);
-        if (entry != NumberDictionary::kNotFound) {
+        if (entry != SeededNumberDictionary::kNotFound) {
           Object* element = dictionary->ValueAt(entry);
           PropertyDetails details = dictionary->DetailsAt(entry);
           if (details.type() == CALLBACKS) {
@@ -3587,8 +4005,7 @@
   // pointing to the same transition which is bad because the garbage
   // collector relies on being able to reverse pointers from transitions
   // to maps.  If properties need to be retained use CopyDropTransitions.
-  Map::cast(result)->set_instance_descriptors(
-      heap->empty_descriptor_array());
+  Map::cast(result)->clear_instance_descriptors();
   // Please note instance_type and instance_size are set when allocated.
   Map::cast(result)->set_inobject_properties(inobject_properties());
   Map::cast(result)->set_unused_property_fields(unused_property_fields());
@@ -3610,6 +4027,7 @@
   }
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_bit_field3(bit_field3());
   Map::cast(result)->set_is_shared(false);
   Map::cast(result)->ClearCodeCache(heap);
   return result;
@@ -3638,6 +4056,7 @@
 
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_bit_field3(bit_field3());
 
   Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
 
@@ -3716,7 +4135,7 @@
   Object** map_or_index_field = NULL;
   while (current != meta_map) {
     DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
-        *RawField(current, Map::kInstanceDescriptorsOffset));
+        *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset));
     if (!d->IsEmpty()) {
       FixedArray* contents = reinterpret_cast<FixedArray*>(
           d->get(DescriptorArray::kContentArrayIndex));
@@ -3750,7 +4169,7 @@
     Object** proto_map_or_index_field =
         RawField(prototype_transitions, HeapObject::kMapOffset);
     Object* map_or_index = *proto_map_or_index_field;
-    const int start = 2;
+    const int start = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
     int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : start;
     if (i < prototype_transitions->length()) {
       // Found a map in the prototype transition array.  Record progress in
@@ -3760,7 +4179,7 @@
         Map* next = Map::cast(perhaps_map);
         next->set_map(current);
         *proto_map_or_index_field =
-            Smi::FromInt(i + 2);
+            Smi::FromInt(i + kProtoTransitionElementsPerEntry);
         current = next;
         continue;
       }
@@ -3782,8 +4201,6 @@
 
 
 MaybeObject* CodeCache::Update(String* name, Code* code) {
-  ASSERT(code->ic_state() == MONOMORPHIC);
-
   // The number of monomorphic stubs for normal load/store/call IC's can grow to
   // a large number and therefore they need to go into a hash table. They are
   // used to load global properties from cells.
@@ -4004,6 +4421,7 @@
  private:
   String* name_;
   Code::Flags flags_;
+  // TODO(jkummerow): We should be able to get by without this.
   Code* code_;
 };
 
@@ -4023,7 +4441,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  // Don't use this, as the table might have grown.
+  // Don't use |this|, as the table might have grown.
   CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
 
   int entry = cache->FindInsertionEntry(key.Hash());
@@ -4055,105 +4473,196 @@
 }
 
 
-static bool HasKey(FixedArray* array, Object* key) {
-  int len0 = array->length();
-  for (int i = 0; i < len0; i++) {
-    Object* element = array->get(i);
-    if (element->IsSmi() && key->IsSmi() && (element == key)) return true;
-    if (element->IsString() &&
-        key->IsString() && String::cast(element)->Equals(String::cast(key))) {
-      return true;
+MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
+                                          Code::Flags flags,
+                                          Code* code) {
+  // Initialize cache if necessary.
+  if (cache()->IsUndefined()) {
+    Object* result;
+    { MaybeObject* maybe_result =
+          PolymorphicCodeCacheHashTable::Allocate(
+              PolymorphicCodeCacheHashTable::kInitialSize);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
     }
+    set_cache(result);
+  } else {
+    // This entry shouldn't be contained in the cache yet.
+    ASSERT(PolymorphicCodeCacheHashTable::cast(cache())
+               ->Lookup(maps, flags)->IsUndefined());
   }
-  return false;
+  PolymorphicCodeCacheHashTable* hash_table =
+      PolymorphicCodeCacheHashTable::cast(cache());
+  Object* new_cache;
+  { MaybeObject* maybe_new_cache = hash_table->Put(maps, flags, code);
+    if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
+  }
+  set_cache(new_cache);
+  return this;
+}
+
+
+Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
+  if (!cache()->IsUndefined()) {
+    PolymorphicCodeCacheHashTable* hash_table =
+        PolymorphicCodeCacheHashTable::cast(cache());
+    return hash_table->Lookup(maps, flags);
+  } else {
+    return GetHeap()->undefined_value();
+  }
+}
+
+
+// Despite their name, objects of this class are not stored in the actual
+// hash table; instead they're temporarily used for lookups. It is therefore
+// safe to have a weak (non-owning) pointer to a MapList as a member field.
+class PolymorphicCodeCacheHashTableKey : public HashTableKey {
+ public:
+  // Callers must ensure that |maps| outlives the newly constructed object.
+  PolymorphicCodeCacheHashTableKey(MapList* maps, int code_flags)
+      : maps_(maps),
+        code_flags_(code_flags) {}
+
+  bool IsMatch(Object* other) {
+    MapList other_maps(kDefaultListAllocationSize);
+    int other_flags;
+    FromObject(other, &other_flags, &other_maps);
+    if (code_flags_ != other_flags) return false;
+    if (maps_->length() != other_maps.length()) return false;
+    // Compare just the hashes first because it's faster.
+    int this_hash = MapsHashHelper(maps_, code_flags_);
+    int other_hash = MapsHashHelper(&other_maps, other_flags);
+    if (this_hash != other_hash) return false;
+
+    // Full comparison: for each map in maps_, look for an equivalent map in
+    // other_maps. This implementation is slow, but probably good enough for
+    // now because the lists are short (<= 4 elements currently).
+    for (int i = 0; i < maps_->length(); ++i) {
+      bool match_found = false;
+      for (int j = 0; j < other_maps.length(); ++j) {
+        if (maps_->at(i)->EquivalentTo(other_maps.at(j))) {
+          match_found = true;
+          break;
+        }
+      }
+      if (!match_found) return false;
+    }
+    return true;
+  }
+
+  static uint32_t MapsHashHelper(MapList* maps, int code_flags) {
+    uint32_t hash = code_flags;
+    for (int i = 0; i < maps->length(); ++i) {
+      hash ^= maps->at(i)->Hash();
+    }
+    return hash;
+  }
+
+  uint32_t Hash() {
+    return MapsHashHelper(maps_, code_flags_);
+  }
+
+  uint32_t HashForObject(Object* obj) {
+    MapList other_maps(kDefaultListAllocationSize);
+    int other_flags;
+    FromObject(obj, &other_flags, &other_maps);
+    return MapsHashHelper(&other_maps, other_flags);
+  }
+
+  MUST_USE_RESULT MaybeObject* AsObject() {
+    Object* obj;
+    // The maps in |maps_| must be copied to a newly allocated FixedArray,
+    // both because the referenced MapList is short-lived, and because C++
+    // objects can't be stored in the heap anyway.
+    { MaybeObject* maybe_obj =
+        HEAP->AllocateUninitializedFixedArray(maps_->length() + 1);
+      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    }
+    FixedArray* list = FixedArray::cast(obj);
+    list->set(0, Smi::FromInt(code_flags_));
+    for (int i = 0; i < maps_->length(); ++i) {
+      list->set(i + 1, maps_->at(i));
+    }
+    return list;
+  }
+
+ private:
+  static MapList* FromObject(Object* obj, int* code_flags, MapList* maps) {
+    FixedArray* list = FixedArray::cast(obj);
+    maps->Rewind(0);
+    *code_flags = Smi::cast(list->get(0))->value();
+    for (int i = 1; i < list->length(); ++i) {
+      maps->Add(Map::cast(list->get(i)));
+    }
+    return maps;
+  }
+
+  MapList* maps_;  // weak.
+  int code_flags_;
+  static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
+};
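+// Key encoding sketch: AsObject() serializes a key as a FixedArray of the
+// form [code_flags, map_0, ..., map_n-1] and FromObject() reverses this; the
+// hash XORs code_flags with each map's Hash(), so it does not depend on the
+// order of the maps in the list.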
+
+
+Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
+  PolymorphicCodeCacheHashTableKey key(maps, code_flags);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return GetHeap()->undefined_value();
+  return get(EntryToIndex(entry) + 1);
+}
+
+
+MaybeObject* PolymorphicCodeCacheHashTable::Put(MapList* maps,
+                                                int code_flags,
+                                                Code* code) {
+  PolymorphicCodeCacheHashTableKey key(maps, code_flags);
+  Object* obj;
+  { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  PolymorphicCodeCacheHashTable* cache =
+      reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj);
+  int entry = cache->FindInsertionEntry(key.Hash());
+  { MaybeObject* maybe_obj = key.AsObject();
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  cache->set(EntryToIndex(entry), obj);
+  cache->set(EntryToIndex(entry) + 1, code);
+  cache->ElementAdded();
+  return cache;
 }
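+// Usage note: Put() may grow the hash table via EnsureCapacity(), so callers
+// must continue with the returned table; PolymorphicCodeCache::Update() above
+// does so by re-installing the result with set_cache().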
 
 
 MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
-  ASSERT(!array->HasExternalArrayElements());
-  switch (array->GetElementsKind()) {
-    case JSObject::FAST_ELEMENTS:
-      return UnionOfKeys(FixedArray::cast(array->elements()));
-    case JSObject::DICTIONARY_ELEMENTS: {
-      NumberDictionary* dict = array->element_dictionary();
-      int size = dict->NumberOfElements();
-
-      // Allocate a temporary fixed array.
-      Object* object;
-      { MaybeObject* maybe_object = GetHeap()->AllocateFixedArray(size);
-        if (!maybe_object->ToObject(&object)) return maybe_object;
-      }
-      FixedArray* key_array = FixedArray::cast(object);
-
-      int capacity = dict->Capacity();
-      int pos = 0;
-      // Copy the elements from the JSArray to the temporary fixed array.
-      for (int i = 0; i < capacity; i++) {
-        if (dict->IsKey(dict->KeyAt(i))) {
-          key_array->set(pos++, dict->ValueAt(i));
-        }
-      }
-      // Compute the union of this and the temporary fixed array.
-      return UnionOfKeys(key_array);
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  MaybeObject* maybe_result =
+      accessor->AddElementsToFixedArray(array->elements(), this, array, array);
+  FixedArray* result;
+  if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
+#ifdef DEBUG
+  if (FLAG_enable_slow_asserts) {
+    for (int i = 0; i < result->length(); i++) {
+      Object* current = result->get(i);
+      ASSERT(current->IsNumber() || current->IsString());
     }
-    default:
-      UNREACHABLE();
   }
-  UNREACHABLE();
-  return GetHeap()->null_value();  // Failure case needs to "return" a value.
+#endif
+  return result;
 }
 
 
 MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
-  int len0 = length();
+  ElementsAccessor* accessor = ElementsAccessor::ForArray(other);
+  MaybeObject* maybe_result =
+      accessor->AddElementsToFixedArray(other, this, NULL, NULL);
+  FixedArray* result;
+  if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) {
-    for (int i = 0; i < len0; i++) {
-      ASSERT(get(i)->IsString() || get(i)->IsNumber());
+    for (int i = 0; i < result->length(); i++) {
+      Object* current = result->get(i);
+      ASSERT(current->IsNumber() || current->IsString());
     }
   }
 #endif
-  int len1 = other->length();
-  // Optimize if 'other' is empty.
-  // We cannot optimize if 'this' is empty, as other may have holes
-  // or non keys.
-  if (len1 == 0) return this;
-
-  // Compute how many elements are not in this.
-  int extra = 0;
-  for (int y = 0; y < len1; y++) {
-    Object* value = other->get(y);
-    if (!value->IsTheHole() && !HasKey(this, value)) extra++;
-  }
-
-  if (extra == 0) return this;
-
-  // Allocate the result
-  Object* obj;
-  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(len0 + extra);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  // Fill in the content
-  AssertNoAllocation no_gc;
-  FixedArray* result = FixedArray::cast(obj);
-  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-  for (int i = 0; i < len0; i++) {
-    Object* e = get(i);
-    ASSERT(e->IsString() || e->IsNumber());
-    result->set(i, e, mode);
-  }
-  // Fill in the extra keys.
-  int index = 0;
-  for (int y = 0; y < len1; y++) {
-    Object* value = other->get(y);
-    if (!value->IsTheHole() && !HasKey(this, value)) {
-      Object* e = other->get(y);
-      ASSERT(e->IsString() || e->IsNumber());
-      result->set(len0 + index, e, mode);
-      index++;
-    }
-  }
-  ASSERT(extra == index);
   return result;
 }
 
@@ -4218,6 +4727,7 @@
         heap->AllocateFixedArray(number_of_descriptors << 1);
     if (!maybe_array->ToObject(&array)) return maybe_array;
   }
+  result->set(kBitField3StorageIndex, Smi::FromInt(0));
   result->set(kContentArrayIndex, array);
   result->set(kEnumerationIndexIndex,
               Smi::FromInt(PropertyDetails::kInitialIndex));
@@ -4534,65 +5044,55 @@
 }
 
 
-Vector<const char> String::ToAsciiVector() {
-  ASSERT(IsAsciiRepresentation());
-  ASSERT(IsFlat());
-
-  int offset = 0;
+String::FlatContent String::GetFlatContent() {
   int length = this->length();
-  StringRepresentationTag string_tag = StringShape(this).representation_tag();
+  StringShape shape(this);
   String* string = this;
-  if (string_tag == kConsStringTag) {
+  int offset = 0;
+  if (shape.representation_tag() == kConsStringTag) {
     ConsString* cons = ConsString::cast(string);
-    ASSERT(cons->second()->length() == 0);
+    if (cons->second()->length() != 0) {
+      return FlatContent();
+    }
     string = cons->first();
-    string_tag = StringShape(string).representation_tag();
+    shape = StringShape(string);
   }
-  if (string_tag == kSeqStringTag) {
-    SeqAsciiString* seq = SeqAsciiString::cast(string);
-    char* start = seq->GetChars();
-    return Vector<const char>(start + offset, length);
+  if (shape.representation_tag() == kSlicedStringTag) {
+    SlicedString* slice = SlicedString::cast(string);
+    offset = slice->offset();
+    string = slice->parent();
+    shape = StringShape(string);
+    ASSERT(shape.representation_tag() != kConsStringTag &&
+           shape.representation_tag() != kSlicedStringTag);
   }
-  ASSERT(string_tag == kExternalStringTag);
-  ExternalAsciiString* ext = ExternalAsciiString::cast(string);
-  const char* start = ext->resource()->data();
-  return Vector<const char>(start + offset, length);
+  if (shape.encoding_tag() == kAsciiStringTag) {
+    const char* start;
+    if (shape.representation_tag() == kSeqStringTag) {
+      start = SeqAsciiString::cast(string)->GetChars();
+    } else {
+      start = ExternalAsciiString::cast(string)->resource()->data();
+    }
+    return FlatContent(Vector<const char>(start + offset, length));
+  } else {
+    ASSERT(shape.encoding_tag() == kTwoByteStringTag);
+    const uc16* start;
+    if (shape.representation_tag() == kSeqStringTag) {
+      start = SeqTwoByteString::cast(string)->GetChars();
+    } else {
+      start = ExternalTwoByteString::cast(string)->resource()->data();
+    }
+    return FlatContent(Vector<const uc16>(start + offset, length));
+  }
 }
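+// GetFlatContent() subsumes the removed ToAsciiVector()/ToUC16Vector():
+// instead of asserting flatness it returns an empty FlatContent for strings
+// that are not flat (a cons string with a non-empty second part), and it
+// resolves one level of cons and sliced-string indirection before exposing a
+// vector over the underlying sequential or external characters.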
 
 
-Vector<const uc16> String::ToUC16Vector() {
-  ASSERT(IsTwoByteRepresentation());
-  ASSERT(IsFlat());
-
-  int offset = 0;
-  int length = this->length();
-  StringRepresentationTag string_tag = StringShape(this).representation_tag();
-  String* string = this;
-  if (string_tag == kConsStringTag) {
-    ConsString* cons = ConsString::cast(string);
-    ASSERT(cons->second()->length() == 0);
-    string = cons->first();
-    string_tag = StringShape(string).representation_tag();
-  }
-  if (string_tag == kSeqStringTag) {
-    SeqTwoByteString* seq = SeqTwoByteString::cast(string);
-    return Vector<const uc16>(seq->GetChars() + offset, length);
-  }
-  ASSERT(string_tag == kExternalStringTag);
-  ExternalTwoByteString* ext = ExternalTwoByteString::cast(string);
-  const uc16* start =
-      reinterpret_cast<const uc16*>(ext->resource()->data());
-  return Vector<const uc16>(start + offset, length);
-}
-
-
-SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
-                                     RobustnessFlag robust_flag,
-                                     int offset,
-                                     int length,
-                                     int* length_return) {
+SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
+                                          RobustnessFlag robust_flag,
+                                          int offset,
+                                          int length,
+                                          int* length_return) {
   if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
-    return SmartPointer<char>(NULL);
+    return SmartArrayPointer<char>(NULL);
   }
   Heap* heap = GetHeap();
 
@@ -4636,13 +5136,13 @@
     character_position++;
   }
   result[utf8_byte_position] = 0;
-  return SmartPointer<char>(result);
+  return SmartArrayPointer<char>(result);
 }
 
 
-SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
-                                     RobustnessFlag robust_flag,
-                                     int* length_return) {
+SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
+                                          RobustnessFlag robust_flag,
+                                          int* length_return) {
   return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
 }
 
@@ -4653,13 +5153,17 @@
 
 
 const uc16* String::GetTwoByteData(unsigned start) {
-  ASSERT(!IsAsciiRepresentation());
+  ASSERT(!IsAsciiRepresentationUnderneath());
   switch (StringShape(this).representation_tag()) {
     case kSeqStringTag:
       return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
     case kExternalStringTag:
       return ExternalTwoByteString::cast(this)->
         ExternalTwoByteStringGetData(start);
+    case kSlicedStringTag: {
+      SlicedString* slice = SlicedString::cast(this);
+      return slice->parent()->GetTwoByteData(start + slice->offset());
+    }
     case kConsStringTag:
       UNREACHABLE();
       return NULL;
@@ -4669,9 +5173,9 @@
 }
 
 
-SmartPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
+SmartArrayPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
   if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
-    return SmartPointer<uc16>();
+    return SmartArrayPointer<uc16>();
   }
   Heap* heap = GetHeap();
 
@@ -4687,7 +5191,7 @@
     result[i++] = character;
   }
   result[i] = 0;
-  return SmartPointer<uc16>(result);
+  return SmartArrayPointer<uc16>(result);
 }
 
 
@@ -4950,6 +5454,10 @@
                                                      max_chars);
         return rbb->util_buffer;
       }
+    case kSlicedStringTag:
+      return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
+                                                              offset_ptr,
+                                                              max_chars);
     default:
       break;
   }
@@ -4976,8 +5484,7 @@
 
 
 // Archive statics that are thread local.
-char* Relocatable::ArchiveState(char* to) {
-  Isolate* isolate = Isolate::Current();
+char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
   *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
   isolate->set_relocatable_top(NULL);
   return to + ArchiveSpacePerThread();
@@ -4985,8 +5492,7 @@
 
 
 // Restore statics that are thread local.
-char* Relocatable::RestoreState(char* from) {
-  Isolate* isolate = Isolate::Current();
+char* Relocatable::RestoreState(Isolate* isolate, char* from) {
   isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
   return from + ArchiveSpacePerThread();
 }
@@ -5034,11 +5540,13 @@
   if (str_ == NULL) return;
   Handle<String> str(str_);
   ASSERT(str->IsFlat());
-  is_ascii_ = str->IsAsciiRepresentation();
+  String::FlatContent content = str->GetFlatContent();
+  ASSERT(content.IsFlat());
+  is_ascii_ = content.IsAscii();
   if (is_ascii_) {
-    start_ = str->ToAsciiVector().start();
+    start_ = content.ToAsciiVector().start();
   } else {
-    start_ = str->ToUC16Vector().start();
+    start_ = content.ToUC16Vector().start();
   }
 }
 
@@ -5093,6 +5601,11 @@
                                                      max_chars);
        }
        return;
+    case kSlicedStringTag:
+      SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
+                                                                 offset_ptr,
+                                                                 max_chars);
+      return;
     default:
       break;
   }
@@ -5227,6 +5740,31 @@
 }
 
 
+uint16_t SlicedString::SlicedStringGet(int index) {
+  return parent()->Get(offset() + index);
+}
+
+
+const unibrow::byte* SlicedString::SlicedStringReadBlock(
+    ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
+  unsigned offset = this->offset();
+  *offset_ptr += offset;
+  const unibrow::byte* answer = String::ReadBlock(String::cast(parent()),
+                                                  buffer, offset_ptr, chars);
+  *offset_ptr -= offset;
+  return answer;
+}
+
+
+void SlicedString::SlicedStringReadBlockIntoBuffer(
+    ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
+  unsigned offset = this->offset();
+  *offset_ptr += offset;
+  String::ReadBlockIntoBuffer(String::cast(parent()),
+                              buffer, offset_ptr, chars);
+  *offset_ptr -= offset;
+}
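+// Sliced strings delegate reads to their parent: the slice offset is added to
+// *offset_ptr before forwarding to the parent and subtracted again afterwards,
+// so callers keep working with positions relative to the slice.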
+
 template <typename sinkchar>
 void String::WriteToFlat(String* src,
                          sinkchar* sink,
@@ -5294,6 +5832,13 @@
         }
         break;
       }
+      case kAsciiStringTag | kSlicedStringTag:
+      case kTwoByteStringTag | kSlicedStringTag: {
+        SlicedString* slice = SlicedString::cast(source);
+        unsigned offset = slice->offset();
+        WriteToFlat(slice->parent(), sink, from + offset, to + offset);
+        return;
+      }
     }
   }
 }
@@ -5358,12 +5903,13 @@
 static inline bool CompareStringContentsPartial(Isolate* isolate,
                                                 IteratorA* ia,
                                                 String* b) {
-  if (b->IsFlat()) {
-    if (b->IsAsciiRepresentation()) {
-      VectorIterator<char> ib(b->ToAsciiVector());
+  String::FlatContent content = b->GetFlatContent();
+  if (content.IsFlat()) {
+    if (content.IsAscii()) {
+      VectorIterator<char> ib(content.ToAsciiVector());
       return CompareStringContents(ia, &ib);
     } else {
-      VectorIterator<uc16> ib(b->ToUC16Vector());
+      VectorIterator<uc16> ib(content.ToUC16Vector());
       return CompareStringContents(ia, &ib);
     }
   } else {
@@ -5383,6 +5929,20 @@
   // Fast check: if hash code is computed for both strings
   // a fast negative check can be performed.
   if (HasHashCode() && other->HasHashCode()) {
+#ifdef DEBUG
+    if (FLAG_enable_slow_asserts) {
+      if (Hash() != other->Hash()) {
+        bool found_difference = false;
+        for (int i = 0; i < len; i++) {
+          if (Get(i) != other->Get(i)) {
+            found_difference = true;
+            break;
+          }
+        }
+        ASSERT(found_difference);
+      }
+    }
+#endif
     if (Hash() != other->Hash()) return false;
   }
 
@@ -5402,16 +5962,18 @@
   }
 
   Isolate* isolate = GetIsolate();
-  if (lhs->IsFlat()) {
-    if (lhs->IsAsciiRepresentation()) {
-      Vector<const char> vec1 = lhs->ToAsciiVector();
-      if (rhs->IsFlat()) {
-        if (rhs->IsAsciiRepresentation()) {
-          Vector<const char> vec2 = rhs->ToAsciiVector();
+  String::FlatContent lhs_content = lhs->GetFlatContent();
+  String::FlatContent rhs_content = rhs->GetFlatContent();
+  if (lhs_content.IsFlat()) {
+    if (lhs_content.IsAscii()) {
+      Vector<const char> vec1 = lhs_content.ToAsciiVector();
+      if (rhs_content.IsFlat()) {
+        if (rhs_content.IsAscii()) {
+          Vector<const char> vec2 = rhs_content.ToAsciiVector();
           return CompareRawStringContents(vec1, vec2);
         } else {
           VectorIterator<char> buf1(vec1);
-          VectorIterator<uc16> ib(rhs->ToUC16Vector());
+          VectorIterator<uc16> ib(rhs_content.ToUC16Vector());
           return CompareStringContents(&buf1, &ib);
         }
       } else {
@@ -5421,14 +5983,14 @@
             isolate->objects_string_compare_buffer_b());
       }
     } else {
-      Vector<const uc16> vec1 = lhs->ToUC16Vector();
-      if (rhs->IsFlat()) {
-        if (rhs->IsAsciiRepresentation()) {
+      Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
+      if (rhs_content.IsFlat()) {
+        if (rhs_content.IsAscii()) {
           VectorIterator<uc16> buf1(vec1);
-          VectorIterator<char> ib(rhs->ToAsciiVector());
+          VectorIterator<char> ib(rhs_content.ToAsciiVector());
           return CompareStringContents(&buf1, &ib);
         } else {
-          Vector<const uc16> vec2(rhs->ToUC16Vector());
+          Vector<const uc16> vec2(rhs_content.ToUC16Vector());
           return CompareRawStringContents(vec1, vec2);
         }
       } else {
@@ -5481,6 +6043,11 @@
 bool String::IsAsciiEqualTo(Vector<const char> str) {
   int slen = length();
   if (str.length() != slen) return false;
+  FlatContent content = GetFlatContent();
+  if (content.IsAscii()) {
+    return CompareChars(content.ToAsciiVector().start(),
+                        str.start(), slen) == 0;
+  }
   for (int i = 0; i < slen; i++) {
     if (Get(i) != static_cast<uint16_t>(str[i])) return false;
   }
@@ -5491,6 +6058,10 @@
 bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
   int slen = length();
   if (str.length() != slen) return false;
+  FlatContent content = GetFlatContent();
+  if (content.IsTwoByte()) {
+    return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
+  }
   for (int i = 0; i < slen; i++) {
     if (Get(i) != str[i]) return false;
   }
@@ -5507,12 +6078,16 @@
   // Compute the hash code.
   uint32_t field = 0;
   if (StringShape(this).IsSequentialAscii()) {
-    field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(), len);
+    field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(),
+                                 len,
+                                 GetHeap()->HashSeed());
   } else if (StringShape(this).IsSequentialTwoByte()) {
-    field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(), len);
+    field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(),
+                                 len,
+                                 GetHeap()->HashSeed());
   } else {
     StringInputBuffer buffer(this);
-    field = ComputeHashField(&buffer, len);
+    field = ComputeHashField(&buffer, len, GetHeap()->HashSeed());
   }
 
   // Store the hash code in the object.
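
// A minimal sketch of seeded string hashing, motivating the HashSeed()
// argument threaded through above: the seed initializes the running hash so
// the same characters hash differently under different seeds. This is a
// generic one-at-a-time hash, not V8's StringHasher.
#include <cstddef>
#include <cstdint>

uint32_t SeededStringHash(const char* chars, size_t length, uint32_t seed) {
  uint32_t hash = seed;  // the per-heap seed becomes the initial accumulator
  for (size_t i = 0; i < length; ++i) {
    hash += static_cast<uint8_t>(chars[i]);
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}
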
@@ -5603,8 +6178,9 @@
 
 
 uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
-                                  int length) {
-  StringHasher hasher(length);
+                                  int length,
+                                  uint32_t seed) {
+  StringHasher hasher(length, seed);
 
   // Very long strings have a trivial hash that doesn't inspect the
   // string contents.
@@ -5648,7 +6224,7 @@
   DescriptorArray* descriptors = instance_descriptors();
   for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
     if (descriptors->GetType(i) == MAP_TRANSITION ||
-        descriptors->GetType(i) == EXTERNAL_ARRAY_TRANSITION ||
+        descriptors->GetType(i) == ELEMENTS_TRANSITION ||
         descriptors->GetType(i) == CONSTANT_TRANSITION) {
       // Get target.
       Map* target = Map::cast(descriptors->GetValue(i));
@@ -5676,8 +6252,8 @@
   // Live DescriptorArray objects will be marked, so we must use
   // low-level accessors to get and modify their data.
   DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
-      *RawField(this, Map::kInstanceDescriptorsOffset));
-  if (d == heap->raw_unchecked_empty_descriptor_array()) return;
+      *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
+  if (d->IsEmpty()) return;
   Smi* NullDescriptorDetails =
     PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
   FixedArray* contents = reinterpret_cast<FixedArray*>(
@@ -5691,7 +6267,7 @@
     // non-live object.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
     if (details.type() == MAP_TRANSITION ||
-        details.type() == EXTERNAL_ARRAY_TRANSITION ||
+        details.type() == ELEMENTS_TRANSITION ||
         details.type() == CONSTANT_TRANSITION) {
       Map* target = reinterpret_cast<Map*>(contents->get(i));
       ASSERT(target->IsHeapObject());
@@ -5709,6 +6285,40 @@
 }
 
 
+int Map::Hash() {
+  // For performance reasons we only hash the 3 most variable fields of a map:
+  // constructor, prototype and bit_field2.
+
+  // Shift away the tag.
+  int hash = (static_cast<uint32_t>(
+        reinterpret_cast<uintptr_t>(constructor())) >> 2);
+
+  // XOR-ing the prototype and constructor directly yields too many zero bits
+  // when the two pointers are close (which is fairly common).
+  // To avoid this we shift the prototype 4 bits relative to the constructor.
+  hash ^= (static_cast<uint32_t>(
+        reinterpret_cast<uintptr_t>(prototype())) << 2);
+
+  return hash ^ (hash >> 16) ^ bit_field2();
+}
+
+
+bool Map::EquivalentToForNormalization(Map* other,
+                                       PropertyNormalizationMode mode) {
+  return
+    constructor() == other->constructor() &&
+    prototype() == other->prototype() &&
+    inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
+                              0 :
+                              other->inobject_properties()) &&
+    instance_type() == other->instance_type() &&
+    bit_field() == other->bit_field() &&
+    bit_field2() == other->bit_field2() &&
+    (bit_field3() & ~(1 << Map::kIsShared)) ==
+        (other->bit_field3() & ~(1 << Map::kIsShared));
+}
+
+
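
// A minimal sketch of the pointer-mixing trick Map::Hash uses: two pointers
// that tend to be close in memory are shifted in opposite directions before
// being XOR-ed, so their shared high-order bits do not cancel out, and the
// upper half is folded back into the low bits. MixPointers is illustrative,
// not a V8 function.
#include <cstdint>

uint32_t MixPointers(const void* a, const void* b, uint32_t extra_bits) {
  uint32_t hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(a)) >> 2;  // drop tag
  hash ^= static_cast<uint32_t>(reinterpret_cast<uintptr_t>(b)) << 2;
  return hash ^ (hash >> 16) ^ extra_bits;
}
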
 void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
   // Iterate over all fields in the body but take care in dealing with
   // the code entry.
@@ -5727,19 +6337,6 @@
 }
 
 
-uint32_t JSFunction::SourceHash() {
-  uint32_t hash = 0;
-  Object* script = shared()->script();
-  if (!script->IsUndefined()) {
-    Object* source = Script::cast(script)->source();
-    if (source->IsUndefined()) hash = String::cast(source)->Hash();
-  }
-  hash ^= ComputeIntegerHash(shared()->start_position_and_type());
-  hash += ComputeIntegerHash(shared()->end_position());
-  return hash;
-}
-
-
 bool JSFunction::IsInlineable() {
   if (IsBuiltin()) return false;
   SharedFunctionInfo* shared_info = shared();
@@ -5830,7 +6427,7 @@
 
 
 void JSFunction::PrintName(FILE* out) {
-  SmartPointer<char> name = shared()->DebugName()->ToCString();
+  SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
   PrintF(out, "%s", *name);
 }
 
@@ -6071,6 +6668,29 @@
 }
 
 
+void SharedFunctionInfo::DisableOptimization(JSFunction* function) {
+  // Disable optimization for the shared function info and mark the
+  // code as non-optimizable. The marker on the shared function info
+  // is there because we flush non-optimized code, thereby losing the
+  // non-optimizable information for the code. When the code is
+  // regenerated and set on the shared function info it is marked as
+  // non-optimizable if optimization is disabled for the shared
+  // function info.
+  set_optimization_disabled(true);
+  // Code should be the lazy compilation stub or else unoptimized.  If the
+  // latter, disable optimization for the code too.
+  ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
+  if (code()->kind() == Code::FUNCTION) {
+    code()->set_optimizable(false);
+  }
+  if (FLAG_trace_opt) {
+    PrintF("[disabled optimization for: ");
+    function->PrintName();
+    PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+  }
+}
+
+
 bool SharedFunctionInfo::VerifyBailoutId(int id) {
   // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
   // we are always bailing out on ARM.
@@ -6184,6 +6804,7 @@
   if (slack != 0) {
     // Resize the initial map and all maps in its transition tree.
     map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+
     // Give the correct expected_nof_properties to initial maps created later.
     ASSERT(expected_nof_properties() >= slack);
     set_expected_nof_properties(expected_nof_properties() - slack);
@@ -6368,134 +6989,108 @@
 
 #ifdef ENABLE_DISASSEMBLER
 
-#ifdef OBJECT_PRINT
-
 void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
   disasm::NameConverter converter;
   int deopt_count = DeoptCount();
   PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
   if (0 == deopt_count) return;
 
-  PrintF(out, "%6s  %6s  %6s  %12s\n", "index", "ast id", "argc", "commands");
+  PrintF(out, "%6s  %6s  %6s %6s %12s\n", "index", "ast id", "argc", "pc",
+         FLAG_print_code_verbose ? "commands" : "");
   for (int i = 0; i < deopt_count; i++) {
-    int command_count = 0;
-    PrintF(out, "%6d  %6d  %6d",
-           i, AstId(i)->value(), ArgumentsStackHeight(i)->value());
+    PrintF(out, "%6d  %6d  %6d %6d",
+           i,
+           AstId(i)->value(),
+           ArgumentsStackHeight(i)->value(),
+           Pc(i)->value());
+
+    if (!FLAG_print_code_verbose) {
+      PrintF(out, "\n");
+      continue;
+    }
+    // Print details of the frame translation.
     int translation_index = TranslationIndex(i)->value();
     TranslationIterator iterator(TranslationByteArray(), translation_index);
     Translation::Opcode opcode =
         static_cast<Translation::Opcode>(iterator.Next());
     ASSERT(Translation::BEGIN == opcode);
     int frame_count = iterator.Next();
-    if (FLAG_print_code_verbose) {
-      PrintF(out, "  %s {count=%d}\n", Translation::StringFor(opcode),
-             frame_count);
-    }
+    PrintF(out, "  %s {count=%d}\n", Translation::StringFor(opcode),
+           frame_count);
 
-    for (int i = 0; i < frame_count; ++i) {
-      opcode = static_cast<Translation::Opcode>(iterator.Next());
-      ASSERT(Translation::FRAME == opcode);
-      int ast_id = iterator.Next();
-      int function_id = iterator.Next();
-      JSFunction* function =
-          JSFunction::cast(LiteralArray()->get(function_id));
-      unsigned height = iterator.Next();
-      if (FLAG_print_code_verbose) {
-        PrintF(out, "%24s  %s {ast_id=%d, function=",
-               "", Translation::StringFor(opcode), ast_id);
-        function->PrintName(out);
-        PrintF(out, ", height=%u}\n", height);
+    while (iterator.HasNext() &&
+           Translation::BEGIN !=
+           (opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
+      PrintF(out, "%24s    %s ", "", Translation::StringFor(opcode));
+
+      switch (opcode) {
+        case Translation::BEGIN:
+          UNREACHABLE();
+          break;
+
+        case Translation::FRAME: {
+          int ast_id = iterator.Next();
+          int function_id = iterator.Next();
+          JSFunction* function =
+              JSFunction::cast(LiteralArray()->get(function_id));
+          unsigned height = iterator.Next();
+          PrintF(out, "{ast_id=%d, function=", ast_id);
+          function->PrintName(out);
+          PrintF(out, ", height=%u}", height);
+          break;
+        }
+
+        case Translation::DUPLICATE:
+          break;
+
+        case Translation::REGISTER: {
+          int reg_code = iterator.Next();
+          PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+          break;
+        }
+
+        case Translation::INT32_REGISTER: {
+          int reg_code = iterator.Next();
+          PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+          break;
+        }
+
+        case Translation::DOUBLE_REGISTER: {
+          int reg_code = iterator.Next();
+          PrintF(out, "{input=%s}",
+                 DoubleRegister::AllocationIndexToString(reg_code));
+          break;
+        }
+
+        case Translation::STACK_SLOT: {
+          int input_slot_index = iterator.Next();
+          PrintF(out, "{input=%d}", input_slot_index);
+          break;
+        }
+
+        case Translation::INT32_STACK_SLOT: {
+          int input_slot_index = iterator.Next();
+          PrintF(out, "{input=%d}", input_slot_index);
+          break;
+        }
+
+        case Translation::DOUBLE_STACK_SLOT: {
+          int input_slot_index = iterator.Next();
+          PrintF(out, "{input=%d}", input_slot_index);
+          break;
+        }
+
+        case Translation::LITERAL: {
+          unsigned literal_index = iterator.Next();
+          PrintF(out, "{literal_id=%u}", literal_index);
+          break;
+        }
+
+        case Translation::ARGUMENTS_OBJECT:
+          break;
       }
-
-      // Size of translation is height plus all incoming arguments including
-      // receiver.
-      int size = height + function->shared()->formal_parameter_count() + 1;
-      command_count += size;
-      for (int j = 0; j < size; ++j) {
-        opcode = static_cast<Translation::Opcode>(iterator.Next());
-        if (FLAG_print_code_verbose) {
-          PrintF(out, "%24s    %s ", "", Translation::StringFor(opcode));
-        }
-
-        if (opcode == Translation::DUPLICATE) {
-          opcode = static_cast<Translation::Opcode>(iterator.Next());
-          if (FLAG_print_code_verbose) {
-            PrintF(out, "%s ", Translation::StringFor(opcode));
-          }
-          --j;  // Two commands share the same frame index.
-        }
-
-        switch (opcode) {
-          case Translation::BEGIN:
-          case Translation::FRAME:
-          case Translation::DUPLICATE:
-            UNREACHABLE();
-            break;
-
-          case Translation::REGISTER: {
-            int reg_code = iterator.Next();
-            if (FLAG_print_code_verbose)  {
-              PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
-            }
-            break;
-          }
-
-          case Translation::INT32_REGISTER: {
-            int reg_code = iterator.Next();
-            if (FLAG_print_code_verbose)  {
-              PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
-            }
-            break;
-          }
-
-          case Translation::DOUBLE_REGISTER: {
-            int reg_code = iterator.Next();
-            if (FLAG_print_code_verbose)  {
-              PrintF(out, "{input=%s}",
-                     DoubleRegister::AllocationIndexToString(reg_code));
-            }
-            break;
-          }
-
-          case Translation::STACK_SLOT: {
-            int input_slot_index = iterator.Next();
-            if (FLAG_print_code_verbose)  {
-              PrintF(out, "{input=%d}", input_slot_index);
-            }
-            break;
-          }
-
-          case Translation::INT32_STACK_SLOT: {
-            int input_slot_index = iterator.Next();
-            if (FLAG_print_code_verbose)  {
-              PrintF(out, "{input=%d}", input_slot_index);
-            }
-            break;
-          }
-
-          case Translation::DOUBLE_STACK_SLOT: {
-            int input_slot_index = iterator.Next();
-            if (FLAG_print_code_verbose)  {
-              PrintF(out, "{input=%d}", input_slot_index);
-            }
-            break;
-          }
-
-          case Translation::LITERAL: {
-            unsigned literal_index = iterator.Next();
-            if (FLAG_print_code_verbose)  {
-              PrintF(out, "{literal_id=%u}", literal_index);
-            }
-            break;
-          }
-
-          case Translation::ARGUMENTS_OBJECT:
-            break;
-        }
-        if (FLAG_print_code_verbose) PrintF(out, "\n");
-      }
+      PrintF(out, "\n");
     }
-    if (!FLAG_print_code_verbose) PrintF(out, "  %12d\n", command_count);
   }
 }
 
@@ -6516,8 +7111,6 @@
   }
 }
 
-#endif
-
 
 // Identify kind of code.
 const char* Code::Kind2String(Kind kind) {
@@ -6528,14 +7121,14 @@
     case BUILTIN: return "BUILTIN";
     case LOAD_IC: return "LOAD_IC";
     case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
-    case KEYED_EXTERNAL_ARRAY_LOAD_IC: return "KEYED_EXTERNAL_ARRAY_LOAD_IC";
     case STORE_IC: return "STORE_IC";
     case KEYED_STORE_IC: return "KEYED_STORE_IC";
-    case KEYED_EXTERNAL_ARRAY_STORE_IC: return "KEYED_EXTERNAL_ARRAY_STORE_IC";
     case CALL_IC: return "CALL_IC";
     case KEYED_CALL_IC: return "KEYED_CALL_IC";
-    case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
+    case UNARY_OP_IC: return "UNARY_OP_IC";
+    case BINARY_OP_IC: return "BINARY_OP_IC";
     case COMPARE_IC: return "COMPARE_IC";
+    case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
   }
   UNREACHABLE();
   return NULL;
@@ -6563,9 +7156,10 @@
     case FIELD: return "FIELD";
     case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
     case CALLBACKS: return "CALLBACKS";
+    case HANDLER: return "HANDLER";
     case INTERCEPTOR: return "INTERCEPTOR";
     case MAP_TRANSITION: return "MAP_TRANSITION";
-    case EXTERNAL_ARRAY_TRANSITION: return "EXTERNAL_ARRAY_TRANSITION";
+    case ELEMENTS_TRANSITION: return "ELEMENTS_TRANSITION";
     case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
     case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
   }
@@ -6594,7 +7188,7 @@
   if (name != NULL) {
     PrintF(out, "extra_ic_state = %s\n", name);
   } else {
-    PrintF(out, "etra_ic_state = %d\n", extra);
+    PrintF(out, "extra_ic_state = %d\n", extra);
   }
 }
 
@@ -6604,10 +7198,12 @@
   if (is_inline_cache_stub()) {
     PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
     PrintExtraICState(out, kind(), extra_ic_state());
-    PrintF(out, "ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
     if (ic_state() == MONOMORPHIC) {
       PrintF(out, "type = %s\n", PropertyType2String(type()));
     }
+    if (is_call_stub() || is_keyed_call_stub()) {
+      PrintF(out, "argc = %d\n", arguments_count());
+    }
   }
   if ((name != NULL) && (name[0] != '\0')) {
     PrintF(out, "name = %s\n", name);
@@ -6620,7 +7216,6 @@
   Disassembler::Decode(out, this);
   PrintF(out, "\n");
 
-#ifdef DEBUG
   if (kind() == FUNCTION) {
     DeoptimizationOutputData* data =
         DeoptimizationOutputData::cast(this->deoptimization_data());
@@ -6631,7 +7226,6 @@
     data->DeoptimizationInputDataPrint(out);
   }
   PrintF("\n");
-#endif
 
   if (kind() == OPTIMIZED_FUNCTION) {
     SafepointTable table(this);
@@ -6678,44 +7272,167 @@
 #endif  // ENABLE_DISASSEMBLER
 
 
+static void CopyFastElementsToFast(FixedArray* source,
+                                   FixedArray* destination,
+                                   WriteBarrierMode mode) {
+  uint32_t count = static_cast<uint32_t>(source->length());
+  for (uint32_t i = 0; i < count; ++i) {
+    destination->set(i, source->get(i), mode);
+  }
+}
+
+
+static void CopySlowElementsToFast(SeededNumberDictionary* source,
+                                   FixedArray* destination,
+                                   WriteBarrierMode mode) {
+  for (int i = 0; i < source->Capacity(); ++i) {
+    Object* key = source->KeyAt(i);
+    if (key->IsNumber()) {
+      uint32_t entry = static_cast<uint32_t>(key->Number());
+      destination->set(entry, source->ValueAt(i), mode);
+    }
+  }
+}
+
+
 MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
                                                         int length) {
   Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
 
+  // Allocate a new fast elements backing store.
+  FixedArray* new_elements = NULL;
+  { Object* object;
+    MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
+    if (!maybe->ToObject(&object)) return maybe;
+    new_elements = FixedArray::cast(object);
+  }
+
+  // Find the new map to use for this object if there is a map change.
+  Map* new_map = NULL;
+  if (elements()->map() != heap->non_strict_arguments_elements_map()) {
+    Object* object;
+    MaybeObject* maybe = map()->GetFastElementsMap();
+    if (!maybe->ToObject(&object)) return maybe;
+    new_map = Map::cast(object);
+  }
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+      CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
+      set_map(new_map);
+      set_elements(new_elements);
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+      CopySlowElementsToFast(SeededNumberDictionary::cast(elements()),
+                             new_elements,
+                             mode);
+      set_map(new_map);
+      set_elements(new_elements);
+      break;
+    }
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+      // The object's map and the parameter map are unchanged; the unaliased
+      // arguments are copied to the new backing store.
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      if (arguments->IsDictionary()) {
+        CopySlowElementsToFast(SeededNumberDictionary::cast(arguments),
+                               new_elements,
+                               mode);
+      } else {
+        CopyFastElementsToFast(arguments, new_elements, mode);
+      }
+      parameter_map->set(1, new_elements);
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      FixedDoubleArray* old_elements = FixedDoubleArray::cast(elements());
+      uint32_t old_length = static_cast<uint32_t>(old_elements->length());
+      // Fill out the new array with this content and array holes.
+      for (uint32_t i = 0; i < old_length; i++) {
+        if (!old_elements->is_the_hole(i)) {
+          Object* obj;
+          // Objects must be allocated in the old object space, since the
+          // overall number of HeapNumbers needed for the conversion might
+          // exceed the capacity of new space, and we would fail repeatedly
+          // trying to convert the FixedDoubleArray.
+          MaybeObject* maybe_value_object =
+              GetHeap()->AllocateHeapNumber(old_elements->get_scalar(i),
+                                            TENURED);
+          if (!maybe_value_object->ToObject(&obj)) return maybe_value_object;
+          // Force write barrier. It's not worth trying to exploit
+          // elems->GetWriteBarrierMode(), since it requires an
+          // AssertNoAllocation stack object that would have to be positioned
+          // after the HeapNumber allocation anyway.
+          new_elements->set(i, obj, UPDATE_WRITE_BARRIER);
+        }
+      }
+      set_map(new_map);
+      set_elements(new_elements);
+      break;
+    }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+
+  // Update the length if necessary.
+  if (IsJSArray()) {
+    JSArray::cast(this)->set_length(Smi::FromInt(length));
+  }
+
+  return new_elements;
+}
+
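
// A minimal sketch of the backing-store growth above: allocate the new dense
// store first, copy from whichever representation the object currently has
// (dense array or number dictionary), then install the new store. Standard
// containers stand in for FixedArray and SeededNumberDictionary.
#include <cstddef>
#include <map>
#include <vector>

using FastElements = std::vector<int>;             // dense; 0 stands in for a hole
using DictionaryElements = std::map<size_t, int>;  // sparse index -> value

FastElements GrowToFast(const FastElements* fast,
                        const DictionaryElements* dict,
                        size_t new_capacity) {
  FastElements new_elements(new_capacity, 0);  // pre-filled with "holes"
  if (fast != nullptr) {
    // CopyFastElementsToFast: straight element-by-element copy.
    for (size_t i = 0; i < fast->size() && i < new_capacity; ++i) {
      new_elements[i] = (*fast)[i];
    }
  } else if (dict != nullptr) {
    // CopySlowElementsToFast: scatter dictionary entries by numeric key.
    for (const auto& entry : *dict) {
      if (entry.first < new_capacity) new_elements[entry.first] = entry.second;
    }
  }
  return new_elements;  // caller swaps this in (set_elements above)
}
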
+
+MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
+    int capacity,
+    int length) {
+  Heap* heap = GetHeap();
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasExternalArrayElements());
+
   Object* obj;
-  { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
+  { MaybeObject* maybe_obj =
+        heap->AllocateUninitializedFixedDoubleArray(capacity);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
-  FixedArray* elems = FixedArray::cast(obj);
+  FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
 
-  { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+  { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   Map* new_map = Map::cast(obj);
 
   AssertNoAllocation no_gc;
-  WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
-      FixedArray* old_elements = FixedArray::cast(elements());
-      uint32_t old_length = static_cast<uint32_t>(old_elements->length());
-      // Fill out the new array with this content and array holes.
-      for (uint32_t i = 0; i < old_length; i++) {
-        elems->set(i, old_elements->get(i), mode);
-      }
+      elems->Initialize(FixedArray::cast(elements()));
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      elems->Initialize(FixedDoubleArray::cast(elements()));
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      NumberDictionary* dictionary = NumberDictionary::cast(elements());
-      for (int i = 0; i < dictionary->Capacity(); i++) {
-        Object* key = dictionary->KeyAt(i);
-        if (key->IsNumber()) {
-          uint32_t entry = static_cast<uint32_t>(key->Number());
-          elems->set(entry, dictionary->ValueAt(i), mode);
-        }
-      }
+      elems->Initialize(SeededNumberDictionary::cast(elements()));
       break;
     }
     default:
@@ -6723,7 +7440,9 @@
       break;
   }
 
+  ASSERT(new_map->has_fast_double_elements());
   set_map(new_map);
+  ASSERT(elems->IsFixedDoubleArray());
   set_elements(elems);
 
   if (IsJSArray()) {
@@ -6742,13 +7461,12 @@
 
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
+    case FAST_DOUBLE_ELEMENTS:
       // Make sure we never try to shrink dense arrays into sparse arrays.
-      ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
-                                   new_length);
-      Object* obj;
-      { MaybeObject* maybe_obj = NormalizeElements();
-        if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-      }
+      ASSERT(static_cast<uint32_t>(
+          FixedArrayBase::cast(elements())->length()) <= new_length);
+      MaybeObject* result = NormalizeElements();
+      if (result->IsFailure()) return result;
 
       // Update length for JSArrays.
       if (IsJSArray()) JSArray::cast(this)->set_length(len);
@@ -6763,7 +7481,18 @@
       }
       break;
     }
-    default:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNIMPLEMENTED();
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
       UNREACHABLE();
       break;
   }
@@ -6803,7 +7532,7 @@
 
 
 static Failure* ArrayLengthRangeError(Heap* heap) {
-  HandleScope scope;
+  HandleScope scope(heap->isolate());
   return heap->isolate()->Throw(
       *FACTORY->NewRangeError("invalid_array_length",
           HandleVector<Object>(NULL, 0)));
@@ -6819,21 +7548,58 @@
   if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
     const int value = Smi::cast(smi_length)->value();
     if (value < 0) return ArrayLengthRangeError(GetHeap());
-    switch (GetElementsKind()) {
-      case FAST_ELEMENTS: {
-        int old_capacity = FixedArray::cast(elements())->length();
+    ElementsKind elements_kind = GetElementsKind();
+    switch (elements_kind) {
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS: {
+        int old_capacity = FixedArrayBase::cast(elements())->length();
         if (value <= old_capacity) {
           if (IsJSArray()) {
             Object* obj;
-            { MaybeObject* maybe_obj = EnsureWritableFastElements();
+            if (elements_kind == FAST_ELEMENTS) {
+              MaybeObject* maybe_obj = EnsureWritableFastElements();
               if (!maybe_obj->ToObject(&obj)) return maybe_obj;
             }
-            int old_length = FastD2I(JSArray::cast(this)->length()->Number());
-            // NOTE: We may be able to optimize this by removing the
-            // last part of the elements backing storage array and
-            // setting the capacity to the new size.
-            for (int i = value; i < old_length; i++) {
-              FixedArray::cast(elements())->set_the_hole(i);
+            if (2 * value <= old_capacity) {
+              // If more than half the elements won't be used, trim the array.
+              if (value == 0) {
+                initialize_elements();
+              } else {
+                Address filler_start;
+                int filler_size;
+                if (GetElementsKind() == FAST_ELEMENTS) {
+                  FixedArray* fast_elements = FixedArray::cast(elements());
+                  fast_elements->set_length(value);
+                  filler_start = fast_elements->address() +
+                      FixedArray::OffsetOfElementAt(value);
+                  filler_size = (old_capacity - value) * kPointerSize;
+                } else {
+                  ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+                  FixedDoubleArray* fast_double_elements =
+                      FixedDoubleArray::cast(elements());
+                  fast_double_elements->set_length(value);
+                  filler_start = fast_double_elements->address() +
+                      FixedDoubleArray::OffsetOfElementAt(value);
+                  filler_size = (old_capacity - value) * kDoubleSize;
+                }
+                GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+              }
+            } else {
+              // Otherwise, fill the unused tail with holes.
+              int old_length = FastD2I(JSArray::cast(this)->length()->Number());
+              if (GetElementsKind() == FAST_ELEMENTS) {
+                FixedArray* fast_elements = FixedArray::cast(elements());
+                for (int i = value; i < old_length; i++) {
+                  fast_elements->set_the_hole(i);
+                }
+              } else {
+                ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+                FixedDoubleArray* fast_double_elements =
+                    FixedDoubleArray::cast(elements());
+                for (int i = value; i < old_length; i++) {
+                  fast_double_elements->set_the_hole(i);
+                }
+              }
             }
             JSArray::cast(this)->set_length(Smi::cast(smi_length));
           }
@@ -6841,13 +7607,16 @@
         }
         int min = NewElementsCapacity(old_capacity);
         int new_capacity = value > min ? value : min;
-        if (new_capacity <= kMaxFastElementsLength ||
-            !ShouldConvertToSlowElements(new_capacity)) {
-          Object* obj;
-          { MaybeObject* maybe_obj =
-                SetFastElementsCapacityAndLength(new_capacity, value);
-            if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+        if (!ShouldConvertToSlowElements(new_capacity)) {
+          MaybeObject* result;
+          if (GetElementsKind() == FAST_ELEMENTS) {
+            result = SetFastElementsCapacityAndLength(new_capacity, value);
+        } else {
+            ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+            result = SetFastDoubleElementsCapacityAndLength(new_capacity,
+                                                            value);
           }
+          if (result->IsFailure()) return result;
           return this;
         }
         break;
@@ -6872,7 +7641,16 @@
         }
         return this;
       }
-      default:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case EXTERNAL_PIXEL_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -6903,53 +7681,74 @@
 
 Object* Map::GetPrototypeTransition(Object* prototype) {
   FixedArray* cache = prototype_transitions();
-  int capacity = cache->length();
-  if (capacity == 0) return NULL;
-  int finger = Smi::cast(cache->get(0))->value();
-  for (int i = 1; i < finger; i += 2) {
-    if (cache->get(i) == prototype) return cache->get(i + 1);
+  int number_of_transitions = NumberOfProtoTransitions();
+  const int proto_offset =
+      kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
+  const int map_offset = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
+  const int step = kProtoTransitionElementsPerEntry;
+  for (int i = 0; i < number_of_transitions; i++) {
+    if (cache->get(proto_offset + i * step) == prototype) {
+      Object* map = cache->get(map_offset + i * step);
+      ASSERT(map->IsMap());
+      return map;
+    }
   }
   return NULL;
 }
 
 
 MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
+  ASSERT(map->IsMap());
+  ASSERT(HeapObject::cast(prototype)->map()->IsMap());
   // Don't cache prototype transition if this map is shared.
   if (is_shared() || !FLAG_cache_prototype_transitions) return this;
 
   FixedArray* cache = prototype_transitions();
 
-  int capacity = cache->length();
+  const int step = kProtoTransitionElementsPerEntry;
+  const int header = kProtoTransitionHeaderSize;
 
-  int finger = (capacity == 0) ? 1 : Smi::cast(cache->get(0))->value();
+  int capacity = (cache->length() - header) / step;
 
-  if (finger >= capacity) {
+  int transitions = NumberOfProtoTransitions() + 1;
+
+  if (transitions > capacity) {
     if (capacity > kMaxCachedPrototypeTransitions) return this;
 
     FixedArray* new_cache;
-    { MaybeObject* maybe_cache = heap()->AllocateFixedArray(finger * 2 + 1);
+    // Grow array by factor 2 over and above what we need.
+    { MaybeObject* maybe_cache =
+          heap()->AllocateFixedArray(transitions * 2 * step + header);
       if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
     }
 
-    for (int i = 1; i < capacity; i++) new_cache->set(i, cache->get(i));
+    for (int i = 0; i < capacity * step; i++) {
+      new_cache->set(i + header, cache->get(i + header));
+    }
     cache = new_cache;
     set_prototype_transitions(cache);
   }
 
-  cache->set(finger, prototype);
-  cache->set(finger + 1, map);
-  cache->set(0, Smi::FromInt(finger + 2));
+  int last = transitions - 1;
+
+  cache->set(header + last * step + kProtoTransitionPrototypeOffset, prototype);
+  cache->set(header + last * step + kProtoTransitionMapOffset, map);
+  SetNumberOfProtoTransitions(transitions);
 
   return cache;
 }
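
// A minimal sketch of the reworked prototype-transition cache: a flat store
// holding an entry count followed by fixed-size {prototype, map} pairs, with
// linear lookup and geometric growth on insertion. The struct below is an
// illustrative stand-in using a std::vector rather than a FixedArray.
#include <utility>
#include <vector>

struct ProtoTransitionCache {
  std::vector<std::pair<const void*, const void*>> entries;  // {prototype, map}

  // Linear scan, as in Map::GetPrototypeTransition.
  const void* Lookup(const void* prototype) const {
    for (const auto& entry : entries) {
      if (entry.first == prototype) return entry.second;
    }
    return nullptr;
  }

  // Append a transition, growing capacity by a factor of two when full,
  // as PutPrototypeTransition grows its FixedArray.
  void Put(const void* prototype, const void* map) {
    if (entries.size() == entries.capacity()) {
      entries.reserve(entries.capacity() == 0 ? 2 : entries.capacity() * 2);
    }
    entries.emplace_back(prototype, map);
  }
};
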
 
 
-MaybeObject* JSObject::SetPrototype(Object* value,
-                                    bool skip_hidden_prototypes) {
+MaybeObject* JSReceiver::SetPrototype(Object* value,
+                                      bool skip_hidden_prototypes) {
+#ifdef DEBUG
+  int size = Size();
+#endif
+
   Heap* heap = GetHeap();
   // Silently ignore the change if value is not a JSObject or null.
   // SpiderMonkey behaves this way.
-  if (!value->IsJSObject() && !value->IsNull()) return value;
+  if (!value->IsJSReceiver() && !value->IsNull()) return value;
 
   // From 8.6.2 Object Internal Methods
   // ...
@@ -6960,7 +7759,7 @@
   // or [[Extensible]] must not violate the invariants defined in the preceding
   // paragraph.
   if (!this->map()->is_extensible()) {
-    HandleScope scope;
+    HandleScope scope(heap->isolate());
     Handle<Object> handle(this, heap->isolate());
     return heap->isolate()->Throw(
         *FACTORY->NewTypeError("non_extensible_proto",
@@ -6974,13 +7773,13 @@
   for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
     if (JSObject::cast(pt) == this) {
       // Cycle detected.
-      HandleScope scope;
+      HandleScope scope(heap->isolate());
       return heap->isolate()->Throw(
           *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
     }
   }
 
-  JSObject* real_receiver = this;
+  JSReceiver* real_receiver = this;
 
   if (skip_hidden_prototypes) {
     // Find the first object in the chain whose prototype object is not
@@ -7016,12 +7815,12 @@
   real_receiver->set_map(Map::cast(new_map));
 
   heap->ClearInstanceofCache();
-
+  ASSERT(size == Size());
   return value;
 }
 
 
-bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
@@ -7034,6 +7833,17 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
+        return true;
+      }
+      break;
+    }
     case EXTERNAL_PIXEL_ELEMENTS: {
       ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
       if (index < static_cast<uint32_t>(pixels->length())) {
@@ -7047,7 +7857,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       if (index < static_cast<uint32_t>(array->length())) {
         return true;
@@ -7056,12 +7867,12 @@
     }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
-          != NumberDictionary::kNotFound) {
+          != SeededNumberDictionary::kNotFound) {
         return true;
       }
       break;
     }
-    default:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
       break;
   }
@@ -7075,14 +7886,14 @@
 }
 
 
-bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementWithInterceptor(JSReceiver* receiver, uint32_t index) {
   Isolate* isolate = GetIsolate();
   // Make sure that the top context does not change when doing
   // callbacks or interceptor calls.
   AssertNoContextChange ncc;
   HandleScope scope(isolate);
   Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
-  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSReceiver> receiver_handle(receiver);
   Handle<JSObject> holder_handle(this);
   CustomArguments args(isolate, interceptor->data(), receiver, this);
   v8::AccessorInfo info(args.end());
@@ -7158,6 +7969,17 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
+        return FAST_ELEMENT;
+      }
+      break;
+    }
     case EXTERNAL_PIXEL_ELEMENTS: {
       ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
       if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
@@ -7169,28 +7991,72 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
       break;
     }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index) !=
-              NumberDictionary::kNotFound) {
+          SeededNumberDictionary::kNotFound) {
         return DICTIONARY_ELEMENT;
       }
       break;
     }
-    default:
-      UNREACHABLE();
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      // Aliased parameters and non-aliased elements in a fast backing store
+      // behave as FAST_ELEMENT.  Non-aliased elements in a dictionary
+      // backing store behave as DICTIONARY_ELEMENT.
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      uint32_t length = parameter_map->length();
+      Object* probe =
+          index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+      if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+      // If not aliased, check the arguments.
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      if (arguments->IsDictionary()) {
+        SeededNumberDictionary* dictionary =
+            SeededNumberDictionary::cast(arguments);
+        if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) {
+          return DICTIONARY_ELEMENT;
+        }
+      } else {
+        length = arguments->length();
+        probe = (index < length) ? arguments->get(index) : NULL;
+        if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+      }
       break;
+    }
   }
 
   return UNDEFINED_ELEMENT;
 }
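
// A minimal sketch of the non-strict arguments lookup used above: slots past
// the header of the parameter map record aliased parameters (a hole means
// "not aliased"), and unaliased indices fall through to the real backing
// store. Standard containers stand in for the V8 data structures.
#include <cstddef>
#include <map>
#include <optional>
#include <vector>

struct ArgumentsElements {
  std::vector<std::optional<int>> alias_slots;  // empty optional == the hole
  std::map<size_t, int> backing_store;          // arguments store (map for brevity)

  bool HasElement(size_t index) const {
    // Aliased parameters behave like fast elements.
    if (index < alias_slots.size() && alias_slots[index].has_value()) return true;
    // Otherwise consult the backing store.
    return backing_store.count(index) != 0;
  }
};
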
 
 
-bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementInElements(FixedArray* elements,
+                                    ElementsKind kind,
+                                    uint32_t index) {
+  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+  if (kind == FAST_ELEMENTS) {
+    int length = IsJSArray()
+        ? Smi::cast(JSArray::cast(this)->length())->value()
+        : elements->length();
+    if (index < static_cast<uint32_t>(length) &&
+        !elements->get(index)->IsTheHole()) {
+      return true;
+    }
+  } else {
+    if (SeededNumberDictionary::cast(elements)->FindEntry(index) !=
+        SeededNumberDictionary::kNotFound) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
     Heap* heap = GetHeap();
@@ -7205,7 +8071,8 @@
     return HasElementWithInterceptor(receiver, index);
   }
 
-  switch (GetElementsKind()) {
+  ElementsKind kind = GetElementsKind();
+  switch (kind) {
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -7215,6 +8082,15 @@
           !FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedDoubleArray::cast(elements())->is_the_hole(index)) return true;
+      break;
+    }
     case EXTERNAL_PIXEL_ELEMENTS: {
       ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
       if (index < static_cast<uint32_t>(pixels->length())) {
@@ -7228,7 +8104,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       if (index < static_cast<uint32_t>(array->length())) {
         return true;
@@ -7237,14 +8114,24 @@
     }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
-          != NumberDictionary::kNotFound) {
+          != SeededNumberDictionary::kNotFound) {
         return true;
       }
       break;
     }
-    default:
-      UNREACHABLE();
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      uint32_t length = parameter_map->length();
+      Object* probe =
+          (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+      if (probe != NULL && !probe->IsTheHole()) return true;
+
+      // Not a mapped parameter, check the arguments.
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+      if (HasElementInElements(arguments, kind, index)) return true;
       break;
+    }
   }
 
   // Handle [] on String objects.
@@ -7299,18 +8186,18 @@
                                               uint32_t index,
                                               Object* holder) {
   Isolate* isolate = GetIsolate();
-  ASSERT(!structure->IsProxy());
+  ASSERT(!structure->IsForeign());
 
   // api style callbacks.
   if (structure->IsAccessorInfo()) {
-    AccessorInfo* data = AccessorInfo::cast(structure);
+    Handle<AccessorInfo> data(AccessorInfo::cast(structure));
     Object* fun_obj = data->getter();
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     HandleScope scope(isolate);
     Handle<JSObject> self(JSObject::cast(receiver));
     Handle<JSObject> holder_handle(JSObject::cast(holder));
     Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
-    Handle<String> key(isolate->factory()->NumberToString(number));
+    Handle<String> key = isolate->factory()->NumberToString(number);
     LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
     CustomArguments args(isolate, data->data(), *self, *holder_handle);
     v8::AccessorInfo info(args.end());
@@ -7344,7 +8231,8 @@
 MaybeObject* JSObject::SetElementWithCallback(Object* structure,
                                               uint32_t index,
                                               Object* value,
-                                              JSObject* holder) {
+                                              JSObject* holder,
+                                              StrictModeFlag strict_mode) {
   Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
 
@@ -7354,20 +8242,22 @@
   Handle<Object> value_handle(value, isolate);
 
   // To accommodate both the old and the new api we switch on the
-  // data structure used to store the callbacks.  Eventually proxy
+  // data structure used to store the callbacks.  Eventually foreign
   // callbacks should be phased out.
-  ASSERT(!structure->IsProxy());
+  ASSERT(!structure->IsForeign());
 
   if (structure->IsAccessorInfo()) {
     // api style callbacks
-    AccessorInfo* data = AccessorInfo::cast(structure);
+    Handle<JSObject> self(this);
+    Handle<JSObject> holder_handle(JSObject::cast(holder));
+    Handle<AccessorInfo> data(AccessorInfo::cast(structure));
     Object* call_obj = data->setter();
     v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
     if (call_fun == NULL) return value;
     Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
     Handle<String> key(isolate->factory()->NumberToString(number));
-    LOG(isolate, ApiNamedPropertyAccess("store", this, *key));
-    CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
+    LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
+    CustomArguments args(isolate, data->data(), *self, *holder_handle);
     v8::AccessorInfo info(args.end());
     {
       // Leaving JavaScript.
@@ -7381,10 +8271,13 @@
   }
 
   if (structure->IsFixedArray()) {
-    Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+    Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
     if (setter->IsJSFunction()) {
-     return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+     return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
     } else {
+      if (strict_mode == kNonStrictMode) {
+        return value;
+      }
       Handle<Object> holder_handle(holder, isolate);
       Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
       Handle<Object> args[2] = { key, holder_handle };
@@ -7399,6 +8292,30 @@
 }
 
 
+bool JSObject::HasFastArgumentsElements() {
+  Heap* heap = GetHeap();
+  if (!elements()->IsFixedArray()) return false;
+  FixedArray* elements = FixedArray::cast(this->elements());
+  if (elements->map() != heap->non_strict_arguments_elements_map()) {
+    return false;
+  }
+  FixedArray* arguments = FixedArray::cast(elements->get(1));
+  return !arguments->IsDictionary();
+}
+
+
+bool JSObject::HasDictionaryArgumentsElements() {
+  Heap* heap = GetHeap();
+  if (!elements()->IsFixedArray()) return false;
+  FixedArray* elements = FixedArray::cast(this->elements());
+  if (elements->map() != heap->non_strict_arguments_elements_map()) {
+    return false;
+  }
+  FixedArray* arguments = FixedArray::cast(elements->get(1));
+  return arguments->IsDictionary();
+}
+
+
 // Adding n elements in fast case is O(n*n).
 // Note: revisit design to have dual undefined values to capture absent
 // elements.
@@ -7406,27 +8323,214 @@
                                       Object* value,
                                       StrictModeFlag strict_mode,
                                       bool check_prototype) {
-  ASSERT(HasFastElements());
+  ASSERT(HasFastElements() || HasFastArgumentsElements());
 
-  Object* elms_obj;
-  { MaybeObject* maybe_elms_obj = EnsureWritableFastElements();
-    if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+  FixedArray* backing_store = FixedArray::cast(elements());
+  if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
+    backing_store = FixedArray::cast(backing_store->get(1));
+  } else {
+    Object* writable;
+    MaybeObject* maybe = EnsureWritableFastElements();
+    if (!maybe->ToObject(&writable)) return maybe;
+    backing_store = FixedArray::cast(writable);
   }
-  FixedArray* elms = FixedArray::cast(elms_obj);
-  uint32_t elms_length = static_cast<uint32_t>(elms->length());
+  uint32_t length = static_cast<uint32_t>(backing_store->length());
 
   if (check_prototype &&
-      (index >= elms_length || elms->get(index)->IsTheHole())) {
+      (index >= length || backing_store->get(index)->IsTheHole())) {
     bool found;
-    MaybeObject* result =
-        SetElementWithCallbackSetterInPrototypes(index, value, &found);
+    MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
+                                                                   value,
+                                                                   &found,
+                                                                   strict_mode);
     if (found) return result;
   }
 
+  // Check whether there is extra space in fixed array.
+  if (index < length) {
+    backing_store->set(index, value);
+    if (IsJSArray()) {
+      // Update the length of the array if needed.
+      uint32_t array_length = 0;
+      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+      if (index >= array_length) {
+        JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+      }
+    }
+    return value;
+  }
 
-  // Check whether there is extra space in fixed array..
+  // Allow gap in fast case.
+  if ((index - length) < kMaxGap) {
+    // Try allocating extra space.
+    int new_capacity = NewElementsCapacity(index + 1);
+    if (!ShouldConvertToSlowElements(new_capacity)) {
+      ASSERT(static_cast<uint32_t>(new_capacity) > index);
+      Object* new_elements;
+      MaybeObject* maybe =
+          SetFastElementsCapacityAndLength(new_capacity, index + 1);
+      if (!maybe->ToObject(&new_elements)) return maybe;
+      FixedArray::cast(new_elements)->set(index, value);
+      return value;
+    }
+  }
+
+  // Otherwise default to slow case.
+  MaybeObject* result = NormalizeElements();
+  if (result->IsFailure()) return result;
+  return SetDictionaryElement(index, value, strict_mode, check_prototype);
+}
+
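
// A minimal sketch of the fast-store policy in SetFastElement above: write in
// place when the index fits, grow the dense store when the gap past the end
// is small, and normalize to a sparse (dictionary) representation otherwise.
// kMaxGap and the growth rule here are illustrative, not V8's exact values.
#include <cstddef>
#include <map>
#include <vector>

struct Elements {
  static const size_t kMaxGap = 1024;
  std::vector<int> fast;              // dense store; 0 stands in for a hole
  std::map<size_t, int> dictionary;   // sparse fallback
  bool is_fast = true;

  void Set(size_t index, int value) {
    if (is_fast) {
      if (index < fast.size()) {              // in bounds: store in place
        fast[index] = value;
        return;
      }
      if (index - fast.size() < kMaxGap) {    // small gap: grow the dense store
        fast.resize(index + 1, 0);
        fast[index] = value;
        return;
      }
      // Huge gap: fall back to the dictionary representation.
      for (size_t i = 0; i < fast.size(); ++i) {
        if (fast[i] != 0) dictionary[i] = fast[i];  // skip holes
      }
      fast.clear();
      is_fast = false;
    }
    dictionary[index] = value;
  }
};
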
+
+MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
+                                            Object* value,
+                                            StrictModeFlag strict_mode,
+                                            bool check_prototype) {
+  ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+  Isolate* isolate = GetIsolate();
+  Heap* heap = isolate->heap();
+
+  // Insert element in the dictionary.
+  FixedArray* elements = FixedArray::cast(this->elements());
+  bool is_arguments =
+      (elements->map() == heap->non_strict_arguments_elements_map());
+  SeededNumberDictionary* dictionary = NULL;
+  if (is_arguments) {
+    dictionary = SeededNumberDictionary::cast(elements->get(1));
+  } else {
+    dictionary = SeededNumberDictionary::cast(elements);
+  }
+
+  int entry = dictionary->FindEntry(index);
+  if (entry != SeededNumberDictionary::kNotFound) {
+    Object* element = dictionary->ValueAt(entry);
+    PropertyDetails details = dictionary->DetailsAt(entry);
+    if (details.type() == CALLBACKS) {
+      return SetElementWithCallback(element, index, value, this, strict_mode);
+    } else {
+      dictionary->UpdateMaxNumberKey(index);
+      // If put fails in strict mode, throw an exception.
+      if (!dictionary->ValueAtPut(entry, value) && strict_mode == kStrictMode) {
+        Handle<Object> holder(this);
+        Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+        Handle<Object> args[2] = { number, holder };
+        Handle<Object> error =
+            isolate->factory()->NewTypeError("strict_read_only_property",
+                                             HandleVector(args, 2));
+        return isolate->Throw(*error);
+      }
+    }
+  } else {
+    // Index not already used. Look for an accessor in the prototype chain.
+    if (check_prototype) {
+      bool found;
+      MaybeObject* result =
+          SetElementWithCallbackSetterInPrototypes(
+              index, value, &found, strict_mode);
+      if (found) return result;
+    }
+    // When we set the is_extensible flag to false we always force the
+    // elements into dictionary mode (and force them to stay there).
+    if (!map()->is_extensible()) {
+      if (strict_mode == kNonStrictMode) {
+        return isolate->heap()->undefined_value();
+      } else {
+        Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+        Handle<String> name = isolate->factory()->NumberToString(number);
+        Handle<Object> args[1] = { name };
+        Handle<Object> error =
+            isolate->factory()->NewTypeError("object_not_extensible",
+                                             HandleVector(args, 1));
+        return isolate->Throw(*error);
+      }
+    }
+    FixedArrayBase* new_dictionary;
+    MaybeObject* maybe = dictionary->AtNumberPut(index, value);
+    if (!maybe->To<FixedArrayBase>(&new_dictionary)) return maybe;
+    if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
+      if (is_arguments) {
+        elements->set(1, new_dictionary);
+      } else {
+        set_elements(new_dictionary);
+      }
+      dictionary = SeededNumberDictionary::cast(new_dictionary);
+    }
+  }
+
+  // Update the array length if this JSObject is an array.
+  if (IsJSArray()) {
+    MaybeObject* result =
+        JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
+    if (result->IsFailure()) return result;
+  }
+
+  // Attempt to put this object back in fast case.
+  if (ShouldConvertToFastElements()) {
+    uint32_t new_length = 0;
+    if (IsJSArray()) {
+      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
+    } else {
+      new_length = dictionary->max_number_key() + 1;
+    }
+    MaybeObject* result = CanConvertToFastDoubleElements()
+        ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
+        : SetFastElementsCapacityAndLength(new_length, new_length);
+    if (result->IsFailure()) return result;
+#ifdef DEBUG
+    if (FLAG_trace_normalization) {
+      PrintF("Object elements are fast case again:\n");
+      Print();
+    }
+#endif
+  }
+  return value;
+}
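// The new SetDictionaryElement above centralizes the slow-elements store
// path: look the index up in the number dictionary, route CALLBACKS entries
// to their setter, throw for read-only slots in strict mode, and refuse new
// indices on non-extensible objects.  A minimal standalone sketch of that
// decision flow follows; std::map stands in for SeededNumberDictionary, the
// prototype-setter lookup is omitted, and Element / StoreOutcome /
// DictionaryStore are illustrative names, not V8 API.
#include <cstdint>
#include <map>

struct Element {
  double value = 0;
  bool read_only = false;   // stands in for a CALLBACKS / read-only entry
};

enum class StoreOutcome { kStored, kThrewReadOnly, kThrewNotExtensible, kIgnored };

StoreOutcome DictionaryStore(std::map<uint32_t, Element>* dict,
                             bool extensible,
                             bool strict_mode,
                             uint32_t index,
                             double value) {
  auto it = dict->find(index);
  if (it != dict->end()) {
    if (it->second.read_only) {
      // Existing read-only slot: TypeError in strict mode, silent no-op otherwise.
      return strict_mode ? StoreOutcome::kThrewReadOnly : StoreOutcome::kIgnored;
    }
    it->second.value = value;
    return StoreOutcome::kStored;
  }
  if (!extensible) {
    // New index on a non-extensible object: TypeError only in strict mode.
    return strict_mode ? StoreOutcome::kThrewNotExtensible : StoreOutcome::kIgnored;
  }
  (*dict)[index] = Element{value, false};
  return StoreOutcome::kStored;
}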
+
+
+MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
+    uint32_t index,
+    Object* value,
+    StrictModeFlag strict_mode,
+    bool check_prototype) {
+  ASSERT(HasFastDoubleElements());
+
+  FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+  uint32_t elms_length = static_cast<uint32_t>(elms->length());
+
+  // If storing to an element that isn't in the array, pass the store request
+  // up the prototype chain before storing in the receiver's elements.
+  if (check_prototype &&
+      (index >= elms_length || elms->is_the_hole(index))) {
+    bool found;
+    MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
+                                                                   value,
+                                                                   &found,
+                                                                   strict_mode);
+    if (found) return result;
+  }
+
+  // If the value object is not a heap number, switch to fast elements and try
+  // again.
+  bool value_is_smi = value->IsSmi();
+  if (!value->IsNumber()) {
+    Object* obj;
+    uint32_t length = elms_length;
+    if (IsJSArray()) {
+      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+    }
+    MaybeObject* maybe_obj =
+        SetFastElementsCapacityAndLength(elms_length, length);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    return SetFastElement(index, value, strict_mode, check_prototype);
+  }
+
+  double double_value = value_is_smi
+      ? static_cast<double>(Smi::cast(value)->value())
+      : HeapNumber::cast(value)->value();
+
+  // Check whether there is extra space in the fixed array.
   if (index < elms_length) {
-    elms->set(index, value);
+    elms->set(index, double_value);
     if (IsJSArray()) {
       // Update the length of the array if needed.
       uint32_t array_length = 0;
@@ -7442,20 +8546,23 @@
   if ((index - elms_length) < kMaxGap) {
     // Try allocating extra space.
     int new_capacity = NewElementsCapacity(index+1);
-    if (new_capacity <= kMaxFastElementsLength ||
-        !ShouldConvertToSlowElements(new_capacity)) {
+    if (!ShouldConvertToSlowElements(new_capacity)) {
       ASSERT(static_cast<uint32_t>(new_capacity) > index);
       Object* obj;
       { MaybeObject* maybe_obj =
-            SetFastElementsCapacityAndLength(new_capacity, index + 1);
+            SetFastDoubleElementsCapacityAndLength(new_capacity,
+                                                   index + 1);
         if (!maybe_obj->ToObject(&obj)) return maybe_obj;
       }
-      FixedArray::cast(elements())->set(index, value);
+      FixedDoubleArray::cast(elements())->set(index, double_value);
       return value;
     }
   }
 
   // Otherwise default to slow case.
+  ASSERT(HasFastDoubleElements());
+  ASSERT(map()->has_fast_double_elements());
+  ASSERT(elements()->IsFixedDoubleArray());
   Object* obj;
   { MaybeObject* maybe_obj = NormalizeElements();
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
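// SetFastDoubleElement stores numbers unboxed in a FixedDoubleArray and falls
// back to tagged fast elements for anything that is not a number.  The sketch
// below shows the underlying idea of an unboxed double store with a "hole"
// sentinel; the class and the NaN bit patterns are illustrative, not V8's
// actual encoding.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

class DoubleElements {
 public:
  explicit DoubleElements(size_t length) : slots_(length, kHoleBits) {}

  bool is_the_hole(size_t i) const { return slots_[i] == kHoleBits; }

  void set(size_t i, double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    // Canonicalize NaNs so no stored value can collide with the hole pattern.
    if (value != value) bits = kCanonicalNanBits;
    slots_[i] = bits;
  }

  double get(size_t i) const {
    double value;
    std::memcpy(&value, &slots_[i], sizeof(value));
    return value;
  }

 private:
  static constexpr uint64_t kHoleBits = 0x7FF7DEADDEADBEEFULL;          // illustrative
  static constexpr uint64_t kCanonicalNanBits = 0x7FF8000000000000ULL;
  std::vector<uint64_t> slots_;
};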
@@ -7473,7 +8580,7 @@
   if (IsAccessCheckNeeded()) {
     Heap* heap = GetHeap();
     if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
-      HandleScope scope;
+      HandleScope scope(heap->isolate());
       Handle<Object> value_handle(value);
       heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
       return *value_handle;
@@ -7512,8 +8619,9 @@
   Isolate* isolate = GetIsolate();
   switch (GetElementsKind()) {
     case FAST_ELEMENTS:
-      // Fast case.
       return SetFastElement(index, value, strict_mode, check_prototype);
+    case FAST_DOUBLE_ELEMENTS:
+      return SetFastDoubleElement(index, value, strict_mode, check_prototype);
     case EXTERNAL_PIXEL_ELEMENTS: {
       ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
       return pixels->SetValue(index, value);
@@ -7549,101 +8657,34 @@
       ExternalFloatArray* array = ExternalFloatArray::cast(elements());
       return array->SetValue(index, value);
     }
-    case DICTIONARY_ELEMENTS: {
-      // Insert element in the dictionary.
-      FixedArray* elms = FixedArray::cast(elements());
-      NumberDictionary* dictionary = NumberDictionary::cast(elms);
-
-      int entry = dictionary->FindEntry(index);
-      if (entry != NumberDictionary::kNotFound) {
-        Object* element = dictionary->ValueAt(entry);
-        PropertyDetails details = dictionary->DetailsAt(entry);
-        if (details.type() == CALLBACKS) {
-          return SetElementWithCallback(element, index, value, this);
-        } else {
-          dictionary->UpdateMaxNumberKey(index);
-          // If put fails instrict mode, throw exception.
-          if (!dictionary->ValueAtPut(entry, value) &&
-              strict_mode == kStrictMode) {
-            Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
-            Handle<Object> holder(this);
-            Handle<Object> args[2] = { number, holder };
-            return isolate->Throw(
-                *isolate->factory()->NewTypeError("strict_read_only_property",
-                                                  HandleVector(args, 2)));
-          }
-        }
-      } else {
-        // Index not already used. Look for an accessor in the prototype chain.
-        if (check_prototype) {
-          bool found;
-          MaybeObject* result =
-              // Strict mode not needed. No-setter case already handled.
-              SetElementWithCallbackSetterInPrototypes(index, value, &found);
-          if (found) return result;
-        }
-        // When we set the is_extensible flag to false we always force
-        // the element into dictionary mode (and force them to stay there).
-        if (!map()->is_extensible()) {
-          if (strict_mode == kNonStrictMode) {
-            return isolate->heap()->undefined_value();
-          } else {
-            Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
-            Handle<String> index_string(
-                isolate->factory()->NumberToString(number));
-            Handle<Object> args[1] = { index_string };
-            return isolate->Throw(
-                *isolate->factory()->NewTypeError("object_not_extensible",
-                                                  HandleVector(args, 1)));
-          }
-        }
-        Object* result;
-        { MaybeObject* maybe_result = dictionary->AtNumberPut(index, value);
-          if (!maybe_result->ToObject(&result)) return maybe_result;
-        }
-        if (elms != FixedArray::cast(result)) {
-          set_elements(FixedArray::cast(result));
-        }
-      }
-
-      // Update the array length if this JSObject is an array.
-      if (IsJSArray()) {
-        JSArray* array = JSArray::cast(this);
-        Object* return_value;
-        { MaybeObject* maybe_return_value =
-              array->JSArrayUpdateLengthFromIndex(index, value);
-          if (!maybe_return_value->ToObject(&return_value)) {
-            return maybe_return_value;
-          }
-        }
-      }
-
-      // Attempt to put this object back in fast case.
-      if (ShouldConvertToFastElements()) {
-        uint32_t new_length = 0;
-        if (IsJSArray()) {
-          CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
-        } else {
-          new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
-        }
-        Object* obj;
-        { MaybeObject* maybe_obj =
-              SetFastElementsCapacityAndLength(new_length, new_length);
-          if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-        }
-#ifdef DEBUG
-        if (FLAG_trace_normalization) {
-          PrintF("Object elements are fast case again:\n");
-          Print();
-        }
-#endif
-      }
-
-      return value;
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
+      return array->SetValue(index, value);
     }
-    default:
-      UNREACHABLE();
-      break;
+    case DICTIONARY_ELEMENTS:
+      return SetDictionaryElement(index, value, strict_mode, check_prototype);
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      uint32_t length = parameter_map->length();
+      Object* probe =
+          (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+      if (probe != NULL && !probe->IsTheHole()) {
+        Context* context = Context::cast(parameter_map->get(0));
+        int context_index = Smi::cast(probe)->value();
+        ASSERT(!context->get(context_index)->IsTheHole());
+        context->set(context_index, value);
+        return value;
+      } else {
+        // Object is not mapped, defer to the arguments.
+        FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+        if (arguments->IsDictionary()) {
+          return SetDictionaryElement(index, value, strict_mode,
+                                      check_prototype);
+        } else {
+          return SetFastElement(index, value, strict_mode, check_prototype);
+        }
+      }
+    }
   }
   // All possible cases have been handled above. Add a return to avoid the
   // complaints from the compiler.
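// The NON_STRICT_ARGUMENTS_ELEMENTS case above works on a "parameter map":
// slot 0 holds the context, slot 1 the ordinary backing store, and slots 2+
// point mapped argument indices at context slots.  A store to a mapped index
// therefore writes through to the context; anything else goes to the backing
// store.  Minimal sketch (std::vector/std::optional stand in for the
// FixedArray and Context objects; MappedArguments is an illustrative name):
#include <cstddef>
#include <optional>
#include <vector>

struct MappedArguments {
  std::vector<double>* context_slots;           // shared with the function context
  std::vector<std::optional<size_t>> mapping;   // arg index -> context slot, if aliased
  std::vector<double> backing;                  // unmapped elements

  void Set(size_t index, double value) {
    if (index < mapping.size() && mapping[index]) {
      // Still aliased: the write is visible through the named parameter.
      (*context_slots)[*mapping[index]] = value;
      return;
    }
    if (index >= backing.size()) backing.resize(index + 1);
    backing[index] = value;
  }
};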
@@ -7670,61 +8711,6 @@
 }
 
 
-MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
-                                                 uint32_t index) {
-  // Get element works for both JSObject and JSArray since
-  // JSArray::length cannot change.
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      FixedArray* elms = FixedArray::cast(elements());
-      if (index < static_cast<uint32_t>(elms->length())) {
-        Object* value = elms->get(index);
-        if (!value->IsTheHole()) return value;
-      }
-      break;
-    }
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
-      MaybeObject* maybe_value = GetExternalElement(index);
-      Object* value;
-      if (!maybe_value->ToObject(&value)) return maybe_value;
-      if (!value->IsUndefined()) return value;
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      NumberDictionary* dictionary = element_dictionary();
-      int entry = dictionary->FindEntry(index);
-      if (entry != NumberDictionary::kNotFound) {
-        Object* element = dictionary->ValueAt(entry);
-        PropertyDetails details = dictionary->DetailsAt(entry);
-        if (details.type() == CALLBACKS) {
-          return GetElementWithCallback(receiver,
-                                        element,
-                                        index,
-                                        this);
-        }
-        return element;
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // Continue searching via the prototype chain.
-  Object* pt = GetPrototype();
-  if (pt->IsNull()) return GetHeap()->undefined_value();
-  return pt->GetElementWithReceiver(receiver, index);
-}
-
-
 MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
                                                  uint32_t index) {
   Isolate* isolate = GetIsolate();
@@ -7752,229 +8738,157 @@
     if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
   }
 
-  MaybeObject* raw_result =
-      holder_handle->GetElementPostInterceptor(*this_handle, index);
+  Heap* heap = holder_handle->GetHeap();
+  ElementsAccessor* handler = holder_handle->GetElementsAccessor();
+  MaybeObject* raw_result = handler->Get(holder_handle->elements(),
+                                         index,
+                                         *holder_handle,
+                                         *this_handle);
+  if (raw_result != heap->the_hole_value()) return raw_result;
+
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
-  return raw_result;
-}
 
-
-MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
-                                              uint32_t index) {
-  // Check access rights if needed.
-  if (IsAccessCheckNeeded()) {
-    Heap* heap = GetHeap();
-    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
-      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
-      return heap->undefined_value();
-    }
-  }
-
-  if (HasIndexedInterceptor()) {
-    return GetElementWithInterceptor(receiver, index);
-  }
-
-  // Get element works for both JSObject and JSArray since
-  // JSArray::length cannot change.
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      FixedArray* elms = FixedArray::cast(elements());
-      if (index < static_cast<uint32_t>(elms->length())) {
-        Object* value = elms->get(index);
-        if (!value->IsTheHole()) return value;
-      }
-      break;
-    }
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
-      MaybeObject* maybe_value = GetExternalElement(index);
-      Object* value;
-      if (!maybe_value->ToObject(&value)) return maybe_value;
-      if (!value->IsUndefined()) return value;
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      NumberDictionary* dictionary = element_dictionary();
-      int entry = dictionary->FindEntry(index);
-      if (entry != NumberDictionary::kNotFound) {
-        Object* element = dictionary->ValueAt(entry);
-        PropertyDetails details = dictionary->DetailsAt(entry);
-        if (details.type() == CALLBACKS) {
-          return GetElementWithCallback(receiver,
-                                        element,
-                                        index,
-                                        this);
-        }
-        return element;
-      }
-      break;
-    }
-  }
-
-  Object* pt = GetPrototype();
-  Heap* heap = GetHeap();
+  Object* pt = holder_handle->GetPrototype();
   if (pt == heap->null_value()) return heap->undefined_value();
-  return pt->GetElementWithReceiver(receiver, index);
-}
-
-
-MaybeObject* JSObject::GetExternalElement(uint32_t index) {
-  // Get element works for both JSObject and JSArray since
-  // JSArray::length cannot change.
-  switch (GetElementsKind()) {
-    case EXTERNAL_PIXEL_ELEMENTS: {
-      ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
-      if (index < static_cast<uint32_t>(pixels->length())) {
-        uint8_t value = pixels->get(index);
-        return Smi::FromInt(value);
-      }
-      break;
-    }
-    case EXTERNAL_BYTE_ELEMENTS: {
-      ExternalByteArray* array = ExternalByteArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        int8_t value = array->get(index);
-        return Smi::FromInt(value);
-      }
-      break;
-    }
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
-      ExternalUnsignedByteArray* array =
-          ExternalUnsignedByteArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        uint8_t value = array->get(index);
-        return Smi::FromInt(value);
-      }
-      break;
-    }
-    case EXTERNAL_SHORT_ELEMENTS: {
-      ExternalShortArray* array = ExternalShortArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        int16_t value = array->get(index);
-        return Smi::FromInt(value);
-      }
-      break;
-    }
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
-      ExternalUnsignedShortArray* array =
-          ExternalUnsignedShortArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        uint16_t value = array->get(index);
-        return Smi::FromInt(value);
-      }
-      break;
-    }
-    case EXTERNAL_INT_ELEMENTS: {
-      ExternalIntArray* array = ExternalIntArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        int32_t value = array->get(index);
-        return GetHeap()->NumberFromInt32(value);
-      }
-      break;
-    }
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-      ExternalUnsignedIntArray* array =
-          ExternalUnsignedIntArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        uint32_t value = array->get(index);
-        return GetHeap()->NumberFromUint32(value);
-      }
-      break;
-    }
-    case EXTERNAL_FLOAT_ELEMENTS: {
-      ExternalFloatArray* array = ExternalFloatArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        float value = array->get(index);
-        return GetHeap()->AllocateHeapNumber(value);
-      }
-      break;
-    }
-    case FAST_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-  return GetHeap()->undefined_value();
+  return pt->GetElementWithReceiver(*this_handle, index);
 }
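// GetElementWithInterceptor now asks GetElementsAccessor()->Get(...) for the
// post-interceptor lookup instead of switching over the elements kind inline,
// which is what lets GetElementPostInterceptor and GetExternalElement be
// deleted above.  Sketch of that accessor-per-kind dispatch (all names here
// are illustrative, not the real ElementsAccessor interface):
#include <array>
#include <cstdint>
#include <optional>
#include <vector>

enum ElementsKind { kFastKind, kDoubleKind, kKindCount };

struct ElementsAccessor {
  virtual ~ElementsAccessor() = default;
  virtual std::optional<double> Get(const void* backing, uint32_t index) const = 0;
};

struct FastAccessor : ElementsAccessor {
  std::optional<double> Get(const void* backing, uint32_t index) const override {
    const auto* store = static_cast<const std::vector<double>*>(backing);
    if (index >= store->size()) return std::nullopt;   // treated as "the hole"
    return (*store)[index];
  }
};

const ElementsAccessor* AccessorFor(ElementsKind kind) {
  static const FastAccessor fast;
  // A full table holds one handler per kind; only one is sketched here.
  static const std::array<const ElementsAccessor*, kKindCount> table = {&fast, &fast};
  return table[kind];
}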
 
 
 bool JSObject::HasDenseElements() {
   int capacity = 0;
-  int number_of_elements = 0;
+  int used = 0;
+  GetElementsCapacityAndUsage(&capacity, &used);
+  return (capacity == 0) || (used > (capacity / 2));
+}
 
+
+void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
+  *capacity = 0;
+  *used = 0;
+
+  FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
+  FixedArray* backing_store = NULL;
   switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      FixedArray* elms = FixedArray::cast(elements());
-      capacity = elms->length();
-      for (int i = 0; i < capacity; i++) {
-        if (!elms->get(i)->IsTheHole()) number_of_elements++;
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      backing_store_base =
+          FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
+      backing_store = FixedArray::cast(backing_store_base);
+      if (backing_store->IsDictionary()) {
+        SeededNumberDictionary* dictionary =
+            SeededNumberDictionary::cast(backing_store);
+        *capacity = dictionary->Capacity();
+        *used = dictionary->NumberOfElements();
+        break;
+      }
+      // Fall through.
+    case FAST_ELEMENTS:
+      backing_store = FixedArray::cast(backing_store_base);
+      *capacity = backing_store->length();
+      for (int i = 0; i < *capacity; ++i) {
+        if (!backing_store->get(i)->IsTheHole()) ++(*used);
+      }
+      break;
+    case DICTIONARY_ELEMENTS: {
+      SeededNumberDictionary* dictionary =
+          SeededNumberDictionary::cast(FixedArray::cast(elements()));
+      *capacity = dictionary->Capacity();
+      *used = dictionary->NumberOfElements();
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+      *capacity = elms->length();
+      for (int i = 0; i < *capacity; i++) {
+        if (!elms->is_the_hole(i)) ++(*used);
       }
       break;
     }
-    case EXTERNAL_PIXEL_ELEMENTS:
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
     case EXTERNAL_SHORT_ELEMENTS:
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
-      return true;
-    }
-    case DICTIONARY_ELEMENTS: {
-      NumberDictionary* dictionary = NumberDictionary::cast(elements());
-      capacity = dictionary->Capacity();
-      number_of_elements = dictionary->NumberOfElements();
-      break;
-    }
-    default:
-      UNREACHABLE();
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      // External arrays are considered 100% used.
+      ExternalArray* external_array = ExternalArray::cast(elements());
+      *capacity = external_array->length();
+      *used = external_array->length();
       break;
   }
-
-  if (capacity == 0) return true;
-  return (number_of_elements > (capacity / 2));
 }
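// GetElementsCapacityAndUsage factors the capacity/used counting out of
// HasDenseElements so the same numbers can feed the conversion heuristics
// below.  "Dense" reduces to this predicate (a direct restatement of the
// code above, shown standalone for clarity):
bool HasDenseElements(int capacity, int used) {
  return capacity == 0 || used > capacity / 2;
}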
 
 
 bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
-  ASSERT(HasFastElements());
-  // Keep the array in fast case if the current backing storage is
-  // almost filled and if the new capacity is no more than twice the
-  // old capacity.
-  int elements_length = FixedArray::cast(elements())->length();
-  return !HasDenseElements() || ((new_capacity / 2) > elements_length);
+  STATIC_ASSERT(kMaxUncheckedOldFastElementsLength <=
+                kMaxUncheckedFastElementsLength);
+  if (new_capacity <= kMaxUncheckedOldFastElementsLength ||
+      (new_capacity <= kMaxUncheckedFastElementsLength &&
+       GetHeap()->InNewSpace(this))) {
+    return false;
+  }
+  // If the fast-case backing storage takes up roughly three times as
+  // much space (in machine words) as a dictionary backing storage
+  // would, the object should have slow elements.
+  int old_capacity = 0;
+  int used_elements = 0;
+  GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+  int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
+      SeededNumberDictionary::kEntrySize;
+  return 3 * dictionary_size <= new_capacity;
 }
 
 
 bool JSObject::ShouldConvertToFastElements() {
-  ASSERT(HasDictionaryElements());
-  NumberDictionary* dictionary = NumberDictionary::cast(elements());
+  ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
   // If the elements are sparse, we should not go back to fast case.
   if (!HasDenseElements()) return false;
-  // If an element has been added at a very high index in the elements
-  // dictionary, we cannot go back to fast case.
-  if (dictionary->requires_slow_elements()) return false;
   // An object requiring access checks is never allowed to have fast
   // elements.  If it had fast elements we would skip security checks.
   if (IsAccessCheckNeeded()) return false;
-  // If the dictionary backing storage takes up roughly half as much
-  // space as a fast-case backing storage would the array should have
-  // fast elements.
-  uint32_t length = 0;
-  if (IsJSArray()) {
-    CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+
+  FixedArray* elements = FixedArray::cast(this->elements());
+  SeededNumberDictionary* dictionary = NULL;
+  if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
+    dictionary = SeededNumberDictionary::cast(elements->get(1));
   } else {
-    length = dictionary->max_number_key();
+    dictionary = SeededNumberDictionary::cast(elements);
   }
-  return static_cast<uint32_t>(dictionary->Capacity()) >=
-      (length / (2 * NumberDictionary::kEntrySize));
+  // If an element has been added at a very high index in the elements
+  // dictionary, we cannot go back to fast case.
+  if (dictionary->requires_slow_elements()) return false;
+  // If the dictionary backing storage takes up roughly half as much
+  // space (in machine words) as a fast-case backing storage would,
+  // the object should have fast elements.
+  uint32_t array_size = 0;
+  if (IsJSArray()) {
+    CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_size));
+  } else {
+    array_size = dictionary->max_number_key();
+  }
+  uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
+      SeededNumberDictionary::kEntrySize;
+  return 2 * dictionary_size >= array_size;
+}
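// The two conversion heuristics above are now measured in backing-store
// words: go to dictionary elements when the requested fast capacity costs at
// least three times what a dictionary for the used elements would, and go
// back to fast elements when twice the dictionary's footprint covers the
// index range.  Sketch of the arithmetic; kEntrySize mirrors the
// (key, value, details) triple and RoundUpToPowerOf2/DictionaryWords are
// stand-ins for SeededNumberDictionary::ComputeCapacity, not V8 API.
#include <cstdint>

constexpr uint32_t kEntrySize = 3;

inline uint32_t RoundUpToPowerOf2(uint32_t x) {   // assumes x < 2^31
  uint32_t result = 1;
  while (result < x) result <<= 1;
  return result;
}

inline uint32_t DictionaryWords(uint32_t used_elements) {
  // At least twice the used elements, rounded up to a power of two.
  return RoundUpToPowerOf2(used_elements * 2) * kEntrySize;
}

// Fast -> dictionary (ShouldConvertToSlowElements).
inline bool ShouldGoSlow(uint32_t new_fast_capacity, uint32_t used_elements) {
  return 3 * DictionaryWords(used_elements) <= new_fast_capacity;
}

// Dictionary -> fast (ShouldConvertToFastElements).
inline bool ShouldGoFast(uint32_t dictionary_capacity, uint32_t array_size) {
  return 2 * dictionary_capacity * kEntrySize >= array_size;
}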
+
+
+bool JSObject::CanConvertToFastDoubleElements() {
+  if (FLAG_unbox_double_arrays) {
+    ASSERT(HasDictionaryElements());
+    SeededNumberDictionary* dictionary =
+        SeededNumberDictionary::cast(elements());
+    for (int i = 0; i < dictionary->Capacity(); i++) {
+      Object* key = dictionary->KeyAt(i);
+      if (key->IsNumber()) {
+        if (!dictionary->ValueAt(i)->IsNumber()) return false;
+      }
+    }
+    return true;
+  } else {
+    return false;
+  }
 }
 
 
@@ -8042,7 +8956,7 @@
 
 
 MaybeObject* JSObject::GetPropertyPostInterceptor(
-    JSObject* receiver,
+    JSReceiver* receiver,
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
@@ -8060,7 +8974,7 @@
 
 
 MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
-    JSObject* receiver,
+    JSReceiver* receiver,
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
@@ -8074,13 +8988,13 @@
 
 
 MaybeObject* JSObject::GetPropertyWithInterceptor(
-    JSObject* receiver,
+    JSReceiver* receiver,
     String* name,
     PropertyAttributes* attributes) {
   Isolate* isolate = GetIsolate();
   InterceptorInfo* interceptor = GetNamedInterceptor();
   HandleScope scope(isolate);
-  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSReceiver> receiver_handle(receiver);
   Handle<JSObject> holder_handle(this);
   Handle<String> name_handle(name);
 
@@ -8151,6 +9065,15 @@
       return (index < length) &&
           !FixedArray::cast(elements())->get(index)->IsTheHole();
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>(
+              Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+      return (index < length) &&
+          !FixedDoubleArray::cast(elements())->is_the_hole(index);
+      break;
+    }
     case EXTERNAL_PIXEL_ELEMENTS: {
       ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
       return index < static_cast<uint32_t>(pixels->length());
@@ -8161,16 +9084,17 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       return index < static_cast<uint32_t>(array->length());
     }
     case DICTIONARY_ELEMENTS: {
       return element_dictionary()->FindEntry(index)
-          != NumberDictionary::kNotFound;
+          != SeededNumberDictionary::kNotFound;
     }
-    default:
-      UNREACHABLE();
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNIMPLEMENTED();
       break;
   }
   // All possibilities have been handled above already.
@@ -8343,7 +9267,8 @@
     ASSERT(storage->length() >= index);
   } else {
     property_dictionary()->CopyKeysTo(storage,
-                                      index);
+                                      index,
+                                      StringDictionary::UNSORTED);
   }
 }
 
@@ -8386,6 +9311,21 @@
       ASSERT(!storage || storage->length() >= counter);
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      int length = IsJSArray() ?
+          Smi::cast(JSArray::cast(this)->length())->value() :
+          FixedDoubleArray::cast(elements())->length();
+      for (int i = 0; i < length; i++) {
+        if (!FixedDoubleArray::cast(elements())->is_the_hole(i)) {
+          if (storage != NULL) {
+            storage->set(counter, Smi::FromInt(i));
+          }
+          counter++;
+        }
+      }
+      ASSERT(!storage || storage->length() >= counter);
+      break;
+    }
     case EXTERNAL_PIXEL_ELEMENTS: {
       int length = ExternalPixelArray::cast(elements())->length();
       while (counter < length) {
@@ -8403,7 +9343,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       int length = ExternalArray::cast(elements())->length();
       while (counter < length) {
         if (storage != NULL) {
@@ -8416,14 +9357,54 @@
     }
     case DICTIONARY_ELEMENTS: {
       if (storage != NULL) {
-        element_dictionary()->CopyKeysTo(storage, filter);
+        element_dictionary()->CopyKeysTo(storage,
+                                         filter,
+                                         SeededNumberDictionary::SORTED);
       }
-      counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
+      counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
       break;
     }
-    default:
-      UNREACHABLE();
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      int mapped_length = parameter_map->length() - 2;
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      if (arguments->IsDictionary()) {
+        // Copy the keys from arguments first, because Dictionary::CopyKeysTo
+        // will insert in storage starting at index 0.
+        SeededNumberDictionary* dictionary =
+            SeededNumberDictionary::cast(arguments);
+        if (storage != NULL) {
+          dictionary->CopyKeysTo(
+              storage, filter, SeededNumberDictionary::UNSORTED);
+        }
+        counter += dictionary->NumberOfElementsFilterAttributes(filter);
+        for (int i = 0; i < mapped_length; ++i) {
+          if (!parameter_map->get(i + 2)->IsTheHole()) {
+            if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+            ++counter;
+          }
+        }
+        if (storage != NULL) storage->SortPairs(storage, counter);
+
+      } else {
+        int backing_length = arguments->length();
+        int i = 0;
+        for (; i < mapped_length; ++i) {
+          if (!parameter_map->get(i + 2)->IsTheHole()) {
+            if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+            ++counter;
+          } else if (i < backing_length && !arguments->get(i)->IsTheHole()) {
+            if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+            ++counter;
+          }
+        }
+        for (; i < backing_length; ++i) {
+          if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+          ++counter;
+        }
+      }
       break;
+    }
   }
 
   if (this->IsJSValue()) {
@@ -8591,8 +9572,8 @@
 // Utf8SymbolKey carries a vector of chars as key.
 class Utf8SymbolKey : public HashTableKey {
  public:
-  explicit Utf8SymbolKey(Vector<const char> string)
-      : string_(string), hash_field_(0) { }
+  explicit Utf8SymbolKey(Vector<const char> string, uint32_t seed)
+      : string_(string), hash_field_(0), seed_(seed) { }
 
   bool IsMatch(Object* string) {
     return String::cast(string)->IsEqualTo(string_);
@@ -8603,7 +9584,7 @@
     unibrow::Utf8InputBuffer<> buffer(string_.start(),
                                       static_cast<unsigned>(string_.length()));
     chars_ = buffer.Length();
-    hash_field_ = String::ComputeHashField(&buffer, chars_);
+    hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
     uint32_t result = hash_field_ >> String::kHashShift;
     ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
     return result;
@@ -8622,17 +9603,18 @@
   Vector<const char> string_;
   uint32_t hash_field_;
   int chars_;  // Caches the number of characters when computing the hash code.
+  uint32_t seed_;
 };
 
 
 template <typename Char>
 class SequentialSymbolKey : public HashTableKey {
  public:
-  explicit SequentialSymbolKey(Vector<const Char> string)
-      : string_(string), hash_field_(0) { }
+  explicit SequentialSymbolKey(Vector<const Char> string, uint32_t seed)
+      : string_(string), hash_field_(0), seed_(seed) { }
 
   uint32_t Hash() {
-    StringHasher hasher(string_.length());
+    StringHasher hasher(string_.length(), seed_);
 
     // Very long strings have a trivial hash that doesn't inspect the
     // string contents.
@@ -8668,14 +9650,15 @@
 
   Vector<const Char> string_;
   uint32_t hash_field_;
+  uint32_t seed_;
 };
 
 
 
 class AsciiSymbolKey : public SequentialSymbolKey<char> {
  public:
-  explicit AsciiSymbolKey(Vector<const char> str)
-      : SequentialSymbolKey<char>(str) { }
+  AsciiSymbolKey(Vector<const char> str, uint32_t seed)
+      : SequentialSymbolKey<char>(str, seed) { }
 
   bool IsMatch(Object* string) {
     return String::cast(string)->IsAsciiEqualTo(string_);
@@ -8688,10 +9671,77 @@
 };
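// The symbol keys above now thread a per-heap hash seed into StringHasher,
// so symbol-table hash values are not predictable across processes (a
// hash-flooding mitigation).  Standalone sketch of a seeded one-at-a-time
// hash in the same shape as the TwoCharHashTableKey change further down;
// the mask width is illustrative and SeededStringHash is not a V8 function.
#include <cstdint>
#include <string_view>

constexpr uint32_t kHashBitMask = (1u << 30) - 1;  // illustrative field width
constexpr uint32_t kZeroHash = 27;                 // fallback when the hash is 0

uint32_t SeededStringHash(std::string_view chars, uint32_t seed) {
  uint32_t hash = seed;                            // the seed is mixed in first
  for (unsigned char c : chars) {
    hash += c;
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;
  return hash == 0 ? kZeroHash : hash;
}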
 
 
+class SubStringAsciiSymbolKey : public HashTableKey {
+ public:
+  explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
+                                   int from,
+                                   int length,
+                                   uint32_t seed)
+      : string_(string), from_(from), length_(length), seed_(seed) { }
+
+  uint32_t Hash() {
+    ASSERT(length_ >= 0);
+    ASSERT(from_ + length_ <= string_->length());
+    StringHasher hasher(length_, string_->GetHeap()->HashSeed());
+
+    // Very long strings have a trivial hash that doesn't inspect the
+    // string contents.
+    if (hasher.has_trivial_hash()) {
+      hash_field_ = hasher.GetHashField();
+    } else {
+      int i = 0;
+      // Do the iterative array index computation as long as there is a
+      // chance this is an array index.
+      while (i < length_ && hasher.is_array_index()) {
+        hasher.AddCharacter(static_cast<uc32>(
+            string_->SeqAsciiStringGet(i + from_)));
+        i++;
+      }
+
+      // Process the remaining characters without updating the array
+      // index.
+      while (i < length_) {
+        hasher.AddCharacterNoIndex(static_cast<uc32>(
+            string_->SeqAsciiStringGet(i + from_)));
+        i++;
+      }
+      hash_field_ = hasher.GetHashField();
+    }
+
+    uint32_t result = hash_field_ >> String::kHashShift;
+    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+    return result;
+  }
+
+
+  uint32_t HashForObject(Object* other) {
+    return String::cast(other)->Hash();
+  }
+
+  bool IsMatch(Object* string) {
+    Vector<const char> chars(string_->GetChars() + from_, length_);
+    return String::cast(string)->IsAsciiEqualTo(chars);
+  }
+
+  MaybeObject* AsObject() {
+    if (hash_field_ == 0) Hash();
+    Vector<const char> chars(string_->GetChars() + from_, length_);
+    return HEAP->AllocateAsciiSymbol(chars, hash_field_);
+  }
+
+ private:
+  Handle<SeqAsciiString> string_;
+  int from_;
+  int length_;
+  uint32_t hash_field_;
+  uint32_t seed_;
+};
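// SubStringAsciiSymbolKey lets the symbol table be probed with a
// (string, from, length) triple: the hash runs directly over the character
// range and a new string is only allocated when the symbol is missing.  The
// same idea with standard containers (needs C++20 heterogeneous lookup;
// TransparentHash/InternSubstring are illustrative, not V8 API):
#include <cstddef>
#include <string>
#include <string_view>
#include <unordered_set>

struct TransparentHash {
  using is_transparent = void;
  size_t operator()(std::string_view s) const {
    return std::hash<std::string_view>{}(s);
  }
};

using SymbolTable =
    std::unordered_set<std::string, TransparentHash, std::equal_to<>>;

const std::string& InternSubstring(SymbolTable& table,
                                   const std::string& parent,
                                   size_t from, size_t length) {
  std::string_view view(parent.data() + from, length);
  auto it = table.find(view);                       // hashes the view, no copy
  if (it == table.end()) it = table.insert(std::string(view)).first;
  return *it;
}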
+
+
 class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
  public:
-  explicit TwoByteSymbolKey(Vector<const uc16> str)
-      : SequentialSymbolKey<uc16>(str) { }
+  explicit TwoByteSymbolKey(Vector<const uc16> str, uint32_t seed)
+      : SequentialSymbolKey<uc16>(str, seed) { }
 
   bool IsMatch(Object* string) {
     return String::cast(string)->IsTwoByteEqualTo(string_);
@@ -8764,11 +9814,8 @@
 template<typename Shape, typename Key>
 MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
                                              PretenureFlag pretenure) {
-  const int kMinCapacity = 32;
-  int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
-  if (capacity < kMinCapacity) {
-    capacity = kMinCapacity;  // Guarantee min capacity.
-  } else if (capacity > HashTable::kMaxCapacity) {
+  int capacity = ComputeCapacity(at_least_space_for);
+  if (capacity > HashTable::kMaxCapacity) {
     return Failure::OutOfMemoryException();
   }
 
@@ -8825,6 +9872,40 @@
 
 
 template<typename Shape, typename Key>
+MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
+  ASSERT(NumberOfElements() < new_table->Capacity());
+
+  AssertNoAllocation no_gc;
+  WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
+
+  // Copy prefix to new array.
+  for (int i = kPrefixStartIndex;
+       i < kPrefixStartIndex + Shape::kPrefixSize;
+       i++) {
+    new_table->set(i, get(i), mode);
+  }
+
+  // Rehash the elements.
+  int capacity = Capacity();
+  for (int i = 0; i < capacity; i++) {
+    uint32_t from_index = EntryToIndex(i);
+    Object* k = get(from_index);
+    if (IsKey(k)) {
+      uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
+      uint32_t insertion_index =
+          EntryToIndex(new_table->FindInsertionEntry(hash));
+      for (int j = 0; j < Shape::kEntrySize; j++) {
+        new_table->set(insertion_index + j, get(from_index + j), mode);
+      }
+    }
+  }
+  new_table->SetNumberOfElements(NumberOfElements());
+  new_table->SetNumberOfDeletedElements(0);
+  return new_table;
+}
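// EnsureCapacity and the new Shrink below both funnel through Rehash, which
// copies the prefix and re-inserts every live entry into the freshly
// allocated table at its new probe position.  Toy sketch with a
// linear-probing table of nonzero keys (assumes the new capacity is a power
// of two and larger than the live-entry count; not the V8 data layout):
#include <cstddef>
#include <cstdint>
#include <vector>

using Table = std::vector<uint32_t>;   // 0 marks an empty slot

void Insert(Table& table, uint32_t key, uint32_t (*hash)(uint32_t)) {
  size_t mask = table.size() - 1;
  size_t i = hash(key) & mask;
  while (table[i] != 0) i = (i + 1) & mask;   // linear probe to a free slot
  table[i] = key;
}

Table Rehash(const Table& old_table, size_t new_capacity,
             uint32_t (*hash)(uint32_t)) {
  Table new_table(new_capacity, 0);
  for (uint32_t key : old_table) {
    if (key != 0) Insert(new_table, key, hash);
  }
  return new_table;
}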
+
+
+template<typename Shape, typename Key>
 MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
   int capacity = Capacity();
   int nof = NumberOfElements() + n;
@@ -8846,32 +9927,36 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  AssertNoAllocation no_gc;
-  HashTable* table = HashTable::cast(obj);
-  WriteBarrierMode mode = table->GetWriteBarrierMode(no_gc);
+  return Rehash(HashTable::cast(obj), key);
+}
 
-  // Copy prefix to new array.
-  for (int i = kPrefixStartIndex;
-       i < kPrefixStartIndex + Shape::kPrefixSize;
-       i++) {
-    table->set(i, get(i), mode);
+
+template<typename Shape, typename Key>
+MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
+  int capacity = Capacity();
+  int nof = NumberOfElements();
+
+  // Shrink to fit the number of elements if only a quarter of the
+  // capacity is filled with elements.
+  if (nof > (capacity >> 2)) return this;
+  // Allocate a new dictionary with room for at least the current
+  // number of elements. The allocation method will make sure that
+  // there is extra room in the dictionary for additions. Don't go
+  // lower than room for 16 elements.
+  int at_least_room_for = nof;
+  if (at_least_room_for < 16) return this;
+
+  const int kMinCapacityForPretenure = 256;
+  bool pretenure =
+      (at_least_room_for > kMinCapacityForPretenure) &&
+      !GetHeap()->InNewSpace(this);
+  Object* obj;
+  { MaybeObject* maybe_obj =
+        Allocate(at_least_room_for, pretenure ? TENURED : NOT_TENURED);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
-  // Rehash the elements.
-  for (int i = 0; i < capacity; i++) {
-    uint32_t from_index = EntryToIndex(i);
-    Object* k = get(from_index);
-    if (IsKey(k)) {
-      uint32_t hash = Shape::HashForObject(key, k);
-      uint32_t insertion_index =
-          EntryToIndex(table->FindInsertionEntry(hash));
-      for (int j = 0; j < Shape::kEntrySize; j++) {
-        table->set(insertion_index + j, get(from_index + j), mode);
-      }
-    }
-  }
-  table->SetNumberOfElements(NumberOfElements());
-  table->SetNumberOfDeletedElements(0);
-  return table;
+
+  return Rehash(HashTable::cast(obj), key);
 }
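// Shrink rebuilds the table only when it is at most a quarter full and still
// holds at least 16 elements (survivors above kMinCapacityForPretenure that
// are already out of new space get a tenured allocation).  The trigger
// condition, restated standalone:
bool ShouldShrink(int capacity, int number_of_elements) {
  return number_of_elements <= (capacity >> 2) && number_of_elements >= 16;
}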
 
 
@@ -8898,36 +9983,56 @@
 
 template class HashTable<MapCacheShape, HashTableKey*>;
 
+template class HashTable<ObjectHashTableShape, JSObject*>;
+
 template class Dictionary<StringDictionaryShape, String*>;
 
-template class Dictionary<NumberDictionaryShape, uint32_t>;
+template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
 
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Allocate(
-    int);
+template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
+
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+    Allocate(int at_least_space_for);
+
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+    Allocate(int at_least_space_for);
 
 template MaybeObject* Dictionary<StringDictionaryShape, String*>::Allocate(
     int);
 
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AtPut(
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut(
     uint32_t, Object*);
 
-template Object* Dictionary<NumberDictionaryShape, uint32_t>::SlowReverseLookup(
-    Object*);
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+    AtPut(uint32_t, Object*);
+
+template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+    SlowReverseLookup(Object* value);
 
 template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
     Object*);
 
-template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
-    FixedArray*, PropertyAttributes);
+template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo(
+    FixedArray*,
+    PropertyAttributes,
+    Dictionary<SeededNumberDictionaryShape, uint32_t>::SortMode);
 
 template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
     int, JSObject::DeleteMode);
 
-template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
-    int, JSObject::DeleteMode);
+template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+    DeleteProperty(int, JSObject::DeleteMode);
+
+template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink(
+    String*);
+
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink(
+    uint32_t);
 
 template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
-    FixedArray*, int);
+    FixedArray*,
+    int,
+    Dictionary<StringDictionaryShape, String*>::SortMode);
 
 template int
 Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
@@ -8940,32 +10045,41 @@
 Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
 
 template int
-Dictionary<NumberDictionaryShape, uint32_t>::NumberOfElementsFilterAttributes(
-    PropertyAttributes);
+Dictionary<SeededNumberDictionaryShape, uint32_t>::
+    NumberOfElementsFilterAttributes(PropertyAttributes);
 
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Add(
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Add(
     uint32_t, Object*, PropertyDetails);
 
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::Add(
+    uint32_t, Object*, PropertyDetails);
+
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+    EnsureCapacity(int, uint32_t);
+
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
     EnsureCapacity(int, uint32_t);
 
 template MaybeObject* Dictionary<StringDictionaryShape, String*>::
     EnsureCapacity(int, String*);
 
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AddEntry(
-    uint32_t, Object*, PropertyDetails, uint32_t);
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+    AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
+
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+    AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
 
 template MaybeObject* Dictionary<StringDictionaryShape, String*>::AddEntry(
     String*, Object*, PropertyDetails, uint32_t);
 
 template
-int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
+int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements();
 
 template
 int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
 
 template
-int HashTable<NumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
 
 
 // Collates undefined and unexisting elements below limit from position
@@ -8975,7 +10089,7 @@
   // Must stay in dictionary mode, either because of requires_slow_elements,
   // or because we are not going to sort (and therefore compact) all of the
   // elements.
-  NumberDictionary* dict = element_dictionary();
+  SeededNumberDictionary* dict = element_dictionary();
   HeapNumber* result_double = NULL;
   if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
     // Allocate space for result before we start mutating the object.
@@ -8988,10 +10102,10 @@
 
   Object* obj;
   { MaybeObject* maybe_obj =
-        NumberDictionary::Allocate(dict->NumberOfElements());
+        SeededNumberDictionary::Allocate(dict->NumberOfElements());
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
-  NumberDictionary* new_dict = NumberDictionary::cast(obj);
+  SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
 
   AssertNoAllocation no_alloc;
 
@@ -9077,7 +10191,7 @@
   if (HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
     // Ordering is irrelevant, since we are going to sort anyway.
-    NumberDictionary* dict = element_dictionary();
+    SeededNumberDictionary* dict = element_dictionary();
     if (IsJSArray() || dict->requires_slow_elements() ||
         dict->max_number_key() >= limit) {
       return PrepareSlowElementsForSort(limit);
@@ -9101,19 +10215,19 @@
 
     set_map(new_map);
     set_elements(fast_elements);
-  } else {
+  } else if (!HasFastDoubleElements()) {
     Object* obj;
     { MaybeObject* maybe_obj = EnsureWritableFastElements();
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
   }
-  ASSERT(HasFastElements());
+  ASSERT(HasFastElements() || HasFastDoubleElements());
 
   // Collect holes at the end, undefined before that and the rest at the
   // start, and return the number of non-hole, non-undefined values.
 
-  FixedArray* elements = FixedArray::cast(this->elements());
-  uint32_t elements_length = static_cast<uint32_t>(elements->length());
+  FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements());
+  uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
   if (limit > elements_length) {
     limit = elements_length ;
   }
@@ -9132,47 +10246,78 @@
     result_double = HeapNumber::cast(new_double);
   }
 
-  AssertNoAllocation no_alloc;
-
-  // Split elements into defined, undefined and the_hole, in that order.
-  // Only count locations for undefined and the hole, and fill them afterwards.
-  WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
-  unsigned int undefs = limit;
-  unsigned int holes = limit;
-  // Assume most arrays contain no holes and undefined values, so minimize the
-  // number of stores of non-undefined, non-the-hole values.
-  for (unsigned int i = 0; i < undefs; i++) {
-    Object* current = elements->get(i);
-    if (current->IsTheHole()) {
-      holes--;
-      undefs--;
-    } else if (current->IsUndefined()) {
-      undefs--;
-    } else {
-      continue;
+  uint32_t result = 0;
+  if (elements_base->map() == heap->fixed_double_array_map()) {
+    FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base);
+    // Split elements into defined and the_hole, in that order.
+    unsigned int holes = limit;
+    // Assume most arrays contain no holes, so minimize the number of stores
+    // of non-the-hole values.
+    for (unsigned int i = 0; i < holes; i++) {
+      if (elements->is_the_hole(i)) {
+        holes--;
+      } else {
+        continue;
+      }
+      // Position i needs to be filled.
+      while (holes > i) {
+        if (elements->is_the_hole(holes)) {
+          holes--;
+        } else {
+          elements->set(i, elements->get_scalar(holes));
+          break;
+        }
+      }
     }
-    // Position i needs to be filled.
-    while (undefs > i) {
-      current = elements->get(undefs);
+    result = holes;
+    while (holes < limit) {
+      elements->set_the_hole(holes);
+      holes++;
+    }
+  } else {
+    FixedArray* elements = FixedArray::cast(elements_base);
+    AssertNoAllocation no_alloc;
+
+    // Split elements into defined, undefined and the_hole, in that order.  Only
+    // count locations for undefined and the hole, and fill them afterwards.
+    WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
+    unsigned int undefs = limit;
+    unsigned int holes = limit;
+    // Assume most arrays contain no holes and undefined values, so minimize the
+    // number of stores of non-undefined, non-the-hole values.
+    for (unsigned int i = 0; i < undefs; i++) {
+      Object* current = elements->get(i);
       if (current->IsTheHole()) {
         holes--;
         undefs--;
       } else if (current->IsUndefined()) {
         undefs--;
       } else {
-        elements->set(i, current, write_barrier);
-        break;
+        continue;
+      }
+      // Position i needs to be filled.
+      while (undefs > i) {
+        current = elements->get(undefs);
+        if (current->IsTheHole()) {
+          holes--;
+          undefs--;
+        } else if (current->IsUndefined()) {
+          undefs--;
+        } else {
+          elements->set(i, current, write_barrier);
+          break;
+        }
       }
     }
-  }
-  uint32_t result = undefs;
-  while (undefs < holes) {
-    elements->set_undefined(undefs);
-    undefs++;
-  }
-  while (holes < limit) {
-    elements->set_the_hole(holes);
-    holes++;
+    result = undefs;
+    while (undefs < holes) {
+      elements->set_undefined(undefs);
+      undefs++;
+    }
+    while (holes < limit) {
+      elements->set_the_hole(holes);
+      holes++;
+    }
   }
 
   if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
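// Both branches above partition the backing store in place so defined values
// come first, then undefined (tagged case only), then holes, and return the
// number of defined values for the sort that follows.  Since a sort runs
// next, the order within each group does not matter; the sketch below uses
// std::partition with sentinel ints standing in for undefined and the hole.
#include <algorithm>
#include <cstddef>
#include <vector>

constexpr int kUndefined = -1;
constexpr int kHole = -2;

size_t PrepareForSort(std::vector<int>& elements) {
  auto defined_end = std::partition(elements.begin(), elements.end(),
      [](int v) { return v != kUndefined && v != kHole; });
  std::partition(defined_end, elements.end(),
      [](int v) { return v == kUndefined; });       // undefined before holes
  return static_cast<size_t>(defined_end - elements.begin());
}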
@@ -9318,6 +10463,26 @@
 }
 
 
+MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
+  double double_value = 0;
+  Heap* heap = GetHeap();
+  if (index < static_cast<uint32_t>(length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      double_value = static_cast<double>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double_value = HeapNumber::cast(value)->value();
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    set(index, double_value);
+  }
+  return heap->AllocateHeapNumber(double_value);
+}
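// ExternalDoubleArray::SetValue converts Smis and heap numbers to a raw
// double, treats undefined as 0, and only writes when the index is in range,
// returning the stored number either way.  Sketch of that conversion; the
// std::variant stands in for the tagged Object* value and the names are
// illustrative:
#include <cstddef>
#include <cstdint>
#include <variant>
#include <vector>

struct Undefined {};
using Value = std::variant<int32_t, double, Undefined>;  // Smi, HeapNumber, undefined

double StoreExternalDouble(std::vector<double>& backing, size_t index, Value v) {
  double d = 0;
  if (const auto* smi = std::get_if<int32_t>(&v)) {
    d = static_cast<double>(*smi);
  } else if (const auto* num = std::get_if<double>(&v)) {
    d = *num;
  }
  // Undefined (and nothing else) falls through as the default 0.
  if (index < backing.size()) backing[index] = d;
  return d;
}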
+
+
 JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
@@ -9365,10 +10530,12 @@
 // algorithm.
 class TwoCharHashTableKey : public HashTableKey {
  public:
-  TwoCharHashTableKey(uint32_t c1, uint32_t c2)
+  TwoCharHashTableKey(uint32_t c1, uint32_t c2, uint32_t seed)
     : c1_(c1), c2_(c2) {
     // Char 1.
-    uint32_t hash = c1 + (c1 << 10);
+    uint32_t hash = seed;
+    hash += c1;
+    hash += hash << 10;
     hash ^= hash >> 6;
     // Char 2.
     hash += c2;
@@ -9378,9 +10545,9 @@
     hash += hash << 3;
     hash ^= hash >> 11;
     hash += hash << 15;
-    if (hash == 0) hash = 27;
+    if ((hash & String::kHashBitMask) == 0) hash = String::kZeroHash;
 #ifdef DEBUG
-    StringHasher hasher(2);
+    StringHasher hasher(2, seed);
     hasher.AddCharacter(c1);
     hasher.AddCharacter(c2);
     // If this assert fails then we failed to reproduce the two-character
@@ -9412,6 +10579,7 @@
     UNREACHABLE();
     return NULL;
   }
+
  private:
   uint32_t c1_;
   uint32_t c2_;
@@ -9436,7 +10604,7 @@
 bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
                                                uint32_t c2,
                                                String** symbol) {
-  TwoCharHashTableKey key(c1, c2);
+  TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
   int entry = FindEntry(&key);
   if (entry == kNotFound) {
     return false;
@@ -9449,22 +10617,32 @@
 }
 
 
-MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
-  Utf8SymbolKey key(str);
+MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str,
+                                       Object** s) {
+  Utf8SymbolKey key(str, GetHeap()->HashSeed());
   return LookupKey(&key, s);
 }
 
 
 MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
                                             Object** s) {
-  AsciiSymbolKey key(str);
+  AsciiSymbolKey key(str, GetHeap()->HashSeed());
+  return LookupKey(&key, s);
+}
+
+
+MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str,
+                                                     int from,
+                                                     int length,
+                                                     Object** s) {
+  SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed());
   return LookupKey(&key, s);
 }
 
 
 MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
                                               Object** s) {
-  TwoByteSymbolKey key(str);
+  TwoByteSymbolKey key(str, GetHeap()->HashSeed());
   return LookupKey(&key, s);
 }
 
@@ -9755,7 +10933,7 @@
 }
 
 
-void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
+void SeededNumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
   // Do nothing if the interval [from, to) is empty.
   if (from >= to) return;
 
@@ -9781,11 +10959,11 @@
 
 template<typename Shape, typename Key>
 Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
-                                               JSObject::DeleteMode mode) {
+                                               JSReceiver::DeleteMode mode) {
   Heap* heap = Dictionary<Shape, Key>::GetHeap();
   PropertyDetails details = DetailsAt(entry);
   // Ignore attributes if forcing a deletion.
-  if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
+  if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
     return heap->false_value();
   }
   SetEntry(entry, heap->null_value(), heap->null_value());
@@ -9795,6 +10973,12 @@
 
 
 template<typename Shape, typename Key>
+MaybeObject* Dictionary<Shape, Key>::Shrink(Key key) {
+  return HashTable<Shape, Key>::Shrink(key);
+}
+
+
+template<typename Shape, typename Key>
 MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
   int entry = this->FindEntry(key);
 
@@ -9815,8 +10999,9 @@
     if (!maybe_k->ToObject(&k)) return maybe_k;
   }
   PropertyDetails details = PropertyDetails(NONE, NORMAL);
-  return Dictionary<Shape, Key>::cast(obj)->
-      AddEntry(key, value, details, Shape::Hash(key));
+
+  return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
+      Dictionary<Shape, Key>::Hash(key));
 }
 
 
@@ -9831,8 +11016,9 @@
   { MaybeObject* maybe_obj = EnsureCapacity(1, key);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
-  return Dictionary<Shape, Key>::cast(obj)->
-      AddEntry(key, value, details, Shape::Hash(key));
+
+  return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
+      Dictionary<Shape, Key>::Hash(key));
 }
 
 
@@ -9865,7 +11051,7 @@
 }
 
 
-void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
+void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
   // If the dictionary requires slow elements an element has already
   // been added at a high index.
   if (requires_slow_elements()) return;
@@ -9884,31 +11070,44 @@
 }
 
 
-MaybeObject* NumberDictionary::AddNumberEntry(uint32_t key,
-                                              Object* value,
-                                              PropertyDetails details) {
+MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
+                                                    Object* value,
+                                                    PropertyDetails details) {
   UpdateMaxNumberKey(key);
   SLOW_ASSERT(this->FindEntry(key) == kNotFound);
   return Add(key, value, details);
 }
 
 
-MaybeObject* NumberDictionary::AtNumberPut(uint32_t key, Object* value) {
+MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
+                                                      Object* value) {
+  SLOW_ASSERT(this->FindEntry(key) == kNotFound);
+  return Add(key, value, PropertyDetails(NONE, NORMAL));
+}
+
+
+MaybeObject* SeededNumberDictionary::AtNumberPut(uint32_t key, Object* value) {
   UpdateMaxNumberKey(key);
   return AtPut(key, value);
 }
 
 
-MaybeObject* NumberDictionary::Set(uint32_t key,
-                                   Object* value,
-                                   PropertyDetails details) {
+MaybeObject* UnseededNumberDictionary::AtNumberPut(uint32_t key,
+                                                   Object* value) {
+  return AtPut(key, value);
+}
+
+
+MaybeObject* SeededNumberDictionary::Set(uint32_t key,
+                                         Object* value,
+                                         PropertyDetails details) {
   int entry = FindEntry(key);
   if (entry == kNotFound) return AddNumberEntry(key, value, details);
   // Preserve enumeration index.
   details = PropertyDetails(details.attributes(),
                             details.type(),
                             DetailsAt(entry).index());
-  MaybeObject* maybe_object_key = NumberDictionaryShape::AsObject(key);
+  MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key);
   Object* object_key;
   if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
   SetEntry(entry, object_key, value, details);
@@ -9916,6 +11115,18 @@
 }
 
 
+MaybeObject* UnseededNumberDictionary::Set(uint32_t key,
+                                           Object* value) {
+  int entry = FindEntry(key);
+  if (entry == kNotFound) return AddNumberEntry(key, value);
+  MaybeObject* maybe_object_key = UnseededNumberDictionaryShape::AsObject(key);
+  Object* object_key;
+  if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
+  SetEntry(entry, object_key, value);
+  return this;
+}
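+
+// Editor's note (illustrative): unlike SeededNumberDictionary::Set above, the
+// unseeded Set takes no PropertyDetails argument; entries are always stored
+// with PropertyDetails(NONE, NORMAL) (see AddNumberEntry) and there is no
+// enumeration index to preserve.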
+
+
 
 template<typename Shape, typename Key>
 int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
@@ -9943,8 +11154,10 @@
 
 
 template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage,
-                                        PropertyAttributes filter) {
+void Dictionary<Shape, Key>::CopyKeysTo(
+    FixedArray* storage,
+    PropertyAttributes filter,
+    typename Dictionary<Shape, Key>::SortMode sort_mode) {
   ASSERT(storage->length() >= NumberOfEnumElements());
   int capacity = HashTable<Shape, Key>::Capacity();
   int index = 0;
@@ -9957,7 +11170,9 @@
        if ((attr & filter) == 0) storage->set(index++, k);
      }
   }
-  storage->SortPairs(storage, index);
+  if (sort_mode == Dictionary<Shape, Key>::SORTED) {
+    storage->SortPairs(storage, index);
+  }
   ASSERT(storage->length() >= index);
 }
 
@@ -9984,7 +11199,9 @@
 
 template<typename Shape, typename Key>
 void Dictionary<Shape, Key>::CopyKeysTo(
-    FixedArray* storage, int index) {
+    FixedArray* storage,
+    int index,
+    typename Dictionary<Shape, Key>::SortMode sort_mode) {
   ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
       static_cast<PropertyAttributes>(NONE)));
   int capacity = HashTable<Shape, Key>::Capacity();
@@ -9996,6 +11213,9 @@
       storage->set(index++, k);
     }
   }
+  if (sort_mode == Dictionary<Shape, Key>::SORTED) {
+    storage->SortPairs(storage, index);
+  }
   ASSERT(storage->length() >= index);
 }
 
@@ -10155,6 +11375,63 @@
 }
 
 
+Object* ObjectHashTable::Lookup(JSObject* key) {
+  // If the object does not have an identity hash, it was never used as a key.
+  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+  if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
+  int entry = FindEntry(key);
+  if (entry == kNotFound) return GetHeap()->undefined_value();
+  return get(EntryToIndex(entry) + 1);
+}
+
+
+MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
+  // Make sure the key object has an identity hash code.
+  int hash;
+  { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
+    if (maybe_hash->IsFailure()) return maybe_hash;
+    hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+  }
+  int entry = FindEntry(key);
+
+  // Check whether to perform removal operation.
+  if (value->IsUndefined()) {
+    if (entry == kNotFound) return this;
+    RemoveEntry(entry);
+    return Shrink(key);
+  }
+
+  // Key is already in table, just overwrite value.
+  if (entry != kNotFound) {
+    set(EntryToIndex(entry) + 1, value);
+    return this;
+  }
+
+  // Check whether the hash table should be extended.
+  Object* obj;
+  { MaybeObject* maybe_obj = EnsureCapacity(1, key);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  ObjectHashTable* table = ObjectHashTable::cast(obj);
+  table->AddEntry(table->FindInsertionEntry(hash), key, value);
+  return table;
+}
+
+
+void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
+  set(EntryToIndex(entry), key);
+  set(EntryToIndex(entry) + 1, value);
+  ElementAdded();
+}
+
+
+void ObjectHashTable::RemoveEntry(int entry, Heap* heap) {
+  set_null(heap, EntryToIndex(entry));
+  set_null(heap, EntryToIndex(entry) + 1);
+  ElementRemoved();
+}
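+
+// Editor's sketch (illustrative, not part of the change): Put treats an
+// undefined value as a removal request, so a caller deletes a key with
+//   table->Put(key, heap->undefined_value());
+// which removes the entry and lets the table shrink if it has become sparse.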
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 // Check if there is a break point at this code position.
 bool DebugInfo::HasBreakPoint(int code_position) {
@@ -10381,7 +11658,7 @@
     Handle<Object> break_point_object) {
   // No break point.
   if (break_point_info->break_point_objects()->IsUndefined()) return false;
-  // Single beak point.
+  // Single break point.
   if (!break_point_info->break_point_objects()->IsFixedArray()) {
     return break_point_info->break_point_objects() == *break_point_object;
   }
@@ -10400,7 +11677,7 @@
 int BreakPointInfo::GetBreakPointCount() {
   // No break point.
   if (break_point_objects()->IsUndefined()) return 0;
-  // Single beak point.
+  // Single break point.
   if (!break_point_objects()->IsFixedArray()) return 1;
   // Multiple break points.
   return FixedArray::cast(break_point_objects())->length();
diff --git a/src/objects.h b/src/objects.h
index 72daad9..1245ed0 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -28,8 +28,10 @@
 #ifndef V8_OBJECTS_H_
 #define V8_OBJECTS_H_
 
+#include "allocation.h"
 #include "builtins.h"
-#include "smart-pointer.h"
+#include "list.h"
+#include "smart-array-pointer.h"
 #include "unicode-inl.h"
 #if V8_TARGET_ARCH_ARM
 #include "arm/constants-arm.h"
@@ -46,41 +48,48 @@
 //   - Object
 //     - Smi          (immediate small integer)
 //     - HeapObject   (superclass for everything allocated in the heap)
-//       - JSObject
-//         - JSArray
-//         - JSRegExp
-//         - JSFunction
-//         - GlobalObject
-//           - JSGlobalObject
-//           - JSBuiltinsObject
-//         - JSGlobalProxy
-//         - JSValue
-//         - JSMessageObject
-//       - ByteArray
-//       - ExternalArray
-//         - ExternalPixelArray
-//         - ExternalByteArray
-//         - ExternalUnsignedByteArray
-//         - ExternalShortArray
-//         - ExternalUnsignedShortArray
-//         - ExternalIntArray
-//         - ExternalUnsignedIntArray
-//         - ExternalFloatArray
-//       - FixedArray
-//         - DescriptorArray
-//         - HashTable
-//           - Dictionary
-//           - SymbolTable
-//           - CompilationCacheTable
-//           - CodeCacheHashTable
-//           - MapCache
-//         - Context
-//         - JSFunctionResultCache
-//         - SerializedScopeInfo
+//       - JSReceiver  (suitable for property access)
+//         - JSObject
+//           - JSArray
+//           - JSWeakMap
+//           - JSRegExp
+//           - JSFunction
+//           - GlobalObject
+//             - JSGlobalObject
+//             - JSBuiltinsObject
+//           - JSGlobalProxy
+//           - JSValue
+//           - JSMessageObject
+//         - JSProxy
+//           - JSFunctionProxy
+//       - FixedArrayBase
+//         - ByteArray
+//         - FixedArray
+//           - DescriptorArray
+//           - HashTable
+//             - Dictionary
+//             - SymbolTable
+//             - CompilationCacheTable
+//             - CodeCacheHashTable
+//             - MapCache
+//           - Context
+//           - JSFunctionResultCache
+//           - SerializedScopeInfo
+//         - FixedDoubleArray
+//         - ExternalArray
+//           - ExternalPixelArray
+//           - ExternalByteArray
+//           - ExternalUnsignedByteArray
+//           - ExternalShortArray
+//           - ExternalUnsignedShortArray
+//           - ExternalIntArray
+//           - ExternalUnsignedIntArray
+//           - ExternalFloatArray
 //       - String
 //         - SeqString
 //           - SeqAsciiString
 //           - SeqTwoByteString
+//         - SlicedString
 //         - ConsString
 //         - ExternalString
 //           - ExternalAsciiString
@@ -89,7 +98,7 @@
 //       - Code
 //       - Map
 //       - Oddball
-//       - Proxy
+//       - Foreign
 //       - SharedFunctionInfo
 //       - Struct
 //         - AccessorInfo
@@ -126,16 +135,46 @@
 namespace v8 {
 namespace internal {
 
+enum ElementsKind {
+  // The "fast" kind for tagged values. Must be first to make it possible
+  // to efficiently check maps if they have fast elements.
+  FAST_ELEMENTS,
+
+  // The "fast" kind for unwrapped, non-tagged double values.
+  FAST_DOUBLE_ELEMENTS,
+
+  // The "slow" kind.
+  DICTIONARY_ELEMENTS,
+  NON_STRICT_ARGUMENTS_ELEMENTS,
+  // The "fast" kind for external arrays
+  EXTERNAL_BYTE_ELEMENTS,
+  EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
+  EXTERNAL_SHORT_ELEMENTS,
+  EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
+  EXTERNAL_INT_ELEMENTS,
+  EXTERNAL_UNSIGNED_INT_ELEMENTS,
+  EXTERNAL_FLOAT_ELEMENTS,
+  EXTERNAL_DOUBLE_ELEMENTS,
+  EXTERNAL_PIXEL_ELEMENTS,
+
+  // Derived constants from ElementsKind
+  FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
+  LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+  FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
+  LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
+};
+
+static const int kElementsKindCount =
+    LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
 
 // PropertyDetails captures type and attributes for a property.
 // They are used both in property dictionaries and instance descriptors.
 class PropertyDetails BASE_EMBEDDED {
  public:
-
   PropertyDetails(PropertyAttributes attributes,
                   PropertyType type,
                   int index = 0) {
-    ASSERT(type != EXTERNAL_ARRAY_TRANSITION);
+    ASSERT(type != ELEMENTS_TRANSITION);
     ASSERT(TypeField::is_valid(type));
     ASSERT(AttributesField::is_valid(attributes));
     ASSERT(StorageField::is_valid(index));
@@ -151,19 +190,19 @@
 
   PropertyDetails(PropertyAttributes attributes,
                   PropertyType type,
-                  ExternalArrayType array_type) {
-    ASSERT(type == EXTERNAL_ARRAY_TRANSITION);
+                  ElementsKind elements_kind) {
+    ASSERT(type == ELEMENTS_TRANSITION);
     ASSERT(TypeField::is_valid(type));
     ASSERT(AttributesField::is_valid(attributes));
-    ASSERT(StorageField::is_valid(static_cast<int>(array_type)));
+    ASSERT(StorageField::is_valid(static_cast<int>(elements_kind)));
 
     value_ = TypeField::encode(type)
         | AttributesField::encode(attributes)
-        | StorageField::encode(static_cast<int>(array_type));
+        | StorageField::encode(static_cast<int>(elements_kind));
 
     ASSERT(type == this->type());
     ASSERT(attributes == this->attributes());
-    ASSERT(array_type == this->array_type());
+    ASSERT(elements_kind == this->elements_kind());
   }
 
   // Conversion for storing details as Object*.
@@ -176,7 +215,7 @@
     PropertyType t = type();
     ASSERT(t != INTERCEPTOR);
     return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
-        t == EXTERNAL_ARRAY_TRANSITION;
+        t == ELEMENTS_TRANSITION;
   }
 
   bool IsProperty() {
@@ -187,9 +226,9 @@
 
   int index() { return StorageField::decode(value_); }
 
-  ExternalArrayType array_type() {
-    ASSERT(type() == EXTERNAL_ARRAY_TRANSITION);
-    return static_cast<ExternalArrayType>(StorageField::decode(value_));
+  ElementsKind elements_kind() {
+    ASSERT(type() == ELEMENTS_TRANSITION);
+    return static_cast<ElementsKind>(StorageField::decode(value_));
   }
 
   inline PropertyDetails AsDeleted();
@@ -211,6 +250,7 @@
   class StorageField:    public BitField<uint32_t,           8, 32-8> {};
 
   static const int kInitialIndex = 1;
+
  private:
   uint32_t value_;
 };
@@ -275,6 +315,7 @@
   V(ASCII_STRING_TYPE)                                                         \
   V(CONS_STRING_TYPE)                                                          \
   V(CONS_ASCII_STRING_TYPE)                                                    \
+  V(SLICED_STRING_TYPE)                                                        \
   V(EXTERNAL_STRING_TYPE)                                                      \
   V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_STRING_TYPE)                                                \
@@ -286,7 +327,7 @@
   V(JS_GLOBAL_PROPERTY_CELL_TYPE)                                              \
                                                                                \
   V(HEAP_NUMBER_TYPE)                                                          \
-  V(PROXY_TYPE)                                                                \
+  V(FOREIGN_TYPE)                                                              \
   V(BYTE_ARRAY_TYPE)                                                           \
   /* Note: the order of these external array */                                \
   /* types is relied upon in */                                                \
@@ -311,8 +352,10 @@
   V(TYPE_SWITCH_INFO_TYPE)                                                     \
   V(SCRIPT_TYPE)                                                               \
   V(CODE_CACHE_TYPE)                                                           \
+  V(POLYMORPHIC_CODE_CACHE_TYPE)                                               \
                                                                                \
   V(FIXED_ARRAY_TYPE)                                                          \
+  V(FIXED_DOUBLE_ARRAY_TYPE)                                                   \
   V(SHARED_FUNCTION_INFO_TYPE)                                                 \
                                                                                \
   V(JS_MESSAGE_OBJECT_TYPE)                                                    \
@@ -324,9 +367,12 @@
   V(JS_BUILTINS_OBJECT_TYPE)                                                   \
   V(JS_GLOBAL_PROXY_TYPE)                                                      \
   V(JS_ARRAY_TYPE)                                                             \
+  V(JS_PROXY_TYPE)                                                             \
+  V(JS_WEAK_MAP_TYPE)                                                          \
   V(JS_REGEXP_TYPE)                                                            \
                                                                                \
   V(JS_FUNCTION_TYPE)                                                          \
+  V(JS_FUNCTION_PROXY_TYPE)                                                    \
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define INSTANCE_TYPE_LIST_DEBUGGER(V)                                         \
@@ -388,6 +434,14 @@
     ConsString::kSize,                                                         \
     cons_ascii_string,                                                         \
     ConsAsciiString)                                                           \
+  V(SLICED_STRING_TYPE,                                                        \
+    SlicedString::kSize,                                                       \
+    sliced_string,                                                             \
+    SlicedString)                                                              \
+  V(SLICED_ASCII_STRING_TYPE,                                                  \
+    SlicedString::kSize,                                                       \
+    sliced_ascii_string,                                                       \
+    SlicedAsciiString)                                                         \
   V(EXTERNAL_STRING_TYPE,                                                      \
     ExternalTwoByteString::kSize,                                              \
     external_string,                                                           \
@@ -420,7 +474,8 @@
   V(SIGNATURE_INFO, SignatureInfo, signature_info)                             \
   V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info)                        \
   V(SCRIPT, Script, script)                                                    \
-  V(CODE_CACHE, CodeCache, code_cache)
+  V(CODE_CACHE, CodeCache, code_cache)                                         \
+  V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define STRUCT_LIST_DEBUGGER(V)                                                \
@@ -460,9 +515,22 @@
 enum StringRepresentationTag {
   kSeqStringTag = 0x0,
   kConsStringTag = 0x1,
-  kExternalStringTag = 0x2
+  kExternalStringTag = 0x2,
+  kSlicedStringTag = 0x3
 };
-const uint32_t kIsConsStringMask = 0x1;
+const uint32_t kIsIndirectStringMask = 0x1;
+const uint32_t kIsIndirectStringTag = 0x1;
+STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
+STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
+STATIC_ASSERT(
+    (kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+STATIC_ASSERT(
+    (kSlicedStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+
+// Use this mask to distinguish between cons and slice only after making
+// sure that the string is one of the two (an indirect string).
+const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
+STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
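+
+// Editor's sketch (illustrative): given an instance type word, the masks above
+// classify the string representation as
+//   bool is_indirect = (type & kIsIndirectStringMask) == kIsIndirectStringTag;
+//   bool is_sliced   = is_indirect && (type & kSlicedNotConsMask) != 0;
+//   bool is_cons     = is_indirect && (type & kSlicedNotConsMask) == 0;
+// which matches the rule that cons and sliced may only be told apart once the
+// string is known to be indirect.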
 
 // If bit 7 is clear, then bit 3 indicates whether this two-byte
 // string actually contains ascii data.
@@ -484,7 +552,6 @@
 
 enum InstanceType {
   // String types.
-  // FIRST_STRING_TYPE
   SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
   ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
   CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
@@ -498,6 +565,8 @@
   ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
   CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
   CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
+  SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
+  SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
   EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
   EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
@@ -514,7 +583,7 @@
   // "Data", objects that cannot contain non-map-word pointers to heap
   // objects.
   HEAP_NUMBER_TYPE,
-  PROXY_TYPE,
+  FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
   EXTERNAL_BYTE_ARRAY_TYPE,  // FIRST_EXTERNAL_ARRAY_TYPE
   EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
@@ -523,7 +592,9 @@
   EXTERNAL_INT_ARRAY_TYPE,
   EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
   EXTERNAL_FLOAT_ARRAY_TYPE,
+  EXTERNAL_DOUBLE_ARRAY_TYPE,
   EXTERNAL_PIXEL_ARRAY_TYPE,  // LAST_EXTERNAL_ARRAY_TYPE
+  FIXED_DOUBLE_ARRAY_TYPE,
   FILLER_TYPE,  // LAST_DATA_TYPE
 
   // Structs.
@@ -537,6 +608,7 @@
   TYPE_SWITCH_INFO_TYPE,
   SCRIPT_TYPE,
   CODE_CACHE_TYPE,
+  POLYMORPHIC_CODE_CACHE_TYPE,
   // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
   // is defined. However as include/v8.h contain some of the instance type
   // constants always having them avoids them getting different numbers
@@ -549,38 +621,47 @@
 
   JS_MESSAGE_OBJECT_TYPE,
 
-  JS_VALUE_TYPE,  // FIRST_JS_OBJECT_TYPE
+  JS_VALUE_TYPE,  // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GLOBAL_OBJECT_TYPE,
   JS_BUILTINS_OBJECT_TYPE,
   JS_GLOBAL_PROXY_TYPE,
   JS_ARRAY_TYPE,
+  JS_PROXY_TYPE,
+  JS_WEAK_MAP_TYPE,
 
-  JS_REGEXP_TYPE,  // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE
+  JS_REGEXP_TYPE,  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
 
-  JS_FUNCTION_TYPE,
+  JS_FUNCTION_TYPE,  // FIRST_CALLABLE_SPEC_OBJECT_TYPE
+  JS_FUNCTION_PROXY_TYPE,  // LAST_CALLABLE_SPEC_OBJECT_TYPE
 
   // Pseudo-types
   FIRST_TYPE = 0x0,
-  LAST_TYPE = JS_FUNCTION_TYPE,
+  LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
   INVALID_TYPE = FIRST_TYPE - 1,
   FIRST_NONSTRING_TYPE = MAP_TYPE,
-  FIRST_STRING_TYPE = FIRST_TYPE,
-  LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1,
   // Boundaries for testing for an external array.
   FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
   LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
   // Boundary for promotion to old data space/old pointer space.
   LAST_DATA_TYPE = FILLER_TYPE,
-  // Boundaries for testing the type is a JavaScript "object".  Note that
-  // function objects are not counted as objects, even though they are
-  // implemented as such; only values whose typeof is "object" are included.
-  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
-  LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE,
-  // RegExp objects have [[Class]] "function" because they are callable.
-  // All types from this type and above are objects with [[Class]] "function".
-  FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE
+  // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
+  // Note that there is no range for JSObject or JSProxy, since their subtypes
+  // are not contiguous in this enum! The enum ranges instead reflect the
+  // external class names, where proxies are treated as either ordinary objects
+  // or functions.
+  FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
+  LAST_JS_RECEIVER_TYPE = LAST_TYPE,
+  // Boundaries for testing the types for which typeof is "object".
+  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
+  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
+  // Boundaries for testing the types for which typeof is "function".
+  FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
+  LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+  // Boundaries for testing whether the type is a JavaScript object.
+  FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+  LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
 };
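+
+// Editor's sketch (illustrative, hypothetical helper): the SPEC_OBJECT
+// boundaries above let the typeof classification be written as range checks:
+//   bool TypeofIsObject(InstanceType t) {
+//     return FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <= t &&
+//            t <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
+//   }
+// with the CALLABLE range playing the same role for typeof "function".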
 
 static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
@@ -588,7 +669,7 @@
 
 STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
 STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
-STATIC_CHECK(PROXY_TYPE == Internals::kProxyType);
+STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
 
 
 enum CompareResult {
@@ -611,9 +692,11 @@
                          WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
 
 
-class StringStream;
+class DictionaryElementsAccessor;
+class ElementsAccessor;
+class FixedArrayBase;
 class ObjectVisitor;
-class Failure;
+class StringStream;
 
 struct ValueInfo : public Malloced {
   ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -627,6 +710,7 @@
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
+class Failure;
 
 class MaybeObject BASE_EMBEDDED {
  public:
@@ -690,6 +774,7 @@
   V(SeqString)                                 \
   V(ExternalString)                            \
   V(ConsString)                                \
+  V(SlicedString)                              \
   V(ExternalTwoByteString)                     \
   V(ExternalAsciiString)                       \
   V(SeqTwoByteString)                          \
@@ -703,8 +788,10 @@
   V(ExternalIntArray)                          \
   V(ExternalUnsignedIntArray)                  \
   V(ExternalFloatArray)                        \
+  V(ExternalDoubleArray)                       \
   V(ExternalPixelArray)                        \
   V(ByteArray)                                 \
+  V(JSReceiver)                                \
   V(JSObject)                                  \
   V(JSContextExtensionObject)                  \
   V(Map)                                       \
@@ -712,9 +799,10 @@
   V(DeoptimizationInputData)                   \
   V(DeoptimizationOutputData)                  \
   V(FixedArray)                                \
+  V(FixedDoubleArray)                          \
   V(Context)                                   \
-  V(CatchContext)                              \
   V(GlobalContext)                             \
+  V(SerializedScopeInfo)                       \
   V(JSFunction)                                \
   V(Code)                                      \
   V(Oddball)                                   \
@@ -722,9 +810,12 @@
   V(JSValue)                                   \
   V(JSMessageObject)                           \
   V(StringWrapper)                             \
-  V(Proxy)                                     \
+  V(Foreign)                                   \
   V(Boolean)                                   \
   V(JSArray)                                   \
+  V(JSProxy)                                   \
+  V(JSFunctionProxy)                           \
+  V(JSWeakMap)                                 \
   V(JSRegExp)                                  \
   V(HashTable)                                 \
   V(Dictionary)                                \
@@ -733,6 +824,7 @@
   V(NormalizedMapCache)                        \
   V(CompilationCacheTable)                     \
   V(CodeCacheHashTable)                        \
+  V(PolymorphicCodeCacheHashTable)             \
   V(MapCache)                                  \
   V(Primitive)                                 \
   V(GlobalObject)                              \
@@ -766,6 +858,8 @@
   STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
 #undef DECLARE_STRUCT_PREDICATE
 
+  INLINE(bool IsSpecObject());
+
   // Oddball testing.
   INLINE(bool IsUndefined());
   INLINE(bool IsNull());
@@ -777,6 +871,10 @@
   // Extract the number.
   inline double Number();
 
+  // Returns true if the object is of the correct type to be used as an
+  // implementation of a JSObject's elements.
+  inline bool HasValidElements();
+
   inline bool HasSpecificClassOf(String* name);
 
   MUST_USE_RESULT MaybeObject* ToObject();             // ECMA-262 9.9.
@@ -809,6 +907,9 @@
                                                        Object* structure,
                                                        String* name,
                                                        Object* holder);
+  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
+                                                      String* name,
+                                                      Object* handler);
   MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
                                                             JSFunction* getter);
 
@@ -1324,11 +1425,9 @@
 };
 
 
-// The JSObject describes real heap allocated JavaScript objects with
-// properties.
-// Note that the map of JSObject changes during execution to enable inline
-// caching.
-class JSObject: public HeapObject {
+// JSReceiver includes types on which properties can be defined, i.e.,
+// JSObject and JSProxy.
+class JSReceiver: public HeapObject {
  public:
   enum DeleteMode {
     NORMAL_DELETION,
@@ -1336,21 +1435,65 @@
     FORCE_DELETION
   };
 
-  enum ElementsKind {
-    // The only "fast" kind.
-    FAST_ELEMENTS,
-    // All the kinds below are "slow".
-    DICTIONARY_ELEMENTS,
-    EXTERNAL_BYTE_ELEMENTS,
-    EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
-    EXTERNAL_SHORT_ELEMENTS,
-    EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
-    EXTERNAL_INT_ELEMENTS,
-    EXTERNAL_UNSIGNED_INT_ELEMENTS,
-    EXTERNAL_FLOAT_ELEMENTS,
-    EXTERNAL_PIXEL_ELEMENTS
-  };
+  // Casting.
+  static inline JSReceiver* cast(Object* obj);
 
+  // Can cause GC.
+  MUST_USE_RESULT MaybeObject* SetProperty(String* key,
+                                           Object* value,
+                                           PropertyAttributes attributes,
+                                           StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
+                                           String* key,
+                                           Object* value,
+                                           PropertyAttributes attributes,
+                                           StrictModeFlag strict_mode);
+
+  MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+
+  // Returns the class name ([[Class]] property in the specification).
+  String* class_name();
+
+  // Returns the constructor name (the name (possibly, inferred name) of the
+  // function that was used to instantiate the object).
+  String* constructor_name();
+
+  inline PropertyAttributes GetPropertyAttribute(String* name);
+  PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
+                                                      String* name);
+  PropertyAttributes GetLocalPropertyAttribute(String* name);
+
+  // Can cause a GC.
+  inline bool HasProperty(String* name);
+  inline bool HasLocalProperty(String* name);
+
+  // Return the object's prototype (might be Heap::null_value()).
+  inline Object* GetPrototype();
+
+  // Set the object's prototype (only JSReceiver and null are allowed).
+  MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
+                                            bool skip_hidden_prototypes);
+
+  // Lookup a property.  If found, the result is valid and has
+  // detailed information.
+  void LocalLookup(String* name, LookupResult* result);
+  void Lookup(String* name, LookupResult* result);
+
+ private:
+  PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
+                                          LookupResult* result,
+                                          String* name,
+                                          bool continue_search);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
+};
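+
+// Editor's note (illustrative): the members above were previously declared on
+// JSObject (their declarations are deleted further down in this diff);
+// hoisting them to JSReceiver lets JSProxy share the generic property access
+// paths.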
+
+// The JSObject describes real heap allocated JavaScript objects with
+// properties.
+// Note that the map of JSObject changes during execution to enable inline
+// caching.
+class JSObject: public JSReceiver {
+ public:
   // [properties]: Backing storage for properties.
   // properties is a FixedArray in the fast case and a Dictionary in the
   // slow case.
@@ -1367,18 +1510,23 @@
   //
   // In the fast mode elements is a FixedArray and so each element can
   // be quickly accessed. This fact is used in the generated code. The
-  // elements array can have one of the two maps in this mode:
-  // fixed_array_map or fixed_cow_array_map (for copy-on-write
-  // arrays). In the latter case the elements array may be shared by a
-  // few objects and so before writing to any element the array must
-  // be copied. Use EnsureWritableFastElements in this case.
+  // elements array can have one of three maps in this mode:
+  // fixed_array_map, non_strict_arguments_elements_map or
+  // fixed_cow_array_map (for copy-on-write arrays). In the latter case
+  // the elements array may be shared by a few objects and so before
+  // writing to any element the array must be copied. Use
+  // EnsureWritableFastElements in this case.
   //
-  // In the slow mode elements is either a NumberDictionary or an ExternalArray.
-  DECL_ACCESSORS(elements, HeapObject)
+  // In the slow mode the elements is either a NumberDictionary, an
+  // ExternalArray, or a FixedArray parameter map for a (non-strict)
+  // arguments object.
+  DECL_ACCESSORS(elements, FixedArrayBase)
   inline void initialize_elements();
   MUST_USE_RESULT inline MaybeObject* ResetElements();
   inline ElementsKind GetElementsKind();
+  inline ElementsAccessor* GetElementsAccessor();
   inline bool HasFastElements();
+  inline bool HasFastDoubleElements();
   inline bool HasDictionaryElements();
   inline bool HasExternalPixelElements();
   inline bool HasExternalArrayElements();
@@ -1389,9 +1537,13 @@
   inline bool HasExternalIntElements();
   inline bool HasExternalUnsignedIntElements();
   inline bool HasExternalFloatElements();
+  inline bool HasExternalDoubleElements();
+  bool HasFastArgumentsElements();
+  bool HasDictionaryArgumentsElements();
   inline bool AllowsSetElementsLength();
-  inline NumberDictionary* element_dictionary();  // Gets slow elements.
-  // Requires: this->HasFastElements().
+  inline SeededNumberDictionary* element_dictionary();  // Gets slow elements.
+
+  // Requires: HasFastElements().
   MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
 
   // Collects elements starting at index 0.
@@ -1402,11 +1554,7 @@
   // a dictionary, and it will stay a dictionary.
   MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
 
-  MUST_USE_RESULT MaybeObject* SetProperty(String* key,
-                                           Object* value,
-                                           PropertyAttributes attributes,
-                                           StrictModeFlag strict_mode);
-  MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
+  MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
                                            String* key,
                                            Object* value,
                                            PropertyAttributes attributes,
@@ -1415,11 +1563,14 @@
       LookupResult* result,
       String* name,
       Object* value,
-      bool check_prototype);
-  MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(Object* structure,
-                                                       String* name,
-                                                       Object* value,
-                                                       JSObject* holder);
+      bool check_prototype,
+      StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
+      Object* structure,
+      String* name,
+      Object* value,
+      JSObject* holder,
+      StrictModeFlag strict_mode);
   MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
                                                             Object* value);
   MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
@@ -1455,21 +1606,22 @@
   MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
                                                         DeleteMode mode);
 
-  // Returns the class name ([[Class]] property in the specification).
-  String* class_name();
-
-  // Returns the constructor name (the name (possibly, inferred name) of the
-  // function that was used to instantiate the object).
-  String* constructor_name();
-
   // Retrieve interceptors.
   InterceptorInfo* GetNamedInterceptor();
   InterceptorInfo* GetIndexedInterceptor();
 
-  inline PropertyAttributes GetPropertyAttribute(String* name);
-  PropertyAttributes GetPropertyAttributeWithReceiver(JSObject* receiver,
-                                                      String* name);
-  PropertyAttributes GetLocalPropertyAttribute(String* name);
+  // Used from JSReceiver.
+  PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
+                                                         String* name,
+                                                         bool continue_search);
+  PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
+                                                         String* name,
+                                                         bool continue_search);
+  PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
+      Object* receiver,
+      LookupResult* result,
+      String* name,
+      bool continue_search);
 
   MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
                                               bool is_getter,
@@ -1486,14 +1638,14 @@
       String* name,
       PropertyAttributes* attributes);
   MaybeObject* GetPropertyWithInterceptor(
-      JSObject* receiver,
+      JSReceiver* receiver,
       String* name,
       PropertyAttributes* attributes);
   MaybeObject* GetPropertyPostInterceptor(
-      JSObject* receiver,
+      JSReceiver* receiver,
       String* name,
       PropertyAttributes* attributes);
-  MaybeObject* GetLocalPropertyPostInterceptor(JSObject* receiver,
+  MaybeObject* GetLocalPropertyPostInterceptor(JSReceiver* receiver,
                                                String* name,
                                                PropertyAttributes* attributes);
 
@@ -1501,15 +1653,6 @@
   // been modified since it was created.  May give false positives.
   bool IsDirty();
 
-  bool HasProperty(String* name) {
-    return GetPropertyAttribute(name) != ABSENT;
-  }
-
-  // Can cause a GC if it hits an interceptor.
-  bool HasLocalProperty(String* name) {
-    return GetLocalPropertyAttribute(name) != ABSENT;
-  }
-
   // If the receiver is a JSGlobalProxy this method will return its prototype,
   // otherwise the result is the receiver itself.
   inline Object* BypassGlobalProxy();
@@ -1532,6 +1675,23 @@
   MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
       Object* hidden_obj);
 
+  // Indicates whether the hidden properties object should be created.
+  enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
+
+  // Retrieves the hidden properties object.
+  //
+  // The undefined value might be returned if no hidden properties object is
+  // present and creation was omitted.
+  inline bool HasHiddenProperties();
+  MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
+
+  // Retrieves a permanent object identity hash code.
+  //
+  // The identity hash is stored as a hidden property. The undefined value
+  // might be returned if no hidden properties object is present and creation
+  // was omitted.
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
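+  // Editor's note (illustrative): ObjectHashTable::Lookup in objects.cc above
+  // passes OMIT_CREATION, since an object that never received a hash cannot
+  // have been used as a key, while ObjectHashTable::Put passes ALLOW_CREATION
+  // so that a hash is assigned on first insertion.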
+
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
   MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
 
@@ -1546,17 +1706,13 @@
   // storage would.  In that case the JSObject should have fast
   // elements.
   bool ShouldConvertToFastElements();
-
-  // Return the object's prototype (might be Heap::null_value()).
-  inline Object* GetPrototype();
-
-  // Set the object's prototype (only JSObject and null are allowed).
-  MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
-                                            bool skip_hidden_prototypes);
+  // Returns true if the elements of this JSObject contain only values that
+  // can be represented in a FixedDoubleArray.
+  bool CanConvertToFastDoubleElements();
 
   // Tells whether the index'th element is present.
   inline bool HasElement(uint32_t index);
-  bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
+  bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
 
   // Computes the new capacity when expanding the elements of a JSObject.
   static int NewElementsCapacity(int old_capacity) {
@@ -1584,33 +1740,43 @@
 
   LocalElementType HasLocalElement(uint32_t index);
 
-  bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
-  bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
+  bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
+  bool HasElementPostInterceptor(JSReceiver* receiver, uint32_t index);
 
   MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
                                               Object* value,
                                               StrictModeFlag strict_mode,
-                                              bool check_prototype = true);
+                                              bool check_prototype);
+  MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
+                                                    Object* value,
+                                                    StrictModeFlag strict_mode,
+                                                    bool check_prototype);
+
+  MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
+      uint32_t index,
+      Object* value,
+      StrictModeFlag strict_mode,
+      bool check_prototype = true);
 
   // Set the index'th array element.
   // A Failure object is returned if GC is needed.
   MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
                                           Object* value,
                                           StrictModeFlag strict_mode,
-                                          bool check_prototype = true);
+                                          bool check_prototype);
 
   // Returns the index'th element.
   // The undefined object if index is out of bounds.
-  MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
   MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
 
-  // Get external element value at index if there is one and undefined
-  // otherwise. Can return a failure if allocation of a heap number
-  // failed.
-  MaybeObject* GetExternalElement(uint32_t index);
-
+  // Replace the elements' backing store with fast elements of the given
+  // capacity.  Update the length for JSArrays.  Returns the new backing
+  // store.
   MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
                                                                 int length);
+  MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
+      int capacity,
+      int length);
   MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
 
   // Lookup interceptors are used for handling properties controlled by host
@@ -1638,7 +1804,6 @@
   // Lookup a property.  If found, the result is valid and has
   // detailed information.
   void LocalLookup(String* name, LookupResult* result);
-  void Lookup(String* name, LookupResult* result);
 
   // The following lookup functions skip interceptors.
   void LocalLookupRealNamedProperty(String* name, LookupResult* result);
@@ -1646,7 +1811,7 @@
   void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
   void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
   MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
-      uint32_t index, Object* value, bool* found);
+      uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
   void LookupCallback(String* name, LookupResult* result);
 
   // Returns the number of properties on this object filtering out properties
@@ -1736,6 +1901,7 @@
   MUST_USE_RESULT MaybeObject* NormalizeProperties(
       PropertyNormalizationMode mode,
       int expected_additional_properties);
+
   MUST_USE_RESULT MaybeObject* NormalizeElements();
 
   MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
@@ -1828,8 +1994,21 @@
   // Also maximal value of JSArray's length property.
   static const uint32_t kMaxElementCount = 0xffffffffu;
 
+  // Constants for heuristics controlling conversion of fast elements
+  // to slow elements.
+
+  // Maximal gap that can be introduced by adding an element beyond
+  // the current elements length.
   static const uint32_t kMaxGap = 1024;
-  static const int kMaxFastElementsLength = 5000;
+
+  // Maximal length of fast elements array that won't be checked for
+  // being dense enough on expansion.
+  static const int kMaxUncheckedFastElementsLength = 5000;
+
+  // Same as above, but for old arrays. This limit is stricter, since we
+  // don't want to be wasteful with long-lived objects.
+  static const int kMaxUncheckedOldFastElementsLength = 500;
+
   static const int kInitialMaxFastElementArray = 100000;
   static const int kMaxFastProperties = 12;
   static const int kMaxInstanceSize = 255 * kPointerSize;
@@ -1851,6 +2030,8 @@
   };
 
  private:
+  friend class DictionaryElementsAccessor;
+
   MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
                                                       Object* structure,
                                                       uint32_t index,
@@ -1858,7 +2039,8 @@
   MaybeObject* SetElementWithCallback(Object* structure,
                                       uint32_t index,
                                       Object* value,
-                                      JSObject* holder);
+                                      JSObject* holder,
+                                      StrictModeFlag strict_mode);
   MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
       uint32_t index,
       Object* value,
@@ -1870,35 +2052,29 @@
       StrictModeFlag strict_mode,
       bool check_prototype);
 
-  MaybeObject* GetElementPostInterceptor(Object* receiver, uint32_t index);
-
   MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
                                                              DeleteMode mode);
   MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
 
-  MUST_USE_RESULT MaybeObject* DeleteElementPostInterceptor(uint32_t index,
-                                                            DeleteMode mode);
   MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
 
-  PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
-                                                         String* name,
-                                                         bool continue_search);
-  PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
-                                                         String* name,
-                                                         bool continue_search);
-  PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
-      Object* receiver,
-      LookupResult* result,
-      String* name,
-      bool continue_search);
-  PropertyAttributes GetPropertyAttribute(JSObject* receiver,
-                                          LookupResult* result,
-                                          String* name,
-                                          bool continue_search);
+  MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
+  MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
+                                                       DeleteMode mode);
+
+  bool ReferencesObjectFromElements(FixedArray* elements,
+                                    ElementsKind kind,
+                                    Object* object);
+  bool HasElementInElements(FixedArray* elements,
+                            ElementsKind kind,
+                            uint32_t index);
 
   // Returns true if most of the elements backing storage is used.
   bool HasDenseElements();
 
+  // Gets the current elements capacity and the number of used elements.
+  void GetElementsCapacityAndUsage(int* capacity, int* used);
+
   bool CanSetCallback(String* name);
   MUST_USE_RESULT MaybeObject* SetElementCallback(
       uint32_t index,
@@ -1918,17 +2094,33 @@
 };
 
 
-// FixedArray describes fixed-sized arrays with element type Object*.
-class FixedArray: public HeapObject {
+// Common superclass for FixedArrays that allows implementations to share
+// common accessors and some code paths.
+class FixedArrayBase: public HeapObject {
  public:
   // [length]: length of the array.
   inline int length();
   inline void set_length(int value);
 
+  inline static FixedArrayBase* cast(Object* object);
+
+  // Layout description.
+  // Length is smi tagged when it is stored.
+  static const int kLengthOffset = HeapObject::kHeaderSize;
+  static const int kHeaderSize = kLengthOffset + kPointerSize;
+};
+
+
+class FixedDoubleArray;
+
+// FixedArray describes fixed-sized arrays with element type Object*.
+class FixedArray: public FixedArrayBase {
+ public:
   // Setter and getter for elements.
   inline Object* get(int index);
   // Setter that uses write barrier.
   inline void set(int index, Object* value);
+  inline bool is_the_hole(int index);
 
   // Setter that doesn't need a write barrier.
   inline void set(int index, Smi* value);
@@ -1975,15 +2167,10 @@
   // Casting.
   static inline FixedArray* cast(Object* obj);
 
-  // Layout description.
-  // Length is smi tagged when it is stored.
-  static const int kLengthOffset = HeapObject::kHeaderSize;
-  static const int kHeaderSize = kLengthOffset + kPointerSize;
-
   // Maximal allowed size, in bytes, of a single FixedArray.
   // Prevents overflowing size computations, as well as extreme memory
   // consumption.
-  static const int kMaxSize = 512 * MB;
+  static const int kMaxSize = 128 * MB * kPointerSize;
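+  // (Editor's note: 128 * MB * kPointerSize equals the previous 512 MB limit
+  // on 32-bit targets and 1 GB on 64-bit targets, keeping kMaxLength below at
+  // roughly 128M entries regardless of pointer size.)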
   // Maximally allowed length of a FixedArray.
   static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
 
@@ -2027,18 +2214,79 @@
 };
 
 
+// FixedDoubleArray describes fixed-sized arrays with element type double.
+class FixedDoubleArray: public FixedArrayBase {
+ public:
+  inline void Initialize(FixedArray* from);
+  inline void Initialize(FixedDoubleArray* from);
+  inline void Initialize(SeededNumberDictionary* from);
+
+  // Setter and getter for elements.
+  inline double get_scalar(int index);
+  inline MaybeObject* get(int index);
+  inline void set(int index, double value);
+  inline void set_the_hole(int index);
+
+  // Checking for the hole.
+  inline bool is_the_hole(int index);
+
+  // Garbage collection support.
+  inline static int SizeFor(int length) {
+    return kHeaderSize + length * kDoubleSize;
+  }
+
+  // Code Generation support.
+  static int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+  inline static bool is_the_hole_nan(double value);
+  inline static double hole_nan_as_double();
+  inline static double canonical_not_the_hole_nan_as_double();
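+  // Editor's note (assumption, not stated in this diff): the hole is
+  // presumably encoded as one dedicated NaN bit pattern, which is why real
+  // NaN values produced by script have to be canonicalized to the
+  // "not the hole" NaN before being stored in a FixedDoubleArray.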
+
+  // Casting.
+  static inline FixedDoubleArray* cast(Object* obj);
+
+  // Maximal allowed size, in bytes, of a single FixedDoubleArray.
+  // Prevents overflowing size computations, as well as extreme memory
+  // consumption.
+  static const int kMaxSize = 512 * MB;
+  // Maximally allowed length of a FixedDoubleArray.
+  static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
+
+  // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void FixedDoubleArrayPrint() {
+    FixedDoubleArrayPrint(stdout);
+  }
+  void FixedDoubleArrayPrint(FILE* out);
+#endif
+
+#ifdef DEBUG
+  void FixedDoubleArrayVerify();
+#endif
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
+};
+
+
 // DescriptorArrays are fixed arrays used to hold instance descriptors.
 // The format of the these objects is:
-//   [0]: point to a fixed array with (value, detail) pairs.
-//   [1]: next enumeration index (Smi), or pointer to small fixed array:
+// TODO(1399): It should be possible to make room for bit_field3 in the map
+//             without overloading the instance descriptors field in the map
+//             (and storing it in the DescriptorArray when the map has one).
+//   [0]: storage for bit_field3 for Map owning this object (Smi)
+//   [1]: pointer to a fixed array with (value, detail) pairs.
+//   [2]: next enumeration index (Smi), or pointer to small fixed array:
 //          [0]: next enumeration index (Smi)
 //          [1]: pointer to fixed array with enum cache
-//   [2]: first key
+//   [3]: first key
 //   [length() - 1]: last key
 //
 class DescriptorArray: public FixedArray {
  public:
-  // Is this the singleton empty_descriptor_array?
+  // Returns true both for the shared empty_descriptor_array and for Smis,
+  // which the map uses to encode additional bit fields when the descriptor
+  // array is not yet used.
   inline bool IsEmpty();
 
   // Returns the number of descriptors in the array.
@@ -2075,6 +2323,12 @@
     return bridge->get(kEnumCacheBridgeCacheIndex);
   }
 
+  // TODO(1399): It should be possible to make room for bit_field3 in the map
+  //             without overloading the instance descriptors field in the map
+  //             (and storing it in the DescriptorArray when the map has one).
+  inline int bit_field3_storage();
+  inline void set_bit_field3_storage(int value);
+
   // Initialize or change the enum cache,
   // using the supplied storage for the small "bridge".
   void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
@@ -2153,9 +2407,10 @@
   // Constant for denoting key was not found.
   static const int kNotFound = -1;
 
-  static const int kContentArrayIndex = 0;
-  static const int kEnumerationIndexIndex = 1;
-  static const int kFirstIndex = 2;
+  static const int kBitField3StorageIndex = 0;
+  static const int kContentArrayIndex = 1;
+  static const int kEnumerationIndexIndex = 2;
+  static const int kFirstIndex = 3;
 
   // The length of the "bridge" to the enum cache.
   static const int kEnumCacheBridgeLength = 2;
@@ -2163,7 +2418,8 @@
   static const int kEnumCacheBridgeCacheIndex = 1;
 
   // Layout description.
-  static const int kContentArrayOffset = FixedArray::kHeaderSize;
+  static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
+  static const int kContentArrayOffset = kBitField3StorageOffset + kPointerSize;
   static const int kEnumerationIndexOffset = kContentArrayOffset + kPointerSize;
   static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
 
@@ -2256,9 +2512,42 @@
 // beginning of the backing storage that can be used for non-element
 // information by subclasses.
 
+template<typename Key>
+class BaseShape {
+ public:
+  static const bool UsesSeed = false;
+  static uint32_t Hash(Key key) { return 0; }
+  static uint32_t SeededHash(Key key, uint32_t seed) {
+    ASSERT(UsesSeed);
+    return Hash(key);
+  }
+  static uint32_t HashForObject(Key key, Object* object) { return 0; }
+  static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
+    // Won't be called if UsesSeed isn't overridden by child class.
+    return HashForObject(key, object);
+  }
+};
+
 template<typename Shape, typename Key>
 class HashTable: public FixedArray {
  public:
+  // Wrapper methods
+  inline uint32_t Hash(Key key) {
+    if (Shape::UsesSeed) {
+      return Shape::SeededHash(key, GetHeap()->HashSeed());
+    } else {
+      return Shape::Hash(key);
+    }
+  }
+
+  inline uint32_t HashForObject(Key key, Object* object) {
+    if (Shape::UsesSeed) {
+      return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
+    } else {
+      return Shape::HashForObject(key, object);
+    }
+  }
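+
+  // Editor's note (illustrative): a Shape opts in to seeded hashing by
+  // deriving from BaseShape, shadowing UsesSeed with true and providing
+  // SeededHash / SeededHashForObject, as SeededNumberDictionaryShape does
+  // later in this file; the wrappers above then mix in the heap's hash seed.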
+
   // Returns the number of elements in the hash table.
   int NumberOfElements() {
     return Smi::cast(get(kNumberOfElementsIndex))->value();
@@ -2294,6 +2583,10 @@
       int at_least_space_for,
       PretenureFlag pretenure = NOT_TENURED);
 
+  // Computes the required capacity for a table holding the given
+  // number of elements. May be more than HashTable::kMaxCapacity.
+  static int ComputeCapacity(int at_least_space_for);
+
   // Returns the key at entry.
   Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
 
@@ -2341,7 +2634,6 @@
   int FindEntry(Isolate* isolate, Key key);
 
  protected:
-
   // Find the entry at which to insert element with the given key that
   // has the given hash value.
   uint32_t FindInsertionEntry(uint32_t hash);
@@ -2386,12 +2678,17 @@
     return (last + number) & (size - 1);
   }
 
+  // Rehashes this hash-table into the new table.
+  MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
+
+  // Attempt to shrink hash table after removal of key.
+  MUST_USE_RESULT MaybeObject* Shrink(Key key);
+
   // Ensure enough space for n additional elements.
   MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
 };
 
 
-
 // HashTableKey is an abstract superclass for virtual key behavior.
 class HashTableKey {
  public:
@@ -2408,7 +2705,8 @@
   virtual ~HashTableKey() {}
 };
 
-class SymbolTableShape {
+
+class SymbolTableShape : public BaseShape<HashTableKey*> {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -2427,6 +2725,8 @@
   static const int kEntrySize = 1;
 };
 
+class SeqAsciiString;
+
 // SymbolTable.
 //
 // No special elements in the prefix and the element size is 1
@@ -2440,6 +2740,11 @@
   MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s);
   MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
                                                  Object** s);
+  MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
+      Handle<SeqAsciiString> str,
+      int from,
+      int length,
+      Object** s);
   MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str,
                                                    Object** s);
   MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
@@ -2460,7 +2765,7 @@
 };
 
 
-class MapCacheShape {
+class MapCacheShape : public BaseShape<HashTableKey*> {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -2501,7 +2806,6 @@
 template <typename Shape, typename Key>
 class Dictionary: public HashTable<Shape, Key> {
  public:
-
   static inline Dictionary<Shape, Key>* cast(Object* obj) {
     return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
   }
@@ -2544,6 +2848,9 @@
   // Delete a property from the dictionary.
   Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
 
+  // Attempt to shrink the dictionary after deletion of key.
+  MUST_USE_RESULT MaybeObject* Shrink(Key key);
+
   // Returns the number of elements in the dictionary filtering out properties
   // with the specified attributes.
   int NumberOfElementsFilterAttributes(PropertyAttributes filter);
@@ -2551,10 +2858,13 @@
   // Returns the number of enumerable elements in the dictionary.
   int NumberOfEnumElements();
 
+  enum SortMode { UNSORTED, SORTED };
   // Copies keys to preallocated fixed array.
-  void CopyKeysTo(FixedArray* storage, PropertyAttributes filter);
+  void CopyKeysTo(FixedArray* storage,
+                  PropertyAttributes filter,
+                  SortMode sort_mode);
   // Fill in details for properties into storage.
-  void CopyKeysTo(FixedArray* storage, int index);
+  void CopyKeysTo(FixedArray* storage, int index, SortMode sort_mode);
 
   // Accessors for next enumeration index.
   void SetNextEnumerationIndex(int index) {
@@ -2611,7 +2921,7 @@
 };
 
 
-class StringDictionaryShape {
+class StringDictionaryShape : public BaseShape<String*> {
  public:
   static inline bool IsMatch(String* key, Object* other);
   static inline uint32_t Hash(String* key);
@@ -2644,23 +2954,42 @@
 };
 
 
-class NumberDictionaryShape {
+class NumberDictionaryShape : public BaseShape<uint32_t> {
  public:
   static inline bool IsMatch(uint32_t key, Object* other);
-  static inline uint32_t Hash(uint32_t key);
-  static inline uint32_t HashForObject(uint32_t key, Object* object);
   MUST_USE_RESULT static inline MaybeObject* AsObject(uint32_t key);
-  static const int kPrefixSize = 2;
   static const int kEntrySize = 3;
   static const bool kIsEnumerable = false;
 };
 
 
-class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
+class SeededNumberDictionaryShape : public NumberDictionaryShape {
  public:
-  static NumberDictionary* cast(Object* obj) {
+  static const bool UsesSeed = true;
+  static const int kPrefixSize = 2;
+
+  static inline uint32_t SeededHash(uint32_t key, uint32_t seed);
+  static inline uint32_t SeededHashForObject(uint32_t key,
+                                             uint32_t seed,
+                                             Object* object);
+};
+
+
+class UnseededNumberDictionaryShape : public NumberDictionaryShape {
+ public:
+  static const int kPrefixSize = 0;
+
+  static inline uint32_t Hash(uint32_t key);
+  static inline uint32_t HashForObject(uint32_t key, Object* object);
+};
+
+
+class SeededNumberDictionary
+    : public Dictionary<SeededNumberDictionaryShape, uint32_t> {
+ public:
+  static SeededNumberDictionary* cast(Object* obj) {
     ASSERT(obj->IsDictionary());
-    return reinterpret_cast<NumberDictionary*>(obj);
+    return reinterpret_cast<SeededNumberDictionary*>(obj);
   }
 
   // Type specific at put (default NONE attributes is used when adding).
@@ -2699,6 +3028,65 @@
 };
 
 
+class UnseededNumberDictionary
+    : public Dictionary<UnseededNumberDictionaryShape, uint32_t> {
+ public:
+  static UnseededNumberDictionary* cast(Object* obj) {
+    ASSERT(obj->IsDictionary());
+    return reinterpret_cast<UnseededNumberDictionary*>(obj);
+  }
+
+  // Type specific at put (default NONE attributes is used when adding).
+  MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
+  MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value);
+
+  // Set an existing entry or add a new one if needed.
+  MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value);
+};
+
+
+class ObjectHashTableShape : public BaseShape<Object*> {
+ public:
+  static inline bool IsMatch(JSObject* key, Object* other);
+  static inline uint32_t Hash(JSObject* key);
+  static inline uint32_t HashForObject(JSObject* key, Object* object);
+  MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
+  static const int kPrefixSize = 0;
+  static const int kEntrySize = 2;
+};
+
+
+// ObjectHashTable maps keys that are JavaScript objects to object values by
+// using the identity hash of the key for hashing purposes.
+class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
+ public:
+  static inline ObjectHashTable* cast(Object* obj) {
+    ASSERT(obj->IsHashTable());
+    return reinterpret_cast<ObjectHashTable*>(obj);
+  }
+
+  // Looks up the value associated with the given key. The undefined value is
+  // returned in case the key is not present.
+  Object* Lookup(JSObject* key);
+
+  // Adds (or overwrites) the value associated with the given key. Mapping a
+  // key to the undefined value causes removal of the whole entry.
+  MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
+
+ private:
+  friend class MarkCompactCollector;
+
+  void AddEntry(int entry, JSObject* key, Object* value);
+  void RemoveEntry(int entry, Heap* heap);
+  inline void RemoveEntry(int entry);
+
+  // Returns the index to the value of an entry.
+  static inline int EntryToValueIndex(int entry) {
+    return EntryToIndex(entry) + 1;
+  }
+};
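ObjectHashTable keys its entries by object identity, and Put removes the whole entry when the value is the undefined sentinel. A rough standalone sketch of that contract, with std::unordered_map standing in for the real open-addressed table:

#include <cstdio>
#include <unordered_map>

struct Obj { int tag; };
static Obj kUndefined = { -1 };  // sentinel playing the role of undefined

class IdentityMap {
 public:
  // Mapping a key to the undefined sentinel removes the whole entry.
  void Put(Obj* key, Obj* value) {
    if (value == &kUndefined) {
      map_.erase(key);
      return;
    }
    map_[key] = value;  // keys hash and compare by address, i.e. identity
  }
  Obj* Lookup(Obj* key) const {
    std::unordered_map<Obj*, Obj*>::const_iterator it = map_.find(key);
    return it == map_.end() ? &kUndefined : it->second;
  }
 private:
  std::unordered_map<Obj*, Obj*> map_;
};

int main() {
  Obj a = { 1 }, b = { 2 };
  IdentityMap map;
  map.Put(&a, &b);
  std::printf("%d\n", map.Lookup(&a)->tag);            // prints 2
  map.Put(&a, &kUndefined);                            // removes the entry
  std::printf("%d\n", map.Lookup(&a) == &kUndefined);  // prints 1
  return 0;
}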
+
+
 // JSFunctionResultCache caches results of some JSFunction invocation.
 // It is a fixed array with fixed structure:
 //   [0]: factory function
@@ -2755,23 +3143,14 @@
 #ifdef DEBUG
   void NormalizedMapCacheVerify();
 #endif
-
- private:
-  static int Hash(Map* fast);
-
-  static bool CheckHit(Map* slow, Map* fast, PropertyNormalizationMode mode);
 };
 
 
 // ByteArray represents fixed sized byte arrays.  Used by the outside world,
 // such as PCRE, and also by the memory allocator and garbage collector to
 // fill in free blocks in the heap.
-class ByteArray: public HeapObject {
+class ByteArray: public FixedArrayBase {
  public:
-  // [length]: length of the array.
-  inline int length();
-  inline void set_length(int value);
-
   // Setter and getter.
   inline byte get(int index);
   inline void set(int index, byte value);
@@ -2816,10 +3195,6 @@
 #endif
 
   // Layout description.
-  // Length is smi tagged when it is stored.
-  static const int kLengthOffset = HeapObject::kHeaderSize;
-  static const int kHeaderSize = kLengthOffset + kPointerSize;
-
   static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
 
   // Maximal memory consumption for a single ByteArray.
@@ -2843,11 +3218,9 @@
 // Out-of-range values passed to the setter are converted via a C
 // cast, not clamping. Out-of-range indices cause exceptions to be
 // raised rather than being silently ignored.
-class ExternalArray: public HeapObject {
+class ExternalArray: public FixedArrayBase {
  public:
-  // [length]: length of the array.
-  inline int length();
-  inline void set_length(int value);
+  inline bool is_the_hole(int index) { return false; }
 
   // [external_pointer]: The pointer to the external memory area backing this
   // external array.
@@ -2860,9 +3233,8 @@
   static const int kMaxLength = 0x3fffffff;
 
   // ExternalArray headers are not quadword aligned.
-  static const int kLengthOffset = HeapObject::kHeaderSize;
   static const int kExternalPointerOffset =
-      POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
+      POINTER_SIZE_ALIGN(FixedArrayBase::kLengthOffset + kPointerSize);
   static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
   static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
 
@@ -2884,7 +3256,8 @@
   inline uint8_t* external_pixel_pointer();
 
   // Setter and getter.
-  inline uint8_t get(int index);
+  inline uint8_t get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber and
@@ -2912,7 +3285,8 @@
 class ExternalByteArray: public ExternalArray {
  public:
   // Setter and getter.
-  inline int8_t get(int index);
+  inline int8_t get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, int8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
@@ -2940,7 +3314,8 @@
 class ExternalUnsignedByteArray: public ExternalArray {
  public:
   // Setter and getter.
-  inline uint8_t get(int index);
+  inline uint8_t get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
@@ -2968,7 +3343,8 @@
 class ExternalShortArray: public ExternalArray {
  public:
   // Setter and getter.
-  inline int16_t get(int index);
+  inline int16_t get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, int16_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
@@ -2996,7 +3372,8 @@
 class ExternalUnsignedShortArray: public ExternalArray {
  public:
   // Setter and getter.
-  inline uint16_t get(int index);
+  inline uint16_t get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint16_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
@@ -3024,7 +3401,8 @@
 class ExternalIntArray: public ExternalArray {
  public:
   // Setter and getter.
-  inline int32_t get(int index);
+  inline int32_t get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, int32_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
@@ -3052,7 +3430,8 @@
 class ExternalUnsignedIntArray: public ExternalArray {
  public:
   // Setter and getter.
-  inline uint32_t get(int index);
+  inline uint32_t get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint32_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
@@ -3080,7 +3459,8 @@
 class ExternalFloatArray: public ExternalArray {
  public:
   // Setter and getter.
-  inline float get(int index);
+  inline float get_scalar(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, float value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
@@ -3105,6 +3485,35 @@
 };
 
 
+class ExternalDoubleArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline double get_scalar(int index);
+  inline MaybeObject* get(int index);
+  inline void set(int index, double value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  MaybeObject* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalDoubleArray* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void ExternalDoubleArrayPrint() {
+    ExternalDoubleArrayPrint(stdout);
+  }
+  void ExternalDoubleArrayPrint(FILE* out);
+#endif  // OBJECT_PRINT
+#ifdef DEBUG
+  void ExternalDoubleArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
+};
+
+
 // DeoptimizationInputData is a fixed array used to hold the deoptimization
 // data for code generated by the Hydrogen/Lithium compiler.  It also
 // contains information about functions that were inlined.  If N different
@@ -3126,7 +3535,8 @@
   static const int kAstIdOffset = 0;
   static const int kTranslationIndexOffset = 1;
   static const int kArgumentsStackHeightOffset = 2;
-  static const int kDeoptEntrySize = 3;
+  static const int kPcOffset = 3;
+  static const int kDeoptEntrySize = 4;
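With kDeoptEntrySize now 4, each deoptimization entry occupies four consecutive slots of the underlying array, and a field is addressed as a base index plus entry * stride plus field offset. A sketch of that fixed-stride addressing; kHeaderSlots is an assumed placeholder, while the field offsets mirror the constants above.

#include <cstdio>
#include <vector>

static const int kHeaderSlots = 5;  // assumed count of leading scalar slots
static const int kAstIdOffset = 0;
static const int kTranslationIndexOffset = 1;
static const int kArgumentsStackHeightOffset = 2;
static const int kPcOffset = 3;
static const int kDeoptEntrySize = 4;

static int SlotIndex(int entry, int field_offset) {
  return kHeaderSlots + entry * kDeoptEntrySize + field_offset;
}

int main() {
  std::vector<int> data(kHeaderSlots + 3 * kDeoptEntrySize, 0);
  data[SlotIndex(2, kPcOffset)] = 0x1234;              // write Pc of entry 2
  std::printf("%x\n", data[SlotIndex(2, kPcOffset)]);  // prints 1234
  return 0;
}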
 
   // Simple element accessors.
 #define DEFINE_ELEMENT_ACCESSORS(name, type)      \
@@ -3162,6 +3572,7 @@
   DEFINE_ENTRY_ACCESSORS(AstId, Smi)
   DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
   DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+  DEFINE_ENTRY_ACCESSORS(Pc, Smi)
 
 #undef DEFINE_ENTRY_ACCESSORS
 
@@ -3176,7 +3587,7 @@
   // Casting.
   static inline DeoptimizationInputData* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
+#ifdef ENABLE_DISASSEMBLER
   void DeoptimizationInputDataPrint(FILE* out);
 #endif
 
@@ -3215,7 +3626,7 @@
   // Casting.
   static inline DeoptimizationOutputData* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
   void DeoptimizationOutputDataPrint(FILE* out);
 #endif
 };
@@ -3243,21 +3654,21 @@
     BUILTIN,
     LOAD_IC,
     KEYED_LOAD_IC,
-    KEYED_EXTERNAL_ARRAY_LOAD_IC,
     CALL_IC,
     KEYED_CALL_IC,
     STORE_IC,
     KEYED_STORE_IC,
-    KEYED_EXTERNAL_ARRAY_STORE_IC,
-    TYPE_RECORDING_BINARY_OP_IC,
+    UNARY_OP_IC,
+    BINARY_OP_IC,
     COMPARE_IC,
+    TO_BOOLEAN_IC,
     // No more than 16 kinds. The value currently encoded in four bits in
     // Flags.
 
     // Pseudo-kinds.
     REGEXP = BUILTIN,
     FIRST_IC_KIND = LOAD_IC,
-    LAST_IC_KIND = COMPARE_IC
+    LAST_IC_KIND = TO_BOOLEAN_IC
   };
 
   enum {
@@ -3291,6 +3702,12 @@
   // [deoptimization_data]: Array containing data for deopt.
   DECL_ACCESSORS(deoptimization_data, FixedArray)
 
+  // [next_code_flushing_candidate]: Field only used during garbage
+  // collection to hold code flushing candidates. The contents of this
+  // field do not have to be traced during garbage collection since
+  // it is only used by the garbage collector itself.
+  DECL_ACCESSORS(next_code_flushing_candidate, Object)
+
   // Unchecked accessors to be used during GC.
   inline ByteArray* unchecked_relocation_info();
   inline FixedArray* unchecked_deoptimization_data();
@@ -3305,7 +3722,6 @@
   inline Kind kind();
   inline InlineCacheState ic_state();  // Only valid for IC stubs.
   inline ExtraICState extra_ic_state();  // Only valid for IC stubs.
-  inline InLoopFlag ic_in_loop();  // Only valid for IC stubs.
   inline PropertyType type();  // Only valid for monomorphic IC stubs.
   inline int arguments_count();  // Only valid for call IC stubs.
 
@@ -3317,16 +3733,10 @@
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
   inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
-  inline bool is_type_recording_binary_op_stub() {
-    return kind() == TYPE_RECORDING_BINARY_OP_IC;
-  }
+  inline bool is_unary_op_stub() { return kind() == UNARY_OP_IC; }
+  inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
   inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
-  inline bool is_external_array_load_stub() {
-    return kind() == KEYED_EXTERNAL_ARRAY_LOAD_IC;
-  }
-  inline bool is_external_array_store_stub() {
-    return kind() == KEYED_EXTERNAL_ARRAY_STORE_IC;
-  }
+  inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
 
   // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
   inline int major_key();
@@ -3341,6 +3751,11 @@
   inline bool has_deoptimization_support();
   inline void set_has_deoptimization_support(bool value);
 
+  // [has_debug_break_slots]: For FUNCTION kind, tells if it has
+  // been compiled with debug break slots.
+  inline bool has_debug_break_slots();
+  inline void set_has_debug_break_slots(bool value);
+
   // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
   // how long the function has been marked for OSR and therefore which
   // level of loop nesting we are willing to do on-stack replacement
@@ -3368,23 +3783,24 @@
   inline CheckType check_type();
   inline void set_check_type(CheckType value);
 
-  // [external array type]: For kind KEYED_EXTERNAL_ARRAY_LOAD_IC and
-  // KEYED_EXTERNAL_ARRAY_STORE_IC, identifies the type of external
-  // array that the code stub is specialized for.
-  inline ExternalArrayType external_array_type();
-  inline void set_external_array_type(ExternalArrayType value);
+  // [type-recording unary op type]: For kind UNARY_OP_IC.
+  inline byte unary_op_type();
+  inline void set_unary_op_type(byte value);
 
-  // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
-  inline byte type_recording_binary_op_type();
-  inline void set_type_recording_binary_op_type(byte value);
-  inline byte type_recording_binary_op_result_type();
-  inline void set_type_recording_binary_op_result_type(byte value);
+  // [type-recording binary op type]: For kind BINARY_OP_IC.
+  inline byte binary_op_type();
+  inline void set_binary_op_type(byte value);
+  inline byte binary_op_result_type();
+  inline void set_binary_op_result_type(byte value);
 
-  // [compare state]: For kind compare IC stubs, tells what state the
-  // stub is in.
+  // [compare state]: For kind COMPARE_IC, tells what state the stub is in.
   inline byte compare_state();
   inline void set_compare_state(byte value);
 
+  // [to_boolean_state]: For kind TO_BOOLEAN_IC tells what state the stub is in.
+  inline byte to_boolean_state();
+  inline void set_to_boolean_state(byte value);
+
   // Get the safepoint entry for the given pc.
   SafepointEntry GetSafepointEntry(Address pc);
 
@@ -3398,7 +3814,6 @@
   // Flags operations.
   static inline Flags ComputeFlags(
       Kind kind,
-      InLoopFlag in_loop = NOT_IN_LOOP,
       InlineCacheState ic_state = UNINITIALIZED,
       ExtraICState extra_ic_state = kNoExtraICState,
       PropertyType type = NORMAL,
@@ -3410,16 +3825,15 @@
       PropertyType type,
       ExtraICState extra_ic_state = kNoExtraICState,
       InlineCacheHolderFlag holder = OWN_MAP,
-      InLoopFlag in_loop = NOT_IN_LOOP,
       int argc = -1);
 
-  static inline Kind ExtractKindFromFlags(Flags flags);
   static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
-  static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
-  static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
   static inline PropertyType ExtractTypeFromFlags(Flags flags);
-  static inline int ExtractArgumentsCountFromFlags(Flags flags);
+  static inline Kind ExtractKindFromFlags(Flags flags);
   static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
+  static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
+  static inline int ExtractArgumentsCountFromFlags(Flags flags);
+
   static inline Flags RemoveTypeFromFlags(Flags flags);
 
   // Convert a target address into a code object.
@@ -3504,9 +3918,12 @@
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
   static const int kDeoptimizationDataOffset =
       kRelocationInfoOffset + kPointerSize;
-  static const int kFlagsOffset = kDeoptimizationDataOffset + kPointerSize;
-  static const int kKindSpecificFlagsOffset  = kFlagsOffset + kIntSize;
+  static const int kNextCodeFlushingCandidateOffset =
+      kDeoptimizationDataOffset + kPointerSize;
+  static const int kFlagsOffset =
+      kNextCodeFlushingCandidateOffset + kPointerSize;
 
+  static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
   static const int kKindSpecificFlagsSize = 2 * kIntSize;
 
   static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
@@ -3522,38 +3939,37 @@
   static const int kOptimizableOffset = kKindSpecificFlagsOffset;
   static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
   static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
-  static const int kExternalArrayTypeOffset = kKindSpecificFlagsOffset;
 
-  static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+  static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
   static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
-  static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
+  static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+  static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
+
+  static const int kFullCodeFlags = kOptimizableOffset + 1;
+  class FullCodeFlagsHasDeoptimizationSupportField:
+      public BitField<bool, 0, 1> {};  // NOLINT
+  class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
-  static const int kAllowOSRAtLoopNestingLevelOffset =
-      kHasDeoptimizationSupportOffset + 1;
+
+  static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
 
   static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
   static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
 
-  // Flags layout.
-  static const int kFlagsICStateShift        = 0;
-  static const int kFlagsICInLoopShift       = 3;
-  static const int kFlagsTypeShift           = 4;
-  static const int kFlagsKindShift           = 8;
-  static const int kFlagsICHolderShift       = 12;
-  static const int kFlagsExtraICStateShift   = 13;
-  static const int kFlagsArgumentsCountShift = 15;
+  // Flags layout.  BitField<type, shift, size>.
+  class ICStateField: public BitField<InlineCacheState, 0, 3> {};
+  class TypeField: public BitField<PropertyType, 3, 4> {};
+  class KindField: public BitField<Kind, 7, 4> {};
+  class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
+  class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
 
-  static const int kFlagsICStateMask        = 0x00000007;  // 00000000111
-  static const int kFlagsICInLoopMask       = 0x00000008;  // 00000001000
-  static const int kFlagsTypeMask           = 0x000000F0;  // 00001110000
-  static const int kFlagsKindMask           = 0x00000F00;  // 11110000000
-  static const int kFlagsCacheInPrototypeMapMask = 0x00001000;
-  static const int kFlagsExtraICStateMask   = 0x00006000;
-  static const int kFlagsArgumentsCountMask = 0xFFFF8000;
+  // Signed field cannot be encoded using the BitField class.
+  static const int kArgumentsCountShift = 14;
+  static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
 
   static const int kFlagsNotUsedInLookup =
-      (kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
+      TypeField::kMask | CacheHolderField::kMask;
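The flags layout above replaces hand-maintained shift and mask constants with BitField<type, shift, size> classes. Below is a pared-down sketch of how such a bit field encodes and decodes; the template is a simplification and the enum values are placeholders, not V8's real numbering.

#include <cstdint>
#include <cstdio>

template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};

// Placeholder enums; the real InlineCacheState and Kind values differ.
enum ICState { UNINITIALIZED = 0, MONOMORPHIC = 3 };
enum Kind { STUB = 0, LOAD_IC = 5 };

typedef BitField<ICState, 0, 3> ICStateField;
typedef BitField<Kind, 7, 4> KindField;

int main() {
  uint32_t flags =
      ICStateField::encode(MONOMORPHIC) | KindField::encode(LOAD_IC);
  std::printf("kind=%d state=%d\n",
              static_cast<int>(KindField::decode(flags)),
              static_cast<int>(ICStateField::decode(flags)));
  return 0;
}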
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@@ -3597,6 +4013,13 @@
   inline byte bit_field2();
   inline void set_bit_field2(byte value);
 
+  // Bit field 3.
+  // TODO(1399): It should be possible to make room for bit_field3 in the map
+  // without overloading the instance descriptors field (and storing it in the
+  // DescriptorArray when the map has one).
+  inline int bit_field3();
+  inline void set_bit_field3(int value);
+
   // Tells whether the object in the prototype property will be used
   // for instances created from this function.  If the prototype
   // property is set to a value that is not a JSObject, the prototype
@@ -3665,31 +4088,37 @@
   inline void set_is_extensible(bool value);
   inline bool is_extensible();
 
+  inline void set_elements_kind(ElementsKind elements_kind) {
+    ASSERT(elements_kind < kElementsKindCount);
+    ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
+    set_bit_field2((bit_field2() & ~kElementsKindMask) |
+        (elements_kind << kElementsKindShift));
+    ASSERT(this->elements_kind() == elements_kind);
+  }
+
+  inline ElementsKind elements_kind() {
+    return static_cast<ElementsKind>(
+        (bit_field2() & kElementsKindMask) >> kElementsKindShift);
+  }
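elements_kind() and set_elements_kind() pack an ElementsKind value into bit_field2 with a shift and mask while leaving the other flag bits untouched. A standalone sketch of the same pack/unpack follows; the enum values here are placeholders rather than V8's actual ElementsKind numbering.

#include <cstdint>
#include <cstdio>

// Placeholder numbering; V8's real ElementsKind enum differs.
enum ElementsKind {
  FAST_ELEMENTS = 0,
  FAST_DOUBLE_ELEMENTS = 1,
  DICTIONARY_ELEMENTS = 2,
  kElementsKindCount = 3
};

static const int kElementsKindShift = 4;
static const int kElementsKindBitCount = 4;
static const int kElementsKindMask =
    ((1 << kElementsKindBitCount) - 1) << kElementsKindShift;

struct MapBits {
  uint8_t bit_field2;
  void set_elements_kind(ElementsKind kind) {
    bit_field2 = static_cast<uint8_t>(
        (bit_field2 & ~kElementsKindMask) | (kind << kElementsKindShift));
  }
  ElementsKind elements_kind() const {
    return static_cast<ElementsKind>(
        (bit_field2 & kElementsKindMask) >> kElementsKindShift);
  }
};

int main() {
  MapBits m = { 0x03 };  // low bits hold unrelated flags
  m.set_elements_kind(DICTIONARY_ELEMENTS);
  std::printf("kind=%d bit_field2=%02x\n",
              static_cast<int>(m.elements_kind()),
              static_cast<unsigned>(m.bit_field2));
  return 0;
}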
+
   // Tells whether the instance has fast elements.
   // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
-  inline void set_has_fast_elements(bool value) {
-    if (value) {
-      set_bit_field2(bit_field2() | (1 << kHasFastElements));
-    } else {
-      set_bit_field2(bit_field2() & ~(1 << kHasFastElements));
-    }
-  }
-
   inline bool has_fast_elements() {
-    return ((1 << kHasFastElements) & bit_field2()) != 0;
+    return elements_kind() == FAST_ELEMENTS;
   }
 
-  // Tells whether an instance has pixel array elements.
-  inline void set_has_external_array_elements(bool value) {
-    if (value) {
-      set_bit_field2(bit_field2() | (1 << kHasExternalArrayElements));
-    } else {
-      set_bit_field2(bit_field2() & ~(1 << kHasExternalArrayElements));
-    }
+  inline bool has_fast_double_elements() {
+    return elements_kind() == FAST_DOUBLE_ELEMENTS;
   }
 
   inline bool has_external_array_elements() {
-    return ((1 << kHasExternalArrayElements) & bit_field2()) != 0;
+    ElementsKind kind(elements_kind());
+    return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+        kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+  }
+
+  inline bool has_dictionary_elements() {
+    return elements_kind() == DICTIONARY_ELEMENTS;
   }
 
   // Tells whether the map is attached to SharedFunctionInfo
@@ -3718,9 +4147,17 @@
 
   inline JSFunction* unchecked_constructor();
 
+  // Should only be called by the code that initializes the map, to set the
+  // initial valid value of the instance descriptors member.
+  inline void init_instance_descriptors();
+
   // [instance descriptors]: describes the object.
   DECL_ACCESSORS(instance_descriptors, DescriptorArray)
 
+  // Sets the instance descriptor array for the map to be an empty descriptor
+  // array.
+  inline void clear_instance_descriptors();
+
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
 
@@ -3734,6 +4171,26 @@
   DECL_ACCESSORS(prototype_transitions, FixedArray)
   inline FixedArray* unchecked_prototype_transitions();
 
+  static const int kProtoTransitionHeaderSize = 1;
+  static const int kProtoTransitionNumberOfEntriesOffset = 0;
+  static const int kProtoTransitionElementsPerEntry = 2;
+  static const int kProtoTransitionPrototypeOffset = 0;
+  static const int kProtoTransitionMapOffset = 1;
+
+  inline int NumberOfProtoTransitions() {
+    FixedArray* cache = unchecked_prototype_transitions();
+    if (cache->length() == 0) return 0;
+    return
+        Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
+  }
+
+  inline void SetNumberOfProtoTransitions(int value) {
+    FixedArray* cache = unchecked_prototype_transitions();
+    ASSERT(cache->length() != 0);
+    cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
+                         Smi::FromInt(value));
+  }
+
   // Lookup in the map's instance descriptors and fill out the result
   // with the given holder if the name is found. The holder may be
   // NULL when this function is used from the compiler.
@@ -3750,20 +4207,25 @@
   // instance descriptors.
   MUST_USE_RESULT MaybeObject* CopyDropTransitions();
 
-  // Returns this map if it has the fast elements bit set, otherwise
+  // Returns this map if it already has elements that are fast, otherwise
   // returns a copy of the map, with all transitions dropped from the
-  // descriptors and the fast elements bit set.
+  // descriptors and the ElementsKind set to FAST_ELEMENTS.
   MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
 
-  // Returns this map if it has the fast elements bit cleared,
-  // otherwise returns a copy of the map, with all transitions dropped
-  // from the descriptors and the fast elements bit cleared.
+  // Returns this map if it already has fast elements that are doubles,
+  // otherwise returns a copy of the map, with all transitions dropped from the
+  // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
+  MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
+
+  // Returns this map if it already has dictionary elements, otherwise returns
+  // a copy of the map, with all transitions dropped from the descriptors and
+  // the ElementsKind set to DICTIONARY_ELEMENTS.
   MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
 
   // Returns a new map with all transitions dropped from the descriptors and the
-  // external array elements bit set.
-  MUST_USE_RESULT MaybeObject* GetExternalArrayElementsMap(
-      ExternalArrayType array_type,
+  // ElementsKind set.
+  MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
+      ElementsKind elements_kind,
       bool safe_to_add_transition);
 
   // Returns the property index for name (only valid for FAST MODE).
@@ -3810,6 +4272,21 @@
   // following back pointers.
   void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
 
+  // Computes a hash value for this map, to be used in HashTables and such.
+  int Hash();
+
+  // Compares this map to another to see if they describe equivalent objects.
+  // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
+  // it had exactly zero inobject properties.
+  // The "shared" flags of both this map and |other| are ignored.
+  bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
+
+  // Returns true if this map and |other| describe equivalent objects.
+  // The "shared" flags of both this map and |other| are ignored.
+  bool EquivalentTo(Map* other) {
+    return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
+  }
+
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
   inline void MapPrint() {
@@ -3846,9 +4323,19 @@
   static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
   static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
   static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
-  static const int kInstanceDescriptorsOffset =
+  // Storage for instance descriptors is overloaded to also contain additional
+  // map flags when unused (bit_field3). When the map has instance descriptors,
+  // the flags are transferred to the instance descriptor array and accessed
+  // through an extra indirection.
+  // TODO(1399): It should be possible to make room for bit_field3 in the map
+  // without overloading the instance descriptors field, but the map is
+  // currently perfectly aligned to 32 bytes and extending it at all would
+  // double its size.  After the incremental GC work lands, this size
+  // restriction could be loosened and bit_field3 moved directly back in
+  // the map.
+  static const int kInstanceDescriptorsOrBitField3Offset =
       kConstructorOffset + kPointerSize;
-  static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
+  static const int kCodeCacheOffset =
+      kInstanceDescriptorsOrBitField3Offset + kPointerSize;
   static const int kPrototypeTransitionsOffset =
       kCodeCacheOffset + kPointerSize;
   static const int kPadStart = kPrototypeTransitionsOffset + kPointerSize;
@@ -3892,11 +4379,21 @@
   // Bit positions for bit field 2
   static const int kIsExtensible = 0;
   static const int kFunctionWithPrototype = 1;
-  static const int kHasFastElements = 2;
-  static const int kStringWrapperSafeForDefaultValueOf = 3;
-  static const int kAttachedToSharedFunctionInfo = 4;
-  static const int kIsShared = 5;
-  static const int kHasExternalArrayElements = 6;
+  static const int kStringWrapperSafeForDefaultValueOf = 2;
+  static const int kAttachedToSharedFunctionInfo = 3;
+  // No bits can be used after kElementsKindShift; they are all reserved for
+  // storing the ElementsKind.
+  static const int kElementsKindShift = 4;
+  static const int kElementsKindBitCount = 4;
+
+  // Derived values from bit field 2
+  static const int kElementsKindMask = (-1 << kElementsKindShift) &
+      ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
+  static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
+      (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
+
+  // Bit positions for bit field 3
+  static const int kIsShared = 0;
 
   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
@@ -3913,7 +4410,7 @@
 
 
 // An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to me
+// It doesn't carry much functionality but allows struct classes to be
 // identified in the type system.
 class Struct: public HeapObject {
  public:
@@ -3961,7 +4458,7 @@
   DECL_ACCESSORS(context_data, Object)
 
   // [wrapper]: the wrapper cache.
-  DECL_ACCESSORS(wrapper, Proxy)
+  DECL_ACCESSORS(wrapper, Foreign)
 
   // [type]: the script type.
   DECL_ACCESSORS(type, Smi)
@@ -4192,9 +4689,7 @@
   // False if there are definitely no live objects created from this function.
   // True if live objects _may_ exist (existence not guaranteed).
   // May go back from true to false after GC.
-  inline bool live_objects_may_exist();
-
-  inline void set_live_objects_may_exist(bool value);
+  DECL_BOOLEAN_ACCESSORS(live_objects_may_exist)
 
   // [instance class name]: class name for instances.
   DECL_ACCESSORS(instance_class_name, Object)
@@ -4253,12 +4748,10 @@
   inline void set_end_position(int end_position);
 
   // Is this function a function expression in the source code.
-  inline bool is_expression();
-  inline void set_is_expression(bool value);
+  DECL_BOOLEAN_ACCESSORS(is_expression)
 
   // Is this function a top-level function (scripts, evals).
-  inline bool is_toplevel();
-  inline void set_is_toplevel(bool value);
+  DECL_BOOLEAN_ACCESSORS(is_toplevel)
 
   // Bit field containing various information collected by the compiler to
   // drive optimization.
@@ -4285,8 +4778,7 @@
   // Indicates if this function can be lazy compiled.
   // This is used to determine if we can safely flush code from a function
   // when doing GC if we expect that the function will no longer be used.
-  inline bool allows_lazy_compilation();
-  inline void set_allows_lazy_compilation(bool flag);
+  DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
 
   // Indicates how many full GCs this function has survived with assigned
   // code object. Used to determine when it is relatively safe to flush
@@ -4300,12 +4792,36 @@
   // shared function info. If a function is repeatedly optimized or if
   // we cannot optimize the function we disable optimization to avoid
   // spending time attempting to optimize it again.
-  inline bool optimization_disabled();
-  inline void set_optimization_disabled(bool value);
+  DECL_BOOLEAN_ACCESSORS(optimization_disabled)
 
   // Indicates whether the function is a strict mode function.
-  inline bool strict_mode();
-  inline void set_strict_mode(bool value);
+  DECL_BOOLEAN_ACCESSORS(strict_mode)
+
+  // False if the function definitely does not allocate an arguments object.
+  DECL_BOOLEAN_ACCESSORS(uses_arguments)
+
+  // True if the function has any duplicated parameter names.
+  DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
+
+  // Indicates whether the function is a native function.
+  // These need special treatment in .call and .apply since
+  // null passed as the receiver should not be translated to the
+  // global object.
+  DECL_BOOLEAN_ACCESSORS(native)
+
+  // Indicates that the function was created by the Function function.
+  // Though it's anonymous, toString should treat it as if it had the name
+  // "anonymous".  We don't set the name itself so that the system does not
+  // see a binding for it.
+  DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
+
+  // Indicates whether the function is a bound function created using
+  // the bind function.
+  DECL_BOOLEAN_ACCESSORS(bound)
+
+  // Indicates that the function is anonymous (the name field can be set
+  // through the API, which does not change this flag).
+  DECL_BOOLEAN_ACCESSORS(is_anonymous)
 
   // Indicates whether or not the code in the shared function supports
   // deoptimization.
@@ -4314,6 +4830,11 @@
   // Enable deoptimization support through recompiled code.
   void EnableDeoptimizationSupport(Code* recompiled);
 
+  // Disable (further) attempted optimization of all functions sharing this
+  // shared function info.  The function is the one we actually tried to
+  // optimize.
+  void DisableOptimization(JSFunction* function);
+
   // Lookup the bailout ID and ASSERT that it exists in the non-optimized
   // code, returns whether it asserted (i.e., always true if assertions are
   // disabled).
@@ -4480,13 +5001,24 @@
   static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
 
   // Bit positions in compiler_hints.
-  static const int kHasOnlySimpleThisPropertyAssignments = 0;
-  static const int kAllowLazyCompilation = 1;
-  static const int kLiveObjectsMayExist = 2;
-  static const int kCodeAgeShift = 3;
-  static const int kCodeAgeMask = 0x7;
-  static const int kOptimizationDisabled = 6;
-  static const int kStrictModeFunction = 7;
+  static const int kCodeAgeSize = 3;
+  static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
+
+  enum CompilerHints {
+    kHasOnlySimpleThisPropertyAssignments,
+    kAllowLazyCompilation,
+    kLiveObjectsMayExist,
+    kCodeAgeShift,
+    kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
+    kStrictModeFunction,
+    kUsesArguments,
+    kHasDuplicateParameters,
+    kNative,
+    kBoundFunction,
+    kIsAnonymous,
+    kNameShouldPrintAsAnonymous,
+    kCompilerHintsCount  // Pseudo entry
+  };
 
  private:
 #if V8_HOST_ARCH_32_BIT
@@ -4499,19 +5031,31 @@
   static const int kCompilerHintsSize = kIntSize;
 #endif
 
+  STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount <=
+                SharedFunctionInfo::kCompilerHintsSize * kBitsPerByte);
+
  public:
-  // Constants for optimizing codegen for strict mode function tests.
+  // Constants for optimizing codegen for strict mode function and
+  // native tests.
   // Allows use of byte-width instructions.
   static const int kStrictModeBitWithinByte =
       (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
+  static const int kNativeBitWithinByte =
+      (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
 #if __BYTE_ORDER == __LITTLE_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
-    (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+      (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+  static const int kNativeByteOffset = kCompilerHintsOffset +
+      (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
 #elif __BYTE_ORDER == __BIG_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
-    (kCompilerHintsSize - 1) -
-    ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+      (kCompilerHintsSize - 1) -
+      ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+  static const int kNativeByteOffset = kCompilerHintsOffset +
+      (kCompilerHintsSize - 1) -
+      ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
 #else
 #error Unknown byte ordering
 #endif
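kStrictModeByteOffset and kNativeByteOffset locate the single byte that holds a given flag bit inside the multi-byte compiler-hints field, mirroring the index on big-endian targets. A sketch of that bit-to-byte arithmetic; the field size, smi-tag adjustment, and bit position are illustrative assumptions.

#include <cstdio>

static const int kBitsPerByte = 8;
static const int kFieldSizeInBytes = 4;  // assumed size of the hints field
static const int kSmiTagBits = 1;        // assumed smi-tag adjustment

static int ByteOffset(int bit, bool little_endian) {
  int byte_in_field = (bit + kSmiTagBits) / kBitsPerByte;
  return little_endian ? byte_in_field
                       : (kFieldSizeInBytes - 1) - byte_in_field;
}

static int BitWithinByte(int bit) {
  return (bit + kSmiTagBits) % kBitsPerByte;
}

int main() {
  const int kStrictModeBit = 9;  // placeholder bit position
  std::printf("LE byte %d, BE byte %d, bit-in-byte %d\n",
              ByteOffset(kStrictModeBit, true),
              ByteOffset(kStrictModeBit, false),
              BitWithinByte(kStrictModeBit));
  return 0;
}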
@@ -4527,7 +5071,7 @@
   // [prototype_or_initial_map]:
   DECL_ACCESSORS(prototype_or_initial_map, Object)
 
-  // [shared_function_info]: The information about the function that
+  // [shared]: The information about the function that
   // can be shared by instances.
   DECL_ACCESSORS(shared, SharedFunctionInfo)
 
@@ -4568,9 +5112,6 @@
   // recompilation.
   inline bool IsMarkedForLazyRecompilation();
 
-  // Compute a hash code for the source code of this function.
-  uint32_t SourceHash();
-
   // Check whether or not this function is inlineable.
   bool IsInlineable();
 
@@ -4670,6 +5211,7 @@
   // Layout of the literals array.
   static const int kLiteralsPrefixSize = 1;
   static const int kLiteralGlobalContextIndex = 0;
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
 };
@@ -4685,7 +5227,7 @@
 
 class JSGlobalProxy : public JSObject {
  public:
-  // [context]: the owner global context of this proxy object.
+  // [context]: the owner global context of this global proxy object.
   // It is null value if this object is not used by any context.
   DECL_ACCESSORS(context, Object)
 
@@ -4708,7 +5250,6 @@
   static const int kSize = kContextOffset + kPointerSize;
 
  private:
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
 };
 
@@ -4764,7 +5305,6 @@
 // JavaScript global object.
 class JSGlobalObject: public GlobalObject {
  public:
-
   // Casting.
   static inline JSGlobalObject* cast(Object* obj);
 
@@ -4936,8 +5476,10 @@
 // If it is an atom regexp
 // - a reference to a literal string to search for
 // If it is an irregexp regexp:
-// - a reference to code for ASCII inputs (bytecode or compiled).
-// - a reference to code for UC16 inputs (bytecode or compiled).
+// - a reference to code for ASCII inputs (bytecode or compiled), or a smi
+// used for tracking the last usage (used for code flushing).
+// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
+// used for tracking the last usage (used for code flushing).
 // - max number of registers used by irregexp implementations.
 // - number of capture registers (output values) of the regexp.
 class JSRegExp: public JSObject {
@@ -4970,6 +5512,12 @@
   inline Object* DataAt(int index);
   // Set implementation data after the object has been prepared.
   inline void SetDataAt(int index, Object* value);
+
+  // Used during GC when flushing code or setting age.
+  inline Object* DataAtUnchecked(int index);
+  inline void SetDataAtUnchecked(int index, Object* value, Heap* heap);
+  inline Type TypeTagUnchecked();
+
   static int code_index(bool is_ascii) {
     if (is_ascii) {
       return kIrregexpASCIICodeIndex;
@@ -4978,6 +5526,14 @@
     }
   }
 
+  static int saved_code_index(bool is_ascii) {
+    if (is_ascii) {
+      return kIrregexpASCIICodeSavedIndex;
+    } else {
+      return kIrregexpUC16CodeSavedIndex;
+    }
+  }
+
   static inline JSRegExp* cast(Object* obj);
 
   // Dispatched behavior.
@@ -5008,11 +5564,19 @@
   // fails, this fields hold an exception object that should be
   // thrown if the regexp is used again.
   static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
+
+  // Saved instance of Irregexp compiled code or bytecode for ASCII that
+  // is a potential candidate for flushing.
+  static const int kIrregexpASCIICodeSavedIndex = kDataIndex + 2;
+  // Saved instance of Irregexp compiled code or bytecode for UC16 that is
+  // a potential candidate for flushing.
+  static const int kIrregexpUC16CodeSavedIndex = kDataIndex + 3;
+
   // Maximal number of registers used by either ASCII or UC16.
   // Only used to check that there is enough stack space
-  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
+  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
   // Number of captures in the compiled regexp.
-  static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
+  static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
 
   static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
 
@@ -5033,10 +5597,22 @@
   static const int kMultilineFieldIndex = 3;
   static const int kLastIndexFieldIndex = 4;
   static const int kInObjectFieldCount = 5;
+
+  // The uninitialized value for a regexp code object.
+  static const int kUninitializedValue = -1;
+
+  // The compilation error value for the regexp code object. The real error
+  // object is in the saved code field.
+  static const int kCompilationErrorValue = -2;
+
+  // When we store the sweep generation at which we moved the code from the
+  // code index to the saved code index we mask it off to be in the [0:255]
+  // range.
+  static const int kCodeAgeMask = 0xff;
 };
 
 
-class CompilationCacheShape {
+class CompilationCacheShape : public BaseShape<HashTableKey*> {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -5136,7 +5712,7 @@
 };
 
 
-class CodeCacheHashTableShape {
+class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -5178,13 +5754,56 @@
 };
 
 
+class PolymorphicCodeCache: public Struct {
+ public:
+  DECL_ACCESSORS(cache, Object)
+
+  MUST_USE_RESULT MaybeObject* Update(MapList* maps,
+                                      Code::Flags flags,
+                                      Code* code);
+  Object* Lookup(MapList* maps, Code::Flags flags);
+
+  static inline PolymorphicCodeCache* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void PolymorphicCodeCachePrint() {
+    PolymorphicCodeCachePrint(stdout);
+  }
+  void PolymorphicCodeCachePrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void PolymorphicCodeCacheVerify();
+#endif
+
+  static const int kCacheOffset = HeapObject::kHeaderSize;
+  static const int kSize = kCacheOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCache);
+};
+
+
+class PolymorphicCodeCacheHashTable
+    : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
+ public:
+  Object* Lookup(MapList* maps, int code_kind);
+  MUST_USE_RESULT MaybeObject* Put(MapList* maps, int code_kind, Code* code);
+
+  static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
+
+  static const int kInitialSize = 64;
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCacheHashTable);
+};
+
+
 enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
 enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
 
 
 class StringHasher {
  public:
-  explicit inline StringHasher(int length);
+  explicit inline StringHasher(int length, uint32_t seed);
 
   // Returns true if the hash of this string can be computed without
   // looking at the contents.
@@ -5215,8 +5834,12 @@
   // value is the represented decimal value.
   static uint32_t MakeArrayIndexHash(uint32_t value, int length);
 
- private:
+  // No string is allowed to have a hash of zero.  That value is reserved
+  // for internal properties.  If the hash calculation yields zero then we
+  // use 27 instead.
+  static const int kZeroHash = 27;
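StringHasher now takes a seed, and the comment above reserves the hash value zero for internal use, substituting 27. A standalone sketch of a seeded running hash with that zero remap; the mixing steps are illustrative and not V8's exact algorithm.

#include <cstdint>
#include <cstdio>

static const uint32_t kZeroHash = 27;

static uint32_t HashString(const char* chars, uint32_t seed) {
  uint32_t hash = seed;
  for (const char* p = chars; *p != '\0'; ++p) {
    hash += static_cast<uint8_t>(*p);
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = kZeroHash;  // zero is reserved for internal use
  return hash;
}

int main() {
  std::printf("%08x\n", HashString("hello", 0xbadcab1e));
  return 0;
}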
 
+ private:
   uint32_t array_index() {
     ASSERT(is_array_index());
     return array_index_;
@@ -5236,7 +5859,9 @@
 
 // Calculates string hash.
 template <typename schar>
-inline uint32_t HashSequentialString(const schar* chars, int length);
+inline uint32_t HashSequentialString(const schar* chars,
+                                     int length,
+                                     uint32_t seed);
 
 
 // The characteristics of a string are stored in its map.  Retrieving these
@@ -5258,12 +5883,15 @@
   inline bool IsSequential();
   inline bool IsExternal();
   inline bool IsCons();
+  inline bool IsSliced();
+  inline bool IsIndirect();
   inline bool IsExternalAscii();
   inline bool IsExternalTwoByte();
   inline bool IsSequentialAscii();
   inline bool IsSequentialTwoByte();
   inline bool IsSymbol();
   inline StringRepresentationTag representation_tag();
+  inline uint32_t encoding_tag();
   inline uint32_t full_representation_tag();
   inline uint32_t size_tag();
 #ifdef DEBUG
@@ -5273,6 +5901,7 @@
 #else
   inline void invalidate() { }
 #endif
+
  private:
   uint32_t type_;
 #ifdef DEBUG
@@ -5294,6 +5923,51 @@
 // All string values have a length field.
 class String: public HeapObject {
  public:
+  // Representation of the flat content of a String.
+  // A non-flat string doesn't have flat content.
+  // A flat string has content that's encoded as a sequence of either
+  // ASCII chars or two-byte UC16.
+  // Returned by String::GetFlatContent().
+  class FlatContent {
+   public:
+    // Returns true if the string is flat and this structure contains content.
+    bool IsFlat() { return state_ != NON_FLAT; }
+    // Returns true if the structure contains ASCII content.
+    bool IsAscii() { return state_ == ASCII; }
+    // Returns true if the structure contains two-byte content.
+    bool IsTwoByte() { return state_ == TWO_BYTE; }
+
+    // Return the ASCII content of the string. Only use if IsAscii() returns
+    // true.
+    Vector<const char> ToAsciiVector() {
+      ASSERT_EQ(ASCII, state_);
+      return Vector<const char>::cast(buffer_);
+    }
+    // Return the two-byte content of the string. Only use if IsTwoByte()
+    // returns true.
+    Vector<const uc16> ToUC16Vector() {
+      ASSERT_EQ(TWO_BYTE, state_);
+      return Vector<const uc16>::cast(buffer_);
+    }
+
+   private:
+    enum State { NON_FLAT, ASCII, TWO_BYTE };
+
+    // Constructors only used by String::GetFlatContent().
+    explicit FlatContent(Vector<const char> chars)
+        : buffer_(Vector<const byte>::cast(chars)),
+          state_(ASCII) { }
+    explicit FlatContent(Vector<const uc16> chars)
+        : buffer_(Vector<const byte>::cast(chars)),
+          state_(TWO_BYTE) { }
+    FlatContent() : buffer_(), state_(NON_FLAT) { }
+
+    Vector<const byte> buffer_;
+    State state_;
+
+    friend class String;
+  };
+
   // Get and set the length of the string.
   inline int length();
   inline void set_length(int value);
@@ -5302,14 +5976,19 @@
   inline uint32_t hash_field();
   inline void set_hash_field(uint32_t value);
 
+  // Returns whether this string has only ASCII chars, i.e. all of them can
+  // be ASCII encoded.  This might be the case even if the string is
+  // two-byte.  Such strings may appear when the embedder prefers
+  // two-byte external representations even for ASCII data.
   inline bool IsAsciiRepresentation();
   inline bool IsTwoByteRepresentation();
 
-  // Returns whether this string has ascii chars, i.e. all of them can
-  // be ascii encoded.  This might be the case even if the string is
-  // two-byte.  Such strings may appear when the embedder prefers
-  // two-byte external representations even for ascii data.
-  //
+  // Cons and slices have an encoding flag that may not represent the actual
+  // encoding of the underlying string.  This is taken into account here.
+  // Requires: this->IsFlat()
+  inline bool IsAsciiRepresentationUnderneath();
+  inline bool IsTwoByteRepresentationUnderneath();
+
   // NOTE: this should be considered only a hint.  False negatives are
   // possible.
   inline bool HasOnlyAsciiChars();
@@ -5342,8 +6021,16 @@
   // string.
   inline String* TryFlattenGetString(PretenureFlag pretenure = NOT_TENURED);
 
-  Vector<const char> ToAsciiVector();
-  Vector<const uc16> ToUC16Vector();
+  // Tries to return the content of a flat string as a structure holding either
+  // a flat vector of char or of uc16.
+  // If the string isn't flat, and therefore doesn't have flat content, the
+  // returned structure will report so, and can't provide a vector of either
+  // kind.
+  FlatContent GetFlatContent();
+
+  // Returns the parent of a sliced string or the first part of a flat cons
+  // string.
+  // Requires: StringShape(this).IsIndirect() && this->IsFlat()
+  inline String* GetUnderlying();
 
   // Mark the string as an undetectable object. It only applies to
   // ascii and two byte string types.
@@ -5368,12 +6055,12 @@
   // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust  This means it
   // handles unexpected data without causing assert failures and it does not
   // do any heap allocations.  This is useful when printing stack traces.
-  SmartPointer<char> ToCString(AllowNullsFlag allow_nulls,
-                               RobustnessFlag robustness_flag,
-                               int offset,
-                               int length,
-                               int* length_output = 0);
-  SmartPointer<char> ToCString(
+  SmartArrayPointer<char> ToCString(AllowNullsFlag allow_nulls,
+                                    RobustnessFlag robustness_flag,
+                                    int offset,
+                                    int length,
+                                    int* length_output = 0);
+  SmartArrayPointer<char> ToCString(
       AllowNullsFlag allow_nulls = DISALLOW_NULLS,
       RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
       int* length_output = 0);
@@ -5386,7 +6073,7 @@
   // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust  This means it
   // handles unexpected data without causing assert failures and it does not
   // do any heap allocations.  This is useful when printing stack traces.
-  SmartPointer<uc16> ToWideCString(
+  SmartArrayPointer<uc16> ToWideCString(
       RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
 
   // Tells whether the hash code has been computed.
@@ -5396,7 +6083,8 @@
   inline uint32_t Hash();
 
   static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
-                                   int length);
+                                   int length,
+                                   uint32_t seed);
 
   static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
                                 uint32_t* index,
@@ -5424,6 +6112,8 @@
     StringPrint(stdout);
   }
   void StringPrint(FILE* out);
+
+  char* ToAsciiArray();
 #endif
 #ifdef DEBUG
   void StringVerify();
@@ -5459,6 +6149,10 @@
   // Shift constant retrieving hash code from hash field.
   static const int kHashShift = kNofHashBitFields;
 
+  // Only these bits are relevant in the hash, since the top two are shifted
+  // out.
+  static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
+
   // Array index strings this short can keep their index in the hash
   // field.
   static const int kMaxCachedArrayIndexLength = 7;
@@ -5610,7 +6304,6 @@
 // The SeqString abstract class captures sequential string values.
 class SeqString: public String {
  public:
-
   // Casting.
   static inline SeqString* cast(Object* obj);
 
@@ -5772,11 +6465,68 @@
   typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
           BodyDescriptor;
 
+#ifdef DEBUG
+  void ConsStringVerify();
+#endif
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
 };
 
 
+// The Sliced String class describes strings that are substrings of another
+// sequential string.  The motivation is to save time and memory when creating
+// a substring.  A Sliced String is described as a pointer to the parent,
+// the offset from the start of the parent string and the length.  Using
+// a Sliced String therefore requires unpacking of the parent string and
+// adding the offset to the start address.  Substrings of a Sliced String
+// are not nested since the double indirection is simplified when creating
+// such a substring.
+// Currently missing features are:
+//  - handling externalized parent strings
+//  - external strings as parent
+//  - truncating sliced strings to enable otherwise unneeded parents to be GC'ed.
+class SlicedString: public String {
+ public:
+  inline String* parent();
+  inline void set_parent(String* parent);
+  inline int offset();
+  inline void set_offset(int offset);
+
+  // Dispatched behavior.
+  uint16_t SlicedStringGet(int index);
+
+  // Casting.
+  static inline SlicedString* cast(Object* obj);
+
+  // Layout description.
+  static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize);
+  static const int kOffsetOffset = kParentOffset + kPointerSize;
+  static const int kSize = kOffsetOffset + kPointerSize;
+
+  // Support for StringInputBuffer
+  inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
+                                                    unsigned* offset_ptr,
+                                                    unsigned chars);
+  inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                              unsigned* offset_ptr,
+                                              unsigned chars);
+  // Minimum length for a sliced string.
+  static const int kMinLength = 13;
+
+  typedef FixedBodyDescriptor<kParentOffset,
+                              kOffsetOffset + kPointerSize, kSize>
+          BodyDescriptor;
+
+#ifdef DEBUG
+  void SlicedStringVerify();
+#endif
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
+};
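A SlicedString is described by (parent, offset, length), and slicing a slice re-points at the original parent so the indirection never nests. A standalone sketch of that representation, with std::string standing in for the heap-allocated parent string:

#include <cstdio>
#include <string>

struct Slice {
  const std::string* parent;
  int offset;
  int length;

  char At(int index) const { return (*parent)[offset + index]; }

  // Slicing a slice re-points at the original parent, keeping indirection flat.
  Slice SubSlice(int start, int len) const {
    Slice s = { parent, offset + start, len };
    return s;
  }
};

int main() {
  std::string parent = "the quick brown fox";
  Slice quick = { &parent, 4, 5 };    // "quick", no copy made
  Slice uick = quick.SubSlice(1, 4);  // still backed directly by `parent`
  for (int i = 0; i < uick.length; ++i) std::putchar(uick.At(i));
  std::putchar('\n');
  return 0;
}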
+
+
 // The ExternalString class describes string values that are backed by
 // a string resource that lies outside the V8 heap.  ExternalStrings
 // consist of the length field common to all strings, a pointer to the
@@ -5889,8 +6639,8 @@
 
   static void PostGarbageCollectionProcessing();
   static int ArchiveSpacePerThread();
-  static char* ArchiveState(char* to);
-  static char* RestoreState(char* from);
+  static char* ArchiveState(Isolate* isolate, char* to);
+  static char* RestoreState(Isolate* isolate, char* from);
   static void Iterate(ObjectVisitor* v);
   static void Iterate(ObjectVisitor* v, Relocatable* top);
   static char* Iterate(ObjectVisitor* v, char* t);
@@ -6044,44 +6794,180 @@
 };
 
 
-
-// Proxy describes objects pointing from JavaScript to C structures.
-// Since they cannot contain references to JS HeapObjects they can be
-// placed in old_data_space.
-class Proxy: public HeapObject {
+// The JSProxy describes EcmaScript Harmony proxies
+class JSProxy: public JSReceiver {
  public:
-  // [proxy]: field containing the address.
-  inline Address proxy();
-  inline void set_proxy(Address value);
+  // [handler]: The handler property.
+  DECL_ACCESSORS(handler, Object)
 
   // Casting.
-  static inline Proxy* cast(Object* obj);
+  static inline JSProxy* cast(Object* obj);
+
+  bool HasPropertyWithHandler(String* name);
+
+  MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
+      String* name,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode);
+
+  MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
+      String* name,
+      DeleteMode mode);
+
+  MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
+      JSReceiver* receiver,
+      String* name,
+      bool* has_exception);
+
+  // Turn this into an (empty) JSObject.
+  void Fix();
+
+  // Initializes the body after the handler slot.
+  inline void InitializeBody(int object_size, Object* value);
 
   // Dispatched behavior.
-  inline void ProxyIterateBody(ObjectVisitor* v);
-
-  template<typename StaticVisitor>
-  inline void ProxyIterateBody();
-
 #ifdef OBJECT_PRINT
-  inline void ProxyPrint() {
-    ProxyPrint(stdout);
+  inline void JSProxyPrint() {
+    JSProxyPrint(stdout);
   }
-  void ProxyPrint(FILE* out);
+  void JSProxyPrint(FILE* out);
 #endif
 #ifdef DEBUG
-  void ProxyVerify();
+  void JSProxyVerify();
+#endif
+
+  // Layout description. We add padding so that a proxy has the same
+  // size as a virgin JSObject. This is essential for becoming a JSObject
+  // upon freeze.
+  static const int kHandlerOffset = HeapObject::kHeaderSize;
+  static const int kPaddingOffset = kHandlerOffset + kPointerSize;
+  static const int kSize = JSObject::kHeaderSize;
+  static const int kHeaderSize = kPaddingOffset;
+  static const int kPaddingSize = kSize - kPaddingOffset;
+
+  STATIC_CHECK(kPaddingSize >= 0);
+
+  typedef FixedBodyDescriptor<kHandlerOffset,
+                              kHandlerOffset + kPointerSize,
+                              kSize> BodyDescriptor;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
+};
+
+
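
The padding constants above exist so that Fix() can rewrite a proxy into an (empty) JSObject in place, which only works if the proxy layout never exceeds the JSObject layout. A compile-time sketch of the same arithmetic, using invented stand-in constants (a 4-byte pointer and a map/properties/elements header are assumptions for illustration, not values taken from this diff):

    // Stand-in layout constants; the real values come from objects.h.
    constexpr int kPointerSize = 4;                      // assume a 32-bit build
    constexpr int kHeapObjectHeaderSize = kPointerSize;  // map
    constexpr int kJSObjectHeaderSize =
        kHeapObjectHeaderSize + 2 * kPointerSize;        // + properties, elements

    constexpr int kHandlerOffset = kHeapObjectHeaderSize;
    constexpr int kPaddingOffset = kHandlerOffset + kPointerSize;
    constexpr int kSize = kJSObjectHeaderSize;  // a proxy occupies a full JSObject
    constexpr int kPaddingSize = kSize - kPaddingOffset;

    // Mirrors STATIC_CHECK(kPaddingSize >= 0): the proxy must never be larger
    // than the JSObject it can be turned into.
    static_assert(kPaddingSize >= 0, "proxy must fit inside an empty JSObject");

    int main() { return kPaddingSize; }  // 4 under these assumed constants
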
+class JSFunctionProxy: public JSProxy {
+ public:
+  // [call_trap]: The call trap.
+  DECL_ACCESSORS(call_trap, Object)
+
+  // [construct_trap]: The construct trap.
+  DECL_ACCESSORS(construct_trap, Object)
+
+  // Casting.
+  static inline JSFunctionProxy* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSFunctionProxyPrint() {
+    JSFunctionProxyPrint(stdout);
+  }
+  void JSFunctionProxyPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSFunctionProxyVerify();
+#endif
+
+  // Layout description.
+  static const int kCallTrapOffset = kHandlerOffset + kPointerSize;
+  static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
+  static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
+  static const int kSize = JSFunction::kSize;
+  static const int kPaddingSize = kSize - kPaddingOffset;
+
+  STATIC_CHECK(kPaddingSize >= 0);
+
+  typedef FixedBodyDescriptor<kHandlerOffset,
+                              kConstructTrapOffset + kPointerSize,
+                              kSize> BodyDescriptor;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunctionProxy);
+};
+
+
+// The JSWeakMap describes ECMAScript Harmony weak maps.
+class JSWeakMap: public JSObject {
+ public:
+  // [table]: the backing hash table mapping keys to values.
+  DECL_ACCESSORS(table, ObjectHashTable)
+
+  // [next]: linked list of encountered weak maps during GC.
+  DECL_ACCESSORS(next, Object)
+
+  // Unchecked accessors to be used during GC.
+  inline ObjectHashTable* unchecked_table();
+
+  // Casting.
+  static inline JSWeakMap* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void JSWeakMapPrint() {
+    JSWeakMapPrint(stdout);
+  }
+  void JSWeakMapPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSWeakMapVerify();
+#endif
+
+  static const int kTableOffset = JSObject::kHeaderSize;
+  static const int kNextOffset = kTableOffset + kPointerSize;
+  static const int kSize = kNextOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
+};
+
+
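
The [next] field is documented as a linked list of weak maps encountered during GC. The usual pattern that such a field suggests is sketched below with invented toy types (this is not the collector's actual code): the marker threads every live weak map onto an intrusive list and a later pass walks that list, e.g. to clear entries whose keys were not marked.

    #include <vector>

    // Toy weak map with an intrusive 'next' link, mirroring the [table]/[next]
    // fields above.  The visitor threads encountered maps onto a list head.
    struct ToyWeakMap {
      std::vector<int> table;      // stands in for the backing ObjectHashTable
      ToyWeakMap* next = nullptr;  // threaded by the collector while marking
    };

    struct ToyCollector {
      ToyWeakMap* encountered = nullptr;

      void VisitWeakMap(ToyWeakMap* map) {   // called while marking
        map->next = encountered;
        encountered = map;
      }

      int ProcessWeakMaps() {                // called after marking
        int processed = 0;
        for (ToyWeakMap* m = encountered; m != nullptr; m = m->next) ++processed;
        encountered = nullptr;
        return processed;
      }
    };

    int main() {
      ToyWeakMap a, b;
      ToyCollector gc;
      gc.VisitWeakMap(&a);
      gc.VisitWeakMap(&b);
      return gc.ProcessWeakMaps() == 2 ? 0 : 1;
    }
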
+// Foreign describes objects pointing from JavaScript to C structures.
+// Since they cannot contain references to JS HeapObjects, they can be
+// placed in old_data_space.
+class Foreign: public HeapObject {
+ public:
+  // [address]: field containing the address.
+  inline Address address();
+  inline void set_address(Address value);
+
+  // Casting.
+  static inline Foreign* cast(Object* obj);
+
+  // Dispatched behavior.
+  inline void ForeignIterateBody(ObjectVisitor* v);
+
+  template<typename StaticVisitor>
+  inline void ForeignIterateBody();
+
+#ifdef OBJECT_PRINT
+  inline void ForeignPrint() {
+    ForeignPrint(stdout);
+  }
+  void ForeignPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void ForeignVerify();
 #endif
 
   // Layout description.
 
-  static const int kProxyOffset = HeapObject::kHeaderSize;
-  static const int kSize = kProxyOffset + kPointerSize;
+  static const int kAddressOffset = HeapObject::kHeaderSize;
+  static const int kSize = kAddressOffset + kPointerSize;
 
-  STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
+  STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
 
  private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
 };
 
 
@@ -6351,6 +7237,7 @@
   // If the bit is set, object instances created by this function
   // requires access check.
   DECL_BOOLEAN_ACCESSORS(needs_access_check)
+  DECL_BOOLEAN_ACCESSORS(read_only_prototype)
 
   static inline FunctionTemplateInfo* cast(Object* obj);
 
@@ -6390,6 +7277,7 @@
   static const int kHiddenPrototypeBit   = 0;
   static const int kUndetectableBit      = 1;
   static const int kNeedsAccessCheckBit  = 2;
+  static const int kReadOnlyPrototypeBit = 3;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
 };
diff --git a/src/parser.cc b/src/parser.cc
index ce9b7c3..90d5c91 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -28,8 +28,9 @@
 #include "v8.h"
 
 #include "api.h"
-#include "ast.h"
+#include "ast-inl.h"
 #include "bootstrapper.h"
+#include "char-predicates-inl.h"
 #include "codegen.h"
 #include "compiler.h"
 #include "func-name-inferrer.h"
@@ -38,11 +39,10 @@
 #include "platform.h"
 #include "preparser.h"
 #include "runtime.h"
+#include "scanner-character-streams.h"
 #include "scopeinfo.h"
 #include "string-stream.h"
 
-#include "ast-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -129,7 +129,7 @@
 void RegExpBuilder::AddCharacter(uc16 c) {
   pending_empty_ = false;
   if (characters_ == NULL) {
-    characters_ = new ZoneList<uc16>(4);
+    characters_ = new(zone()) ZoneList<uc16>(4);
   }
   characters_->Add(c);
   LAST(ADD_CHAR);
@@ -413,6 +413,7 @@
   return result;
 }
 
+
 // ----------------------------------------------------------------------------
 // Target is a support class to facilitate manipulation of the
 // Parser's target_stack_ (the stack of potential 'break' and
@@ -533,12 +534,11 @@
   parser->top_scope_ = scope;
   parser->lexical_scope_ = this;
   parser->with_nesting_level_ = 0;
-  isolate->set_ast_node_id(AstNode::kFunctionEntryId + 1);
+  isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
 }
 
 
 LexicalScope::~LexicalScope() {
-  parser_->top_scope_->Leave();
   parser_->top_scope_ = previous_scope_;
   parser_->lexical_scope_ = lexical_scope_parent_;
   parser_->with_nesting_level_ = previous_with_nesting_level_;
@@ -586,7 +586,8 @@
       pre_data_(pre_data),
       fni_(NULL),
       stack_overflow_(false),
-      parenthesized_function_(false) {
+      parenthesized_function_(false),
+      harmony_block_scoping_(false) {
   AstNode::ResetIds();
 }
 
@@ -594,11 +595,11 @@
 FunctionLiteral* Parser::ParseProgram(Handle<String> source,
                                       bool in_global_context,
                                       StrictModeFlag strict_mode) {
-  CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+  ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(isolate()->counters()->parse());
   isolate()->counters()->total_parse_size()->Increment(source->length());
-  fni_ = new(zone()) FuncNameInferrer();
+  fni_ = new(zone()) FuncNameInferrer(isolate());
 
   // Initialize parser state.
   source->TryFlatten();
@@ -641,15 +642,21 @@
     if (strict_mode == kStrictMode) {
       top_scope_->EnableStrictMode();
     }
-    ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
+    ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
     ParseSourceElements(body, Token::EOS, &ok);
     if (ok && top_scope_->is_strict_mode()) {
       CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
     }
+
+    if (ok && harmony_block_scoping_) {
+      CheckConflictingVarDeclarations(scope, &ok);
+    }
+
     if (ok) {
       result = new(zone()) FunctionLiteral(
+          isolate(),
           no_name,
           top_scope_,
           body,
@@ -660,7 +667,8 @@
           0,
           0,
           source->length(),
-          false);
+          FunctionLiteral::ANONYMOUS_EXPRESSION,
+          false);  // Does not have duplicate parameters.
     } else if (stack_overflow_) {
       isolate()->StackOverflow();
     }
@@ -676,7 +684,7 @@
 }
 
 FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
-  CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+  ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(isolate()->counters()->parse_lazy());
   Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -709,7 +717,7 @@
   ASSERT(target_stack_ == NULL);
 
   Handle<String> name(String::cast(shared_info->name()));
-  fni_ = new(zone()) FuncNameInferrer();
+  fni_ = new(zone()) FuncNameInferrer(isolate());
   fni_->PushEnclosingName(name);
 
   mode_ = PARSE_EAGERLY;
@@ -719,7 +727,6 @@
 
   {
     // Parse the function literal.
-    Handle<String> no_name = isolate()->factory()->empty_symbol();
     Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
     if (!info->closure().is_null()) {
       scope = Scope::DeserializeScopeChain(info, scope);
@@ -730,12 +737,17 @@
       top_scope_->EnableStrictMode();
     }
 
-    FunctionLiteralType type =
-        shared_info->is_expression() ? EXPRESSION : DECLARATION;
+    FunctionLiteral::Type type = shared_info->is_expression()
+        ? (shared_info->is_anonymous()
+              ? FunctionLiteral::ANONYMOUS_EXPRESSION
+              : FunctionLiteral::NAMED_EXPRESSION)
+        : FunctionLiteral::DECLARATION;
     bool ok = true;
     result = ParseFunctionLiteral(name,
-                                  false,    // Strict mode name already checked.
-                                  RelocInfo::kNoPosition, type, &ok);
+                                  false,  // Strict mode name already checked.
+                                  RelocInfo::kNoPosition,
+                                  type,
+                                  &ok);
     // Make sure the results agree.
     ASSERT(ok == (result != NULL));
   }
@@ -805,6 +817,10 @@
   isolate()->Throw(*result, &location);
 }
 
+void Parser::SetHarmonyBlockScoping(bool block_scoping) {
+  scanner().SetHarmonyBlockScoping(block_scoping);
+  harmony_block_scoping_ = block_scoping;
+}
 
 // Base class containing common code for the different finder classes used by
 // the parser.
@@ -824,14 +840,24 @@
 // form expr.a = ...; expr.b = ...; etc.
 class InitializationBlockFinder : public ParserFinder {
  public:
-  InitializationBlockFinder()
-    : first_in_block_(NULL), last_in_block_(NULL), block_size_(0) {}
+  // We find and mark the initialization blocks in top level
+  // non-looping code only. This is because the optimization prevents
+  // reuse of the map transitions, so it should be used only for code
+  // that will only be run once.
+  InitializationBlockFinder(Scope* top_scope, Target* target)
+      : enabled_(top_scope->DeclarationScope()->is_global_scope() &&
+                 !IsLoopTarget(target)),
+        first_in_block_(NULL),
+        last_in_block_(NULL),
+        block_size_(0) {}
 
   ~InitializationBlockFinder() {
+    if (!enabled_) return;
     if (InBlock()) EndBlock();
   }
 
   void Update(Statement* stat) {
+    if (!enabled_) return;
     Assignment* assignment = AsAssignment(stat);
     if (InBlock()) {
       if (BlockContinues(assignment)) {
@@ -852,6 +878,14 @@
   // the overhead exceeds the savings below this limit.
   static const int kMinInitializationBlock = 3;
 
+  static bool IsLoopTarget(Target* target) {
+    while (target != NULL) {
+      if (target->node()->AsIterationStatement() != NULL) return true;
+      target = target->previous();
+    }
+    return false;
+  }
+
   // Returns true if the expressions appear to denote the same object.
   // In the context of initialization blocks, we only consider expressions
   // of the form 'expr.x' or expr["x"].
@@ -914,6 +948,7 @@
 
   bool InBlock() { return first_in_block_ != NULL; }
 
+  const bool enabled_;
   Assignment* first_in_block_;
   Assignment* last_in_block_;
   int block_size_;
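
The constructor above disables the finder outside top-level, non-looping code, with IsLoopTarget() walking the target chain to detect an enclosing loop. What the finder ultimately marks is a run of at least kMinInitializationBlock consecutive assignments to properties of the same object. A self-contained sketch of that grouping idea with invented data types (the real finder works on Assignment AST nodes, not strings):

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    // Each entry stands for one statement of the form receiver.property = value.
    using Assign = std::pair<std::string, std::string>;  // (receiver, property)

    constexpr int kMinInitializationBlock = 3;  // same threshold as above

    // Returns the number of "initialization blocks": maximal runs of assignments
    // to the same receiver that are long enough to be worth marking.
    int CountInitializationBlocks(const std::vector<Assign>& stmts) {
      int blocks = 0;
      std::size_t i = 0;
      while (i < stmts.size()) {
        std::size_t j = i + 1;
        while (j < stmts.size() && stmts[j].first == stmts[i].first) ++j;
        if (static_cast<int>(j - i) >= kMinInitializationBlock) ++blocks;
        i = j;
      }
      return blocks;
    }

    int main() {
      std::vector<Assign> stmts = {
          {"expr", "a"}, {"expr", "b"}, {"expr", "c"},  // one block of size 3
          {"other", "x"}};                              // too short to count
      return CountInitializationBlocks(stmts) == 1 ? 0 : 1;
    }
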
@@ -1051,9 +1086,10 @@
     if (names_ == NULL) {
       ASSERT(assigned_arguments_ == NULL);
       ASSERT(assigned_constants_ == NULL);
-      names_ = new ZoneStringList(4);
-      assigned_arguments_ = new ZoneList<int>(4);
-      assigned_constants_ = new ZoneObjectList(4);
+      Zone* zone = isolate_->zone();
+      names_ = new(zone) ZoneStringList(4);
+      assigned_arguments_ = new(zone) ZoneList<int>(4);
+      assigned_constants_ = new(zone) ZoneObjectList(4);
     }
   }
 
@@ -1065,6 +1101,25 @@
 };
 
 
+Statement* Parser::ParseSourceElement(ZoneStringList* labels,
+                                      bool* ok) {
+  if (peek() == Token::FUNCTION) {
+    // FunctionDeclaration is only allowed in the context of SourceElements
+    // (ECMA-262, 5th edition, clause 14):
+    // SourceElement:
+    //    Statement
+    //    FunctionDeclaration
+    // A common language extension is to allow a function declaration in
+    // place of any statement. This language extension is disabled in strict
+    // mode.
+    return ParseFunctionDeclaration(ok);
+  } else if (peek() == Token::LET) {
+    return ParseVariableStatement(kSourceElement, ok);
+  } else {
+    return ParseStatement(labels, ok);
+  }
+}
+
+
 void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
                                   int end_token,
                                   bool* ok) {
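
The new ParseSourceElement() makes 'let' declarations legal only where source elements are expected (program top level, a function body, or a harmony block); the kSourceElement/kStatement/kForStatement contexts enforced further down reject forms such as `if (c) let x;` with the "unprotected_let" message. A minimal dispatch sketch under those assumptions (toy enums and string results, not the parser's real types):

    #include <stdexcept>
    #include <string>

    enum class Tok { FUNCTION, LET, OTHER };
    enum class Context { kSourceElement, kStatement, kForStatement };

    // Toy version of ParseVariableStatement: a bare 'let' is rejected when it
    // appears as an ordinary statement (e.g. `if (c) let x;`).
    std::string ParseVariableStatement(Context ctx, Tok peeked) {
      if (peeked == Tok::LET && ctx == Context::kStatement) {
        throw std::runtime_error("unprotected_let");
      }
      return "VariableDeclarations";
    }

    // Toy version of the dispatch above: source-element positions admit both
    // function declarations and 'let'; everything else falls back to Statement.
    std::string ParseSourceElement(Tok peeked) {
      if (peeked == Tok::FUNCTION) return "FunctionDeclaration";
      if (peeked == Tok::LET) {
        return ParseVariableStatement(Context::kSourceElement, peeked);
      }
      return "Statement";
    }

    int main() {
      bool ok = ParseSourceElement(Tok::LET) == "VariableDeclarations";
      try {
        ParseVariableStatement(Context::kStatement, Tok::LET);  // `if (c) let x;`
        ok = false;
      } catch (const std::runtime_error&) {
        // expected: unprotected_let
      }
      return ok ? 0 : 1;
    }
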
@@ -1078,7 +1133,7 @@
   TargetScope scope(&this->target_stack_);
 
   ASSERT(processor != NULL);
-  InitializationBlockFinder block_finder;
+  InitializationBlockFinder block_finder(top_scope_, target_stack_);
   ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
   bool directive_prologue = true;     // Parsing directive prologue.
 
@@ -1088,21 +1143,7 @@
     }
 
     Scanner::Location token_loc = scanner().peek_location();
-
-    Statement* stat;
-    if (peek() == Token::FUNCTION) {
-      // FunctionDeclaration is only allowed in the context of SourceElements
-      // (Ecma 262 5th Edition, clause 14):
-      // SourceElement:
-      //    Statement
-      //    FunctionDeclaration
-      // Common language extension is to allow function declaration in place
-      // of any statement. This language extension is disabled in strict mode.
-      stat = ParseFunctionDeclaration(CHECK_OK);
-    } else {
-      stat = ParseStatement(NULL, CHECK_OK);
-    }
-
+    Statement* stat = ParseSourceElement(NULL, CHECK_OK);
     if (stat == NULL || stat->IsEmpty()) {
       directive_prologue = false;   // End of directive prologue.
       continue;
@@ -1133,12 +1174,7 @@
       }
     }
 
-    // We find and mark the initialization blocks on top level code only.
-    // This is because the optimization prevents reuse of the map transitions,
-    // so it should be used only for code that will only be run once.
-    if (top_scope_->is_global_scope()) {
-      block_finder.Update(stat);
-    }
+    block_finder.Update(stat);
     // Find and mark all assignments to named properties in this (this.x =)
     if (top_scope_->is_function_scope()) {
       this_property_assignment_finder.Update(top_scope_, stat);
@@ -1195,7 +1231,7 @@
 
     case Token::CONST:  // fall through
     case Token::VAR:
-      stmt = ParseVariableStatement(ok);
+      stmt = ParseVariableStatement(kStatement, ok);
       break;
 
     case Token::SEMICOLON:
@@ -1248,7 +1284,7 @@
       // one must take great care not to treat it as a
       // fall-through. It is much easier just to wrap the entire
       // try-statement in a statement block and put the labels there
-      Block* result = new(zone()) Block(labels, 1, false);
+      Block* result = new(zone()) Block(isolate(), labels, 1, false);
       Target target(&this->target_stack_, result);
       TryStatement* statement = ParseTryStatement(CHECK_OK);
       if (statement) {
@@ -1270,9 +1306,6 @@
       return ParseFunctionDeclaration(ok);
     }
 
-    case Token::NATIVE:
-      return ParseNativeDeclaration(ok);
-
     case Token::DEBUGGER:
       stmt = ParseDebuggerStatement(ok);
       break;
@@ -1293,9 +1326,9 @@
                                bool resolve,
                                bool* ok) {
   Variable* var = NULL;
-  // If we are inside a function, a declaration of a variable
-  // is a truly local variable, and the scope of the variable
-  // is always the function scope.
+  // If we are inside a function, a declaration of a var/const variable is a
+  // truly local variable, and the scope of the variable is always the function
+  // scope.
 
   // If a function scope exists, then we can statically declare this
   // variable and also set its mode. In any case, a Declaration node
@@ -1303,28 +1336,54 @@
   // to the corresponding activation frame at runtime if necessary.
   // For instance declarations inside an eval scope need to be added
   // to the calling function context.
-  if (top_scope_->is_function_scope()) {
+  // Similarly, strict mode eval scope does not leak variable declarations to
+  // the caller's scope so we declare all locals, too.
+
+  Scope* declaration_scope = mode == Variable::LET ? top_scope_
+      : top_scope_->DeclarationScope();
+  if (declaration_scope->is_function_scope() ||
+      declaration_scope->is_strict_mode_eval_scope() ||
+      declaration_scope->is_block_scope()) {
     // Declare the variable in the function scope.
-    var = top_scope_->LocalLookup(name);
+    var = declaration_scope->LocalLookup(name);
     if (var == NULL) {
       // Declare the name.
-      var = top_scope_->DeclareLocal(name, mode, Scope::VAR_OR_CONST);
+      var = declaration_scope->DeclareLocal(name, mode);
     } else {
-      // The name was declared before; check for conflicting
-      // re-declarations. If the previous declaration was a const or the
-      // current declaration is a const then we have a conflict. There is
-      // similar code in runtime.cc in the Declare functions.
-      if ((mode == Variable::CONST) || (var->mode() == Variable::CONST)) {
-        // We only have vars and consts in declarations.
+      // The name was declared in this scope before; check for conflicting
+      // re-declarations. We have a conflict if either of the declarations is
+      // not a var. There is similar code in runtime.cc in the Declare
+      // functions. The function CheckNonConflictingScope checks for conflicting
+      // var and let bindings from different scopes whereas this is a check for
+      // conflicting declarations within the same scope. This check also covers
+      //
+      // function () { let x; { var x; } }
+      //
+      // because the var declaration is hoisted to the function scope where 'x'
+      // is already bound.
+      if ((mode != Variable::VAR) || (var->mode() != Variable::VAR)) {
+        // We only have vars, consts and lets in declarations.
         ASSERT(var->mode() == Variable::VAR ||
-               var->mode() == Variable::CONST);
-        const char* type = (var->mode() == Variable::VAR) ? "var" : "const";
+               var->mode() == Variable::CONST ||
+               var->mode() == Variable::LET);
+        if (harmony_block_scoping_) {
+          // In harmony mode we treat re-declarations as early errors. See
+          // ES5 16 for a definition of early errors.
+          SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
+          const char* elms[2] = { "Variable", *c_string };
+          Vector<const char*> args(elms, 2);
+          ReportMessage("redeclaration", args);
+          *ok = false;
+          return NULL;
+        }
+        const char* type = (var->mode() == Variable::VAR) ? "var" :
+                           (var->mode() == Variable::CONST) ? "const" : "let";
         Handle<String> type_string =
             isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
         Expression* expression =
             NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
                               type_string, name);
-        top_scope_->SetIllegalRedeclaration(expression);
+        declaration_scope->SetIllegalRedeclaration(expression);
       }
     }
   }
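
The condition above treats a same-scope re-declaration as a conflict whenever either binding is not a plain var, and under harmony block scoping the conflict becomes an early (parse-time) "redeclaration" error instead of the deferred runtime TypeError installed via SetIllegalRedeclaration(). A compact sketch of just that predicate (invented helper, not parser code):

    #include <cassert>

    enum class Mode { VAR, CONST, LET };

    // Mirrors `(mode != Variable::VAR) || (var->mode() != Variable::VAR)`:
    // two 'var's may coexist, but any combination involving const or let
    // conflicts within the same scope.
    bool ConflictsInSameScope(Mode existing, Mode incoming) {
      return existing != Mode::VAR || incoming != Mode::VAR;
    }

    int main() {
      assert(!ConflictsInSameScope(Mode::VAR, Mode::VAR));   // var x; var x;    -> ok
      assert(ConflictsInSameScope(Mode::LET, Mode::VAR));    // let x; { var x; } (hoisted)
      assert(ConflictsInSameScope(Mode::VAR, Mode::CONST));  // var x; const x;  -> conflict
      return 0;
    }
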
@@ -1345,14 +1404,20 @@
   // semantic issue as long as we keep the source order, but it may be
   // a performance issue since it may lead to repeated
   // Runtime::DeclareContextSlot() calls.
-  VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
-  top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
+  VariableProxy* proxy = declaration_scope->NewUnresolved(
+      name, false, scanner().location().beg_pos);
+  declaration_scope->AddDeclaration(
+      new(zone()) Declaration(proxy, mode, fun, top_scope_));
 
   // For global const variables we bind the proxy to a variable.
-  if (mode == Variable::CONST && top_scope_->is_global_scope()) {
+  if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
-    var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
+    var = new(zone()) Variable(declaration_scope,
+                               name,
+                               Variable::CONST,
+                               true,
+                               kind);
   }
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -1390,13 +1455,6 @@
 // declaration is resolved by looking up the function through a
 // callback provided by the extension.
 Statement* Parser::ParseNativeDeclaration(bool* ok) {
-  if (extension_ == NULL) {
-    ReportUnexpectedToken(Token::NATIVE);
-    *ok = false;
-    return NULL;
-  }
-
-  Expect(Token::NATIVE, CHECK_OK);
   Expect(Token::FUNCTION, CHECK_OK);
   Handle<String> name = ParseIdentifier(CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
@@ -1415,7 +1473,7 @@
   // isn't lazily compiled. The extension structures are only
   // accessible while parsing the first time not when reparsing
   // because of lazy compilation.
-  top_scope_->ForceEagerCompilation();
+  top_scope_->DeclarationScope()->ForceEagerCompilation();
 
   // Compute the function template for the native function.
   v8::Handle<v8::FunctionTemplate> fun_template =
@@ -1441,10 +1499,10 @@
   // introduced dynamically when we meet their declarations, whereas
   // other functions are setup when entering the surrounding scope.
   SharedFunctionInfoLiteral* lit =
-      new(zone()) SharedFunctionInfoLiteral(shared);
+      new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
   VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
   return new(zone()) ExpressionStatement(new(zone()) Assignment(
-      Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
+      isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
 }
 
 
@@ -1453,22 +1511,26 @@
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
   int function_token_position = scanner().location().beg_pos;
-  bool is_reserved = false;
-  Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
+  bool is_strict_reserved = false;
+  Handle<String> name = ParseIdentifierOrStrictReservedWord(
+      &is_strict_reserved, CHECK_OK);
   FunctionLiteral* fun = ParseFunctionLiteral(name,
-                                              is_reserved,
+                                              is_strict_reserved,
                                               function_token_position,
-                                              DECLARATION,
+                                              FunctionLiteral::DECLARATION,
                                               CHECK_OK);
   // Even if we're not at the top-level of the global or a function
   // scope, we treat it as such and introduce the function with its
   // initial value upon entering the corresponding scope.
-  Declare(name, Variable::VAR, fun, true, CHECK_OK);
+  Variable::Mode mode = harmony_block_scoping_ ? Variable::LET : Variable::VAR;
+  Declare(name, mode, fun, true, CHECK_OK);
   return EmptyStatement();
 }
 
 
 Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
+  if (harmony_block_scoping_) return ParseScopedBlock(labels, ok);
+
   // Block ::
   //   '{' Statement* '}'
 
@@ -1476,24 +1538,68 @@
   // (ECMA-262, 3rd, 12.2)
   //
   // Construct block expecting 16 statements.
-  Block* result = new(zone()) Block(labels, 16, false);
+  Block* result = new(zone()) Block(isolate(), labels, 16, false);
   Target target(&this->target_stack_, result);
   Expect(Token::LBRACE, CHECK_OK);
+  InitializationBlockFinder block_finder(top_scope_, target_stack_);
   while (peek() != Token::RBRACE) {
     Statement* stat = ParseStatement(NULL, CHECK_OK);
-    if (stat && !stat->IsEmpty()) result->AddStatement(stat);
+    if (stat && !stat->IsEmpty()) {
+      result->AddStatement(stat);
+      block_finder.Update(stat);
+    }
   }
   Expect(Token::RBRACE, CHECK_OK);
   return result;
 }
 
 
-Block* Parser::ParseVariableStatement(bool* ok) {
+Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
+  // Construct block expecting 16 statements.
+  Block* body = new(zone()) Block(isolate(), labels, 16, false);
+  Scope* saved_scope = top_scope_;
+  Scope* block_scope = NewScope(top_scope_,
+                                Scope::BLOCK_SCOPE,
+                                inside_with());
+  if (top_scope_->is_strict_mode()) {
+    block_scope->EnableStrictMode();
+  }
+  top_scope_ = block_scope;
+
+  // Parse the statements and collect escaping labels.
+  TargetCollector collector;
+  Target target(&this->target_stack_, &collector);
+  Expect(Token::LBRACE, CHECK_OK);
+  {
+    Target target_body(&this->target_stack_, body);
+    InitializationBlockFinder block_finder(top_scope_, target_stack_);
+
+    while (peek() != Token::RBRACE) {
+      Statement* stat = ParseSourceElement(NULL, CHECK_OK);
+      if (stat && !stat->IsEmpty()) {
+        body->AddStatement(stat);
+        block_finder.Update(stat);
+      }
+    }
+  }
+  Expect(Token::RBRACE, CHECK_OK);
+  top_scope_ = saved_scope;
+
+  block_scope = block_scope->FinalizeBlockScope();
+  body->set_block_scope(block_scope);
+  return body;
+}
+
+
+Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
+                                      bool* ok) {
   // VariableStatement ::
   //   VariableDeclarations ';'
 
-  Expression* dummy;  // to satisfy the ParseVariableDeclarations() signature
-  Block* result = ParseVariableDeclarations(true, &dummy, CHECK_OK);
+  Handle<String> ignore;
+  Block* result = ParseVariableDeclarations(var_context,
+                                            &ignore,
+                                            CHECK_OK);
   ExpectSemicolon(CHECK_OK);
   return result;
 }
@@ -1510,14 +1616,20 @@
 // *var is untouched; in particular, it is the caller's responsibility
 // to initialize it properly. This mechanism is used for the parsing
 // of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(bool accept_IN,
-                                         Expression** var,
+Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                         Handle<String>* out,
                                          bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
 
   Variable::Mode mode = Variable::VAR;
+  // True if the binding needs initialization. 'let' and 'const' declared
+  // bindings are created uninitialized by their declaration nodes and
+  // need initialization. 'var' declared bindings are always initialized
+  // immediately by their declaration nodes.
+  bool needs_init = false;
   bool is_const = false;
+  Token::Value init_op = Token::INIT_VAR;
   if (peek() == Token::VAR) {
     Consume(Token::VAR);
   } else if (peek() == Token::CONST) {
@@ -1529,13 +1641,29 @@
     }
     mode = Variable::CONST;
     is_const = true;
+    needs_init = true;
+    init_op = Token::INIT_CONST;
+  } else if (peek() == Token::LET) {
+    Consume(Token::LET);
+    if (var_context != kSourceElement &&
+        var_context != kForStatement) {
+      ASSERT(var_context == kStatement);
+      ReportMessage("unprotected_let", Vector<const char*>::empty());
+      *ok = false;
+      return NULL;
+    }
+    mode = Variable::LET;
+    needs_init = true;
+    init_op = Token::INIT_LET;
   } else {
     UNREACHABLE();  // by current callers
   }
 
-  // The scope of a variable/const declared anywhere inside a function
+  Scope* declaration_scope = mode == Variable::LET
+      ? top_scope_ : top_scope_->DeclarationScope();
+  // The scope of a var/const declared variable anywhere inside a function
   // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
-  // transform a source-level variable/const declaration into a (Function)
+  // transform a source-level var/const declaration into a (Function)
   // Scope declaration, and rewrite the source-level initialization into an
   // assignment statement. We use a block to collect multiple assignments.
   //
@@ -1546,19 +1674,19 @@
   // is inside an initializer block, it is ignored.
   //
   // Create new block with one expected declaration.
-  Block* block = new(zone()) Block(NULL, 1, true);
-  VariableProxy* last_var = NULL;  // the last variable declared
+  Block* block = new(zone()) Block(isolate(), NULL, 1, true);
   int nvars = 0;  // the number of variables declared
+  Handle<String> name;
   do {
     if (fni_ != NULL) fni_->Enter();
 
     // Parse variable name.
     if (nvars > 0) Consume(Token::COMMA);
-    Handle<String> name = ParseIdentifier(CHECK_OK);
+    name = ParseIdentifier(CHECK_OK);
     if (fni_ != NULL) fni_->PushVariableName(name);
 
     // Strict mode variables may not be named eval or arguments
-    if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
+    if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_var_name", Vector<const char*>::empty());
       *ok = false;
       return NULL;
@@ -1576,11 +1704,10 @@
     // If we have a const declaration, in an inner scope, the proxy is always
     // bound to the declared variable (independent of possibly surrounding with
     // statements).
-    last_var = Declare(name, mode, NULL,
-                       is_const /* always bound for CONST! */,
-                       CHECK_OK);
+    Declare(name, mode, NULL, is_const /* always bound for CONST! */,
+            CHECK_OK);
     nvars++;
-    if (top_scope_->num_var_or_const() > kMaxNumFunctionLocals) {
+    if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
       ReportMessageAt(scanner().location(), "too_many_variables",
                       Vector<const char*>::empty());
       *ok = false;
@@ -1596,10 +1723,10 @@
     //
     //    var v; v = x;
     //
-    // In particular, we need to re-lookup 'v' as it may be a
-    // different 'v' than the 'v' in the declaration (if we are inside
-    // a 'with' statement that makes a object property with name 'v'
-    // visible).
+    // In particular, we need to re-lookup 'v' (in top_scope_, not
+    // declaration_scope) as it may be a different 'v' than the 'v' in the
+    // declaration (e.g., if we are inside a 'with' statement or 'catch'
+    // block).
     //
     // However, note that const declarations are different! A const
     // declaration of the form:
@@ -1614,19 +1741,25 @@
     // one - there is no re-lookup (see the last parameter of the
     // Declare() call above).
 
+    Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
     Expression* value = NULL;
     int position = -1;
     if (peek() == Token::ASSIGN) {
       Expect(Token::ASSIGN, CHECK_OK);
       position = scanner().location().beg_pos;
-      value = ParseAssignmentExpression(accept_IN, CHECK_OK);
+      value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
       // Don't infer if it is "a = function(){...}();"-like expression.
-      if (fni_ != NULL && value->AsCall() == NULL) fni_->Infer();
+      if (fni_ != NULL &&
+          value->AsCall() == NULL &&
+          value->AsCallNew() == NULL) {
+        fni_->Infer();
+      } else {
+        fni_->RemoveLastFunction();
+      }
     }
 
-    // Make sure that 'const c' actually initializes 'c' to undefined
-    // even though it seems like a stupid thing to do.
-    if (value == NULL && is_const) {
+    // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
+    if (value == NULL && needs_init) {
       value = GetLiteralUndefined();
     }
 
@@ -1650,11 +1783,11 @@
     // browsers where the global object (window) has lots of
     // properties defined in prototype objects.
 
-    if (top_scope_->is_global_scope()) {
+    if (initialization_scope->is_global_scope()) {
       // Compute the arguments for the runtime call.
-      ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
+      ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
       // We have at least 1 parameter.
-      arguments->Add(new(zone()) Literal(name));
+      arguments->Add(NewLiteral(name));
       CallRuntime* initialize;
 
       if (is_const) {
@@ -1667,14 +1800,17 @@
         // the number of arguments (1 or 2).
         initialize =
             new(zone()) CallRuntime(
-              isolate()->factory()->InitializeConstGlobal_symbol(),
-              Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
-              arguments);
+                isolate(),
+                isolate()->factory()->InitializeConstGlobal_symbol(),
+                Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+                arguments);
       } else {
         // Add strict mode.
         // We may want to pass singleton to avoid Literal allocations.
-        arguments->Add(NewNumberLiteral(
-            top_scope_->is_strict_mode() ? kStrictMode : kNonStrictMode));
+        StrictModeFlag flag = initialization_scope->is_strict_mode()
+            ? kStrictMode
+            : kNonStrictMode;
+        arguments->Add(NewNumberLiteral(flag));
 
         // Be careful not to assign a value to the global variable if
         // we're in a with. The initialization value should not
@@ -1691,28 +1827,34 @@
         // the number of arguments (2 or 3).
         initialize =
             new(zone()) CallRuntime(
-              isolate()->factory()->InitializeVarGlobal_symbol(),
-              Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
-              arguments);
+                isolate(),
+                isolate()->factory()->InitializeVarGlobal_symbol(),
+                Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
+                arguments);
       }
 
       block->AddStatement(new(zone()) ExpressionStatement(initialize));
     }
 
-    // Add an assignment node to the initialization statement block if
-    // we still have a pending initialization value. We must distinguish
-    // between variables and constants: Variable initializations are simply
+    // Add an assignment node to the initialization statement block if we still
+    // have a pending initialization value. We must distinguish between
+    // different kinds of declarations: 'var' initializations are simply
     // assignments (with all the consequences if they are inside a 'with'
     // statement - they may change a 'with' object property). Constant
     // initializations always assign to the declared constant which is
     // always at the function scope level. This is only relevant for
     // dynamically looked-up variables and constants (the start context
     // for constant lookups is always the function context, while it is
-    // the top context for variables). Sigh...
+    // the top context for var-declared variables). Sigh...
+    // For 'let'-declared variables the initialization is in the same scope
+    // as the declaration. Thus dynamic lookups are unnecessary even if the
+    // block scope is inside a with.
     if (value != NULL) {
-      Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
+      bool in_with = mode == Variable::VAR ? inside_with() : false;
+      VariableProxy* proxy =
+          initialization_scope->NewUnresolved(name, in_with);
       Assignment* assignment =
-          new(zone()) Assignment(op, last_var, value, position);
+          new(zone()) Assignment(isolate(), init_op, proxy, value, position);
       if (block) {
         block->AddStatement(new(zone()) ExpressionStatement(assignment));
       }
@@ -1721,10 +1863,10 @@
     if (fni_ != NULL) fni_->Leave();
   } while (peek() == Token::COMMA);
 
-  if (!is_const && nvars == 1) {
-    // We have a single, non-const variable.
-    ASSERT(last_var != NULL);
-    *var = last_var;
+  // If there was a single non-const declaration, return it in the output
+  // parameter for possible use by for/in.
+  if (nvars == 1 && !is_const) {
+    *out = name;
   }
 
   return block;
@@ -1749,7 +1891,7 @@
   //   Identifier ':' Statement
   bool starts_with_idenfifier = peek_any_identifier();
   Expression* expr = ParseExpression(true, CHECK_OK);
-  if (peek() == Token::COLON && starts_with_idenfifier && expr &&
+  if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL &&
       expr->AsVariableProxy() != NULL &&
       !expr->AsVariableProxy()->is_this()) {
     // Expression is a single identifier, and not, e.g., a parenthesized
@@ -1762,14 +1904,14 @@
     // structured.  However, these are probably changes we want to
     // make later anyway so we should go back and fix this then.
     if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
-      SmartPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
+      SmartArrayPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
       const char* elms[2] = { "Label", *c_string };
       Vector<const char*> args(elms, 2);
       ReportMessage("redeclaration", args);
       *ok = false;
       return NULL;
     }
-    if (labels == NULL) labels = new ZoneStringList(4);
+    if (labels == NULL) labels = new(zone()) ZoneStringList(4);
     labels->Add(label);
     // Remove the "ghost" variable that turned out to be a label
     // from the top scope. This way, we don't try to resolve it
@@ -1779,6 +1921,20 @@
     return ParseStatement(labels, ok);
   }
 
+  // If we have an extension, we allow a native function declaration.
+  // A native function declaration starts with "native function" with
+  // no line-terminator between the two words.
+  if (extension_ != NULL &&
+      peek() == Token::FUNCTION &&
+      !scanner().HasAnyLineTerminatorBeforeNext() &&
+      expr != NULL &&
+      expr->AsVariableProxy() != NULL &&
+      expr->AsVariableProxy()->name()->Equals(
+          isolate()->heap()->native_symbol()) &&
+      !scanner().literal_contains_escapes()) {
+    return ParseNativeDeclaration(ok);
+  }
+
   // Parsed expression statement.
   ExpectSemicolon(CHECK_OK);
   return new(zone()) ExpressionStatement(expr);
@@ -1801,7 +1957,8 @@
   } else {
     else_statement = EmptyStatement();
   }
-  return new(zone()) IfStatement(condition, then_statement, else_statement);
+  return new(zone()) IfStatement(
+      isolate(), condition, then_statement, else_statement);
 }
 
 
@@ -1812,7 +1969,7 @@
   Expect(Token::CONTINUE, CHECK_OK);
   Handle<String> label = Handle<String>::null();
   Token::Value tok = peek();
-  if (!scanner().has_line_terminator_before_next() &&
+  if (!scanner().HasAnyLineTerminatorBeforeNext() &&
       tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
     label = ParseIdentifier(CHECK_OK);
   }
@@ -1842,7 +1999,7 @@
   Expect(Token::BREAK, CHECK_OK);
   Handle<String> label;
   Token::Value tok = peek();
-  if (!scanner().has_line_terminator_before_next() &&
+  if (!scanner().HasAnyLineTerminatorBeforeNext() &&
       tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
     label = ParseIdentifier(CHECK_OK);
   }
@@ -1879,67 +2036,32 @@
   // reported (underlining).
   Expect(Token::RETURN, CHECK_OK);
 
+  Token::Value tok = peek();
+  Statement* result;
+  if (scanner().HasAnyLineTerminatorBeforeNext() ||
+      tok == Token::SEMICOLON ||
+      tok == Token::RBRACE ||
+      tok == Token::EOS) {
+    ExpectSemicolon(CHECK_OK);
+    result = new(zone()) ReturnStatement(GetLiteralUndefined());
+  } else {
+    Expression* expr = ParseExpression(true, CHECK_OK);
+    ExpectSemicolon(CHECK_OK);
+    result = new(zone()) ReturnStatement(expr);
+  }
+
   // An ECMAScript program is considered syntactically incorrect if it
   // contains a return statement that is not within the body of a
   // function. See ECMA-262, section 12.9, page 67.
   //
   // To be consistent with KJS we report the syntax error at runtime.
-  if (!top_scope_->is_function_scope()) {
+  Scope* declaration_scope = top_scope_->DeclarationScope();
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_eval_scope()) {
     Handle<String> type = isolate()->factory()->illegal_return_symbol();
     Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
     return new(zone()) ExpressionStatement(throw_error);
   }
-
-  Token::Value tok = peek();
-  if (scanner().has_line_terminator_before_next() ||
-      tok == Token::SEMICOLON ||
-      tok == Token::RBRACE ||
-      tok == Token::EOS) {
-    ExpectSemicolon(CHECK_OK);
-    return new(zone()) ReturnStatement(GetLiteralUndefined());
-  }
-
-  Expression* expr = ParseExpression(true, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-  return new(zone()) ReturnStatement(expr);
-}
-
-
-Block* Parser::WithHelper(Expression* obj,
-                          ZoneStringList* labels,
-                          bool is_catch_block,
-                          bool* ok) {
-  // Parse the statement and collect escaping labels.
-  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
-  TargetCollector collector(target_list);
-  Statement* stat;
-  { Target target(&this->target_stack_, &collector);
-    with_nesting_level_++;
-    top_scope_->RecordWithStatement();
-    stat = ParseStatement(labels, CHECK_OK);
-    with_nesting_level_--;
-  }
-  // Create resulting block with two statements.
-  // 1: Evaluate the with expression.
-  // 2: The try-finally block evaluating the body.
-  Block* result = new(zone()) Block(NULL, 2, false);
-
-  if (result != NULL) {
-    result->AddStatement(new(zone()) WithEnterStatement(obj, is_catch_block));
-
-    // Create body block.
-    Block* body = new(zone()) Block(NULL, 1, false);
-    body->AddStatement(stat);
-
-    // Create exit block.
-    Block* exit = new(zone()) Block(NULL, 1, false);
-    exit->AddStatement(new(zone()) WithExitStatement());
-
-    // Return a try-finally statement.
-    TryFinallyStatement* wrapper = new(zone()) TryFinallyStatement(body, exit);
-    wrapper->set_escaping_targets(collector.targets());
-    result->AddStatement(wrapper);
-  }
   return result;
 }
 
@@ -1960,7 +2082,11 @@
   Expression* expr = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
 
-  return WithHelper(expr, labels, false, CHECK_OK);
+  ++with_nesting_level_;
+  top_scope_->DeclarationScope()->RecordWithStatement();
+  Statement* stmt = ParseStatement(labels, CHECK_OK);
+  --with_nesting_level_;
+  return new(zone()) WithStatement(expr, stmt);
 }
 
 
@@ -1985,7 +2111,7 @@
   }
   Expect(Token::COLON, CHECK_OK);
   int pos = scanner().location().beg_pos;
-  ZoneList<Statement*>* statements = new ZoneList<Statement*>(5);
+  ZoneList<Statement*>* statements = new(zone()) ZoneList<Statement*>(5);
   while (peek() != Token::CASE &&
          peek() != Token::DEFAULT &&
          peek() != Token::RBRACE) {
@@ -1993,7 +2119,7 @@
     statements->Add(stat);
   }
 
-  return new(zone()) CaseClause(label, statements, pos);
+  return new(zone()) CaseClause(isolate(), label, statements, pos);
 }
 
 
@@ -2002,7 +2128,7 @@
   // SwitchStatement ::
   //   'switch' '(' Expression ')' '{' CaseClause* '}'
 
-  SwitchStatement* statement = new(zone()) SwitchStatement(labels);
+  SwitchStatement* statement = new(zone()) SwitchStatement(isolate(), labels);
   Target target(&this->target_stack_, statement);
 
   Expect(Token::SWITCH, CHECK_OK);
@@ -2011,7 +2137,7 @@
   Expect(Token::RPAREN, CHECK_OK);
 
   bool default_seen = false;
-  ZoneList<CaseClause*>* cases = new ZoneList<CaseClause*>(4);
+  ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4);
   Expect(Token::LBRACE, CHECK_OK);
   while (peek() != Token::RBRACE) {
     CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
@@ -2030,7 +2156,7 @@
 
   Expect(Token::THROW, CHECK_OK);
   int pos = scanner().location().beg_pos;
-  if (scanner().has_line_terminator_before_next()) {
+  if (scanner().HasAnyLineTerminatorBeforeNext()) {
     ReportMessage("newline_after_throw", Vector<const char*>::empty());
     *ok = false;
     return NULL;
@@ -2038,7 +2164,8 @@
   Expression* exception = ParseExpression(true, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
 
-  return new(zone()) ExpressionStatement(new(zone()) Throw(exception, pos));
+  return new(zone()) ExpressionStatement(
+      new(zone()) Throw(isolate(), exception, pos));
 }
 
 
@@ -2056,18 +2183,13 @@
 
   Expect(Token::TRY, CHECK_OK);
 
-  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
-  TargetCollector collector(target_list);
+  TargetCollector try_collector;
   Block* try_block;
 
-  { Target target(&this->target_stack_, &collector);
+  { Target target(&this->target_stack_, &try_collector);
     try_block = ParseBlock(NULL, CHECK_OK);
   }
 
-  Block* catch_block = NULL;
-  Variable* catch_var = NULL;
-  Block* finally_block = NULL;
-
   Token::Value tok = peek();
   if (tok != Token::CATCH && tok != Token::FINALLY) {
     ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
@@ -2076,18 +2198,19 @@
   }
 
   // If we can break out from the catch block and there is a finally block,
-  // then we will need to collect jump targets from the catch block. Since
-  // we don't know yet if there will be a finally block, we always collect
-  // the jump targets.
-  ZoneList<Label*>* catch_target_list = new ZoneList<Label*>(0);
-  TargetCollector catch_collector(catch_target_list);
-  bool has_catch = false;
+  // then we will need to collect escaping targets from the catch
+  // block. Since we don't know yet if there will be a finally block, we
+  // always collect the targets.
+  TargetCollector catch_collector;
+  Scope* catch_scope = NULL;
+  Variable* catch_variable = NULL;
+  Block* catch_block = NULL;
+  Handle<String> name;
   if (tok == Token::CATCH) {
-    has_catch = true;
     Consume(Token::CATCH);
 
     Expect(Token::LPAREN, CHECK_OK);
-    Handle<String> name = ParseIdentifier(CHECK_OK);
+    name = ParseIdentifier(CHECK_OK);
 
     if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_catch_variable", Vector<const char*>::empty());
@@ -2098,17 +2221,19 @@
     Expect(Token::RPAREN, CHECK_OK);
 
     if (peek() == Token::LBRACE) {
-      // Allocate a temporary for holding the finally state while
-      // executing the finally block.
-      catch_var =
-          top_scope_->NewTemporary(isolate()->factory()->catch_var_symbol());
-      Literal* name_literal = new(zone()) Literal(name);
-      VariableProxy* catch_var_use = new(zone()) VariableProxy(catch_var);
-      Expression* obj =
-          new(zone()) CatchExtensionObject(name_literal, catch_var_use);
-      { Target target(&this->target_stack_, &catch_collector);
-        catch_block = WithHelper(obj, NULL, true, CHECK_OK);
+      Target target(&this->target_stack_, &catch_collector);
+      catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
+      if (top_scope_->is_strict_mode()) {
+        catch_scope->EnableStrictMode();
       }
+      Variable::Mode mode = harmony_block_scoping_
+          ? Variable::LET : Variable::VAR;
+      catch_variable = catch_scope->DeclareLocal(name, mode);
+
+      Scope* saved_scope = top_scope_;
+      top_scope_ = catch_scope;
+      catch_block = ParseBlock(NULL, CHECK_OK);
+      top_scope_ = saved_scope;
     } else {
       Expect(Token::LBRACE, CHECK_OK);
     }
@@ -2116,45 +2241,48 @@
     tok = peek();
   }
 
-  if (tok == Token::FINALLY || !has_catch) {
+  Block* finally_block = NULL;
+  if (tok == Token::FINALLY || catch_block == NULL) {
     Consume(Token::FINALLY);
-    // Declare a variable for holding the finally state while
-    // executing the finally block.
     finally_block = ParseBlock(NULL, CHECK_OK);
   }
 
   // Simplify the AST nodes by converting:
-  //   'try { } catch { } finally { }'
+  //   'try B0 catch B1 finally B2'
   // to:
-  //   'try { try { } catch { } } finally { }'
+  //   'try { try B0 catch B1 } finally B2'
 
   if (catch_block != NULL && finally_block != NULL) {
-    VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
+    // If we have both, create an inner try/catch.
+    ASSERT(catch_scope != NULL && catch_variable != NULL);
     TryCatchStatement* statement =
-        new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
-    statement->set_escaping_targets(collector.targets());
-    try_block = new(zone()) Block(NULL, 1, false);
+        new(zone()) TryCatchStatement(try_block,
+                                      catch_scope,
+                                      catch_variable,
+                                      catch_block);
+    statement->set_escaping_targets(try_collector.targets());
+    try_block = new(zone()) Block(isolate(), NULL, 1, false);
     try_block->AddStatement(statement);
-    catch_block = NULL;
+    catch_block = NULL;  // Clear to indicate it's been handled.
   }
 
   TryStatement* result = NULL;
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
-    VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
+    ASSERT(catch_scope != NULL && catch_variable != NULL);
     result =
-        new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
-    result->set_escaping_targets(collector.targets());
+        new(zone()) TryCatchStatement(try_block,
+                                      catch_scope,
+                                      catch_variable,
+                                      catch_block);
   } else {
     ASSERT(finally_block != NULL);
     result = new(zone()) TryFinallyStatement(try_block, finally_block);
-    // Add the jump targets of the try block and the catch block.
-    for (int i = 0; i < collector.targets()->length(); i++) {
-      catch_collector.AddTarget(collector.targets()->at(i));
-    }
-    result->set_escaping_targets(catch_collector.targets());
+    // Combine the jump targets of the try block and the possible catch block.
+    try_collector.targets()->AddAll(*catch_collector.targets());
   }
 
+  result->set_escaping_targets(try_collector.targets());
   return result;
 }
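
The simplification noted above rewrites 'try B0 catch B1 finally B2' into 'try { try B0 catch B1 } finally B2', so later stages only ever see a pure try/catch or a pure try/finally. A toy AST sketch of that rewrite (invented node types, not the real statement classes):

    #include <memory>

    // Minimal stand-ins for the statement nodes involved.
    struct Stmt { virtual ~Stmt() = default; };
    struct Block : Stmt {};
    struct TryCatch : Stmt {
      std::unique_ptr<Stmt> try_block, catch_block;
    };
    struct TryFinally : Stmt {
      std::unique_ptr<Stmt> try_block, finally_block;
    };

    // try B0 catch B1 finally B2  =>  try { try B0 catch B1 } finally B2
    std::unique_ptr<Stmt> Desugar(std::unique_ptr<Stmt> b0,
                                  std::unique_ptr<Stmt> b1,
                                  std::unique_ptr<Stmt> b2) {
      auto inner = std::make_unique<TryCatch>();
      inner->try_block = std::move(b0);
      inner->catch_block = std::move(b1);
      auto outer = std::make_unique<TryFinally>();
      outer->try_block = std::move(inner);   // the inner try/catch is the body
      outer->finally_block = std::move(b2);
      return outer;
    }

    int main() {
      auto result = Desugar(std::make_unique<Block>(),
                            std::make_unique<Block>(),
                            std::make_unique<Block>());
      return dynamic_cast<TryFinally*>(result.get()) != nullptr ? 0 : 1;
    }
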
 
@@ -2164,7 +2292,7 @@
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
-  DoWhileStatement* loop = new(zone()) DoWhileStatement(labels);
+  DoWhileStatement* loop = new(zone()) DoWhileStatement(isolate(), labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::DO, CHECK_OK);
@@ -2195,7 +2323,7 @@
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
-  WhileStatement* loop = new(zone()) WhileStatement(labels);
+  WhileStatement* loop = new(zone()) WhileStatement(isolate(), labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::WHILE, CHECK_OK);
@@ -2219,11 +2347,13 @@
   Expect(Token::LPAREN, CHECK_OK);
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || peek() == Token::CONST) {
-      Expression* each = NULL;
+      Handle<String> name;
       Block* variable_statement =
-          ParseVariableDeclarations(false, &each, CHECK_OK);
-      if (peek() == Token::IN && each != NULL) {
-        ForInStatement* loop = new(zone()) ForInStatement(labels);
+          ParseVariableDeclarations(kForStatement, &name, CHECK_OK);
+
+      if (peek() == Token::IN && !name.is_null()) {
+        VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
+        ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2232,7 +2362,7 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         loop->Initialize(each, enumerable, body);
-        Block* result = new(zone()) Block(NULL, 2, false);
+        Block* result = new(zone()) Block(isolate(), NULL, 2, false);
         result->AddStatement(variable_statement);
         result->AddStatement(loop);
         // Parsed for-in loop w/ variable/const declaration.
@@ -2253,7 +2383,7 @@
               isolate()->factory()->invalid_lhs_in_for_in_symbol();
           expression = NewThrowReferenceError(type);
         }
-        ForInStatement* loop = new(zone()) ForInStatement(labels);
+        ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2272,7 +2402,7 @@
   }
 
   // Standard 'for' loop
-  ForStatement* loop = new(zone()) ForStatement(labels);
+  ForStatement* loop = new(zone()) ForStatement(isolate(), labels);
   Target target(&this->target_stack_, loop);
 
   // Parsed initializer at this point.
@@ -2308,7 +2438,8 @@
     Expect(Token::COMMA, CHECK_OK);
     int position = scanner().location().beg_pos;
     Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-    result = new(zone()) BinaryOperation(Token::COMMA, result, right, position);
+    result = new(zone()) BinaryOperation(
+        isolate(), Token::COMMA, result, right, position);
   }
   return result;
 }
@@ -2374,13 +2505,15 @@
     if ((op == Token::INIT_VAR
          || op == Token::INIT_CONST
          || op == Token::ASSIGN)
-        && (right->AsCall() == NULL)) {
+        && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
       fni_->Infer();
+    } else {
+      fni_->RemoveLastFunction();
     }
     fni_->Leave();
   }
 
-  return new(zone()) Assignment(op, expression, right, pos);
+  return new(zone()) Assignment(isolate(), op, expression, right, pos);
 }
 
 
@@ -2402,8 +2535,8 @@
   Expect(Token::COLON, CHECK_OK);
   int right_position = scanner().peek_location().beg_pos;
   Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-  return new(zone()) Conditional(expression, left, right,
-                         left_position, right_position);
+  return new(zone()) Conditional(
+      isolate(), expression, left, right, left_position, right_position);
 }
 
 
@@ -2490,12 +2623,12 @@
         x = NewCompareNode(cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
-          x = new(zone()) UnaryOperation(Token::NOT, x);
+          x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
         }
 
       } else {
         // We have a "normal" binary operation.
-        x = new(zone()) BinaryOperation(op, x, y, position);
+        x = new(zone()) BinaryOperation(isolate(), op, x, y, position);
       }
     }
   }
@@ -2512,15 +2645,15 @@
     bool is_strict = (op == Token::EQ_STRICT);
     Literal* x_literal = x->AsLiteral();
     if (x_literal != NULL && x_literal->IsNull()) {
-      return new(zone()) CompareToNull(is_strict, y);
+      return new(zone()) CompareToNull(isolate(), is_strict, y);
     }
 
     Literal* y_literal = y->AsLiteral();
     if (y_literal != NULL && y_literal->IsNull()) {
-      return new(zone()) CompareToNull(is_strict, x);
+      return new(zone()) CompareToNull(isolate(), is_strict, x);
     }
   }
-  return new(zone()) CompareOperation(op, x, y, position);
+  return new(zone()) CompareOperation(isolate(), op, x, y, position);
 }
 
 
@@ -2540,20 +2673,29 @@
   Token::Value op = peek();
   if (Token::IsUnaryOp(op)) {
     op = Next();
+    int position = scanner().location().beg_pos;
     Expression* expression = ParseUnaryExpression(CHECK_OK);
 
-    // Compute some expressions involving only number literals.
-    if (expression != NULL && expression->AsLiteral() &&
-        expression->AsLiteral()->handle()->IsNumber()) {
-      double value = expression->AsLiteral()->handle()->Number();
-      switch (op) {
-        case Token::ADD:
-          return expression;
-        case Token::SUB:
-          return NewNumberLiteral(-value);
-        case Token::BIT_NOT:
-          return NewNumberLiteral(~DoubleToInt32(value));
-        default: break;
+    if (expression != NULL && (expression->AsLiteral() != NULL)) {
+      Handle<Object> literal = expression->AsLiteral()->handle();
+      if (op == Token::NOT) {
+        // Convert the literal to a boolean condition and negate it.
+        bool condition = literal->ToBoolean()->IsTrue();
+        Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
+        return NewLiteral(result);
+      } else if (literal->IsNumber()) {
+        // Compute some expressions involving only number literals.
+        double value = literal->Number();
+        switch (op) {
+          case Token::ADD:
+            return expression;
+          case Token::SUB:
+            return NewNumberLiteral(-value);
+          case Token::BIT_NOT:
+            return NewNumberLiteral(~DoubleToInt32(value));
+          default:
+            break;
+        }
       }
     }
 
@@ -2567,7 +2709,7 @@
       }
     }
 
-    return new(zone()) UnaryOperation(op, expression);
+    return new(zone()) UnaryOperation(isolate(), op, expression, position);
 
   } else if (Token::IsCountOp(op)) {
     op = Next();
@@ -2588,7 +2730,8 @@
     }
 
     int position = scanner().location().beg_pos;
-    return new(zone()) CountOperation(op,
+    return new(zone()) CountOperation(isolate(),
+                                      op,
                                       true /* prefix */,
                                       expression,
                                       position);
@@ -2604,7 +2747,7 @@
   //   LeftHandSideExpression ('++' | '--')?
 
   Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
-  if (!scanner().has_line_terminator_before_next() &&
+  if (!scanner().HasAnyLineTerminatorBeforeNext() &&
       Token::IsCountOp(peek())) {
     // Signal a reference error if the expression is an invalid
     // left-hand side expression.  We could report this as a syntax
@@ -2624,7 +2767,8 @@
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
     expression =
-        new(zone()) CountOperation(next,
+        new(zone()) CountOperation(isolate(),
+                                   next,
                                    false /* postfix */,
                                    expression,
                                    position);
@@ -2650,7 +2794,7 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new(zone()) Property(result, index, pos);
+        result = new(zone()) Property(isolate(), result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
@@ -2677,7 +2821,7 @@
           Handle<String> name = callee->name();
           Variable* var = top_scope_->Lookup(name);
           if (var == NULL) {
-            top_scope_->RecordEvalCall();
+            top_scope_->DeclarationScope()->RecordEvalCall();
           }
         }
         result = NewCall(result, args, pos);
@@ -2688,7 +2832,10 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new(zone()) Property(result, new(zone()) Literal(name), pos);
+        result = new(zone()) Property(isolate(),
+                                      result,
+                                      NewLiteral(name),
+                                      pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2724,7 +2871,10 @@
 
   if (!stack->is_empty()) {
     int last = stack->pop();
-    result = new(zone()) CallNew(result, new ZoneList<Expression*>(0), last);
+    result = new(zone()) CallNew(isolate(),
+                                 result,
+                                 new(zone()) ZoneList<Expression*>(0),
+                                 last);
   }
   return result;
 }
@@ -2753,12 +2903,19 @@
     Expect(Token::FUNCTION, CHECK_OK);
     int function_token_position = scanner().location().beg_pos;
     Handle<String> name;
-    bool is_reserved_name = false;
+    bool is_strict_reserved_name = false;
     if (peek_any_identifier()) {
-        name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
+      name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+                                                 CHECK_OK);
     }
-    result = ParseFunctionLiteral(name, is_reserved_name,
-                                  function_token_position, NESTED, CHECK_OK);
+    FunctionLiteral::Type type = name.is_null()
+        ? FunctionLiteral::ANONYMOUS_EXPRESSION
+        : FunctionLiteral::NAMED_EXPRESSION;
+    result = ParseFunctionLiteral(name,
+                                  is_strict_reserved_name,
+                                  function_token_position,
+                                  type,
+                                  CHECK_OK);
   } else {
     result = ParsePrimaryExpression(CHECK_OK);
   }
@@ -2769,7 +2926,15 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new(zone()) Property(result, index, pos);
+        result = new(zone()) Property(isolate(), result, index, pos);
+        if (fni_ != NULL) {
+          if (index->IsPropertyName()) {
+            fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
+          } else {
+            fni_->PushLiteralName(
+                isolate()->factory()->anonymous_function_symbol());
+          }
+        }
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
@@ -2777,7 +2942,10 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new(zone()) Property(result, new(zone()) Literal(name), pos);
+        result = new(zone()) Property(isolate(),
+                                      result,
+                                      NewLiteral(name),
+                                      pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2786,7 +2954,7 @@
         // Consume one of the new prefixes (already parsed).
         ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
         int last = stack->pop();
-        result = new CallNew(result, args, last);
+        result = new(zone()) CallNew(isolate(), result, args, last);
         break;
       }
       default:
@@ -2828,6 +2996,9 @@
       return ReportMessage("unexpected_token_identifier",
                            Vector<const char*>::empty());
     case Token::FUTURE_RESERVED_WORD:
+      return ReportMessage("unexpected_reserved",
+                           Vector<const char*>::empty());
+    case Token::FUTURE_STRICT_RESERVED_WORD:
       return ReportMessage(top_scope_->is_strict_mode() ?
                                "unexpected_strict_reserved" :
                                "unexpected_token_identifier",
@@ -2841,7 +3012,7 @@
 
 
 void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
-  SmartPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
+  SmartArrayPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
   const char* element[1] = { *name_string };
   ReportMessage("invalid_preparser_data",
                 Vector<const char*>(element, 1));
@@ -2867,28 +3038,30 @@
   switch (peek()) {
     case Token::THIS: {
       Consume(Token::THIS);
-      VariableProxy* recv = top_scope_->receiver();
-      result = recv;
+      result = new(zone()) VariableProxy(isolate(), top_scope_->receiver());
       break;
     }
 
     case Token::NULL_LITERAL:
       Consume(Token::NULL_LITERAL);
-      result = new(zone()) Literal(isolate()->factory()->null_value());
+      result = new(zone()) Literal(
+          isolate(), isolate()->factory()->null_value());
       break;
 
     case Token::TRUE_LITERAL:
       Consume(Token::TRUE_LITERAL);
-      result = new(zone()) Literal(isolate()->factory()->true_value());
+      result = new(zone()) Literal(
+          isolate(), isolate()->factory()->true_value());
       break;
 
     case Token::FALSE_LITERAL:
       Consume(Token::FALSE_LITERAL);
-      result = new(zone()) Literal(isolate()->factory()->false_value());
+      result = new(zone()) Literal(
+          isolate(), isolate()->factory()->false_value());
       break;
 
     case Token::IDENTIFIER:
-    case Token::FUTURE_RESERVED_WORD: {
+    case Token::FUTURE_STRICT_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
       result = top_scope_->NewUnresolved(name,
@@ -2910,7 +3083,7 @@
     case Token::STRING: {
       Consume(Token::STRING);
       Handle<String> symbol = GetSymbol(CHECK_OK);
-      result = new(zone()) Literal(symbol);
+      result = NewLiteral(symbol);
       if (fni_ != NULL) fni_->PushLiteralName(symbol);
       break;
     }
@@ -2991,7 +3164,7 @@
   // ArrayLiteral ::
   //   '[' Expression? (',' Expression?)* ']'
 
-  ZoneList<Expression*>* values = new ZoneList<Expression*>(4);
+  ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4);
   Expect(Token::LBRACK, CHECK_OK);
   while (peek() != Token::RBRACK) {
     Expression* elem;
@@ -3037,8 +3210,8 @@
     literals->set_map(isolate()->heap()->fixed_cow_array_map());
   }
 
-  return new(zone()) ArrayLiteral(literals, values,
-                          literal_index, is_simple, depth);
+  return new(zone()) ArrayLiteral(
+      isolate(), literals, values, literal_index, is_simple, depth);
 }
 
 
@@ -3299,6 +3472,7 @@
   bool is_keyword = Token::IsKeyword(next);
   if (next == Token::IDENTIFIER || next == Token::NUMBER ||
       next == Token::FUTURE_RESERVED_WORD ||
+      next == Token::FUTURE_STRICT_RESERVED_WORD ||
       next == Token::STRING || is_keyword) {
     Handle<String> name;
     if (is_keyword) {
@@ -3310,7 +3484,7 @@
         ParseFunctionLiteral(name,
                              false,   // reserved words are allowed here
                              RelocInfo::kNoPosition,
-                             DECLARATION,
+                             FunctionLiteral::ANONYMOUS_EXPRESSION,
                              CHECK_OK);
     // Allow any number of parameters for compatibility with JSC.
     // Specification only allows zero parameters for get and one for set.
@@ -3333,14 +3507,13 @@
   //    )*[','] '}'
 
   ZoneList<ObjectLiteral::Property*>* properties =
-      new ZoneList<ObjectLiteral::Property*>(4);
+      new(zone()) ZoneList<ObjectLiteral::Property*>(4);
   int number_of_boilerplate_properties = 0;
   bool has_function = false;
 
   ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
 
   Expect(Token::LBRACE, CHECK_OK);
-  Scanner::Location loc = scanner().location();
 
   while (peek() != Token::RBRACE) {
     if (fni_ != NULL) fni_->Enter();
@@ -3353,11 +3526,12 @@
 
     switch (next) {
       case Token::FUTURE_RESERVED_WORD:
+      case Token::FUTURE_STRICT_RESERVED_WORD:
       case Token::IDENTIFIER: {
         bool is_getter = false;
         bool is_setter = false;
         Handle<String> id =
-            ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+            ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
         if (fni_ != NULL) fni_->PushLiteralName(id);
 
         if ((is_getter || is_setter) && peek() != Token::COLON) {
@@ -3381,7 +3555,7 @@
         }
         // Failed to parse as get/set property, so it's just a property
         // called "get" or "set".
-        key = new(zone()) Literal(id);
+        key = NewLiteral(id);
         break;
       }
       case Token::STRING: {
@@ -3393,7 +3567,7 @@
           key = NewNumberLiteral(index);
           break;
         }
-        key = new(zone()) Literal(string);
+        key = NewLiteral(string);
         break;
       }
       case Token::NUMBER: {
@@ -3409,7 +3583,7 @@
         if (Token::IsKeyword(next)) {
           Consume(next);
           Handle<String> string = GetSymbol(CHECK_OK);
-          key = new(zone()) Literal(string);
+          key = NewLiteral(string);
         } else {
           // Unexpected token.
           Token::Value next = Next();
@@ -3462,13 +3636,14 @@
                                        &is_simple,
                                        &fast_elements,
                                        &depth);
-  return new(zone()) ObjectLiteral(constant_properties,
-                           properties,
-                           literal_index,
-                           is_simple,
-                           fast_elements,
-                           depth,
-                           has_function);
+  return new(zone()) ObjectLiteral(isolate(),
+                                   constant_properties,
+                                   properties,
+                                   literal_index,
+                                   is_simple,
+                                   fast_elements,
+                                   depth,
+                                   has_function);
 }
 
 
@@ -3487,7 +3662,8 @@
   Handle<String> js_flags = NextLiteralString(TENURED);
   Next();
 
-  return new(zone()) RegExpLiteral(js_pattern, js_flags, literal_index);
+  return new(zone()) RegExpLiteral(
+      isolate(), js_pattern, js_flags, literal_index);
 }
 
 
@@ -3495,7 +3671,7 @@
   // Arguments ::
   //   '(' (AssignmentExpression)*[','] ')'
 
-  ZoneList<Expression*>* result = new ZoneList<Expression*>(4);
+  ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4);
   Expect(Token::LPAREN, CHECK_OK);
   bool done = (peek() == Token::RPAREN);
   while (!done) {
@@ -3515,69 +3691,75 @@
 }
 
 
-FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
-                                              bool name_is_reserved,
+FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
+                                              bool name_is_strict_reserved,
                                               int function_token_position,
-                                              FunctionLiteralType type,
+                                              FunctionLiteral::Type type,
                                               bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
-  bool is_named = !var_name.is_null();
 
-  // The name associated with this function. If it's a function expression,
-  // this is the actual function name, otherwise this is the name of the
-  // variable declared and initialized with the function (expression). In
-  // that case, we don't have a function name (it's empty).
-  Handle<String> name =
-      is_named ? var_name : isolate()->factory()->empty_symbol();
-  // The function name, if any.
-  Handle<String> function_name = isolate()->factory()->empty_symbol();
-  if (is_named && (type == EXPRESSION || type == NESTED)) {
-    function_name = name;
+  // Anonymous functions were passed either the empty symbol or a null
+  // handle as the function name.  Remember if we were passed a non-empty
+  // handle to decide whether to invoke function name inference.
+  bool should_infer_name = function_name.is_null();
+
+  // We want a non-null handle as the function name.
+  if (should_infer_name) {
+    function_name = isolate()->factory()->empty_symbol();
   }
 
   int num_parameters = 0;
-  Scope* scope = NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
-  ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
+  // Function declarations are function scoped in normal mode, so they are
+  // hoisted. In harmony block scoping mode they are block scoped, so they
+  // are not hoisted.
+  Scope* scope = (type == FunctionLiteral::DECLARATION &&
+                  !harmony_block_scoping_)
+      ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
+      : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
+  ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
   int materialized_literal_count;
   int expected_property_count;
   int start_pos;
   int end_pos;
   bool only_simple_this_property_assignments;
   Handle<FixedArray> this_property_assignments;
+  bool has_duplicate_parameters = false;
   // Parse function body.
   { LexicalScope lexical_scope(this, scope, isolate());
-    top_scope_->SetScopeName(name);
+    top_scope_->SetScopeName(function_name);
 
     //  FormalParameterList ::
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
     start_pos = scanner().location().beg_pos;
-    Scanner::Location name_loc = Scanner::NoLocation();
-    Scanner::Location dupe_loc = Scanner::NoLocation();
-    Scanner::Location reserved_loc = Scanner::NoLocation();
+    Scanner::Location name_loc = Scanner::Location::invalid();
+    Scanner::Location dupe_loc = Scanner::Location::invalid();
+    Scanner::Location reserved_loc = Scanner::Location::invalid();
 
     bool done = (peek() == Token::RPAREN);
     while (!done) {
-      bool is_reserved = false;
+      bool is_strict_reserved = false;
       Handle<String> param_name =
-          ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
+          ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
+                                              CHECK_OK);
 
       // Store locations for possible future error reports.
       if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
         name_loc = scanner().location();
       }
       if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+        has_duplicate_parameters = true;
         dupe_loc = scanner().location();
       }
-      if (!reserved_loc.IsValid() && is_reserved) {
+      if (!reserved_loc.IsValid() && is_strict_reserved) {
         reserved_loc = scanner().location();
       }
 
-      Variable* parameter = top_scope_->DeclareLocal(param_name,
-                                                     Variable::VAR,
-                                                     Scope::PARAMETER);
-      top_scope_->AddParameter(parameter);
+      top_scope_->DeclareParameter(param_name,
+                                   harmony_block_scoping_
+                                   ? Variable::LET
+                                   : Variable::VAR);
       num_parameters++;
       if (num_parameters > kMaxNumFunctionParameters) {
         ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -3598,46 +3780,56 @@
     // NOTE: We create a proxy and resolve it here so that in the
     // future we can change the AST to only refer to VariableProxies
     // instead of Variables and Proxies as is the case now.
-    if (!function_name.is_null() && function_name->length() > 0) {
+    if (type == FunctionLiteral::NAMED_EXPRESSION) {
       Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
       VariableProxy* fproxy =
           top_scope_->NewUnresolved(function_name, inside_with());
       fproxy->BindTo(fvar);
       body->Add(new(zone()) ExpressionStatement(
-                    new(zone()) Assignment(Token::INIT_CONST, fproxy,
-                                   new(zone()) ThisFunction(),
-                                   RelocInfo::kNoPosition)));
+          new(zone()) Assignment(isolate(),
+                                 Token::INIT_CONST,
+                                 fproxy,
+                                 new(zone()) ThisFunction(isolate()),
+                                 RelocInfo::kNoPosition)));
     }
 
-    // Determine if the function will be lazily compiled. The mode can
-    // only be PARSE_LAZILY if the --lazy flag is true.
+    // Determine if the function will be lazily compiled. The mode can only
+    // be PARSE_LAZILY if the --lazy flag is true.  We will not lazily
+    // compile if we do not have preparser data for the function.
     bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
                                top_scope_->outer_scope()->is_global_scope() &&
                                top_scope_->HasTrivialOuterContext() &&
-                               !parenthesized_function_);
+                               !parenthesized_function_ &&
+                               pre_data() != NULL);
     parenthesized_function_ = false;  // The bit was set for this function only.
 
-    int function_block_pos = scanner().location().beg_pos;
-    if (is_lazily_compiled && pre_data() != NULL) {
+    if (is_lazily_compiled) {
+      int function_block_pos = scanner().location().beg_pos;
       FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
       if (!entry.is_valid()) {
-        ReportInvalidPreparseData(name, CHECK_OK);
+        // There is no preparser data for the function, so we will not lazily
+        // compile after all.
+        is_lazily_compiled = false;
+      } else {
+        end_pos = entry.end_pos();
+        if (end_pos <= function_block_pos) {
+          // End position greater than end of stream is safe, and hard to check.
+          ReportInvalidPreparseData(function_name, CHECK_OK);
+        }
+        isolate()->counters()->total_preparse_skipped()->Increment(
+            end_pos - function_block_pos);
+        // Seek to position just before terminal '}'.
+        scanner().SeekForward(end_pos - 1);
+        materialized_literal_count = entry.literal_count();
+        expected_property_count = entry.property_count();
+        if (entry.strict_mode()) top_scope_->EnableStrictMode();
+        only_simple_this_property_assignments = false;
+        this_property_assignments = isolate()->factory()->empty_fixed_array();
+        Expect(Token::RBRACE, CHECK_OK);
       }
-      end_pos = entry.end_pos();
-      if (end_pos <= function_block_pos) {
-        // End position greater than end of stream is safe, and hard to check.
-        ReportInvalidPreparseData(name, CHECK_OK);
-      }
-      isolate()->counters()->total_preparse_skipped()->Increment(
-          end_pos - function_block_pos);
-      // Seek to position just before terminal '}'.
-      scanner().SeekForward(end_pos - 1);
-      materialized_literal_count = entry.literal_count();
-      expected_property_count = entry.property_count();
-      only_simple_this_property_assignments = false;
-      this_property_assignments = isolate()->factory()->empty_fixed_array();
-      Expect(Token::RBRACE, CHECK_OK);
-    } else {
+    }
+
+    if (!is_lazily_compiled) {
       ParseSourceElements(body, Token::RBRACE, CHECK_OK);
 
       materialized_literal_count = lexical_scope.materialized_literal_count();
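The restructuring above changes when a function body is skipped: lazy compilation is only considered at all when preparse data is present, and if that data has no valid entry for the current block position the parser now falls back to eager parsing instead of reporting invalid preparse data. A compact sketch of that two-step decision, with placeholder types that are not V8's:

    #include <iostream>
    #include <map>

    struct FunctionEntry {
      int end_pos = -1;
      bool valid() const { return end_pos >= 0; }
    };

    struct PreparseData {
      std::map<int, FunctionEntry> entries;  // keyed by function block position
      FunctionEntry Get(int pos) const {
        auto it = entries.find(pos);
        return it == entries.end() ? FunctionEntry{} : it->second;
      }
    };

    // True if the body starting at 'block_pos' can be skipped using preparse data.
    bool ShouldSkipBody(bool parse_lazily, const PreparseData* data, int block_pos) {
      bool candidate = parse_lazily && data != nullptr;  // no data => never lazy
      if (!candidate) return false;
      return data->Get(block_pos).valid();               // missing entry => parse eagerly
    }

    int main() {
      PreparseData data;
      data.entries[10] = FunctionEntry{42};
      std::cout << ShouldSkipBody(true, &data, 10) << "\n";    // 1: skip to recorded end
      std::cout << ShouldSkipBody(true, &data, 99) << "\n";    // 0: no entry, parse eagerly
      std::cout << ShouldSkipBody(true, nullptr, 10) << "\n";  // 0: no preparse data at all
    }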
@@ -3652,7 +3844,7 @@
 
     // Validate strict mode.
     if (top_scope_->is_strict_mode()) {
-      if (IsEvalOrArguments(name)) {
+      if (IsEvalOrArguments(function_name)) {
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3674,7 +3866,7 @@
         *ok = false;
         return NULL;
       }
-      if (name_is_reserved) {
+      if (name_is_strict_reserved) {
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3694,8 +3886,13 @@
     }
   }
 
+  if (harmony_block_scoping_) {
+    CheckConflictingVarDeclarations(scope, CHECK_OK);
+  }
+
   FunctionLiteral* function_literal =
-      new(zone()) FunctionLiteral(name,
+      new(zone()) FunctionLiteral(isolate(),
+                                  function_name,
                                   scope,
                                   body,
                                   materialized_literal_count,
@@ -3705,10 +3902,11 @@
                                   num_parameters,
                                   start_pos,
                                   end_pos,
-                                  (function_name->length() > 0));
+                                  type,
+                                  has_duplicate_parameters);
   function_literal->set_function_token_position(function_token_position);
 
-  if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
+  if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
   return function_literal;
 }
 
@@ -3724,7 +3922,7 @@
   if (extension_ != NULL) {
     // The extension structures are only accessible while parsing the
     // very first time not when reparsing because of lazy compilation.
-    top_scope_->ForceEagerCompilation();
+    top_scope_->DeclarationScope()->ForceEagerCompilation();
   }
 
   const Runtime::Function* function = Runtime::FunctionForSymbol(name);
@@ -3755,14 +3953,15 @@
   }
 
   // We have a valid intrinsics call or a call to a builtin.
-  return new(zone()) CallRuntime(name, function, args);
+  return new(zone()) CallRuntime(isolate(), name, function, args);
 }
 
 
 bool Parser::peek_any_identifier() {
   Token::Value next = peek();
   return next == Token::IDENTIFIER ||
-         next == Token::FUTURE_RESERVED_WORD;
+         next == Token::FUTURE_RESERVED_WORD ||
+         next == Token::FUTURE_STRICT_RESERVED_WORD;
 }
 
 
@@ -3800,7 +3999,7 @@
     Next();
     return;
   }
-  if (scanner().has_line_terminator_before_next() ||
+  if (scanner().HasAnyLineTerminatorBeforeNext() ||
       tok == Token::RBRACE ||
       tok == Token::EOS) {
     return;
@@ -3810,12 +4009,12 @@
 
 
 Literal* Parser::GetLiteralUndefined() {
-  return new(zone()) Literal(isolate()->factory()->undefined_value());
+  return NewLiteral(isolate()->factory()->undefined_value());
 }
 
 
 Literal* Parser::GetLiteralTheHole() {
-  return new(zone()) Literal(isolate()->factory()->the_hole_value());
+  return NewLiteral(isolate()->factory()->the_hole_value());
 }
 
 
@@ -3824,22 +4023,27 @@
 }
 
 
+// Parses an identifier that is valid for the current scope; in particular, it
+// fails on strict mode future reserved keywords in a strict scope.
 Handle<String> Parser::ParseIdentifier(bool* ok) {
-  bool is_reserved;
-  return ParseIdentifierOrReservedWord(&is_reserved, ok);
+  if (top_scope_->is_strict_mode()) {
+    Expect(Token::IDENTIFIER, ok);
+  } else if (!Check(Token::IDENTIFIER)) {
+    Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+  }
+  if (!*ok) return Handle<String>();
+  return GetSymbol(ok);
 }
 
 
-Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
-                                                     bool* ok) {
-  *is_reserved = false;
-  if (top_scope_->is_strict_mode()) {
-    Expect(Token::IDENTIFIER, ok);
-  } else {
-    if (!Check(Token::IDENTIFIER)) {
-      Expect(Token::FUTURE_RESERVED_WORD, ok);
-      *is_reserved = true;
-    }
+// Parses an identifier or a strict mode future reserved word, and indicates
+// whether it is strict mode future reserved.
+Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
+    bool* is_strict_reserved, bool* ok) {
+  *is_strict_reserved = false;
+  if (!Check(Token::IDENTIFIER)) {
+    Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+    *is_strict_reserved = true;
   }
   if (!*ok) return Handle<String>();
   return GetSymbol(ok);
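The rewritten ParseIdentifier above accepts a plain identifier in any scope, and additionally accepts a strict-mode future reserved word only outside strict mode, while ParseIdentifierOrStrictReservedWord accepts both and reports which one it saw. A tiny standalone sketch of that acceptance rule (the token enum here is illustrative, not V8's Token values):

    #include <iostream>

    enum class Token { IDENTIFIER, FUTURE_RESERVED_WORD, FUTURE_STRICT_RESERVED_WORD };

    // Mirrors the rule above: plain identifiers always parse; strict-mode future
    // reserved words parse as identifiers only outside strict mode; plain future
    // reserved words never do.
    bool IsValidIdentifier(Token t, bool strict_mode) {
      if (t == Token::IDENTIFIER) return true;
      if (t == Token::FUTURE_STRICT_RESERVED_WORD) return !strict_mode;
      return false;
    }

    int main() {
      std::cout << IsValidIdentifier(Token::FUTURE_STRICT_RESERVED_WORD, false) << "\n";  // 1
      std::cout << IsValidIdentifier(Token::FUTURE_STRICT_RESERVED_WORD, true) << "\n";   // 0
    }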
@@ -3849,8 +4053,9 @@
 Handle<String> Parser::ParseIdentifierName(bool* ok) {
   Token::Value next = Next();
   if (next != Token::IDENTIFIER &&
-          next != Token::FUTURE_RESERVED_WORD &&
-          !Token::IsKeyword(next)) {
+      next != Token::FUTURE_RESERVED_WORD &&
+      next != Token::FUTURE_STRICT_RESERVED_WORD &&
+      !Token::IsKeyword(next)) {
     ReportUnexpectedToken(next);
     *ok = false;
     return Handle<String>();
@@ -3876,12 +4081,14 @@
 }
 
 
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
+// Checks whether an octal literal was last seen between beg_pos and end_pos.
+// If so, reports an error. Only called for strict mode.
 void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
-  int octal = scanner().octal_position();
-  if (beg_pos <= octal && octal <= end_pos) {
-    ReportMessageAt(Scanner::Location(octal, octal + 1), "strict_octal_literal",
+  Scanner::Location octal = scanner().octal_position();
+  if (octal.IsValid() &&
+      beg_pos <= octal.beg_pos &&
+      octal.end_pos <= end_pos) {
+    ReportMessageAt(octal, "strict_octal_literal",
                     Vector<const char*>::empty());
     scanner().clear_octal_position();
     *ok = false;
@@ -3889,12 +4096,31 @@
 }
 
 
-// This function reads an identifier and determines whether or not it
+void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
+  Declaration* decl = scope->CheckConflictingVarDeclarations();
+  if (decl != NULL) {
+    // In harmony mode we treat conflicting variable bindings as early
+    // errors. See ES5 16 for a definition of early errors.
+    Handle<String> name = decl->proxy()->name();
+    SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
+    const char* elms[2] = { "Variable", *c_string };
+    Vector<const char*> args(elms, 2);
+    int position = decl->proxy()->position();
+    Scanner::Location location = position == RelocInfo::kNoPosition
+        ? Scanner::Location::invalid()
+        : Scanner::Location(position, position + 1);
+    ReportMessageAt(location, "redeclaration", args);
+    *ok = false;
+  }
+}
+
+
+// This function reads an identifier name and determines whether or not it
 // is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
-                                                 bool* is_set,
-                                                 bool* ok) {
-  Handle<String> result = ParseIdentifier(ok);
+Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+                                                     bool* is_set,
+                                                     bool* ok) {
+  Handle<String> result = ParseIdentifierName(ok);
   if (!*ok) return Handle<String>();
   if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
     const char* token = scanner().literal_ascii_string().start();
@@ -3963,7 +4189,7 @@
 
 
 Literal* Parser::NewNumberLiteral(double number) {
-  return new(zone()) Literal(isolate()->factory()->NewNumber(number, TENURED));
+  return NewLiteral(isolate()->factory()->NewNumber(number, TENURED));
 }
 
 
@@ -4009,191 +4235,16 @@
   Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
                                                                        TENURED);
 
-  ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
-  args->Add(new(zone()) Literal(type));
-  args->Add(new(zone()) Literal(array));
-  return new(zone()) Throw(new(zone()) CallRuntime(constructor, NULL, args),
-                   scanner().location().beg_pos);
-}
-
-// ----------------------------------------------------------------------------
-// JSON
-
-Handle<Object> JsonParser::ParseJson(Handle<String> script,
-                                     UC16CharacterStream* source) {
-  scanner_.Initialize(source);
-  stack_overflow_ = false;
-  Handle<Object> result = ParseJsonValue();
-  if (result.is_null() || scanner_.Next() != Token::EOS) {
-    if (stack_overflow_) {
-      // Scanner failed.
-      isolate()->StackOverflow();
-    } else {
-      // Parse failed. Scanner's current token is the unexpected token.
-      Token::Value token = scanner_.current_token();
-
-      const char* message;
-      const char* name_opt = NULL;
-
-      switch (token) {
-        case Token::EOS:
-          message = "unexpected_eos";
-          break;
-        case Token::NUMBER:
-          message = "unexpected_token_number";
-          break;
-        case Token::STRING:
-          message = "unexpected_token_string";
-          break;
-        case Token::IDENTIFIER:
-        case Token::FUTURE_RESERVED_WORD:
-          message = "unexpected_token_identifier";
-          break;
-        default:
-          message = "unexpected_token";
-          name_opt = Token::String(token);
-          ASSERT(name_opt != NULL);
-          break;
-      }
-
-      Scanner::Location source_location = scanner_.location();
-      Factory* factory = isolate()->factory();
-      MessageLocation location(factory->NewScript(script),
-                               source_location.beg_pos,
-                               source_location.end_pos);
-      Handle<JSArray> array;
-      if (name_opt == NULL) {
-        array = factory->NewJSArray(0);
-      } else {
-        Handle<String> name = factory->NewStringFromUtf8(CStrVector(name_opt));
-        Handle<FixedArray> element = factory->NewFixedArray(1);
-        element->set(0, *name);
-        array = factory->NewJSArrayWithElements(element);
-      }
-      Handle<Object> result = factory->NewSyntaxError(message, array);
-      isolate()->Throw(*result, &location);
-      return Handle<Object>::null();
-    }
-  }
-  return result;
-}
-
-
-Handle<String> JsonParser::GetString() {
-  int literal_length = scanner_.literal_length();
-  if (literal_length == 0) {
-    return isolate()->factory()->empty_string();
-  }
-  if (scanner_.is_literal_ascii()) {
-    return isolate()->factory()->NewStringFromAscii(
-        scanner_.literal_ascii_string());
-  } else {
-    return isolate()->factory()->NewStringFromTwoByte(
-        scanner_.literal_uc16_string());
-  }
-}
-
-
-// Parse any JSON value.
-Handle<Object> JsonParser::ParseJsonValue() {
-  Token::Value token = scanner_.Next();
-  switch (token) {
-    case Token::STRING:
-      return GetString();
-    case Token::NUMBER:
-      return isolate()->factory()->NewNumber(scanner_.number());
-    case Token::FALSE_LITERAL:
-      return isolate()->factory()->false_value();
-    case Token::TRUE_LITERAL:
-      return isolate()->factory()->true_value();
-    case Token::NULL_LITERAL:
-      return isolate()->factory()->null_value();
-    case Token::LBRACE:
-      return ParseJsonObject();
-    case Token::LBRACK:
-      return ParseJsonArray();
-    default:
-      return ReportUnexpectedToken();
-  }
-}
-
-
-// Parse a JSON object. Scanner must be right after '{' token.
-Handle<Object> JsonParser::ParseJsonObject() {
-  Handle<JSFunction> object_constructor(
-      isolate()->global_context()->object_function());
-  Handle<JSObject> json_object =
-      isolate()->factory()->NewJSObject(object_constructor);
-  if (scanner_.peek() == Token::RBRACE) {
-    scanner_.Next();
-  } else {
-    if (StackLimitCheck(isolate()).HasOverflowed()) {
-      stack_overflow_ = true;
-      return Handle<Object>::null();
-    }
-    do {
-      if (scanner_.Next() != Token::STRING) {
-        return ReportUnexpectedToken();
-      }
-      Handle<String> key = GetString();
-      if (scanner_.Next() != Token::COLON) {
-        return ReportUnexpectedToken();
-      }
-      Handle<Object> value = ParseJsonValue();
-      if (value.is_null()) return Handle<Object>::null();
-      uint32_t index;
-      if (key->AsArrayIndex(&index)) {
-        SetOwnElement(json_object, index, value, kNonStrictMode);
-      } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
-        // We can't remove the __proto__ accessor since it's hardcoded
-        // in several places. Instead go along and add the value as
-        // the prototype of the created object if possible.
-        SetPrototype(json_object, value);
-      } else {
-        SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
-      }
-    } while (scanner_.Next() == Token::COMMA);
-    if (scanner_.current_token() != Token::RBRACE) {
-      return ReportUnexpectedToken();
-    }
-  }
-  return json_object;
-}
-
-
-// Parse a JSON array. Scanner must be right after '[' token.
-Handle<Object> JsonParser::ParseJsonArray() {
-  ZoneScope zone_scope(DELETE_ON_EXIT);
-  ZoneList<Handle<Object> > elements(4);
-
-  Token::Value token = scanner_.peek();
-  if (token == Token::RBRACK) {
-    scanner_.Next();
-  } else {
-    if (StackLimitCheck(isolate()).HasOverflowed()) {
-      stack_overflow_ = true;
-      return Handle<Object>::null();
-    }
-    do {
-      Handle<Object> element = ParseJsonValue();
-      if (element.is_null()) return Handle<Object>::null();
-      elements.Add(element);
-      token = scanner_.Next();
-    } while (token == Token::COMMA);
-    if (token != Token::RBRACK) {
-      return ReportUnexpectedToken();
-    }
-  }
-
-  // Allocate a fixed array with all the elements.
-  Handle<FixedArray> fast_elements =
-      isolate()->factory()->NewFixedArray(elements.length());
-
-  for (int i = 0, n = elements.length(); i < n; i++) {
-    fast_elements->set(i, *elements[i]);
-  }
-
-  return isolate()->factory()->NewJSArrayWithElements(fast_elements);
+  ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
+  args->Add(NewLiteral(type));
+  args->Add(NewLiteral(array));
+  CallRuntime* call_constructor = new(zone()) CallRuntime(isolate(),
+                                                          constructor,
+                                                          NULL,
+                                                          args);
+  return new(zone()) Throw(isolate(),
+                           call_constructor,
+                           scanner().location().beg_pos);
 }
 
 // ----------------------------------------------------------------------------
@@ -4383,7 +4434,8 @@
     case '.': {
       Advance();
       // everything except \x0a, \x0d, \u2028 and \u2029
-      ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+      ZoneList<CharacterRange>* ranges =
+          new(zone()) ZoneList<CharacterRange>(2);
       CharacterRange::AddClassEscape('.', ranges);
       RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
       builder->AddAtom(atom);
@@ -4410,7 +4462,7 @@
         Advance(2);
       } else {
         if (captures_ == NULL) {
-          captures_ = new ZoneList<RegExpCapture*>(2);
+          captures_ = new(zone()) ZoneList<RegExpCapture*>(2);
         }
         if (captures_started() >= kMaxCaptures) {
           ReportError(CStrVector("Too many captures") CHECK_FAILED);
@@ -4453,7 +4505,8 @@
       case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
         uc32 c = Next();
         Advance(2);
-        ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+        ZoneList<CharacterRange>* ranges =
+            new(zone()) ZoneList<CharacterRange>(2);
         CharacterRange::AddClassEscape(c, ranges);
         RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
         builder->AddAtom(atom);
@@ -4949,7 +5002,7 @@
     is_negated = true;
     Advance();
   }
-  ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+  ZoneList<CharacterRange>* ranges = new(zone()) ZoneList<CharacterRange>(2);
   while (has_more() && current() != ']') {
     uc16 char_class = kNoCharClass;
     CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
@@ -5070,9 +5123,11 @@
 // Create a Scanner for the preparser to use as input, and preparse the source.
 static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
                                   bool allow_lazy,
-                                  ParserRecorder* recorder) {
+                                  ParserRecorder* recorder,
+                                  bool harmony_block_scoping) {
   Isolate* isolate = Isolate::Current();
-  V8JavaScriptScanner scanner(isolate->unicode_cache());
+  JavaScriptScanner scanner(isolate->unicode_cache());
+  scanner.SetHarmonyBlockScoping(harmony_block_scoping);
   scanner.Initialize(source);
   intptr_t stack_limit = isolate->stack_guard()->real_climit();
   if (!preparser::PreParser::PreParseProgram(&scanner,
@@ -5093,7 +5148,8 @@
 // Preparse, but only collect data that is immediately useful,
 // even if the preparser data is only used once.
 ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
-                                           v8::Extension* extension) {
+                                           v8::Extension* extension,
+                                           bool harmony_block_scoping) {
   bool allow_lazy = FLAG_lazy && (extension == NULL);
   if (!allow_lazy) {
     // Partial preparsing is only about lazily compiled functions.
@@ -5101,16 +5157,17 @@
     return NULL;
   }
   PartialParserRecorder recorder;
-  return DoPreParse(source, allow_lazy, &recorder);
+  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
 }
 
 
 ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
-                                    v8::Extension* extension) {
+                                    v8::Extension* extension,
+                                    bool harmony_block_scoping) {
   Handle<Script> no_script;
   bool allow_lazy = FLAG_lazy && (extension == NULL);
   CompleteParserRecorder recorder;
-  return DoPreParse(source, allow_lazy, &recorder);
+  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
 }
 
 
@@ -5140,17 +5197,25 @@
   ASSERT(info->function() == NULL);
   FunctionLiteral* result = NULL;
   Handle<Script> script = info->script();
+  bool harmony_block_scoping = !info->is_native() &&
+                               FLAG_harmony_block_scoping;
   if (info->is_lazy()) {
     bool allow_natives_syntax =
         FLAG_allow_natives_syntax ||
         info->is_native();
     Parser parser(script, allow_natives_syntax, NULL, NULL);
+    parser.SetHarmonyBlockScoping(harmony_block_scoping);
     result = parser.ParseLazy(info);
   } else {
+    // Whether we allow %identifier(..) syntax.
     bool allow_natives_syntax =
         info->is_native() || FLAG_allow_natives_syntax;
     ScriptDataImpl* pre_data = info->pre_parse_data();
-    Parser parser(script, allow_natives_syntax, info->extension(), pre_data);
+    Parser parser(script,
+                  allow_natives_syntax,
+                  info->extension(),
+                  pre_data);
+    parser.SetHarmonyBlockScoping(harmony_block_scoping);
     if (pre_data != NULL && pre_data->has_error()) {
       Scanner::Location loc = pre_data->MessageLocation();
       const char* message = pre_data->BuildMessage();
@@ -5169,7 +5234,6 @@
                                    info->StrictMode());
     }
   }
-
   info->SetFunction(result);
   return (result != NULL);
 }
diff --git a/src/parser.h b/src/parser.h
index 64f1303..3312f2f 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -30,9 +30,9 @@
 
 #include "allocation.h"
 #include "ast.h"
-#include "scanner.h"
-#include "scopes.h"
+#include "preparse-data-format.h"
 #include "preparse-data.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
@@ -71,22 +71,14 @@
   FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
 
   int start_pos() { return backing_[kStartPosOffset]; }
-  void set_start_pos(int value) { backing_[kStartPosOffset] = value; }
-
   int end_pos() { return backing_[kEndPosOffset]; }
-  void set_end_pos(int value) { backing_[kEndPosOffset] = value; }
-
   int literal_count() { return backing_[kLiteralCountOffset]; }
-  void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
-
   int property_count() { return backing_[kPropertyCountOffset]; }
-  void set_property_count(int value) {
-    backing_[kPropertyCountOffset] = value;
-  }
+  bool strict_mode() { return backing_[kStrictModeOffset] != 0; }
 
   bool is_valid() { return backing_.length() > 0; }
 
-  static const int kSize = 4;
+  static const int kSize = 5;
 
  private:
   Vector<unsigned> backing_;
@@ -94,6 +86,7 @@
   static const int kEndPosOffset = 1;
   static const int kLiteralCountOffset = 2;
   static const int kPropertyCountOffset = 3;
+  static const int kStrictModeOffset = 4;
 };
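FunctionEntry is reduced here to a read-only view over five consecutive unsigned slots per preparsed function: start position, end position, literal count, property count, and a strict-mode flag (kSize grows from 4 to 5). A minimal mock of that layout, independent of V8's Vector type:

    #include <cassert>
    #include <vector>

    // Read-only view over a 5-slot record, mirroring the offsets used by
    // FunctionEntry after this change (slot 4 is the new strict-mode flag).
    class EntryView {
     public:
      explicit EntryView(const unsigned* slots) : slots_(slots) {}
      int start_pos() const { return static_cast<int>(slots_[0]); }
      int end_pos() const { return static_cast<int>(slots_[1]); }
      int literal_count() const { return static_cast<int>(slots_[2]); }
      int property_count() const { return static_cast<int>(slots_[3]); }
      bool strict_mode() const { return slots_[4] != 0; }
     private:
      const unsigned* slots_;
    };

    int main() {
      std::vector<unsigned> backing = {12, 80, 3, 2, 1};  // one preparsed function
      EntryView entry(backing.data());
      assert(entry.end_pos() == 80 && entry.strict_mode());
      return 0;
    }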
 
 
@@ -170,12 +163,14 @@
 
   // Generic preparser generating full preparse data.
   static ScriptDataImpl* PreParse(UC16CharacterStream* source,
-                                  v8::Extension* extension);
+                                  v8::Extension* extension,
+                                  bool harmony_block_scoping);
 
   // Preparser that only does preprocessing that makes sense if only used
   // immediately after.
   static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
-                                         v8::Extension* extension);
+                                         v8::Extension* extension,
+                                         bool harmony_block_scoping);
 };
 
 // ----------------------------------------------------------------------------
@@ -441,8 +436,9 @@
   void ReportMessageAt(Scanner::Location loc,
                        const char* message,
                        Vector<Handle<String> > args);
+  void SetHarmonyBlockScoping(bool block_scoping);
 
- protected:
+ private:
   // Limit on number of function parameters is chosen arbitrarily.
   // Code::Flags uses only the low 17 bits of num-parameters to
   // construct a hashable id, so if more than 2^17 are allowed, this
@@ -457,6 +453,12 @@
     PARSE_EAGERLY
   };
 
+  enum VariableDeclarationContext {
+    kSourceElement,
+    kStatement,
+    kForStatement
+  };
+
   Isolate* isolate() { return isolate_; }
   Zone* zone() { return isolate_->zone(); }
 
@@ -472,7 +474,7 @@
   void ReportMessage(const char* message, Vector<const char*> args);
 
   bool inside_with() const { return with_nesting_level_ > 0; }
-  V8JavaScriptScanner& scanner()  { return scanner_; }
+  JavaScriptScanner& scanner()  { return scanner_; }
   Mode mode() const { return mode_; }
   ScriptDataImpl* pre_data() const { return pre_data_; }
 
@@ -485,22 +487,23 @@
   // for failure at the call sites.
   void* ParseSourceElements(ZoneList<Statement*>* processor,
                             int end_token, bool* ok);
+  Statement* ParseSourceElement(ZoneStringList* labels, bool* ok);
   Statement* ParseStatement(ZoneStringList* labels, bool* ok);
   Statement* ParseFunctionDeclaration(bool* ok);
   Statement* ParseNativeDeclaration(bool* ok);
   Block* ParseBlock(ZoneStringList* labels, bool* ok);
-  Block* ParseVariableStatement(bool* ok);
-  Block* ParseVariableDeclarations(bool accept_IN, Expression** var, bool* ok);
+  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
+  Block* ParseVariableStatement(VariableDeclarationContext var_context,
+                                bool* ok);
+  Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                   Handle<String>* out,
+                                   bool* ok);
   Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
                                                 bool* ok);
   IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
   Statement* ParseContinueStatement(bool* ok);
   Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
   Statement* ParseReturnStatement(bool* ok);
-  Block* WithHelper(Expression* obj,
-                    ZoneStringList* labels,
-                    bool is_catch_block,
-                    bool* ok);
   Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
   CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
   SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
@@ -559,17 +562,11 @@
   // in the object literal boilerplate.
   Handle<Object> GetBoilerplateValue(Expression* expression);
 
-  enum FunctionLiteralType {
-    EXPRESSION,
-    DECLARATION,
-    NESTED
-  };
-
   ZoneList<Expression*>* ParseArguments(bool* ok);
   FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
                                         bool name_is_reserved,
                                         int function_token_position,
-                                        FunctionLiteralType type,
+                                        FunctionLiteral::Type type,
                                         bool* ok);
 
 
@@ -632,11 +629,12 @@
   Literal* GetLiteralNumber(double value);
 
   Handle<String> ParseIdentifier(bool* ok);
-  Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
+  Handle<String> ParseIdentifierOrStrictReservedWord(
+      bool* is_strict_reserved, bool* ok);
   Handle<String> ParseIdentifierName(bool* ok);
-  Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
-                                           bool* is_set,
-                                           bool* ok);
+  Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
+                                               bool* is_set,
+                                               bool* ok);
 
   // Strict mode validation of LValue expressions
   void CheckStrictModeLValue(Expression* expression,
@@ -646,6 +644,17 @@
   // Strict mode octal literal validation.
   void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
 
+  // For harmony block scoping mode: Check if the scope has conflicting var/let
+  // declarations from different scopes. It covers for example
+  //
+  // function f() { { { var x; } let x; } }
+  // function g() { { var x; let x; } }
+  //
+  // The var declarations are hoisted to the function scope, but originate from
+  // a scope where the name has also been let bound or the var declaration is
+  // hoisted over such a scope.
+  void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
+
   // Parser support
   VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
                          FunctionLiteral* fun,
@@ -674,9 +683,12 @@
   Expression* NewCall(Expression* expression,
                       ZoneList<Expression*>* arguments,
                       int pos) {
-    return new Call(expression, arguments, pos);
+    return new(zone()) Call(isolate(), expression, arguments, pos);
   }
 
+  inline Literal* NewLiteral(Handle<Object> handle) {
+    return new(zone()) Literal(isolate(), handle);
+  }
 
   // Create a number literal.
   Literal* NewNumberLiteral(double value);
@@ -704,7 +716,7 @@
   ZoneList<Handle<String> > symbol_cache_;
 
   Handle<Script> script_;
-  V8JavaScriptScanner scanner_;
+  JavaScriptScanner scanner_;
 
   Scope* top_scope_;
   int with_nesting_level_;
@@ -724,6 +736,7 @@
   // Heuristically that means that the function will be called immediately,
   // so never lazily compile it.
   bool parenthesized_function_;
+  bool harmony_block_scoping_;
 
   friend class LexicalScope;
 };
@@ -759,66 +772,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
 };
 
-
-// ----------------------------------------------------------------------------
-// JSON PARSING
-
-// JSON is a subset of JavaScript, as specified in, e.g., the ECMAScript 5
-// specification section 15.12.1 (and appendix A.8).
-// The grammar is given section 15.12.1.2 (and appendix A.8.2).
-class JsonParser BASE_EMBEDDED {
- public:
-  // Parse JSON input as a single JSON value.
-  // Returns null handle and sets exception if parsing failed.
-  static Handle<Object> Parse(Handle<String> source) {
-    if (source->IsExternalTwoByteString()) {
-      ExternalTwoByteStringUC16CharacterStream stream(
-          Handle<ExternalTwoByteString>::cast(source), 0, source->length());
-      return JsonParser().ParseJson(source, &stream);
-    } else {
-      GenericStringUC16CharacterStream stream(source, 0, source->length());
-      return JsonParser().ParseJson(source, &stream);
-    }
-  }
-
- private:
-  JsonParser()
-      : isolate_(Isolate::Current()),
-        scanner_(isolate_->unicode_cache()) { }
-  ~JsonParser() { }
-
-  Isolate* isolate() { return isolate_; }
-
-  // Parse a string containing a single JSON value.
-  Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
-  // Parse a single JSON value from input (grammar production JSONValue).
-  // A JSON value is either a (double-quoted) string literal, a number literal,
-  // one of "true", "false", or "null", or an object or array literal.
-  Handle<Object> ParseJsonValue();
-  // Parse a JSON object literal (grammar production JSONObject).
-  // An object literal is a squiggly-braced and comma separated sequence
-  // (possibly empty) of key/value pairs, where the key is a JSON string
-  // literal, the value is a JSON value, and the two are separated by a colon.
-  // A JSON array dosn't allow numbers and identifiers as keys, like a
-  // JavaScript array.
-  Handle<Object> ParseJsonObject();
-  // Parses a JSON array literal (grammar production JSONArray). An array
-  // literal is a square-bracketed and comma separated sequence (possibly empty)
-  // of JSON values.
-  // A JSON array doesn't allow leaving out values from the sequence, nor does
-  // it allow a terminal comma, like a JavaScript array does.
-  Handle<Object> ParseJsonArray();
-
-  // Mark that a parsing error has happened at the current token, and
-  // return a null handle. Primarily for readability.
-  Handle<Object> ReportUnexpectedToken() { return Handle<Object>::null(); }
-  // Converts the currently parsed literal to a JavaScript String.
-  Handle<String> GetString();
-
-  Isolate* isolate_;
-  JsonScanner scanner_;
-  bool stack_overflow_;
-};
 } }  // namespace v8::internal
 
 #endif  // V8_PARSER_H_
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 6511328..a72f5da 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -166,22 +166,17 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
-  // TODO(1240712): mprotect has a return value which is ignored here.
-  mprotect(address, size, PROT_READ);
+void OS::ProtectCode(void* address, const size_t size) {
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
 }
 
 
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  // TODO(1240712): mprotect has a return value which is ignored here.
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  mprotect(address, size, prot);
+void OS::Guard(void* address, const size_t size) {
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
 }
 
-#endif
-
 
 void OS::Sleep(int milliseconds) {
   unsigned int ms = static_cast<unsigned int>(milliseconds);
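On Cygwin the old ENABLE_HEAP_PROTECTION hooks are replaced by Win32 calls: VirtualProtect with PAGE_EXECUTE_READ for OS::ProtectCode and PAGE_READONLY | PAGE_GUARD for OS::Guard. A minimal Windows-only sketch of flipping a freshly committed page into a read-only guard page (plain Win32, unrelated to V8's OS wrappers):

    #include <windows.h>
    #include <cstdio>

    int main() {
      SYSTEM_INFO info;
      GetSystemInfo(&info);
      // Commit one read/write page, then turn it into a read-only guard page.
      void* page = VirtualAlloc(NULL, info.dwPageSize, MEM_RESERVE | MEM_COMMIT,
                                PAGE_READWRITE);
      if (page == NULL) return 1;
      DWORD old_protect;
      BOOL ok = VirtualProtect(page, info.dwPageSize,
                               PAGE_READONLY | PAGE_GUARD, &old_protect);
      std::printf("guard page set: %d (old protection 0x%lx)\n", ok, old_protect);
      VirtualFree(page, 0, MEM_RELEASE);
      return ok ? 0 : 1;
    }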
@@ -249,7 +244,6 @@
 
 
 void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // This function assumes that the layout of the file is as follows:
   // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
   // If we encounter an unexpected situation we abort scanning further entries.
@@ -306,7 +300,6 @@
   }
   free(lib_name);
   fclose(fp);
-#endif
 }
 
 
@@ -371,17 +364,15 @@
 
 
 
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
     : data_(new PlatformData),
-      isolate_(isolate),
       stack_size_(0) {
   set_name(name);
 }
@@ -399,7 +390,6 @@
   // one) so we initialize it here too.
   thread->data()->thread_ = pthread_self();
   ASSERT(thread->data()->thread_ != kNoThread);
-  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
 }
@@ -484,7 +474,6 @@
 
 class CygwinMutex : public Mutex {
  public:
-
   CygwinMutex() {
     pthread_mutexattr_t attrs;
     memset(&attrs, 0, sizeof(attrs));
@@ -594,8 +583,6 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 // ----------------------------------------------------------------------------
 // Cygwin profiler support.
 //
@@ -631,7 +618,7 @@
 class SamplerThread : public Thread {
  public:
   explicit SamplerThread(int interval)
-      : Thread(NULL, "SamplerThread"),
+      : Thread("SamplerThread"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
@@ -649,8 +636,7 @@
     ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
-      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
-      instance_->Join();
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
     }
@@ -773,7 +759,5 @@
   SetActive(false);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
-
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 8b83f2b..685ec3c 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -52,6 +52,7 @@
 #undef MAP_TYPE
 
 #include "v8.h"
+#include "v8threads.h"
 
 #include "platform.h"
 #include "vm-state-inl.h"
@@ -180,20 +181,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
-  UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  UNIMPLEMENTED();
-}
-
-#endif
-
-
 void OS::Sleep(int milliseconds) {
   unsigned int ms = static_cast<unsigned int>(milliseconds);
   usleep(1000 * ms);
@@ -265,15 +252,12 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 static unsigned StringToLong(char* buffer) {
   return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
 }
-#endif
 
 
 void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static const int MAP_LENGTH = 1024;
   int fd = open("/proc/self/maps", O_RDONLY);
   if (fd < 0) return;
@@ -310,7 +294,6 @@
     LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
   }
   close(fd);
-#endif
 }
 
 
@@ -397,42 +380,15 @@
 };
 
 
-ThreadHandle::ThreadHandle(Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
-  return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
     : data_(new PlatformData),
-      isolate_(isolate),
       stack_size_(0) {
   set_name(name);
 }
@@ -448,9 +404,8 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread_->data_->thread_ = pthread_self();
-  ASSERT(thread->IsValid());
-  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
   thread->Run();
   return NULL;
 }
@@ -470,13 +425,13 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(IsValid());
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
 void Thread::Join() {
-  pthread_join(thread_handle_data()->thread_, NULL);
+  pthread_join(data_->thread_, NULL);
 }
 
 
@@ -516,7 +471,6 @@
 
 class FreeBSDMutex : public Mutex {
  public:
-
   FreeBSDMutex() {
     pthread_mutexattr_t attrs;
     int result = pthread_mutexattr_init(&attrs);
@@ -615,8 +569,6 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 static pthread_t GetThreadID() {
   pthread_t thread_id = pthread_self();
   return thread_id;
@@ -684,7 +636,7 @@
   };
 
   explicit SignalSender(int interval)
-      : Thread(NULL, "SignalSender"),
+      : Thread("SignalSender"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
@@ -711,8 +663,7 @@
     ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
-      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
-      instance_->Join();
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
 
@@ -845,6 +796,5 @@
   SetActive(false);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index c60658f..b152dae 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,33 +78,59 @@
 static Mutex* limit_mutex = NULL;
 
 
+static void* GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+    uint64_t rnd1 = V8::RandomPrivate(isolate);
+    uint64_t rnd2 = V8::RandomPrivate(isolate);
+    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+    uint32_t raw_addr = V8::RandomPrivate(isolate);
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+    raw_addr &= 0x3ffff000;
+    raw_addr += 0x20000000;
+#endif
+    return reinterpret_cast<void*>(raw_addr);
+  }
+  return NULL;
+}
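
A minimal standalone sketch of the hinting idea above (not part of the patch; HintedAlloc and the constants are illustrative), assuming plain POSIX mmap and the 32-bit window described in the comment. Because MAP_FIXED is not used, a hint that collides with an existing mapping simply degrades to the kernel's own placement.

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>

static void* HintedAlloc(size_t size) {
  uint32_t raw = static_cast<uint32_t>(random());
  raw &= 0x3ffff000u;                       // page-aligned, < 1GB offset
  raw += 0x20000000u;                       // shift into the 0x20000000 window
  void* hint = reinterpret_cast<void*>(static_cast<uintptr_t>(raw));
  // No MAP_FIXED: if the hint is already taken, the kernel falls back to its
  // own placement instead of clobbering an existing mapping.
  void* result = mmap(hint, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (result == MAP_FAILED) ? NULL : result;
}
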
+
+
 void OS::Setup() {
-  // Seed the random number generator.
-  // Convert the current time to a 64-bit integer first, before converting it
-  // to an unsigned. Going directly can cause an overflow and the seed to be
-  // set to all ones. The seed will be identical for different instances that
-  // call this setup code within the same millisecond.
-  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  // Seed the random number generator. We preserve microsecond resolution.
+  uint64_t seed = Ticks() ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
+
+#ifdef __arm__
+  // When running on ARM hardware check that the EABI used by V8 and
+  // by the C code is the same.
+  bool hard_float = OS::ArmUsingHardFloat();
+  if (hard_float) {
+#if !USE_EABI_HARDFLOAT
+    PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
+           "-DUSE_EABI_HARDFLOAT\n");
+    exit(1);
+#endif
+  } else {
+#if USE_EABI_HARDFLOAT
+    PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
+           "-DUSE_EABI_HARDFLOAT\n");
+    exit(1);
+#endif
+  }
+#endif
 }
 
 
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
-  // Here gcc is telling us that we are on an ARM and gcc is assuming
-  // that we have VFP3 instructions.  If gcc can assume it then so can
-  // we. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
-  return 1u << VFP3 | 1u << ARMv7;
-#elif CAN_USE_ARMV7_INSTRUCTIONS
-  return 1u << ARMv7;
-#elif(defined(__mips_hard_float) && __mips_hard_float != 0)
-    // Here gcc is telling us that we are on an MIPS and gcc is assuming that we
-    // have FPU instructions.  If gcc can assume it then so can we.
-    return 1u << FPU;
-#else
   return 0;  // Linux runs on anything.
-#endif
 }
 
 
@@ -142,6 +168,7 @@
   return false;
 }
 
+
 bool OS::ArmCpuHasFeature(CpuFeature feature) {
   const char* search_string = NULL;
   // Simple detection of VFP at runtime for Linux.
@@ -177,6 +204,50 @@
 
   return false;
 }
+
+
+// Simple helper function to detect whether the C code is compiled with
+// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
+// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
+// calling this will return 1.0 and otherwise 0.0.
+static void ArmUsingHardFloatHelper() {
+  asm("mov r0, #0");
+#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+  // Load 0x3ff00000 into r1 using instructions available in both ARM
+  // and Thumb mode.
+  asm("mov r1, #3");
+  asm("mov r2, #255");
+  asm("lsl r1, r1, #8");
+  asm("orr r1, r1, r2");
+  asm("lsl r1, r1, #20");
+  // For vmov d0, r0, r1 use ARM mode.
+#ifdef __thumb__
+  asm volatile(
+    "@   Enter ARM Mode  \n\t"
+    "    adr r3, 1f      \n\t"
+    "    bx  r3          \n\t"
+    "    .ALIGN 4        \n\t"
+    "    .ARM            \n"
+    "1:  vmov d0, r0, r1 \n\t"
+    "@   Enter THUMB Mode\n\t"
+    "    adr r3, 2f+1    \n\t"
+    "    bx  r3          \n\t"
+    "    .THUMB          \n"
+    "2:                  \n\t");
+#else
+  asm("vmov d0, r0, r1");
+#endif  // __thumb__
+#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
+  asm("mov r1, #0");
+}
+
+
+bool OS::ArmUsingHardFloat() {
+  // Cast helper function from returning void to returning double.
+  typedef double (*F)();
+  F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
+  return f() == 1.0;
+}
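
The trick works because the hard-float ABI returns a double in d0 while softfp returns it in r0/r1; the helper writes 1.0 into d0 and 0.0 into r0/r1, so whichever register set the caller reads back reveals its own ABI. A hypothetical standalone variant (ARM only; the names here are illustrative, not V8 API):

// Reinterpret a void helper as if it returned a double; under
// -mfloat-abi=hard the caller reads d0 (1.0), under softfp r0/r1 (0.0).
typedef double (*AbiProbe)();

static bool CallerUsesHardFloatAbi(void (*helper)()) {
  AbiProbe probe = reinterpret_cast<AbiProbe>(helper);
  return probe() == 1.0;
}
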
 #endif  // def __arm__
 
 
@@ -310,10 +381,10 @@
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
-  // TODO(805): Port randomization of allocated executable memory to Linux.
   const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* addr = GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (mbase == MAP_FAILED) {
     LOG(i::Isolate::Current(),
         StringEvent("OS::Allocate", "mmap failed"));
@@ -333,23 +404,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
-  // TODO(1240712): mprotect has a return value which is ignored here.
-  mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  // TODO(1240712): mprotect has a return value which is ignored here.
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  mprotect(address, size, prot);
-}
-
-#endif
-
-
 void OS::Sleep(int milliseconds) {
   unsigned int ms = static_cast<unsigned int>(milliseconds);
   usleep(1000 * ms);
@@ -426,7 +480,6 @@
 
 
 void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // This function assumes that the layout of the file is as follows:
   // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
   // If we encounter an unexpected situation we abort scanning further entries.
@@ -483,7 +536,6 @@
   }
   free(lib_name);
   fclose(fp);
-#endif
 }
 
 
@@ -491,7 +543,6 @@
 
 
 void OS::SignalCodeMovingGC() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // Support for ll_prof.py.
   //
   // The Linux profiler built into the kernel logs all mmap's with
@@ -507,7 +558,6 @@
   ASSERT(addr != MAP_FAILED);
   munmap(addr, size);
   fclose(f);
-#endif
 }
 
 
@@ -550,7 +600,7 @@
 
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
+  address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                   kMmapFd, kMmapFdOffset);
   size_ = size;
@@ -596,17 +646,15 @@
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
     : data_(new PlatformData()),
-      isolate_(isolate),
       stack_size_(0) {
   set_name(name);
 }
@@ -622,12 +670,13 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
+#ifdef PR_SET_NAME
   prctl(PR_SET_NAME,
         reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
         0, 0, 0);
+#endif
   thread->data()->thread_ = pthread_self();
   ASSERT(thread->data()->thread_ != kNoThread);
-  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
 }
@@ -693,7 +742,6 @@
 
 class LinuxMutex : public Mutex {
  public:
-
   LinuxMutex() {
     pthread_mutexattr_t attrs;
     int result = pthread_mutexattr_init(&attrs);
@@ -702,6 +750,7 @@
     ASSERT(result == 0);
     result = pthread_mutex_init(&mutex_, &attrs);
     ASSERT(result == 0);
+    USE(result);
   }
 
   virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
@@ -806,8 +855,6 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
 // Android runs a fairly new Linux kernel, so signal info is there,
 // but the C library doesn't have the structs defined.
@@ -918,7 +965,7 @@
   };
 
   explicit SignalSender(int interval)
-      : Thread(NULL, "SignalSender"),
+      : Thread("SignalSender"),
         vm_tgid_(getpid()),
         interval_(interval) {}
 
@@ -955,8 +1002,7 @@
     ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
-      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
-      instance_->Join();
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
       RestoreSignalHandler();
@@ -971,10 +1017,11 @@
       bool cpu_profiling_enabled =
           (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
       bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
-      if (cpu_profiling_enabled && !signal_handler_installed_)
+      if (cpu_profiling_enabled && !signal_handler_installed_) {
         InstallSignalHandler();
-      else if (!cpu_profiling_enabled && signal_handler_installed_)
+      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
         RestoreSignalHandler();
+      }
       // When CPU profiling is enabled both JavaScript and C++ code is
       // profiled. We must not suspend.
       if (!cpu_profiling_enabled) {
@@ -1095,6 +1142,5 @@
   SetActive(false);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 3e10b6a..6be941a 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -169,20 +169,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
-  UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  UNIMPLEMENTED();
-}
-
-#endif
-
-
 void OS::Sleep(int milliseconds) {
   usleep(1000 * milliseconds);
 }
@@ -248,7 +234,6 @@
 
 
 void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   unsigned int images_count = _dyld_image_count();
   for (unsigned int i = 0; i < images_count; ++i) {
     const mach_header* header = _dyld_get_image_header(i);
@@ -270,7 +255,6 @@
     LOG(Isolate::Current(),
         SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
   }
-#endif  // ENABLE_LOGGING_AND_PROFILING
 }
 
 
@@ -398,17 +382,15 @@
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
     : data_(new PlatformData),
-      isolate_(isolate),
       stack_size_(0) {
   set_name(name);
 }
@@ -444,7 +426,6 @@
   thread->data()->thread_ = pthread_self();
   SetThreadName(thread->name());
   ASSERT(thread->data()->thread_ != kNoThread);
-  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
 }
@@ -577,7 +558,6 @@
 
 class MacOSMutex : public Mutex {
  public:
-
   MacOSMutex() {
     pthread_mutexattr_t attr;
     pthread_mutexattr_init(&attr);
@@ -647,8 +627,6 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 class Sampler::PlatformData : public Malloced {
  public:
   PlatformData() : profiled_thread_(mach_thread_self()) {}
@@ -670,7 +648,7 @@
 class SamplerThread : public Thread {
  public:
   explicit SamplerThread(int interval)
-      : Thread(NULL, "SamplerThread"),
+      : Thread("SamplerThread"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
@@ -688,8 +666,7 @@
     ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
-      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
-      instance_->Join();
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
     }
@@ -825,6 +802,5 @@
   SetActive(false);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index aacad14..8c2a863 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -186,6 +186,11 @@
 }
 
 
+bool OS::ArmUsingHardFloat() {
+  UNIMPLEMENTED();
+}
+
+
 bool OS::IsOutsideAllocatedSpace(void* address) {
   UNIMPLEMENTED();
   return false;
@@ -212,20 +217,11 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
+void OS::Guard(void* address, const size_t size) {
   UNIMPLEMENTED();
 }
 
 
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  UNIMPLEMENTED();
-}
-
-#endif
-
-
 void OS::Sleep(int milliseconds) {
   UNIMPLEMENTED();
 }
@@ -309,18 +305,16 @@
 };
 
 
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
   UNIMPLEMENTED();
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
     : data_(new PlatformData()),
-      isolate_(isolate),
       stack_size_(0) {
   set_name(name);
   UNIMPLEMENTED();
@@ -434,7 +428,6 @@
   return new NullSemaphore(count);
 }
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 
 class ProfileSampler::PlatformData  : public Malloced {
  public:
@@ -469,6 +462,5 @@
   UNIMPLEMENTED();
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index e90b3e8..973329b 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,6 +50,7 @@
 #undef MAP_TYPE
 
 #include "v8.h"
+#include "v8threads.h"
 
 #include "platform.h"
 #include "vm-state-inl.h"
@@ -73,6 +74,9 @@
 }
 
 
+static Mutex* limit_mutex = NULL;
+
+
 void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
@@ -81,6 +85,7 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
+  limit_mutex = CreateMutex();
 }
 
 
@@ -129,6 +134,9 @@
 
 
 static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  ASSERT(limit_mutex != NULL);
+  ScopedLock lock(limit_mutex);
+
   lowest_ever_allocated = Min(lowest_ever_allocated, address);
   highest_ever_allocated =
       Max(highest_ever_allocated,
@@ -164,26 +172,13 @@
 
 
 void OS::Free(void* buf, const size_t length) {
+  // TODO(1240712): munmap has a return value which is ignored here.
   int result = munmap(buf, length);
   USE(result);
   ASSERT(result == 0);
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
-  UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  UNIMPLEMENTED();
-}
-
-#endif
-
-
 void OS::Sleep(int milliseconds) {
   unsigned int ms = static_cast<unsigned int>(milliseconds);
   usleep(1000 * ms);
@@ -255,15 +250,12 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 static unsigned StringToLong(char* buffer) {
   return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
 }
-#endif
 
 
 void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static const int MAP_LENGTH = 1024;
   int fd = open("/proc/self/maps", O_RDONLY);
   if (fd < 0) return;
@@ -297,10 +289,9 @@
     // There may be no filename in this line.  Skip to next.
     if (start_of_path == NULL) continue;
     buffer[bytes_read] = 0;
-    LOG(SharedLibraryEvent(start_of_path, start, end));
+    LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
   }
   close(fd);
-#endif
 }
 
 
@@ -309,8 +300,30 @@
 
 
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  UNIMPLEMENTED();
-  return 1;
+  int frames_size = frames.length();
+  ScopedVector<void*> addresses(frames_size);
+
+  int frames_count = backtrace(addresses.start(), frames_size);
+
+  char** symbols = backtrace_symbols(addresses.start(), frames_count);
+  if (symbols == NULL) {
+    return kStackWalkError;
+  }
+
+  for (int i = 0; i < frames_count; i++) {
+    frames[i].address = addresses[i];
+    // Format a text representation of the frame based on the information
+    // available.
+    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+             "%s",
+             symbols[i]);
+    // Make sure line termination is in place.
+    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+  }
+
+  free(symbols);
+
+  return frames_count;
 }
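
For reference, a self-contained sketch of the same backtrace()/backtrace_symbols() pattern, assuming <execinfo.h> is available (as on OpenBSD via libexecinfo); DumpBacktrace is an illustrative name, not part of the patch.

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void DumpBacktrace() {
  void* addresses[64];
  int count = backtrace(addresses, 64);
  // backtrace_symbols() returns a single malloc'ed block; free it once.
  char** symbols = backtrace_symbols(addresses, count);
  if (symbols == NULL) return;
  for (int i = 0; i < count; i++) {
    printf("%2d: %s\n", i, symbols[i]);
  }
  free(symbols);
}
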
 
 
@@ -354,30 +367,26 @@
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
               kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
 
 class Thread::PlatformData : public Malloced {
  public:
-  PlatformData() : thread_(kNoThread) {}
-
   pthread_t thread_;  // Thread handle for pthread.
 };
 
 
-Thread::Thread(Isolate* isolate, const Options& options)
-    : data_(new PlatformData()),
-      isolate_(isolate),
+Thread::Thread(const Options& options)
+    : data_(new PlatformData),
       stack_size_(options.stack_size) {
   set_name(options.name);
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
-    : data_(new PlatfromData()),
-      isolate_(isolate),
+Thread::Thread(const char* name)
+    : data_(new PlatformData),
       stack_size_(0) {
   set_name(name);
 }
@@ -395,7 +404,6 @@
   // one) so we initialize it here too.
   thread->data()->thread_ = pthread_self();
   ASSERT(thread->data()->thread_ != kNoThread);
-  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
 }
@@ -416,7 +424,7 @@
     attr_ptr = &attr;
   }
   pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(IsValid());
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
@@ -461,7 +469,6 @@
 
 class OpenBSDMutex : public Mutex {
  public:
-
   OpenBSDMutex() {
     pthread_mutexattr_t attrs;
     int result = pthread_mutexattr_init(&attrs);
@@ -484,6 +491,16 @@
     return result;
   }
 
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
+
  private:
   pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
 };
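
A hypothetical caller of the new TryLock() (the function name below is illustrative): grab the lock opportunistically and skip the work instead of blocking when it is contended.

static void TryLockExample(Mutex* mutex) {
  if (mutex->TryLock()) {
    // ... touch the shared state ...
    mutex->Unlock();
  } else {
    // Lock was busy; fall through and retry later rather than block.
  }
}
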
@@ -536,11 +553,16 @@
 
   struct timespec ts;
   TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+
+  int to = ts.tv_sec;
+
   while (true) {
     int result = sem_trywait(&sem_);
     if (result == 0) return true;  // Successfully got semaphore.
-    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
+    if (!to) return false;  // Timeout.
     CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+    usleep(ts.tv_nsec / 1000);
+    to--;
   }
 }
 
@@ -550,35 +572,202 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-  USE(info);
-  if (signal != SIGPROF) return;
-  if (active_sampler_ == NULL) return;
-
-  TickSample sample;
-
-  // We always sample the VM state.
-  sample.state = VMState::current_state();
-
-  active_sampler_->Tick(&sample);
+static pthread_t GetThreadID() {
+  pthread_t thread_id = pthread_self();
+  return thread_id;
 }
 
 
 class Sampler::PlatformData : public Malloced {
  public:
-  PlatformData() {
-    signal_handler_installed_ = false;
+  PlatformData() : vm_tid_(GetThreadID()) {}
+
+  pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+  pthread_t vm_tid_;
+};
+
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+    // We require a fully initialized and entered isolate.
+    return;
+  }
+  if (v8::Locker::IsActive() &&
+      !isolate->thread_manager()->IsLockedByCurrentThread()) {
+    return;
   }
 
-  bool signal_handler_installed_;
-  struct sigaction old_signal_handler_;
-  struct itimerval old_timer_value_;
+  Sampler* sampler = isolate->logger()->sampler();
+  if (sampler == NULL || !sampler->IsActive()) return;
+
+  TickSample sample_obj;
+  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+  if (sample == NULL) sample = &sample_obj;
+
+  // Extracting the sample from the context is extremely machine dependent.
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+  sample->state = isolate->current_vm_state();
+#if V8_HOST_ARCH_IA32
+  sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
+  sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
+  sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
+#elif V8_HOST_ARCH_X64
+  sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
+  sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
+  sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
+#elif V8_HOST_ARCH_ARM
+  sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
+  sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
+  sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
+#endif
+  sampler->SampleStack(sample);
+  sampler->Tick(sample);
+}
+
+
+class SignalSender : public Thread {
+ public:
+  enum SleepInterval {
+    HALF_INTERVAL,
+    FULL_INTERVAL
+  };
+
+  explicit SignalSender(int interval)
+      : Thread("SignalSender"),
+        interval_(interval) {}
+
+  static void AddActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::AddActiveSampler(sampler);
+    if (instance_ == NULL) {
+      // Install a signal handler.
+      struct sigaction sa;
+      sa.sa_sigaction = ProfilerSignalHandler;
+      sigemptyset(&sa.sa_mask);
+      sa.sa_flags = SA_RESTART | SA_SIGINFO;
+      signal_handler_installed_ =
+          (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+      // Start a thread that sends SIGPROF signals to VM threads.
+      instance_ = new SignalSender(sampler->interval());
+      instance_->Start();
+    } else {
+      ASSERT(instance_->interval_ == sampler->interval());
+    }
+  }
+
+  static void RemoveActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::RemoveActiveSampler(sampler);
+    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
+      delete instance_;
+      instance_ = NULL;
+
+      // Restore the old signal handler.
+      if (signal_handler_installed_) {
+        sigaction(SIGPROF, &old_signal_handler_, 0);
+        signal_handler_installed_ = false;
+      }
+    }
+  }
+
+  // Implement Thread::Run().
+  virtual void Run() {
+    SamplerRegistry::State state;
+    while ((state = SamplerRegistry::GetState()) !=
+           SamplerRegistry::HAS_NO_SAMPLERS) {
+      bool cpu_profiling_enabled =
+          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+      // When CPU profiling is enabled both JavaScript and C++ code is
+      // profiled. We must not suspend.
+      if (!cpu_profiling_enabled) {
+        if (rate_limiter_.SuspendIfNecessary()) continue;
+      }
+      if (cpu_profiling_enabled && runtime_profiler_enabled) {
+        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+          return;
+        }
+        Sleep(HALF_INTERVAL);
+        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+          return;
+        }
+        Sleep(HALF_INTERVAL);
+      } else {
+        if (cpu_profiling_enabled) {
+          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+                                                      this)) {
+            return;
+          }
+        }
+        if (runtime_profiler_enabled) {
+          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+                                                      NULL)) {
+            return;
+          }
+        }
+        Sleep(FULL_INTERVAL);
+      }
+    }
+  }
+
+  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+    if (!sampler->IsProfiling()) return;
+    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+  }
+
+  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+    if (!sampler->isolate()->IsInitialized()) return;
+    sampler->isolate()->runtime_profiler()->NotifyTick();
+  }
+
+  void SendProfilingSignal(pthread_t tid) {
+    if (!signal_handler_installed_) return;
+    pthread_kill(tid, SIGPROF);
+  }
+
+  void Sleep(SleepInterval full_or_half) {
+    // Convert ms to us and subtract 100 us to compensate for delays
+    // occurring during signal delivery.
+    useconds_t interval = interval_ * 1000 - 100;
+    if (full_or_half == HALF_INTERVAL) interval /= 2;
+    int result = usleep(interval);
+#ifdef DEBUG
+    if (result != 0 && errno != EINTR) {
+      fprintf(stderr,
+              "SignalSender usleep error; interval = %u, errno = %d\n",
+              interval,
+              errno);
+      ASSERT(result == 0 || errno == EINTR);
+    }
+#endif
+    USE(result);
+  }
+
+  const int interval_;
+  RuntimeProfilerRateLimiter rate_limiter_;
+
+  // Protects the process wide state below.
+  static Mutex* mutex_;
+  static SignalSender* instance_;
+  static bool signal_handler_installed_;
+  static struct sigaction old_signal_handler_;
+
+  DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
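
Stripped down to plain pthreads, the sampling machinery above boils down to the pattern sketched here (all names illustrative; no V8 types): install an SA_SIGINFO handler for SIGPROF, then have a sender thread periodically pthread_kill() the sampled thread so the handler runs on that thread and can read its register context.

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void ProfTick(int signal, siginfo_t* info, void* context) {
  // Runs on the sampled thread; a real profiler would record pc/sp/fp here.
  (void) signal; (void) info; (void) context;
}

static void InstallProfHandler() {
  struct sigaction sa;
  sa.sa_sigaction = &ProfTick;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  sigaction(SIGPROF, &sa, NULL);
}

static void SenderLoop(pthread_t sampled_thread, useconds_t interval_us) {
  for (;;) {
    pthread_kill(sampled_thread, SIGPROF);  // deliver the tick to that thread
    usleep(interval_us);
  }
}
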
+
 
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
@@ -586,55 +775,28 @@
       profiling_(false),
       active_(false),
       samples_taken_(0) {
-  data_ = new PlatformData();
+  data_ = new PlatformData;
 }
 
 
 Sampler::~Sampler() {
+  ASSERT(!IsActive());
   delete data_;
 }
 
 
 void Sampler::Start() {
-  // There can only be one active sampler at the time on POSIX
-  // platforms.
-  if (active_sampler_ != NULL) return;
-
-  // Request profiling signals.
-  struct sigaction sa;
-  sa.sa_sigaction = ProfilerSignalHandler;
-  sigemptyset(&sa.sa_mask);
-  sa.sa_flags = SA_SIGINFO;
-  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
-  data_->signal_handler_installed_ = true;
-
-  // Set the itimer to generate a tick for each interval.
-  itimerval itimer;
-  itimer.it_interval.tv_sec = interval_ / 1000;
-  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
-  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
-  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
-  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
-  // Set this sampler as the active sampler.
-  active_sampler_ = this;
-  active_ = true;
+  ASSERT(!IsActive());
+  SetActive(true);
+  SignalSender::AddActiveSampler(this);
 }
 
 
 void Sampler::Stop() {
-  // Restore old signal handler
-  if (data_->signal_handler_installed_) {
-    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
-    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
-    data_->signal_handler_installed_ = false;
-  }
-
-  // This sampler is no longer the active sampler.
-  active_sampler_ = NULL;
-  active_ = false;
+  ASSERT(IsActive());
+  SignalSender::RemoveActiveSampler(this);
+  SetActive(false);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index c4b0fb8..52cf029 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -33,15 +33,19 @@
 #include <errno.h>
 #include <time.h>
 
+#include <sys/mman.h>
 #include <sys/socket.h>
 #include <sys/resource.h>
 #include <sys/time.h>
 #include <sys/types.h>
+#include <sys/stat.h>
 
 #include <arpa/inet.h>
 #include <netinet/in.h>
 #include <netdb.h>
 
+#undef MAP_TYPE
+
 #if defined(ANDROID)
 #define LOG_TAG "v8"
 #include <utils/Log.h>  // LOG_PRI_VA
@@ -54,6 +58,32 @@
 namespace v8 {
 namespace internal {
 
+
+// Maximum size of the virtual memory.  0 means there is no artificial
+// limit.
+
+intptr_t OS::MaxVirtualMemory() {
+  struct rlimit limit;
+  int result = getrlimit(RLIMIT_DATA, &limit);
+  if (result != 0) return 0;
+  return limit.rlim_cur;
+}
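
One caveat worth noting: getrlimit() may report RLIM_INFINITY, which a caller could fold into the "no artificial limit" case. A hedged sketch of such a wrapper (EffectiveDataLimit is illustrative, not part of the patch):

#include <stdint.h>
#include <sys/resource.h>

static intptr_t EffectiveDataLimit() {
  struct rlimit limit;
  if (getrlimit(RLIMIT_DATA, &limit) != 0) return 0;
  if (limit.rlim_cur == RLIM_INFINITY) return 0;  // unlimited -> "no limit"
  return static_cast<intptr_t>(limit.rlim_cur);
}
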
+
+
+#ifndef __CYGWIN__
+// Get rid of writable permission on code allocations.
+void OS::ProtectCode(void* address, const size_t size) {
+  mprotect(address, size, PROT_READ | PROT_EXEC);
+}
+
+
+// Create guard pages.
+void OS::Guard(void* address, const size_t size) {
+  mprotect(address, size, PROT_NONE);
+}
+#endif  // __CYGWIN__
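
As an aside, a self-contained sketch of how such guard pages are typically used (plain POSIX, not V8 code; AllocWithLowGuard is an illustrative name): reserve one extra page below the payload and revoke all access on it, so a stray access below the buffer faults immediately instead of silently corrupting memory.

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void* AllocWithLowGuard(size_t payload_bytes) {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* base = mmap(NULL, page + payload_bytes, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;
  mprotect(base, page, PROT_NONE);                // the guard page
  return static_cast<char*>(base) + page;         // usable memory starts here
}
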
+
+
 // ----------------------------------------------------------------------------
 // Math functions
 
@@ -118,7 +148,14 @@
 //
 
 FILE* OS::FOpen(const char* path, const char* mode) {
-  return fopen(path, mode);
+  FILE* file = fopen(path, mode);
+  if (file == NULL) return NULL;
+  struct stat file_stat;
+  if (fstat(fileno(file), &file_stat) != 0) return NULL;
+  bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
+  if (is_regular_file) return file;
+  fclose(file);
+  return NULL;
 }
 
 
@@ -127,6 +164,11 @@
 }
 
 
+FILE* OS::OpenTemporaryFile() {
+  return tmpfile();
+}
+
+
 const char* const OS::LogFileOpenMode = "w";
 
 
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 970c418..035d394 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -88,6 +88,7 @@
 }
 
 
+static Mutex* limit_mutex = NULL;
 void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
@@ -96,6 +97,7 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
+  limit_mutex = CreateMutex();
 }
 
 
@@ -145,6 +147,9 @@
 
 
 static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  ASSERT(limit_mutex != NULL);
+  ScopedLock lock(limit_mutex);
+
   lowest_ever_allocated = Min(lowest_ever_allocated, address);
   highest_ever_allocated =
       Max(highest_ever_allocated,
@@ -187,23 +192,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
-  // TODO(1240712): mprotect has a return value which is ignored here.
-  mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  // TODO(1240712): mprotect has a return value which is ignored here.
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  mprotect(address, size, prot);
-}
-
-#endif
-
-
 void OS::Sleep(int milliseconds) {
   useconds_t ms = static_cast<useconds_t>(milliseconds);
   usleep(1000 * ms);
@@ -381,17 +369,15 @@
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
     : data_(new PlatformData()),
-      isolate_(isolate),
       stack_size_(0) {
   set_name(name);
 }
@@ -409,7 +395,6 @@
   // one) so we initialize it here too.
   thread->data()->thread_ = pthread_self();
   ASSERT(thread->data()->thread_ != kNoThread);
-  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
 }
@@ -475,7 +460,6 @@
 
 class SolarisMutex : public Mutex {
  public:
-
   SolarisMutex() {
     pthread_mutexattr_t attr;
     pthread_mutexattr_init(&attr);
@@ -587,80 +571,171 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-static pthread_t vm_tid_ = 0;
-
-
 static pthread_t GetThreadID() {
   return pthread_self();
 }
 
-
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   USE(info);
   if (signal != SIGPROF) return;
-  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
-  if (vm_tid_ != GetThreadID()) return;
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+    // We require a fully initialized and entered isolate.
+    return;
+  }
+  if (v8::Locker::IsActive() &&
+      !isolate->thread_manager()->IsLockedByCurrentThread()) {
+    return;
+  }
+
+  Sampler* sampler = isolate->logger()->sampler();
+  if (sampler == NULL || !sampler->IsActive()) return;
 
   TickSample sample_obj;
-  TickSample* sample = CpuProfiler::TickSampleEvent();
+  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
   if (sample == NULL) sample = &sample_obj;
 
   // Extracting the sample from the context is extremely machine dependent.
   ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
   mcontext_t& mcontext = ucontext->uc_mcontext;
-  sample->state = Top::current_vm_state();
+  sample->state = isolate->current_vm_state();
 
   sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
   sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
   sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
 
-  active_sampler_->SampleStack(sample);
-  active_sampler_->Tick(sample);
+  sampler->SampleStack(sample);
+  sampler->Tick(sample);
 }
 
-
 class Sampler::PlatformData : public Malloced {
  public:
+  PlatformData() : vm_tid_(GetThreadID()) {}
+
+  pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+  pthread_t vm_tid_;
+};
+
+
+class SignalSender : public Thread {
+ public:
   enum SleepInterval {
-    FULL_INTERVAL,
-    HALF_INTERVAL
+    HALF_INTERVAL,
+    FULL_INTERVAL
   };
 
-  explicit PlatformData(Sampler* sampler)
-      : sampler_(sampler),
-        signal_handler_installed_(false),
-        vm_tgid_(getpid()),
-        signal_sender_launched_(false) {
+  explicit SignalSender(int interval)
+      : Thread("SignalSender"),
+        interval_(interval) {}
+
+  static void InstallSignalHandler() {
+    struct sigaction sa;
+    sa.sa_sigaction = ProfilerSignalHandler;
+    sigemptyset(&sa.sa_mask);
+    sa.sa_flags = SA_RESTART | SA_SIGINFO;
+    signal_handler_installed_ =
+        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
   }
 
-  void SignalSender() {
-    while (sampler_->IsActive()) {
-      if (rate_limiter_.SuspendIfNecessary()) continue;
-      if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
-        SendProfilingSignal();
+  static void RestoreSignalHandler() {
+    if (signal_handler_installed_) {
+      sigaction(SIGPROF, &old_signal_handler_, 0);
+      signal_handler_installed_ = false;
+    }
+  }
+
+  static void AddActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::AddActiveSampler(sampler);
+    if (instance_ == NULL) {
+      // Start a thread that will send SIGPROF signals to VM threads
+      // when CPU profiling is enabled.
+      instance_ = new SignalSender(sampler->interval());
+      instance_->Start();
+    } else {
+      ASSERT(instance_->interval_ == sampler->interval());
+    }
+  }
+
+  static void RemoveActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::RemoveActiveSampler(sampler);
+    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
+      delete instance_;
+      instance_ = NULL;
+      RestoreSignalHandler();
+    }
+  }
+
+  // Implement Thread::Run().
+  virtual void Run() {
+    SamplerRegistry::State state;
+    while ((state = SamplerRegistry::GetState()) !=
+           SamplerRegistry::HAS_NO_SAMPLERS) {
+      bool cpu_profiling_enabled =
+          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+      if (cpu_profiling_enabled && !signal_handler_installed_) {
+        InstallSignalHandler();
+      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+        RestoreSignalHandler();
+      }
+
+      // When CPU profiling is enabled both JavaScript and C++ code is
+      // profiled. We must not suspend.
+      if (!cpu_profiling_enabled) {
+        if (rate_limiter_.SuspendIfNecessary()) continue;
+      }
+      if (cpu_profiling_enabled && runtime_profiler_enabled) {
+        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+          return;
+        }
         Sleep(HALF_INTERVAL);
-        RuntimeProfiler::NotifyTick();
+        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+          return;
+        }
         Sleep(HALF_INTERVAL);
       } else {
-        if (sampler_->IsProfiling()) SendProfilingSignal();
-        if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+        if (cpu_profiling_enabled) {
+          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+                                                      this)) {
+            return;
+          }
+        }
+        if (runtime_profiler_enabled) {
+          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+                                                      NULL)) {
+            return;
+          }
+        }
         Sleep(FULL_INTERVAL);
       }
     }
   }
 
-  void SendProfilingSignal() {
+  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+    if (!sampler->IsProfiling()) return;
+    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+  }
+
+  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+    if (!sampler->isolate()->IsInitialized()) return;
+    sampler->isolate()->runtime_profiler()->NotifyTick();
+  }
+
+  void SendProfilingSignal(pthread_t tid) {
     if (!signal_handler_installed_) return;
-    pthread_kill(vm_tid_, SIGPROF);
+    pthread_kill(tid, SIGPROF);
   }
 
   void Sleep(SleepInterval full_or_half) {
     // Convert ms to us and subtract 100 us to compensate for delays
     // occurring during signal delivery.
-    useconds_t interval = sampler_->interval_ * 1000 - 100;
+    useconds_t interval = interval_ * 1000 - 100;
     if (full_or_half == HALF_INTERVAL) interval /= 2;
     int result = usleep(interval);
 #ifdef DEBUG
@@ -675,22 +750,22 @@
     USE(result);
   }
 
-  Sampler* sampler_;
-  bool signal_handler_installed_;
-  struct sigaction old_signal_handler_;
-  int vm_tgid_;
-  bool signal_sender_launched_;
-  pthread_t signal_sender_thread_;
+  const int interval_;
   RuntimeProfilerRateLimiter rate_limiter_;
+
+  // Protects the process wide state below.
+  static Mutex* mutex_;
+  static SignalSender* instance_;
+  static bool signal_handler_installed_;
+  static struct sigaction old_signal_handler_;
+
+  DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-
-static void* SenderEntry(void* arg) {
-  Sampler::PlatformData* data =
-      reinterpret_cast<Sampler::PlatformData*>(arg);
-  data->SignalSender();
-  return 0;
-}
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
 
 
 Sampler::Sampler(Isolate* isolate, int interval)
@@ -699,65 +774,27 @@
       profiling_(false),
       active_(false),
       samples_taken_(0) {
-  data_ = new PlatformData(this);
+  data_ = new PlatformData;
 }
 
 
 Sampler::~Sampler() {
-  ASSERT(!data_->signal_sender_launched_);
+  ASSERT(!IsActive());
   delete data_;
 }
 
 
 void Sampler::Start() {
-  // There can only be one active sampler at the time on POSIX
-  // platforms.
   ASSERT(!IsActive());
-  vm_tid_ = GetThreadID();
-
-  // Request profiling signals.
-  struct sigaction sa;
-  sa.sa_sigaction = ProfilerSignalHandler;
-  sigemptyset(&sa.sa_mask);
-  sa.sa_flags = SA_RESTART | SA_SIGINFO;
-  data_->signal_handler_installed_ =
-      sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;
-
-  // Start a thread that sends SIGPROF signal to VM thread.
-  // Sending the signal ourselves instead of relying on itimer provides
-  // much better accuracy.
   SetActive(true);
-  if (pthread_create(
-          &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
-    data_->signal_sender_launched_ = true;
-  }
-
-  // Set this sampler as the active sampler.
-  active_sampler_ = this;
+  SignalSender::AddActiveSampler(this);
 }
 
 
 void Sampler::Stop() {
+  ASSERT(IsActive());
+  SignalSender::RemoveActiveSampler(this);
   SetActive(false);
-
-  // Wait for signal sender termination (it will exit after setting
-  // active_ to false).
-  if (data_->signal_sender_launched_) {
-    Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
-    pthread_join(data_->signal_sender_thread_, NULL);
-    data_->signal_sender_launched_ = false;
-  }
-
-  // Restore old signal handler
-  if (data_->signal_handler_installed_) {
-    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
-    data_->signal_handler_installed_ = false;
-  }
-
-  // This sampler is no longer the active sampler.
-  active_sampler_ = NULL;
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 } }  // namespace v8::internal
diff --git a/src/platform-tls.h b/src/platform-tls.h
index 5649175..3251663 100644
--- a/src/platform-tls.h
+++ b/src/platform-tls.h
@@ -30,7 +30,7 @@
 #ifndef V8_PLATFORM_TLS_H_
 #define V8_PLATFORM_TLS_H_
 
-#ifdef V8_FAST_TLS
+#ifndef V8_NO_FAST_TLS
 
 // When fast TLS is requested we include the appropriate
 // implementation header.
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 8673f04..97788e2 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,76 +35,8 @@
 #include "platform.h"
 #include "vm-state-inl.h"
 
-// Extra POSIX/ANSI routines for Win32 when when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
 #ifdef _MSC_VER
 
-namespace v8 {
-namespace internal {
-
-// Test for finite value - usually defined in math.h
-int isfinite(double x) {
-  return _finite(x);
-}
-
-}  // namespace v8
-}  // namespace internal
-
-// Test for a NaN (not a number) value - usually defined in math.h
-int isnan(double x) {
-  return _isnan(x);
-}
-
-
-// Test for infinity - usually defined in math.h
-int isinf(double x) {
-  return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
-}
-
-
-// Test if x is less than y and both nominal - usually defined in math.h
-int isless(double x, double y) {
-  return isnan(x) || isnan(y) ? 0 : x < y;
-}
-
-
-// Test if x is greater than y and both nominal - usually defined in math.h
-int isgreater(double x, double y) {
-  return isnan(x) || isnan(y) ? 0 : x > y;
-}
-
-
-// Classify floating point number - usually defined in math.h
-int fpclassify(double x) {
-  // Use the MS-specific _fpclass() for classification.
-  int flags = _fpclass(x);
-
-  // Determine class. We cannot use a switch statement because
-  // the _FPCLASS_ constants are defined as flags.
-  if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
-  if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
-  if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
-  if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
-
-  // All cases should be covered by the code above.
-  ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
-  return FP_NAN;
-}
-
-
-// Test sign - usually defined in math.h
-int signbit(double x) {
-  // We need to take care of the special case of both positive
-  // and negative versions of zero.
-  if (x == 0)
-    return _fpclass(x) & _FPCLASS_NZ;
-  else
-    return x < 0;
-}
-
-
 // Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually
 // defined in strings.h.
 int strncasecmp(const char* s1, const char* s2, int n) {
@@ -138,16 +70,39 @@
 }
 
 
+#define _TRUNCATE 0
+#define STRUNCATE 80
+
 int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
                  const char* format, va_list argptr) {
+  ASSERT(count == _TRUNCATE);
   return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
 }
-#define _TRUNCATE 0
 
 
-int strncpy_s(char* strDest, size_t numberOfElements,
-              const char* strSource, size_t count) {
-  strncpy(strDest, strSource, count);
+int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
+  CHECK(source != NULL);
+  CHECK(dest != NULL);
+  CHECK_GT(dest_size, 0);
+
+  if (count == _TRUNCATE) {
+    while (dest_size > 0 && *source != 0) {
+      *(dest++) = *(source++);
+      --dest_size;
+    }
+    if (dest_size == 0) {
+      *(dest - 1) = 0;
+      return STRUNCATE;
+    }
+  } else {
+    while (dest_size > 0 && count > 0 && *source != 0) {
+      *(dest++) = *(source++);
+      --dest_size;
+      --count;
+    }
+  }
+  CHECK_GT(dest_size, 0);
+  *dest = 0;
   return 0;
 }
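
A hedged usage sketch of the replacement above (illustrative only), showing the _TRUNCATE path:

static void StrncpySExample() {
  char buf[8];
  int rc = strncpy_s(buf, sizeof(buf), "hello world", _TRUNCATE);
  // The 11-character source does not fit, so buf ends up holding "hello w"
  // plus the terminating '\0', and rc is STRUNCATE.
  (void) rc;
}
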
 
@@ -169,6 +124,11 @@
 namespace v8 {
 namespace internal {
 
+intptr_t OS::MaxVirtualMemory() {
+  return 0;
+}
+
+
 double ceiling(double x) {
   return ceil(x);
 }
@@ -407,13 +367,11 @@
   }
 
   // Make standard and DST timezone names.
-  OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize),
-               "%S",
-               tzinfo_.StandardName);
+  WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+                      std_tz_name_, kTzNameSize, NULL, NULL);
   std_tz_name_[kTzNameSize - 1] = '\0';
-  OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize),
-               "%S",
-               tzinfo_.DaylightName);
+  WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+                      dst_tz_name_, kTzNameSize, NULL, NULL);
   dst_tz_name_[kTzNameSize - 1] = '\0';
 
   // If OS returned empty string or resource id (like "@tzres.dll,-211")
@@ -714,6 +672,24 @@
 }
 
 
+FILE* OS::OpenTemporaryFile() {
+  // tmpfile_s tries to use the root dir, so don't use it.
+  char tempPathBuffer[MAX_PATH];
+  DWORD path_result = 0;
+  path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
+  if (path_result > MAX_PATH || path_result == 0) return NULL;
+  UINT name_result = 0;
+  char tempNameBuffer[MAX_PATH];
+  name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
+  if (name_result == 0) return NULL;
+  FILE* result = FOpen(tempNameBuffer, "w+");  // Same mode as tmpfile uses.
+  if (result != NULL) {
+    Remove(tempNameBuffer);  // Delete on close.
+  }
+  return result;
+}
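
Hypothetical usage (names and data are illustrative): the returned FILE* is intended to behave like tmpfile() -- write, rewind, read back, and let the file go away once it is closed.

static void TemporaryFileExample() {
  FILE* f = OS::OpenTemporaryFile();
  if (f == NULL) return;
  fputs("scratch data\n", f);
  rewind(f);
  char line[32];
  if (fgets(line, sizeof(line), f) != NULL) {
    // line now holds "scratch data\n".
  }
  fclose(f);
}
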
+
+
 // Open log file in binary mode to avoid \n -> \r\n conversion.
 const char* const OS::LogFileOpenMode = "wb";
 
@@ -913,24 +889,17 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
-  // TODO(1240712): VirtualProtect has a return value which is ignored here.
+void OS::ProtectCode(void* address, const size_t size) {
   DWORD old_protect;
-  VirtualProtect(address, size, PAGE_READONLY, &old_protect);
+  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
 }
 
 
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
-  // TODO(1240712): VirtualProtect has a return value which is ignored here.
-  DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  DWORD old_protect;
-  VirtualProtect(address, size, new_protect, &old_protect);
+void OS::Guard(void* address, const size_t size) {
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
 }
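
For context, a sketch of what PAGE_GUARD gives you on Win32 (assumption-laden, not part of the patch): the first touch of a guarded page raises STATUS_GUARD_PAGE_VIOLATION, the OS clears the guard bit, and a vectored handler can observe that single hit.

#include <windows.h>

static LONG CALLBACK GuardHitHandler(EXCEPTION_POINTERS* info) {
  if (info->ExceptionRecord->ExceptionCode == STATUS_GUARD_PAGE_VIOLATION) {
    // The guard bit is already cleared by the OS at this point; record the
    // faulting address if needed and resume the faulting instruction.
    return EXCEPTION_CONTINUE_EXECUTION;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Registered once at startup, e.g.:
//   AddVectoredExceptionHandler(1, &GuardHitHandler);
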
 
-#endif
-
 
 void OS::Sleep(int milliseconds) {
   ::Sleep(milliseconds);
@@ -1330,7 +1299,7 @@
 
     // Try to locate a symbol for this frame.
     DWORD64 symbol_displacement;
-    SmartPointer<IMAGEHLP_SYMBOL64> symbol(
+    SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
         NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
     if (symbol.is_empty()) return kStackWalkError;  // Out of memory.
     memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
@@ -1475,10 +1444,6 @@
 // convention.
 static unsigned int __stdcall ThreadEntry(void* arg) {
   Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the last parameter to _beginthreadex() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return 0;
 }
@@ -1494,17 +1459,15 @@
 // Initialize a Win32 thread object. The thread has an invalid thread
 // handle until it is started.
 
-Thread::Thread(Isolate* isolate, const Options& options)
-    : isolate_(isolate),
-      stack_size_(options.stack_size) {
+Thread::Thread(const Options& options)
+    : stack_size_(options.stack_size) {
   data_ = new PlatformData(kNoThread);
   set_name(options.name);
 }
 
 
-Thread::Thread(Isolate* isolate, const char* name)
-    : isolate_(isolate),
-      stack_size_(0) {
+Thread::Thread(const char* name)
+    : stack_size_(0) {
   data_ = new PlatformData(kNoThread);
   set_name(name);
 }
@@ -1585,7 +1548,6 @@
 
 class Win32Mutex : public Mutex {
  public:
-
   Win32Mutex() { InitializeCriticalSection(&cs_); }
 
   virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
@@ -1839,8 +1801,6 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 // ----------------------------------------------------------------------------
 // Win32 profiler support.
 
@@ -1874,7 +1834,7 @@
 class SamplerThread : public Thread {
  public:
   explicit SamplerThread(int interval)
-      : Thread(NULL, "SamplerThread"),
+      : Thread("SamplerThread"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
@@ -1892,8 +1852,7 @@
     ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
-      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
-      instance_->Join();
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
     }
@@ -2016,6 +1975,5 @@
   SetActive(false);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
index fc417ef..034fe34 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -44,53 +44,12 @@
 #ifndef V8_PLATFORM_H_
 #define V8_PLATFORM_H_
 
-#define V8_INFINITY INFINITY
-
-// Windows specific stuff.
-#ifdef WIN32
-
-// Microsoft Visual C++ specific stuff.
-#ifdef _MSC_VER
-
-enum {
-  FP_NAN,
-  FP_INFINITE,
-  FP_ZERO,
-  FP_SUBNORMAL,
-  FP_NORMAL
-};
-
-#undef V8_INFINITY
-#define V8_INFINITY HUGE_VAL
-
-namespace v8 {
-namespace internal {
-int isfinite(double x);
-} }
-int isnan(double x);
-int isinf(double x);
-int isless(double x, double y);
-int isgreater(double x, double y);
-int fpclassify(double x);
-int signbit(double x);
-
-int strncasecmp(const char* s1, const char* s2, int n);
-
-#endif  // _MSC_VER
-
-// Random is missing on both Visual Studio and MinGW.
-int random();
-
-#endif  // WIN32
-
-
 #ifdef __sun
 # ifndef signbit
 int signbit(double x);
 # endif
 #endif
 
-
 // GCC specific stuff
 #ifdef __GNUC__
 
@@ -99,20 +58,26 @@
 
 #define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
 
-// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
-// warning flag and certain versions of GCC due to a bug:
-// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
-// For now, we use the more involved template-based version from <limits>, but
-// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
-#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
-#include <limits>
-#undef V8_INFINITY
-#define V8_INFINITY std::numeric_limits<double>::infinity()
-#endif
-
 #endif  // __GNUC__
 
+
+// Windows specific stuff.
+#ifdef WIN32
+
+// Microsoft Visual C++ specific stuff.
+#ifdef _MSC_VER
+
+#include "win32-math.h"
+
+int strncasecmp(const char* s1, const char* s2, int n);
+
+#endif  // _MSC_VER
+
+// Random is missing on both Visual Studio and MinGW.
+int random();
+
+#endif  // WIN32
+
 #include "atomicops.h"
 #include "platform-tls.h"
 #include "utils.h"
@@ -177,6 +142,9 @@
   static FILE* FOpen(const char* path, const char* mode);
   static bool Remove(const char* path);
 
+  // Opens a temporary file; the file is automatically removed on close.
+  static FILE* OpenTemporaryFile();
+
   // Log file open mode is platform-dependent due to line ends issues.
   static const char* const LogFileOpenMode;
 
@@ -203,15 +171,16 @@
                         size_t* allocated,
                         bool is_executable);
   static void Free(void* address, const size_t size);
+
+  // Mark code segments non-writable.
+  static void ProtectCode(void* address, const size_t size);
+
+  // Mark memory as a guard page so that any access will cause an exception.
+  static void Guard(void* address, const size_t size);
+
   // Get the alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect a block of memory by marking it read-only/writable.
-  static void Protect(void* address, size_t size);
-  static void Unprotect(void* address, size_t size, bool is_executable);
-#endif
-
   // Returns an indication of whether a pointer is in a space that
   // has been allocated by Allocate().  This method may conservatively
   // always return false, but giving more accurate information may
@@ -288,12 +257,20 @@
   // positions indicated by the members of the CpuFeature enum from globals.h
   static uint64_t CpuFeaturesImpliedByPlatform();
 
+  // Maximum size of the virtual memory.  0 means there is no artificial
+  // limit.
+  static intptr_t MaxVirtualMemory();
+
   // Returns the double constant NAN
   static double nan_value();
 
   // Support runtime detection of VFP3 on ARM CPUs.
   static bool ArmCpuHasFeature(CpuFeature feature);
 
+  // Support runtime detection of whether the hard-float variant of the
+  // EABI is in use.
+  static bool ArmUsingHardFloat();
+
   // Support runtime detection of FPU on MIPS CPUs.
   static bool MipsCpuHasFeature(CpuFeature feature);
 
@@ -380,9 +357,9 @@
     int stack_size;
   };
 
-  // Create new thread (with a value for storing in the TLS isolate field).
-  Thread(Isolate* isolate, const Options& options);
-  Thread(Isolate* isolate, const char* name);
+  // Create new thread.
+  explicit Thread(const Options& options);
+  explicit Thread(const char* name);
   virtual ~Thread();
 
   // Start new thread by calling the Run() method in the new thread.
@@ -429,7 +406,6 @@
   // A hint to the scheduler to let another thread run.
   static void YieldCPU();
 
-  Isolate* isolate() const { return isolate_; }
 
   // The thread name length is limited to 16 based on Linux's implementation of
   // prctl().
@@ -443,7 +419,6 @@
 
   PlatformData* data_;
 
-  Isolate* isolate_;
   char name_[kMaxThreadNameLength];
   int stack_size_;
 
@@ -597,7 +572,6 @@
   bool has_external_callback : 1;
 };
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 class Sampler {
  public:
   // Initialize sampler.
@@ -656,8 +630,6 @@
 };
 
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 } }  // namespace v8::internal
 
 #endif  // V8_PLATFORM_H_
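
With the isolate parameter gone from the Thread constructors above, a worker thread is
now declared with just a name (or an Options struct). A minimal hypothetical subclass
for illustration; the class and thread name are invented, not taken from this change:

    class CleanupThread : public Thread {
     public:
      CleanupThread() : Thread("v8:Cleanup") {}
      virtual void Run() {
        // Thread body; any isolate the thread needs must now be handed to it
        // explicitly (e.g. as a constructor argument) instead of being read
        // from the removed isolate_ field.
      }
    };
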
diff --git a/src/frame-element.cc b/src/preparse-data-format.h
similarity index 60%
copy from src/frame-element.cc
copy to src/preparse-data-format.h
index f629900..e64326e 100644
--- a/src/frame-element.cc
+++ b/src/preparse-data-format.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
-
-#include "frame-element.h"
-#include "zone-inl.h"
+#ifndef V8_PREPARSE_DATA_FORMAT_H_
+#define V8_PREPARSE_DATA_FORMAT_H_
 
 namespace v8 {
 namespace internal {
 
+// Generic and general data used by preparse data recorders and readers.
 
-} }  // namespace v8::internal
+struct PreparseDataConstants {
+ public:
+  // Layout and constants of the preparse data exchange format.
+  static const unsigned kMagicNumber = 0xBadDead;
+  static const unsigned kCurrentVersion = 7;
+
+  static const int kMagicOffset = 0;
+  static const int kVersionOffset = 1;
+  static const int kHasErrorOffset = 2;
+  static const int kFunctionsSizeOffset = 3;
+  static const int kSymbolCountOffset = 4;
+  static const int kSizeOffset = 5;
+  static const int kHeaderSize = 6;
+
+  // If encoding a message, the following positions are fixed.
+  static const int kMessageStartPos = 0;
+  static const int kMessageEndPos = 1;
+  static const int kMessageArgCountPos = 2;
+  static const int kMessageTextPos = 3;
+
+  static const unsigned char kNumberTerminator = 0x80u;
+};
+
+
+} }  // namespace v8::internal.
+
+#endif  // V8_PREPARSE_DATA_FORMAT_H_
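
To make the layout above concrete, here is a minimal, hypothetical check that a
consumer of this data might perform; the numeric values mirror PreparseDataConstants,
but the function itself is illustrative and not part of this change:

    // Sketch: sanity-check a preparse data buffer whose header is laid out as
    // described by PreparseDataConstants (six unsigned words, then payload).
    static bool LooksLikePreparseData(const unsigned* data, int word_count) {
      if (word_count < 6) return false;        // kHeaderSize
      if (data[0] != 0xBadDead) return false;  // kMagicOffset / kMagicNumber
      if (data[1] != 7) return false;          // kVersionOffset / kCurrentVersion
      // data[2] (kHasErrorOffset) is non-zero when an error message follows;
      // data[3] and data[4] carry the function-data size and symbol count.
      return true;
    }
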
diff --git a/src/preparse-data.cc b/src/preparse-data.cc
index 92a0338..98c343e 100644
--- a/src/preparse-data.cc
+++ b/src/preparse-data.cc
@@ -26,15 +26,13 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list-inl.h"
-#include "hashmap.h"
 
+#include "preparse-data-format.h"
 #include "preparse-data.h"
 
+#include "checks.h"
+#include "globals.h"
+#include "hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -76,7 +74,7 @@
   function_store_.Add((arg_opt == NULL) ? 0 : 1);
   STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
   WriteString(CStrVector(message));
-  if (arg_opt) WriteString(CStrVector(arg_opt));
+  if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
   is_recording_ = false;
 }
 
diff --git a/src/preparse-data.h b/src/preparse-data.h
index bb5707b..c6503c4 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,40 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_PREPARSER_DATA_H_
-#define V8_PREPARSER_DATA_H_
+#ifndef V8_PREPARSE_DATA_H_
+#define V8_PREPARSE_DATA_H_
 
+#include "allocation.h"
 #include "hashmap.h"
+#include "utils-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// Generic and general data used by preparse data recorders and readers.
-
-class PreparseDataConstants : public AllStatic {
- public:
-  // Layout and constants of the preparse data exchange format.
-  static const unsigned kMagicNumber = 0xBadDead;
-  static const unsigned kCurrentVersion = 6;
-
-  static const int kMagicOffset = 0;
-  static const int kVersionOffset = 1;
-  static const int kHasErrorOffset = 2;
-  static const int kFunctionsSizeOffset = 3;
-  static const int kSymbolCountOffset = 4;
-  static const int kSizeOffset = 5;
-  static const int kHeaderSize = 6;
-
-  // If encoding a message, the following positions are fixed.
-  static const int kMessageStartPos = 0;
-  static const int kMessageEndPos = 1;
-  static const int kMessageArgCountPos = 2;
-  static const int kMessageTextPos = 3;
-
-  static const byte kNumberTerminator = 0x80u;
-};
-
-
 // ----------------------------------------------------------------------------
 // ParserRecorder - Logging of preparser data.
 
@@ -72,7 +48,8 @@
   virtual void LogFunction(int start,
                            int end,
                            int literals,
-                           int properties) = 0;
+                           int properties,
+                           int strict_mode) = 0;
 
   // Logs a symbol creation of a literal or identifier.
   virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
@@ -108,11 +85,16 @@
   FunctionLoggingParserRecorder();
   virtual ~FunctionLoggingParserRecorder() {}
 
-  virtual void LogFunction(int start, int end, int literals, int properties) {
+  virtual void LogFunction(int start,
+                           int end,
+                           int literals,
+                           int properties,
+                           int strict_mode) {
     function_store_.Add(start);
     function_store_.Add(end);
     function_store_.Add(literals);
     function_store_.Add(properties);
+    function_store_.Add(strict_mode);
   }
 
   // Logs an error message and marks the log as containing an error.
@@ -246,4 +228,4 @@
 
 } }  // namespace v8::internal.
 
-#endif  // V8_PREPARSER_DATA_H_
+#endif  // V8_PREPARSE_DATA_H_
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 9646eb6..899489e 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -25,6 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifdef _MSC_VER
+#define V8_WIN32_LEAN_AND_MEAN
+#include "win32-headers.h"
+#endif
+
 #include "../include/v8-preparser.h"
 
 #include "globals.h"
@@ -32,7 +37,8 @@
 #include "allocation.h"
 #include "utils.h"
 #include "list.h"
-#include "scanner-base.h"
+#include "hashmap.h"
+#include "preparse-data-format.h"
 #include "preparse-data.h"
 #include "preparser.h"
 
@@ -157,23 +163,6 @@
 };
 
 
-class StandAloneJavaScriptScanner : public JavaScriptScanner {
- public:
-  explicit StandAloneJavaScriptScanner(UnicodeCache* unicode_cache)
-      : JavaScriptScanner(unicode_cache) { }
-
-  void Initialize(UC16CharacterStream* source) {
-    source_ = source;
-    Init();
-    // Skip initial whitespace allowing HTML comment ends just like
-    // after a newline and scan first token.
-    has_line_terminator_before_next_ = true;
-    SkipWhiteSpace();
-    Scan();
-  }
-};
-
-
 // Functions declared by allocation.h and implemented in both api.cc (for v8)
 // or here (for a stand-alone preparser).
 
@@ -193,7 +182,7 @@
   internal::InputStreamUTF16Buffer buffer(input);
   uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
   internal::UnicodeCache unicode_cache;
-  internal::StandAloneJavaScriptScanner scanner(&unicode_cache);
+  internal::JavaScriptScanner scanner(&unicode_cache);
   scanner.Initialize(&buffer);
   internal::CompleteParserRecorder recorder;
   preparser::PreParser::PreParseResult result =
diff --git a/src/preparser.cc b/src/preparser.cc
index fec1567..6021ebd 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,19 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "../include/v8stdint.h"
-#include "unicode.h"
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
+#include <math.h>
 
-#include "scanner-base.h"
+#include "../include/v8stdint.h"
+
+#include "allocation.h"
+#include "checks.h"
+#include "conversions.h"
+#include "conversions-inl.h"
+#include "globals.h"
+#include "hashmap.h"
+#include "list.h"
+#include "preparse-data-format.h"
 #include "preparse-data.h"
 #include "preparser.h"
+#include "unicode.h"
+#include "utils.h"
 
 namespace v8 {
+
+#ifdef _MSC_VER
+// Usually defined in math.h, but not in MSVC.
+// Abstracted to work around the missing declaration.
+int isfinite(double value);
+#endif
+
 namespace preparser {
 
 // Preparsing checks a JavaScript program and emits preparse-data that helps
@@ -53,15 +65,6 @@
 // That means that contextual checks (like a label being declared where
 // it is used) are generally omitted.
 
-namespace i = ::v8::internal;
-
-#define CHECK_OK  ok);  \
-  if (!*ok) return -1;  \
-  ((void)0
-#define DUMMY )  // to make indentation work
-#undef DUMMY
-
-
 void PreParser::ReportUnexpectedToken(i::Token::Value token) {
   // We don't report stack overflows here, to avoid increasing the
   // stack depth even further.  Instead we report it after parsing is
@@ -74,22 +77,51 @@
   // Four of the tokens are treated specially
   switch (token) {
   case i::Token::EOS:
-    return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
-                           "unexpected_eos", NULL);
+    return ReportMessageAt(source_location, "unexpected_eos", NULL);
   case i::Token::NUMBER:
-    return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
-                           "unexpected_token_number", NULL);
+    return ReportMessageAt(source_location, "unexpected_token_number", NULL);
   case i::Token::STRING:
-    return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
-                           "unexpected_token_string", NULL);
+    return ReportMessageAt(source_location, "unexpected_token_string", NULL);
   case i::Token::IDENTIFIER:
-  case i::Token::FUTURE_RESERVED_WORD:
-    return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
+    return ReportMessageAt(source_location,
                            "unexpected_token_identifier", NULL);
+  case i::Token::FUTURE_RESERVED_WORD:
+    return ReportMessageAt(source_location, "unexpected_reserved", NULL);
+  case i::Token::FUTURE_STRICT_RESERVED_WORD:
+    return ReportMessageAt(source_location,
+                           "unexpected_strict_reserved", NULL);
   default:
     const char* name = i::Token::String(token);
-    ReportMessageAt(source_location.beg_pos, source_location.end_pos,
-                    "unexpected_token", name);
+    ReportMessageAt(source_location, "unexpected_token", name);
+  }
+}
+
+
+// Checks whether the last octal literal seen is between beg_pos and end_pos.
+// If so, reports an error.
+void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+  i::Scanner::Location octal = scanner_->octal_position();
+  if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
+    ReportMessageAt(octal, "strict_octal_literal", NULL);
+    scanner_->clear_octal_position();
+    *ok = false;
+  }
+}
+
+
+#define CHECK_OK  ok);                      \
+  if (!*ok) return kUnknownSourceElements;  \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
+
+PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
+  switch (peek()) {
+    case i::Token::LET:
+      return ParseVariableStatement(kSourceElement, ok);
+    default:
+      return ParseStatement(ok);
   }
 }
 
@@ -99,13 +131,29 @@
   // SourceElements ::
   //   (Statement)* <end_token>
 
+  bool allow_directive_prologue = true;
   while (peek() != end_token) {
-    ParseStatement(CHECK_OK);
+    Statement statement = ParseSourceElement(CHECK_OK);
+    if (allow_directive_prologue) {
+      if (statement.IsUseStrictLiteral()) {
+        set_strict_mode();
+      } else if (!statement.IsStringLiteral()) {
+        allow_directive_prologue = false;
+      }
+    }
   }
   return kUnknownSourceElements;
 }
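
The comma-swallowing CHECK_OK macro defined just above is easiest to understand by
expansion. As a worked example (purely illustrative, with the macro's line breaks
flattened), the ParseSourceElement call in this function preprocesses into:

    // Written in the source:
    Statement statement = ParseSourceElement(CHECK_OK);
    // After macro expansion -- the call's trailing ");" closes the "((void)0":
    Statement statement = ParseSourceElement(ok);
    if (!*ok) return kUnknownSourceElements;
    ((void)0);
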
 
 
+#undef CHECK_OK
+#define CHECK_OK  ok);                   \
+  if (!*ok) return Statement::Default();  \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
+
 PreParser::Statement PreParser::ParseStatement(bool* ok) {
   // Statement ::
   //   Block
@@ -138,14 +186,14 @@
 
     case i::Token::CONST:
     case i::Token::VAR:
-      return ParseVariableStatement(ok);
+      return ParseVariableStatement(kStatement, ok);
 
     case i::Token::SEMICOLON:
       Next();
-      return kUnknownStatement;
+      return Statement::Default();
 
     case i::Token::IF:
-      return  ParseIfStatement(ok);
+      return ParseIfStatement(ok);
 
     case i::Token::DO:
       return ParseDoWhileStatement(ok);
@@ -180,9 +228,6 @@
     case i::Token::FUNCTION:
       return ParseFunctionDeclaration(ok);
 
-    case i::Token::NATIVE:
-      return ParseNativeDeclaration(ok);
-
     case i::Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
 
@@ -196,32 +241,24 @@
   // FunctionDeclaration ::
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(i::Token::FUNCTION, CHECK_OK);
-  ParseIdentifier(CHECK_OK);
-  ParseFunctionLiteral(CHECK_OK);
-  return kUnknownStatement;
-}
 
+  Identifier identifier = ParseIdentifier(CHECK_OK);
+  i::Scanner::Location location = scanner_->location();
 
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism.  A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-PreParser::Statement PreParser::ParseNativeDeclaration(bool* ok) {
-  Expect(i::Token::NATIVE, CHECK_OK);
-  Expect(i::Token::FUNCTION, CHECK_OK);
-  ParseIdentifier(CHECK_OK);
-  Expect(i::Token::LPAREN, CHECK_OK);
-  bool done = (peek() == i::Token::RPAREN);
-  while (!done) {
-    ParseIdentifier(CHECK_OK);
-    done = (peek() == i::Token::RPAREN);
-    if (!done) {
-      Expect(i::Token::COMMA, CHECK_OK);
+  Expression function_value = ParseFunctionLiteral(CHECK_OK);
+
+  if (function_value.IsStrictFunction() &&
+      !identifier.IsValidStrictVariable()) {
+    // Strict mode violation, using either reserved word or eval/arguments
+    // as name of strict function.
+    const char* type = "strict_function_name";
+    if (identifier.IsFutureStrictReserved()) {
+      type = "strict_reserved_word";
     }
+    ReportMessageAt(location, type, NULL);
+    *ok = false;
   }
-  Expect(i::Token::RPAREN, CHECK_OK);
-  Expect(i::Token::SEMICOLON, CHECK_OK);
-  return kUnknownStatement;
+  return Statement::FunctionDeclaration();
 }
 
 
@@ -234,18 +271,30 @@
   //
   Expect(i::Token::LBRACE, CHECK_OK);
   while (peek() != i::Token::RBRACE) {
-    ParseStatement(CHECK_OK);
+    i::Scanner::Location start_location = scanner_->peek_location();
+    Statement statement = ParseSourceElement(CHECK_OK);
+    i::Scanner::Location end_location = scanner_->location();
+    if (strict_mode() && statement.IsFunctionDeclaration()) {
+      ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                      "strict_function", NULL);
+      *ok = false;
+      return Statement::Default();
+    }
   }
-  Expect(i::Token::RBRACE, CHECK_OK);
-  return kUnknownStatement;
+  Expect(i::Token::RBRACE, ok);
+  return Statement::Default();
 }
 
 
-PreParser::Statement PreParser::ParseVariableStatement(bool* ok) {
+PreParser::Statement PreParser::ParseVariableStatement(
+    VariableDeclarationContext var_context,
+    bool* ok) {
   // VariableStatement ::
   //   VariableDeclarations ';'
 
-  Statement result = ParseVariableDeclarations(true, NULL, CHECK_OK);
+  Statement result = ParseVariableDeclarations(var_context,
+                                               NULL,
+                                               CHECK_OK);
   ExpectSemicolon(CHECK_OK);
   return result;
 }
@@ -256,54 +305,93 @@
 // *var is untouched; in particular, it is the caller's responsibility
 // to initialize it properly. This mechanism is also used for the parsing
 // of 'for-in' loops.
-PreParser::Statement PreParser::ParseVariableDeclarations(bool accept_IN,
-                                                          int* num_decl,
-                                                          bool* ok) {
+PreParser::Statement PreParser::ParseVariableDeclarations(
+    VariableDeclarationContext var_context,
+    int* num_decl,
+    bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
 
   if (peek() == i::Token::VAR) {
     Consume(i::Token::VAR);
   } else if (peek() == i::Token::CONST) {
+    if (strict_mode()) {
+      i::Scanner::Location location = scanner_->peek_location();
+      ReportMessageAt(location, "strict_const", NULL);
+      *ok = false;
+      return Statement::Default();
+    }
     Consume(i::Token::CONST);
+  } else if (peek() == i::Token::LET) {
+    if (var_context != kSourceElement &&
+        var_context != kForStatement) {
+      i::Scanner::Location location = scanner_->peek_location();
+      ReportMessageAt(location.beg_pos, location.end_pos,
+                      "unprotected_let", NULL);
+      *ok = false;
+      return Statement::Default();
+    }
+    Consume(i::Token::LET);
   } else {
     *ok = false;
-    return 0;
+    return Statement::Default();
   }
 
-  // The scope of a variable/const declared anywhere inside a function
-  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). .
+  // The scope of a var/const declared variable anywhere inside a function
+  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
+  // of a let declared variable is the scope of the immediately enclosing
+  // block.
   int nvars = 0;  // the number of variables declared
   do {
     // Parse variable name.
     if (nvars > 0) Consume(i::Token::COMMA);
-    ParseIdentifier(CHECK_OK);
+    Identifier identifier = ParseIdentifier(CHECK_OK);
+    if (strict_mode() && !identifier.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_var_name",
+                                    identifier,
+                                    ok);
+      return Statement::Default();
+    }
     nvars++;
     if (peek() == i::Token::ASSIGN) {
       Expect(i::Token::ASSIGN, CHECK_OK);
-      ParseAssignmentExpression(accept_IN, CHECK_OK);
+      ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
     }
   } while (peek() == i::Token::COMMA);
 
   if (num_decl != NULL) *num_decl = nvars;
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
-    bool* ok) {
+PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
   //   Identifier ':' Statement
 
   Expression expr = ParseExpression(true, CHECK_OK);
-  if (peek() == i::Token::COLON && expr == kIdentifierExpression) {
-    Consume(i::Token::COLON);
-    return ParseStatement(ok);
+  if (expr.IsRawIdentifier()) {
+    if (peek() == i::Token::COLON &&
+        (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
+      Consume(i::Token::COLON);
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseStatement(CHECK_OK);
+      if (strict_mode() && statement.IsFunctionDeclaration()) {
+        i::Scanner::Location end_location = scanner_->location();
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+      }
+      return Statement::Default();
+    }
+    // Preparsing is disabled for extensions (because the extension details
+    // aren't passed to lazily compiled functions), so we don't
+    // accept "native function" in the preparser.
   }
   // Parsed expression statement.
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::ExpressionStatement(expr);
 }
 
 
@@ -320,7 +408,7 @@
     Next();
     ParseStatement(CHECK_OK);
   }
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -330,14 +418,14 @@
 
   Expect(i::Token::CONTINUE, CHECK_OK);
   i::Token::Value tok = peek();
-  if (!scanner_->has_line_terminator_before_next() &&
+  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
       tok != i::Token::SEMICOLON &&
       tok != i::Token::RBRACE &&
       tok != i::Token::EOS) {
     ParseIdentifier(CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -347,14 +435,14 @@
 
   Expect(i::Token::BREAK, CHECK_OK);
   i::Token::Value tok = peek();
-  if (!scanner_->has_line_terminator_before_next() &&
+  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
       tok != i::Token::SEMICOLON &&
       tok != i::Token::RBRACE &&
       tok != i::Token::EOS) {
     ParseIdentifier(CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -373,14 +461,14 @@
   // This is not handled during preparsing.
 
   i::Token::Value tok = peek();
-  if (!scanner_->has_line_terminator_before_next() &&
+  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
       tok != i::Token::SEMICOLON &&
       tok != i::Token::RBRACE &&
       tok != i::Token::EOS) {
     ParseExpression(true, CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -388,6 +476,12 @@
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
   Expect(i::Token::WITH, CHECK_OK);
+  if (strict_mode()) {
+    i::Scanner::Location location = scanner_->location();
+    ReportMessageAt(location, "strict_mode_with", NULL);
+    *ok = false;
+    return Statement::Default();
+  }
   Expect(i::Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
   Expect(i::Token::RPAREN, CHECK_OK);
@@ -395,7 +489,7 @@
   scope_->EnterWith();
   ParseStatement(CHECK_OK);
   scope_->LeaveWith();
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -419,13 +513,20 @@
       Expect(i::Token::DEFAULT, CHECK_OK);
       Expect(i::Token::COLON, CHECK_OK);
     } else {
-      ParseStatement(CHECK_OK);
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseStatement(CHECK_OK);
+      if (strict_mode() && statement.IsFunctionDeclaration()) {
+        i::Scanner::Location end_location = scanner_->location();
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+        return Statement::Default();
+      }
     }
     token = peek();
   }
-  Expect(i::Token::RBRACE, CHECK_OK);
-
-  return kUnknownStatement;
+  Expect(i::Token::RBRACE, ok);
+  return Statement::Default();
 }
 
 
@@ -438,8 +539,9 @@
   Expect(i::Token::WHILE, CHECK_OK);
   Expect(i::Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
-  Expect(i::Token::RPAREN, CHECK_OK);
-  return kUnknownStatement;
+  Expect(i::Token::RPAREN, ok);
+  if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
+  return Statement::Default();
 }
 
 
@@ -451,8 +553,8 @@
   Expect(i::Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
   Expect(i::Token::RPAREN, CHECK_OK);
-  ParseStatement(CHECK_OK);
-  return kUnknownStatement;
+  ParseStatement(ok);
+  return Statement::Default();
 }
 
 
@@ -463,16 +565,17 @@
   Expect(i::Token::FOR, CHECK_OK);
   Expect(i::Token::LPAREN, CHECK_OK);
   if (peek() != i::Token::SEMICOLON) {
-    if (peek() == i::Token::VAR || peek() == i::Token::CONST) {
+    if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
+        peek() == i::Token::LET) {
       int decl_count;
-      ParseVariableDeclarations(false, &decl_count, CHECK_OK);
+      ParseVariableDeclarations(kForStatement, &decl_count, CHECK_OK);
       if (peek() == i::Token::IN && decl_count == 1) {
         Expect(i::Token::IN, CHECK_OK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RPAREN, CHECK_OK);
 
         ParseStatement(CHECK_OK);
-        return kUnknownStatement;
+        return Statement::Default();
       }
     } else {
       ParseExpression(false, CHECK_OK);
@@ -482,7 +585,7 @@
         Expect(i::Token::RPAREN, CHECK_OK);
 
         ParseStatement(CHECK_OK);
-        return kUnknownStatement;
+        return Statement::Default();
       }
     }
   }
@@ -500,8 +603,8 @@
   }
   Expect(i::Token::RPAREN, CHECK_OK);
 
-  ParseStatement(CHECK_OK);
-  return kUnknownStatement;
+  ParseStatement(ok);
+  return Statement::Default();
 }
 
 
@@ -510,17 +613,15 @@
   //   'throw' [no line terminator] Expression ';'
 
   Expect(i::Token::THROW, CHECK_OK);
-  if (scanner_->has_line_terminator_before_next()) {
+  if (scanner_->HasAnyLineTerminatorBeforeNext()) {
     i::JavaScriptScanner::Location pos = scanner_->location();
-    ReportMessageAt(pos.beg_pos, pos.end_pos,
-                    "newline_after_throw", NULL);
+    ReportMessageAt(pos, "newline_after_throw", NULL);
     *ok = false;
-    return kUnknownStatement;
+    return Statement::Default();
   }
   ParseExpression(true, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-
-  return kUnknownStatement;
+  ExpectSemicolon(ok);
+  return Statement::Default();
 }
 
 
@@ -547,12 +648,19 @@
   if (peek() == i::Token::CATCH) {
     Consume(i::Token::CATCH);
     Expect(i::Token::LPAREN, CHECK_OK);
-    ParseIdentifier(CHECK_OK);
+    Identifier id = ParseIdentifier(CHECK_OK);
+    if (strict_mode() && !id.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_catch_variable",
+                                    id,
+                                    ok);
+      return Statement::Default();
+    }
     Expect(i::Token::RPAREN, CHECK_OK);
     scope_->EnterWith();
     ParseBlock(ok);
     scope_->LeaveWith();
-    if (!*ok) return kUnknownStatement;
+    if (!*ok) return Statement::Default();
     catch_or_finally_seen = true;
   }
   if (peek() == i::Token::FINALLY) {
@@ -563,7 +671,7 @@
   if (!catch_or_finally_seen) {
     *ok = false;
   }
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -575,11 +683,19 @@
   //   'debugger' ';'
 
   Expect(i::Token::DEBUGGER, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  ExpectSemicolon(ok);
+  return Statement::Default();
 }
 
 
+#undef CHECK_OK
+#define CHECK_OK  ok);                     \
+  if (!*ok) return Expression::Default();  \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
+
 // Precedence = 1
 PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
   // Expression ::
@@ -590,7 +706,7 @@
   while (peek() == i::Token::COMMA) {
     Expect(i::Token::COMMA, CHECK_OK);
     ParseAssignmentExpression(accept_IN, CHECK_OK);
-    result = kUnknownExpression;
+    result = Expression::Default();
   }
   return result;
 }
@@ -603,6 +719,7 @@
   //   ConditionalExpression
   //   LeftHandSideExpression AssignmentOperator AssignmentExpression
 
+  i::Scanner::Location before = scanner_->peek_location();
   Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
 
   if (!i::Token::IsAssignmentOp(peek())) {
@@ -610,14 +727,23 @@
     return expression;
   }
 
+  if (strict_mode() && expression.IsIdentifier() &&
+      expression.AsIdentifier().IsEvalOrArguments()) {
+    i::Scanner::Location after = scanner_->location();
+    ReportMessageAt(before.beg_pos, after.end_pos,
+                    "strict_lhs_assignment", NULL);
+    *ok = false;
+    return Expression::Default();
+  }
+
   i::Token::Value op = Next();  // Get assignment operator.
   ParseAssignmentExpression(accept_IN, CHECK_OK);
 
-  if ((op == i::Token::ASSIGN) && (expression == kThisPropertyExpression)) {
+  if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) {
     scope_->AddProperty();
   }
 
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -638,7 +764,7 @@
   ParseAssignmentExpression(true, CHECK_OK);
   Expect(i::Token::COLON, CHECK_OK);
   ParseAssignmentExpression(accept_IN, CHECK_OK);
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -660,7 +786,7 @@
     while (Precedence(peek(), accept_IN) == prec1) {
       Next();
       ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
-      result = kUnknownExpression;
+      result = Expression::Default();
     }
   }
   return result;
@@ -681,10 +807,22 @@
   //   '!' UnaryExpression
 
   i::Token::Value op = peek();
-  if (i::Token::IsUnaryOp(op) || i::Token::IsCountOp(op)) {
+  if (i::Token::IsUnaryOp(op)) {
     op = Next();
     ParseUnaryExpression(ok);
-    return kUnknownExpression;
+    return Expression::Default();
+  } else if (i::Token::IsCountOp(op)) {
+    op = Next();
+    i::Scanner::Location before = scanner_->peek_location();
+    Expression expression = ParseUnaryExpression(CHECK_OK);
+    if (strict_mode() && expression.IsIdentifier() &&
+        expression.AsIdentifier().IsEvalOrArguments()) {
+      i::Scanner::Location after = scanner_->location();
+      ReportMessageAt(before.beg_pos, after.end_pos,
+                      "strict_lhs_prefix", NULL);
+      *ok = false;
+    }
+    return Expression::Default();
   } else {
     return ParsePostfixExpression(ok);
   }
@@ -695,11 +833,20 @@
   // PostfixExpression ::
   //   LeftHandSideExpression ('++' | '--')?
 
+  i::Scanner::Location before = scanner_->peek_location();
   Expression expression = ParseLeftHandSideExpression(CHECK_OK);
-  if (!scanner_->has_line_terminator_before_next() &&
+  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
       i::Token::IsCountOp(peek())) {
+    if (strict_mode() && expression.IsIdentifier() &&
+        expression.AsIdentifier().IsEvalOrArguments()) {
+      i::Scanner::Location after = scanner_->location();
+      ReportMessageAt(before.beg_pos, after.end_pos,
+                      "strict_lhs_postfix", NULL);
+      *ok = false;
+      return Expression::Default();
+    }
     Next();
-    return kUnknownExpression;
+    return Expression::Default();
   }
   return expression;
 }
@@ -709,7 +856,7 @@
   // LeftHandSideExpression ::
   //   (NewExpression | MemberExpression) ...
 
-  Expression result;
+  Expression result = Expression::Default();
   if (peek() == i::Token::NEW) {
     result = ParseNewExpression(CHECK_OK);
   } else {
@@ -722,27 +869,27 @@
         Consume(i::Token::LBRACK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RBRACK, CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
 
       case i::Token::LPAREN: {
         ParseArguments(CHECK_OK);
-        result = kUnknownExpression;
+        result = Expression::Default();
         break;
       }
 
       case i::Token::PERIOD: {
         Consume(i::Token::PERIOD);
         ParseIdentifierName(CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
@@ -788,13 +935,21 @@
   //     ('[' Expression ']' | '.' Identifier | Arguments)*
 
   // Parse the initial primary or function expression.
-  Expression result = kUnknownExpression;
+  Expression result = Expression::Default();
   if (peek() == i::Token::FUNCTION) {
     Consume(i::Token::FUNCTION);
+    Identifier identifier = Identifier::Default();
     if (peek_any_identifier()) {
-      ParseIdentifier(CHECK_OK);
+      identifier = ParseIdentifier(CHECK_OK);
     }
     result = ParseFunctionLiteral(CHECK_OK);
+    if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_function_name",
+                                    identifier,
+                                    ok);
+      return Expression::Default();
+    }
   } else {
     result = ParsePrimaryExpression(CHECK_OK);
   }
@@ -805,20 +960,20 @@
         Consume(i::Token::LBRACK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RBRACK, CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
       case i::Token::PERIOD: {
         Consume(i::Token::PERIOD);
         ParseIdentifierName(CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
@@ -827,7 +982,7 @@
         // Consume one of the new prefixes (already parsed).
         ParseArguments(CHECK_OK);
         new_count--;
-        result = kUnknownExpression;
+        result = Expression::Default();
         break;
       }
       default:
@@ -851,18 +1006,35 @@
   //   RegExpLiteral
   //   '(' Expression ')'
 
-  Expression result = kUnknownExpression;
+  Expression result = Expression::Default();
   switch (peek()) {
     case i::Token::THIS: {
       Next();
-      result = kThisExpression;
+      result = Expression::This();
       break;
     }
 
-    case i::Token::IDENTIFIER:
     case i::Token::FUTURE_RESERVED_WORD: {
-      ParseIdentifier(CHECK_OK);
-      result = kIdentifierExpression;
+      Next();
+      i::Scanner::Location location = scanner_->location();
+      ReportMessageAt(location.beg_pos, location.end_pos,
+                      "reserved_word", NULL);
+      *ok = false;
+      return Expression::Default();
+    }
+
+    case i::Token::FUTURE_STRICT_RESERVED_WORD:
+      if (strict_mode()) {
+        Next();
+        i::Scanner::Location location = scanner_->location();
+        ReportMessageAt(location, "strict_reserved_word", NULL);
+        *ok = false;
+        return Expression::Default();
+      }
+      // FALLTHROUGH
+    case i::Token::IDENTIFIER: {
+      Identifier id = ParseIdentifier(CHECK_OK);
+      result = Expression::FromIdentifier(id);
       break;
     }
 
@@ -900,7 +1072,7 @@
       parenthesized_function_ = (peek() == i::Token::FUNCTION);
       result = ParseExpression(true, CHECK_OK);
       Expect(i::Token::RPAREN, CHECK_OK);
-      if (result == kIdentifierExpression) result = kUnknownExpression;
+      result = result.Parenthesize();
       break;
 
     case i::Token::MOD:
@@ -910,7 +1082,7 @@
     default: {
       Next();
       *ok = false;
-      return kUnknownExpression;
+      return Expression::Default();
     }
   }
 
@@ -933,7 +1105,40 @@
   Expect(i::Token::RBRACK, CHECK_OK);
 
   scope_->NextMaterializedLiteralIndex();
-  return kUnknownExpression;
+  return Expression::Default();
+}
+
+void PreParser::CheckDuplicate(DuplicateFinder* finder,
+                               i::Token::Value property,
+                               int type,
+                               bool* ok) {
+  int old_type;
+  if (property == i::Token::NUMBER) {
+    old_type = finder->AddNumber(scanner_->literal_ascii_string(), type);
+  } else if (scanner_->is_literal_ascii()) {
+    old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
+                                      type);
+  } else {
+    old_type = finder->AddUC16Symbol(scanner_->literal_uc16_string(), type);
+  }
+  if (HasConflict(old_type, type)) {
+    if (IsDataDataConflict(old_type, type)) {
+      // Both are data properties.
+      if (!strict_mode()) return;
+      ReportMessageAt(scanner_->location(),
+                      "strict_duplicate_property", NULL);
+    } else if (IsDataAccessorConflict(old_type, type)) {
+      // Both a data and an accessor property with the same name.
+      ReportMessageAt(scanner_->location(),
+                      "accessor_data_property", NULL);
+    } else {
+      ASSERT(IsAccessorAccessorConflict(old_type, type));
+      // Both accessors of the same type.
+      ReportMessageAt(scanner_->location(),
+                      "accessor_get_set", NULL);
+    }
+    *ok = false;
+  }
 }
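
The conflict predicates used above (HasConflict, IsDataDataConflict,
IsDataAccessorConflict, IsAccessorAccessorConflict) are defined outside this excerpt.
One bit-flag encoding that would satisfy these checks is sketched below; this is an
assumption made for illustration, not necessarily the encoding V8 uses:

    // Sketch: each property kind is one bit; old_type is the OR of every kind
    // already seen under this name, type is the kind being added now.
    enum { kValueBit = 1, kGetterBit = 2, kSetterBit = 4 };

    static bool IsDataDataConflict(int old_type, int type) {
      return (old_type & kValueBit) != 0 && (type & kValueBit) != 0;
    }
    static bool IsDataAccessorConflict(int old_type, int type) {
      return ((old_type | type) & kValueBit) != 0 &&
             ((old_type | type) & (kGetterBit | kSetterBit)) != 0;
    }
    static bool IsAccessorAccessorConflict(int old_type, int type) {
      return (old_type & type & (kGetterBit | kSetterBit)) != 0;
    }
    static bool HasConflict(int old_type, int type) {
      return IsDataDataConflict(old_type, type) ||
             IsDataAccessorConflict(old_type, type) ||
             IsAccessorAccessorConflict(old_type, type);
    }
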
 
 
@@ -945,50 +1150,59 @@
   //    )*[','] '}'
 
   Expect(i::Token::LBRACE, CHECK_OK);
+  DuplicateFinder duplicate_finder(scanner_->unicode_cache());
   while (peek() != i::Token::RBRACE) {
     i::Token::Value next = peek();
     switch (next) {
       case i::Token::IDENTIFIER:
-      case i::Token::FUTURE_RESERVED_WORD: {
+      case i::Token::FUTURE_RESERVED_WORD:
+      case i::Token::FUTURE_STRICT_RESERVED_WORD: {
         bool is_getter = false;
         bool is_setter = false;
-        ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+        ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
         if ((is_getter || is_setter) && peek() != i::Token::COLON) {
             i::Token::Value name = Next();
             bool is_keyword = i::Token::IsKeyword(name);
             if (name != i::Token::IDENTIFIER &&
                 name != i::Token::FUTURE_RESERVED_WORD &&
+                name != i::Token::FUTURE_STRICT_RESERVED_WORD &&
                 name != i::Token::NUMBER &&
                 name != i::Token::STRING &&
                 !is_keyword) {
               *ok = false;
-              return kUnknownExpression;
+              return Expression::Default();
             }
             if (!is_keyword) {
               LogSymbol();
             }
+            PropertyType type = is_getter ? kGetterProperty : kSetterProperty;
+            CheckDuplicate(&duplicate_finder, name, type, CHECK_OK);
             ParseFunctionLiteral(CHECK_OK);
             if (peek() != i::Token::RBRACE) {
               Expect(i::Token::COMMA, CHECK_OK);
             }
             continue;  // restart the while
         }
+        CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
         break;
       }
       case i::Token::STRING:
         Consume(next);
+        CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
         GetStringSymbol();
         break;
       case i::Token::NUMBER:
         Consume(next);
+        CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
         break;
       default:
         if (i::Token::IsKeyword(next)) {
           Consume(next);
+          CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
         } else {
           // Unexpected token.
           *ok = false;
-          return kUnknownExpression;
+          return Expression::Default();
         }
     }
 
@@ -1001,7 +1215,7 @@
   Expect(i::Token::RBRACE, CHECK_OK);
 
   scope_->NextMaterializedLiteralIndex();
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -1009,25 +1223,21 @@
                                                     bool* ok) {
   if (!scanner_->ScanRegExpPattern(seen_equal)) {
     Next();
-    i::JavaScriptScanner::Location location = scanner_->location();
-    ReportMessageAt(location.beg_pos, location.end_pos,
-                    "unterminated_regexp", NULL);
+    ReportMessageAt(scanner_->location(), "unterminated_regexp", NULL);
     *ok = false;
-    return kUnknownExpression;
+    return Expression::Default();
   }
 
   scope_->NextMaterializedLiteralIndex();
 
   if (!scanner_->ScanRegExpFlags()) {
     Next();
-    i::JavaScriptScanner::Location location = scanner_->location();
-    ReportMessageAt(location.beg_pos, location.end_pos,
-                    "invalid_regexp_flags", NULL);
+    ReportMessageAt(scanner_->location(), "invalid_regexp_flags", NULL);
     *ok = false;
-    return kUnknownExpression;
+    return Expression::Default();
   }
   Next();
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -1035,16 +1245,21 @@
   // Arguments ::
   //   '(' (AssignmentExpression)*[','] ')'
 
-  Expect(i::Token::LPAREN, CHECK_OK);
+  Expect(i::Token::LPAREN, ok);
+  if (!*ok) return -1;
   bool done = (peek() == i::Token::RPAREN);
   int argc = 0;
   while (!done) {
-    ParseAssignmentExpression(true, CHECK_OK);
+    ParseAssignmentExpression(true, ok);
+    if (!*ok) return -1;
     argc++;
     done = (peek() == i::Token::RPAREN);
-    if (!done) Expect(i::Token::COMMA, CHECK_OK);
+    if (!done) {
+      Expect(i::Token::COMMA, ok);
+      if (!*ok) return -1;
+    }
   }
-  Expect(i::Token::RPAREN, CHECK_OK);
+  Expect(i::Token::RPAREN, ok);
   return argc;
 }
 
@@ -1057,13 +1272,34 @@
   ScopeType outer_scope_type = scope_->type();
   bool inside_with = scope_->IsInsideWith();
   Scope function_scope(&scope_, kFunctionScope);
-
   //  FormalParameterList ::
   //    '(' (Identifier)*[','] ')'
   Expect(i::Token::LPAREN, CHECK_OK);
+  int start_position = scanner_->location().beg_pos;
   bool done = (peek() == i::Token::RPAREN);
+  DuplicateFinder duplicate_finder(scanner_->unicode_cache());
   while (!done) {
-    ParseIdentifier(CHECK_OK);
+    Identifier id = ParseIdentifier(CHECK_OK);
+    if (!id.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_param_name",
+                                    id,
+                                    CHECK_OK);
+    }
+    int prev_value;
+    if (scanner_->is_literal_ascii()) {
+      prev_value =
+          duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
+    } else {
+      prev_value =
+          duplicate_finder.AddUC16Symbol(scanner_->literal_uc16_string(), 1);
+    }
+
+    if (prev_value != 0) {
+      SetStrictModeViolation(scanner_->location(),
+                             "strict_param_dupe",
+                             CHECK_OK);
+    }
     done = (peek() == i::Token::RPAREN);
     if (!done) {
       Expect(i::Token::COMMA, CHECK_OK);
@@ -1086,7 +1322,7 @@
     log_->PauseRecording();
     ParseSourceElements(i::Token::RBRACE, ok);
     log_->ResumeRecording();
-    if (!*ok) return kUnknownExpression;
+    if (!*ok) return Expression::Default();
 
     Expect(i::Token::RBRACE, CHECK_OK);
 
@@ -1094,12 +1330,21 @@
     int end_pos = scanner_->location().end_pos;
     log_->LogFunction(function_block_pos, end_pos,
                       function_scope.materialized_literal_count(),
-                      function_scope.expected_properties());
+                      function_scope.expected_properties(),
+                      strict_mode() ? 1 : 0);
   } else {
     ParseSourceElements(i::Token::RBRACE, CHECK_OK);
     Expect(i::Token::RBRACE, CHECK_OK);
   }
-  return kUnknownExpression;
+
+  if (strict_mode()) {
+    int end_position = scanner_->location().end_pos;
+    CheckOctalLiteral(start_position, end_position, CHECK_OK);
+    CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
+    return Expression::StrictFunction();
+  }
+
+  return Expression::Default();
 }
 
 
@@ -1109,11 +1354,13 @@
 
   Expect(i::Token::MOD, CHECK_OK);
   ParseIdentifier(CHECK_OK);
-  ParseArguments(CHECK_OK);
+  ParseArguments(ok);
 
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
+#undef CHECK_OK
+
 
 void PreParser::ExpectSemicolon(bool* ok) {
   // Check for automatic semicolon insertion according to
@@ -1123,7 +1370,7 @@
     Next();
     return;
   }
-  if (scanner_->has_line_terminator_before_next() ||
+  if (scanner_->HasAnyLineTerminatorBeforeNext() ||
       tok == i::Token::RBRACE ||
       tok == i::Token::EOS) {
     return;
@@ -1142,24 +1389,114 @@
 }
 
 
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
+PreParser::Expression PreParser::GetStringSymbol() {
+  const int kUseStrictLength = 10;
+  const char* kUseStrictChars = "use strict";
   LogSymbol();
-  return kUnknownIdentifier;
+  if (scanner_->is_literal_ascii() &&
+      scanner_->literal_length() == kUseStrictLength &&
+      !scanner_->literal_contains_escapes() &&
+      !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars,
+               kUseStrictLength)) {
+    return Expression::UseStrictStringLiteral();
+  }
+  return Expression::StringLiteral();
 }
 
 
-PreParser::Expression PreParser::GetStringSymbol() {
+PreParser::Identifier PreParser::GetIdentifierSymbol() {
   LogSymbol();
-  return kUnknownExpression;
+  if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
+    return Identifier::FutureReserved();
+  } else if (scanner_->current_token() ==
+             i::Token::FUTURE_STRICT_RESERVED_WORD) {
+    return Identifier::FutureStrictReserved();
+  }
+  if (scanner_->is_literal_ascii()) {
+    // Detect strict-mode poison words.
+    if (scanner_->literal_length() == 4 &&
+        !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) {
+      return Identifier::Eval();
+    }
+    if (scanner_->literal_length() == 9 &&
+        !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) {
+      return Identifier::Arguments();
+    }
+  }
+  return Identifier::Default();
 }
 
 
 PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
-  if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
-    Expect(i::Token::IDENTIFIER, ok);
+  i::Token::Value next = Next();
+  switch (next) {
+    case i::Token::FUTURE_RESERVED_WORD: {
+      i::Scanner::Location location = scanner_->location();
+      ReportMessageAt(location.beg_pos, location.end_pos,
+                      "reserved_word", NULL);
+      *ok = false;
+    }
+      // FALLTHROUGH
+    case i::Token::FUTURE_STRICT_RESERVED_WORD:
+    case i::Token::IDENTIFIER:
+      return GetIdentifierSymbol();
+    default:
+      *ok = false;
+      return Identifier::Default();
   }
-  if (!*ok) return kUnknownIdentifier;
-  return GetIdentifierSymbol();
+}
+
+
+void PreParser::SetStrictModeViolation(i::Scanner::Location location,
+                                       const char* type,
+                                       bool* ok) {
+  if (strict_mode()) {
+    ReportMessageAt(location, type, NULL);
+    *ok = false;
+    return;
+  }
+  // Delay report in case this later turns out to be strict code
+  // (i.e., for function names and parameters prior to a "use strict"
+  // directive).
+  // It's safe to overwrite an existing violation.
+  // It's either from a function that turned out to be non-strict,
+  // or it's in the current function (and we just need to report
+  // one error), or it's in an unclosed nesting function that wasn't
+  // strict (otherwise we would already be in strict mode).
+  strict_mode_violation_location_ = location;
+  strict_mode_violation_type_ = type;
+}
+
+
+void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
+                                                int end_pos,
+                                                bool* ok) {
+  i::Scanner::Location location = strict_mode_violation_location_;
+  if (location.IsValid() &&
+      location.beg_pos > beg_pos && location.end_pos < end_pos) {
+    ReportMessageAt(location, strict_mode_violation_type_, NULL);
+    *ok = false;
+  }
+}
+
+
+void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
+                                              const char* eval_args_type,
+                                              Identifier identifier,
+                                              bool* ok) {
+  const char* type = eval_args_type;
+  if (identifier.IsFutureReserved()) {
+    type = "reserved_word";
+  } else if (identifier.IsFutureStrictReserved()) {
+    type = "strict_reserved_word";
+  }
+  if (strict_mode()) {
+    ReportMessageAt(location, type, NULL);
+    *ok = false;
+    return;
+  }
+  strict_mode_violation_location_ = location;
+  strict_mode_violation_type_ = type;
 }
 
 
@@ -1170,24 +1507,29 @@
     const char* keyword = i::Token::String(next);
     log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
                                                     i::StrLength(keyword)));
-    return kUnknownExpression;
+    return Identifier::Default();
   }
   if (next == i::Token::IDENTIFIER ||
-      next == i::Token::FUTURE_RESERVED_WORD) {
+      next == i::Token::FUTURE_RESERVED_WORD ||
+      next == i::Token::FUTURE_STRICT_RESERVED_WORD) {
     return GetIdentifierSymbol();
   }
   *ok = false;
-  return kUnknownIdentifier;
+  return Identifier::Default();
 }
 
+#undef CHECK_OK
+
 
 // This function reads an identifier and determines whether or not it
 // is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
-                                                           bool* is_set,
-                                                           bool* ok) {
-  PreParser::Identifier result = ParseIdentifier(CHECK_OK);
-  if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
+PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+                                                               bool* is_set,
+                                                               bool* ok) {
+  Identifier result = ParseIdentifierName(ok);
+  if (!*ok) return Identifier::Default();
+  if (scanner_->is_literal_ascii() &&
+      scanner_->literal_length() == 3) {
     const char* token = scanner_->literal_ascii_string().start();
     *is_get = strncmp(token, "get", 3) == 0;
     *is_set = !*is_get && strncmp(token, "set", 3) == 0;
@@ -1198,8 +1540,141 @@
 bool PreParser::peek_any_identifier() {
   i::Token::Value next = peek();
   return next == i::Token::IDENTIFIER ||
-         next == i::Token::FUTURE_RESERVED_WORD;
+         next == i::Token::FUTURE_RESERVED_WORD ||
+         next == i::Token::FUTURE_STRICT_RESERVED_WORD;
 }
 
-#undef CHECK_OK
+
+int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) {
+  return AddSymbol(i::Vector<const byte>::cast(key), true, value);
+}
+
+int DuplicateFinder::AddUC16Symbol(i::Vector<const uint16_t> key, int value) {
+  return AddSymbol(i::Vector<const byte>::cast(key), false, value);
+}
+
+int DuplicateFinder::AddSymbol(i::Vector<const byte> key,
+                               bool is_ascii,
+                               int value) {
+  uint32_t hash = Hash(key, is_ascii);
+  byte* encoding = BackupKey(key, is_ascii);
+  i::HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
+  int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+  entry->value =
+    reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+  return old_value;
+}
+
+
+int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) {
+  ASSERT(key.length() > 0);
+  // Quick check for already being in canonical form.
+  if (IsNumberCanonical(key)) {
+    return AddAsciiSymbol(key, value);
+  }
+
+  int flags = i::ALLOW_HEX | i::ALLOW_OCTALS;
+  double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+  int length;
+  const char* string;
+  if (!isfinite(double_value)) {
+    string = "Infinity";
+    length = 8;  // strlen("Infinity");
+  } else {
+    string = DoubleToCString(double_value,
+                             i::Vector<char>(number_buffer_, kBufferSize));
+    length = i::StrLength(string);
+  }
+  return AddSymbol(i::Vector<const byte>(reinterpret_cast<const byte*>(string),
+                                         length), true, value);
+}
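// A minimal standalone sketch of why AddNumber canonicalizes before hashing:
// in an object literal, the keys 1e2, 100.0 and "100" all denote the property
// "100", so the duplicate check must normalize numeric literals to
// ToString(ToNumber(literal)) first. strtod/snprintf stand in for V8's
// StringToDouble/DoubleToCString and may differ in corner cases.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static void CanonicalNumberKey(const char* literal, char* out,
                               std::size_t size) {
  double value = std::strtod(literal, nullptr);
  if (!std::isfinite(value)) {
    std::snprintf(out, size, "Infinity");
  } else {
    std::snprintf(out, size, "%.15g", value);
  }
}

int main() {
  const char* literals[] = {"1e2", "100.0", "100"};
  char key[100];
  for (const char* literal : literals) {
    CanonicalNumberKey(literal, key, sizeof(key));
    std::printf("%-5s -> %s\n", literal, key);  // all three print "100"
  }
  return 0;
}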
+
+
+bool DuplicateFinder::IsNumberCanonical(i::Vector<const char> number) {
+  // Test for a safe approximation of number literals that are already
+  // in canonical form: max 15 digits, no leading zeroes, except an
+  // integer part that is a single zero, and no trailing zeros below
+  // the decimal point.
+  int pos = 0;
+  int length = number.length();
+  if (number.length() > 15) return false;
+  if (number[pos] == '0') {
+    pos++;
+  } else {
+    while (pos < length &&
+           static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
+  }
+  if (length == pos) return true;
+  if (number[pos] != '.') return false;
+  pos++;
+  bool invalid_last_digit = true;
+  while (pos < length) {
+    byte digit = number[pos] - '0';
+    if (digit > '9' - '0') return false;
+    invalid_last_digit = (digit == 0);
+    pos++;
+  }
+  return !invalid_last_digit;
+}
+
+
+uint32_t DuplicateFinder::Hash(i::Vector<const byte> key, bool is_ascii) {
+  // Primitive hash function, almost identical to the one used
+  // for strings (except that it's seeded by the length and ASCII-ness).
+  int length = key.length();
+  uint32_t hash = (length << 1) | (is_ascii ? 1 : 0);
+  for (int i = 0; i < length; i++) {
+    uint32_t c = key[i];
+    hash = (hash + c) * 1025;
+    hash ^= (hash >> 6);
+  }
+  return hash;
+}
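// A minimal standalone copy of the hashing scheme above. Every update step
// (add a byte, multiply by the odd constant 1025, xor with a shifted copy)
// is invertible modulo 2^32, so for a fixed byte sequence two different
// seeds (length and ASCII-ness) can never end up with the same hash.
#include <cassert>
#include <cstdint>

static uint32_t HashKey(const uint8_t* key, int length, bool is_ascii) {
  uint32_t hash = (length << 1) | (is_ascii ? 1 : 0);
  for (int i = 0; i < length; i++) {
    hash = (hash + key[i]) * 1025;
    hash ^= (hash >> 6);
  }
  return hash;
}

int main() {
  const uint8_t bytes[] = {'g', 'e', 't'};
  // Same bytes, different ASCII flag: the seeds differ, so the hashes differ.
  assert(HashKey(bytes, 3, true) != HashKey(bytes, 3, false));
  return 0;
}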
+
+
+bool DuplicateFinder::Match(void* first, void* second) {
+  // Decode lengths.
+  // Length + ASCII-bit is encoded as base 128, most significant heptet first,
+  // with the 8th bit being non-zero while there are more heptets.
+  // The value encodes the number of bytes following, and whether the original
+  // was ASCII.
+  byte* s1 = reinterpret_cast<byte*>(first);
+  byte* s2 = reinterpret_cast<byte*>(second);
+  uint32_t length_ascii_field = 0;
+  byte c1;
+  do {
+    c1 = *s1;
+    if (c1 != *s2) return false;
+    length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
+    s1++;
+    s2++;
+  } while ((c1 & 0x80) != 0);
+  int length = static_cast<int>(length_ascii_field >> 1);
+  return memcmp(s1, s2, length) == 0;
+}
+
+
+byte* DuplicateFinder::BackupKey(i::Vector<const byte> bytes,
+                                 bool is_ascii) {
+  uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
+  backing_store_.StartSequence();
+  // Emit ascii_length as base-128 encoded number, with the 7th bit set
+  // on the byte of every heptet except the last, least significant, one.
+  if (ascii_length >= (1 << 7)) {
+    if (ascii_length >= (1 << 14)) {
+      if (ascii_length >= (1 << 21)) {
+        if (ascii_length >= (1 << 28)) {
+          backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
+        }
+        backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
+      }
+      backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+    }
+    backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+  }
+  backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
+
+  backing_store_.AddBlock(bytes);
+  return backing_store_.EndSequence().start();
+}
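// A minimal standalone round-trip of the length prefix written by BackupKey
// and consumed by Match. The value (length << 1 | is_ascii) is emitted
// big-endian in base 128, 7 bits per byte, with the high bit set on every
// byte except the last.
#include <cassert>
#include <cstdint>
#include <initializer_list>
#include <vector>

static void EncodePrefix(uint32_t value, std::vector<uint8_t>* out) {
  if (value >= (1u << 7)) {
    if (value >= (1u << 14)) {
      if (value >= (1u << 21)) {
        if (value >= (1u << 28)) {
          out->push_back(static_cast<uint8_t>((value >> 28) | 0x80));
        }
        out->push_back(static_cast<uint8_t>(((value >> 21) & 0x7f) | 0x80));
      }
      out->push_back(static_cast<uint8_t>(((value >> 14) & 0x7f) | 0x80));
    }
    out->push_back(static_cast<uint8_t>(((value >> 7) & 0x7f) | 0x80));
  }
  out->push_back(static_cast<uint8_t>(value & 0x7f));
}

static uint32_t DecodePrefix(const uint8_t* p, const uint8_t** rest) {
  uint32_t value = 0;
  uint8_t c;
  do {
    c = *p++;
    value = (value << 7) | (c & 0x7f);
  } while ((c & 0x80) != 0);
  *rest = p;  // the key bytes start here
  return value;
}

int main() {
  for (uint32_t length : {0u, 5u, 200u, 70000u}) {
    uint32_t field = (length << 1) | 1;  // pretend the key is ASCII
    std::vector<uint8_t> bytes;
    EncodePrefix(field, &bytes);
    const uint8_t* rest = nullptr;
    assert(DecodePrefix(bytes.data(), &rest) == field);
    assert(rest == bytes.data() + bytes.size());
  }
  return 0;
}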
 } }  // v8::preparser
diff --git a/src/preparser.h b/src/preparser.h
index b7fa6c7..b97b7cf 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,12 +28,22 @@
 #ifndef V8_PREPARSER_H
 #define V8_PREPARSER_H
 
+#include "token.h"
+#include "scanner.h"
+
 namespace v8 {
+
+namespace internal {
+class UnicodeCache;
+}
+
 namespace preparser {
 
+typedef uint8_t byte;
+
 // Preparsing checks a JavaScript program and emits preparse-data that helps
 // a later parsing to be faster.
-// See preparse-data.h for the data.
+// See preparse-data-format.h for the data format.
 
 // The PreParser checks that the syntax follows the grammar for JavaScript,
 // and collects some information about the program along the way.
@@ -46,6 +56,53 @@
 
 namespace i = v8::internal;
 
+class DuplicateFinder {
+ public:
+  explicit DuplicateFinder(i::UnicodeCache* constants)
+      : unicode_constants_(constants),
+        backing_store_(16),
+        map_(&Match) { }
+
+  int AddAsciiSymbol(i::Vector<const char> key, int value);
+  int AddUC16Symbol(i::Vector<const uint16_t> key, int value);
+  // Add a number literal by converting it (if necessary)
+  // to the string that ToString(ToNumber(literal)) would generate,
+  // and then adding that string with AddAsciiSymbol.
+  // This string is the actual value used as key in an object literal,
+  // and the one that must be different from the other keys.
+  int AddNumber(i::Vector<const char> key, int value);
+
+ private:
+  int AddSymbol(i::Vector<const byte> key, bool is_ascii, int value);
+  // Backs up the key and its length in the backing store.
+  // The backup is stored with a base-128 encoding of the
+  // length (plus a bit saying whether the string is ASCII),
+  // followed by the bytes of the key.
+  byte* BackupKey(i::Vector<const byte> key, bool is_ascii);
+
+  // Compare two encoded keys (both pointing into the backing store)
+  // for having the same base-128 encoded lengths and ASCII-ness,
+  // and then having the same 'length' bytes following.
+  static bool Match(void* first, void* second);
+  // Creates a hash from a sequence of bytes.
+  static uint32_t Hash(i::Vector<const byte> key, bool is_ascii);
+  // Checks whether a string containing a JS number is its canonical
+  // form.
+  static bool IsNumberCanonical(i::Vector<const char> key);
+
+  // Size of the buffer. Sufficient for calling DoubleToCString
+  // from conversions.h.
+  static const int kBufferSize = 100;
+
+  i::UnicodeCache* unicode_constants_;
+  // Backing store used to store strings used as hashmap keys.
+  i::SequenceCollector<unsigned char> backing_store_;
+  i::HashMap map_;
+  // Buffer used for string->number->canonical string conversions.
+  char number_buffer_[kBufferSize];
+};
+
+
 class PreParser {
  public:
   enum PreParseResult {
@@ -53,7 +110,7 @@
     kPreParseSuccess
   };
 
-  ~PreParser() { }
+  ~PreParser() {}
 
   // Pre-parse the program from the character stream; returns true on
   // success (even if parsing failed, the pre-parse data successfully
@@ -67,41 +124,279 @@
   }
 
  private:
+  // Used to detect duplicates in object literals. Each of the values
+  // kGetterProperty, kSetterProperty and kValueProperty represents
+  // a type of object literal property. When parsing a property, its
+  // type value is stored in the DuplicateFinder for the property name.
+  // Values are chosen so that intersecting bits mean there is an
+  // incompatibility.
+  // I.e., you can add a getter to a property that already has a setter, since
+  // kGetterProperty and kSetterProperty don't intersect, but not if it
+  // already has a getter or a value. Adding the getter to an existing
+  // setter will store the value (kGetterProperty | kSetterProperty), which
+  // is incompatible with adding any further properties.
+  enum PropertyType {
+    kNone = 0,
+    // Bit patterns representing different object literal property types.
+    kGetterProperty = 1,
+    kSetterProperty = 2,
+    kValueProperty = 7,
+    // Helper constants.
+    kValueFlag = 4
+  };
+
+  // Checks the type of conflict based on values coming from PropertyType.
+  bool HasConflict(int type1, int type2) { return (type1 & type2) != 0; }
+  bool IsDataDataConflict(int type1, int type2) {
+    return ((type1 & type2) & kValueFlag) != 0;
+  }
+  bool IsDataAccessorConflict(int type1, int type2) {
+    return ((type1 ^ type2) & kValueFlag) != 0;
+  }
+  bool IsAccessorAccessorConflict(int type1, int type2) {
+    return ((type1 | type2) & kValueFlag) == 0;
+  }
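// A minimal standalone exercise of the bit algebra documented above for
// duplicate object-literal properties: the types already seen for a name are
// OR-ed together, and a new property conflicts when the matching test fires.
#include <cassert>

enum PropertyType {
  kNone = 0,
  kGetterProperty = 1,
  kSetterProperty = 2,
  kValueProperty = 7,
  kValueFlag = 4
};

static bool HasConflict(int a, int b) { return (a & b) != 0; }
static bool IsDataDataConflict(int a, int b) {
  return ((a & b) & kValueFlag) != 0;
}
static bool IsDataAccessorConflict(int a, int b) {
  return ((a ^ b) & kValueFlag) != 0;
}
static bool IsAccessorAccessorConflict(int a, int b) {
  return ((a | b) & kValueFlag) == 0;
}

int main() {
  // { get x() {}, set x(v) {} } -- fine: the getter and setter bits are
  // disjoint.
  assert(!HasConflict(kGetterProperty, kSetterProperty));

  // { x: 1, x: 2 } -- data/data conflict.
  assert(HasConflict(kValueProperty, kValueProperty));
  assert(IsDataDataConflict(kValueProperty, kValueProperty));

  // { x: 1, get x() {} } -- data/accessor conflict.
  assert(HasConflict(kValueProperty, kGetterProperty));
  assert(IsDataAccessorConflict(kValueProperty, kGetterProperty));

  // Once both accessor bits are stored, adding another getter conflicts too.
  int stored = kGetterProperty | kSetterProperty;
  assert(HasConflict(stored, kGetterProperty));
  assert(IsAccessorAccessorConflict(stored, kGetterProperty));
  return 0;
}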
+
+
+  void CheckDuplicate(DuplicateFinder* finder,
+                      i::Token::Value property,
+                      int type,
+                      bool* ok);
+
+  // These types form an algebra over syntactic categories that is just
+  // rich enough to let us recognize and propagate the constructs that
+  // are either being counted in the preparser data, or are important
+  // for throwing the correct syntax error exceptions.
+
   enum ScopeType {
     kTopLevelScope,
     kFunctionScope
   };
 
-  // Types that allow us to recognize simple this-property assignments.
-  // A simple this-property assignment is a statement on the form
-  // "this.propertyName = {primitive constant or function parameter name);"
-  // where propertyName isn't "__proto__".
-  // The result is only relevant if the function body contains only
-  // simple this-property assignments.
-
-  enum StatementType {
-    kUnknownStatement
+  enum VariableDeclarationContext {
+    kSourceElement,
+    kStatement,
+    kForStatement
   };
 
-  enum ExpressionType {
-    kUnknownExpression,
-    kIdentifierExpression,  // Used to detect labels.
-    kThisExpression,
-    kThisPropertyExpression
+  class Expression;
+
+  class Identifier {
+   public:
+    static Identifier Default() {
+      return Identifier(kUnknownIdentifier);
+    }
+    static Identifier Eval()  {
+      return Identifier(kEvalIdentifier);
+    }
+    static Identifier Arguments()  {
+      return Identifier(kArgumentsIdentifier);
+    }
+    static Identifier FutureReserved()  {
+      return Identifier(kFutureReservedIdentifier);
+    }
+    static Identifier FutureStrictReserved()  {
+      return Identifier(kFutureStrictReservedIdentifier);
+    }
+    bool IsEval() { return type_ == kEvalIdentifier; }
+    bool IsArguments() { return type_ == kArgumentsIdentifier; }
+    bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+    bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+    bool IsFutureStrictReserved() {
+      return type_ == kFutureStrictReservedIdentifier;
+    }
+    bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+
+   private:
+    enum Type {
+      kUnknownIdentifier,
+      kFutureReservedIdentifier,
+      kFutureStrictReservedIdentifier,
+      kEvalIdentifier,
+      kArgumentsIdentifier
+    };
+    explicit Identifier(Type type) : type_(type) { }
+    Type type_;
+
+    friend class Expression;
   };
 
-  enum IdentifierType {
-    kUnknownIdentifier
+  // Bits 0 and 1 are used to identify the type of expression:
+  // If bit 0 is set, it's an identifier.
+  // If bit 1 is set, it's a string literal.
+  // If neither is set, it's no particular type, and having both set
+  // isn't used yet.
+  // Bit 2 is used to mark the expression as being parenthesized,
+  // so "(foo)" isn't recognized as a pure identifier (and possible label).
+  class Expression {
+   public:
+    static Expression Default() {
+      return Expression(kUnknownExpression);
+    }
+
+    static Expression FromIdentifier(Identifier id) {
+      return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
+    }
+
+    static Expression StringLiteral() {
+      return Expression(kUnknownStringLiteral);
+    }
+
+    static Expression UseStrictStringLiteral() {
+      return Expression(kUseStrictString);
+    }
+
+    static Expression This() {
+      return Expression(kThisExpression);
+    }
+
+    static Expression ThisProperty() {
+      return Expression(kThisPropertyExpression);
+    }
+
+    static Expression StrictFunction() {
+      return Expression(kStrictFunctionExpression);
+    }
+
+    bool IsIdentifier() {
+      return (code_ & kIdentifierFlag) != 0;
+    }
+
+    // Only works correctly if it is actually an identifier expression.
+    PreParser::Identifier AsIdentifier() {
+      return PreParser::Identifier(
+          static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
+    }
+
+    bool IsParenthesized() {
+      // If bit 0 or 1 is set, we interpret bit 2 as meaning parenthesized.
+      return (code_ & 7) > 4;
+    }
+
+    bool IsRawIdentifier() {
+      return !IsParenthesized() && IsIdentifier();
+    }
+
+    bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+    bool IsRawStringLiteral() {
+      return !IsParenthesized() && IsStringLiteral();
+    }
+
+    bool IsUseStrictLiteral() {
+      return (code_ & kStringLiteralMask) == kUseStrictString;
+    }
+
+    bool IsThis() {
+      return code_ == kThisExpression;
+    }
+
+    bool IsThisProperty() {
+      return code_ == kThisPropertyExpression;
+    }
+
+    bool IsStrictFunction() {
+      return code_ == kStrictFunctionExpression;
+    }
+
+    Expression Parenthesize() {
+      int type = code_ & 3;
+      if (type != 0) {
+        // Identifiers and string literals can be parenthesized.
+        // They no longer work as labels or directive prologues,
+        // but are still recognized in other contexts.
+        return Expression(code_ | kParentesizedExpressionFlag);
+      }
+      // For other types of expressions, it's not important to remember
+      // the parentheses.
+      return *this;
+    }
+
+   private:
+    // First two/three bits are used as flags.
+    // Bits 0 and 1 represent identifiers or string literals, and are
+    // mutually exclusive, but can both be absent.
+    // If bit 0 or 1 is set, bit 2 marks that the expression has
+    // been wrapped in parentheses (a string literal can no longer
+    // be a directive prologue, and an identifier can no longer be
+    // a label).
+    enum {
+      kUnknownExpression = 0,
+      // Identifiers
+      kIdentifierFlag = 1,  // Used to detect labels.
+      kIdentifierShift = 3,
+
+      kStringLiteralFlag = 2,  // Used to detect directive prologue.
+      kUnknownStringLiteral = kStringLiteralFlag,
+      kUseStrictString = kStringLiteralFlag | 8,
+      kStringLiteralMask = kUseStrictString,
+
+      kParentesizedExpressionFlag = 4,  // Only if identifier or string literal.
+
+      // Below here applies if neither identifier nor string literal.
+      kThisExpression = 4,
+      kThisPropertyExpression = 8,
+      kStrictFunctionExpression = 12
+    };
+
+    explicit Expression(int expression_code) : code_(expression_code) { }
+
+    int code_;
   };
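// A minimal standalone check of the bit layout documented above: bit 0 marks
// identifiers, bit 1 string literals, and bit 2 "parenthesized" (only
// meaningful when bit 0 or 1 is set). The constants mirror the enum and the
// helpers mirror the class predicates.
#include <cassert>

enum {
  kIdentifierFlag = 1,
  kStringLiteralFlag = 2,
  kUseStrictString = kStringLiteralFlag | 8,
  kStringLiteralMask = kUseStrictString,
  kParenthesizedFlag = 4,
  kThisExpression = 4  // reuses the value 4, valid only with bits 0/1 clear
};

static bool IsStringLiteral(int code) {
  return (code & kStringLiteralFlag) != 0;
}
static bool IsParenthesized(int code) { return (code & 7) > 4; }
static bool IsUseStrictDirective(int code) {
  // Mirrors Statement::ExpressionStatement below: only an unparenthesized
  // "use strict" string literal counts as a directive.
  return !IsParenthesized(code) &&
         (code & kStringLiteralMask) == kUseStrictString;
}
static int Parenthesize(int code) {
  return (code & 3) != 0 ? (code | kParenthesizedFlag) : code;
}

int main() {
  int use_strict = kUseStrictString;       // "use strict"
  int wrapped = Parenthesize(use_strict);  // ("use strict")

  assert(IsUseStrictDirective(use_strict));
  assert(IsStringLiteral(wrapped) && IsParenthesized(wrapped));
  assert(!IsUseStrictDirective(wrapped));  // parentheses disable the directive

  // 'this' shares the value 4, but with bits 0/1 clear the (code & 7) > 4
  // test keeps it from being reported as parenthesized.
  assert(!IsParenthesized(kThisExpression));
  return 0;
}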
 
-  enum SourceElementTypes {
+  class Statement {
+   public:
+    static Statement Default() {
+      return Statement(kUnknownStatement);
+    }
+
+    static Statement FunctionDeclaration() {
+      return Statement(kFunctionDeclaration);
+    }
+
+    // Creates an expression statement from an expression.
+    // Preserves whether it is an unparenthesized string literal,
+    // possibly "use strict".
+    static Statement ExpressionStatement(Expression expression) {
+      if (!expression.IsParenthesized()) {
+        if (expression.IsUseStrictLiteral()) {
+          return Statement(kUseStrictExpressionStatement);
+        }
+        if (expression.IsStringLiteral()) {
+          return Statement(kStringLiteralExpressionStatement);
+        }
+      }
+      return Default();
+    }
+
+    bool IsStringLiteral() {
+      return code_ != kUnknownStatement;
+    }
+
+    bool IsUseStrictLiteral() {
+      return code_ == kUseStrictExpressionStatement;
+    }
+
+    bool IsFunctionDeclaration() {
+      return code_ == kFunctionDeclaration;
+    }
+
+   private:
+    enum Type {
+      kUnknownStatement,
+      kStringLiteralExpressionStatement,
+      kUseStrictExpressionStatement,
+      kFunctionDeclaration
+    };
+
+    explicit Statement(Type code) : code_(code) {}
+    Type code_;
+  };
+
+  enum SourceElements {
     kUnknownSourceElements
   };
 
-  typedef int SourceElements;
-  typedef int Expression;
-  typedef int Statement;
-  typedef int Identifier;
   typedef int Arguments;
 
   class Scope {
@@ -112,7 +407,8 @@
           type_(type),
           materialized_literal_count_(0),
           expected_properties_(0),
-          with_nesting_count_(0) {
+          with_nesting_count_(0),
+          strict_((prev_ != NULL) && prev_->is_strict()) {
       *variable = this;
     }
     ~Scope() { *variable_ = prev_; }
@@ -122,6 +418,8 @@
     int expected_properties() { return expected_properties_; }
     int materialized_literal_count() { return materialized_literal_count_; }
     bool IsInsideWith() { return with_nesting_count_ != 0; }
+    bool is_strict() { return strict_; }
+    void set_strict() { strict_ = true; }
     void EnterWith() { with_nesting_count_++; }
     void LeaveWith() { with_nesting_count_--; }
 
@@ -132,6 +430,7 @@
     int materialized_literal_count_;
     int expected_properties_;
     int with_nesting_count_;
+    bool strict_;
   };
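// A minimal standalone sketch of the scope chaining shown above: each Scope
// links itself into a chain through a caller-owned head pointer and inherits
// strictness from its parent, so a "use strict" directive in an outer
// function makes every nested function scope strict as well.
#include <cassert>
#include <cstddef>

class Scope {
 public:
  explicit Scope(Scope** variable)
      : variable_(variable),
        prev_(*variable),
        strict_(prev_ != NULL && prev_->is_strict()) {
    *variable = this;
  }
  ~Scope() { *variable_ = prev_; }  // RAII: restore the chain on exit

  bool is_strict() const { return strict_; }
  void set_strict() { strict_ = true; }

 private:
  Scope** variable_;
  Scope* prev_;
  bool strict_;
};

int main() {
  Scope* current = NULL;
  Scope top(&current);          // top-level scope, sloppy by default
  assert(!top.is_strict());
  top.set_strict();             // a "use strict" directive was seen
  {
    Scope inner(&current);      // nested function scope
    assert(inner.is_strict());  // inherited from the parent at construction
  }
  assert(current == &top);
  return 0;
}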
 
   // Private constructor only used in PreParseProgram.
@@ -143,25 +442,36 @@
         log_(log),
         scope_(NULL),
         stack_limit_(stack_limit),
+        strict_mode_violation_location_(i::Scanner::Location::invalid()),
+        strict_mode_violation_type_(NULL),
         stack_overflow_(false),
         allow_lazy_(true),
-        parenthesized_function_(false) { }
+        parenthesized_function_(false),
+        harmony_block_scoping_(scanner->HarmonyBlockScoping()) { }
 
   // Preparse the program. Only called in PreParseProgram after creating
   // the instance.
   PreParseResult PreParse() {
     Scope top_scope(&scope_, kTopLevelScope);
     bool ok = true;
+    int start_position = scanner_->peek_location().beg_pos;
     ParseSourceElements(i::Token::EOS, &ok);
     if (stack_overflow_) return kPreParseStackOverflow;
     if (!ok) {
       ReportUnexpectedToken(scanner_->current_token());
+    } else if (scope_->is_strict()) {
+      CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
     }
     return kPreParseSuccess;
   }
 
   // Report syntax error
   void ReportUnexpectedToken(i::Token::Value token);
+  void ReportMessageAt(i::Scanner::Location location,
+                       const char* type,
+                       const char* name_opt) {
+    log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
+  }
   void ReportMessageAt(int start_pos,
                        int end_pos,
                        const char* type,
@@ -169,17 +479,22 @@
     log_->LogMessage(start_pos, end_pos, type, name_opt);
   }
 
+  void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
   // All ParseXXX functions take as the last argument an *ok parameter
   // which is set to false if parsing failed; it is unchanged otherwise.
   // By making the 'exception handling' explicit, we are forced to check
   // for failure at the call sites.
+  Statement ParseSourceElement(bool* ok);
   SourceElements ParseSourceElements(int end_token, bool* ok);
   Statement ParseStatement(bool* ok);
   Statement ParseFunctionDeclaration(bool* ok);
-  Statement ParseNativeDeclaration(bool* ok);
   Statement ParseBlock(bool* ok);
-  Statement ParseVariableStatement(bool* ok);
-  Statement ParseVariableDeclarations(bool accept_IN, int* num_decl, bool* ok);
+  Statement ParseVariableStatement(VariableDeclarationContext var_context,
+                                   bool* ok);
+  Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                      int* num_decl,
+                                      bool* ok);
   Statement ParseExpressionOrLabelledStatement(bool* ok);
   Statement ParseIfStatement(bool* ok);
   Statement ParseContinueStatement(bool* ok);
@@ -215,7 +530,9 @@
 
   Identifier ParseIdentifier(bool* ok);
   Identifier ParseIdentifierName(bool* ok);
-  Identifier ParseIdentifierOrGetOrSet(bool* is_get, bool* is_set, bool* ok);
+  Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
+                                           bool* is_set,
+                                           bool* ok);
 
   // Logs the currently parsed literal as a symbol in the preparser data.
   void LogSymbol();
@@ -245,6 +562,12 @@
 
   bool peek_any_identifier();
 
+  void set_strict_mode() {
+    scope_->set_strict();
+  }
+
+  bool strict_mode() { return scope_->is_strict(); }
+
   void Consume(i::Token::Value token) { Next(); }
 
   void Expect(i::Token::Value token, bool* ok) {
@@ -265,13 +588,27 @@
 
   static int Precedence(i::Token::Value tok, bool accept_IN);
 
+  void SetStrictModeViolation(i::Scanner::Location,
+                              const char* type,
+                              bool *ok);
+
+  void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
+
+  void StrictModeIdentifierViolation(i::Scanner::Location,
+                                     const char* eval_args_type,
+                                     Identifier identifier,
+                                     bool* ok);
+
   i::JavaScriptScanner* scanner_;
   i::ParserRecorder* log_;
   Scope* scope_;
   uintptr_t stack_limit_;
+  i::Scanner::Location strict_mode_violation_location_;
+  const char* strict_mode_violation_type_;
   bool stack_overflow_;
   bool allow_lazy_;
   bool parenthesized_function_;
+  bool harmony_block_scoping_;
 };
 } }  // v8::preparser
 
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index c777ab4..663af28 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -123,15 +123,11 @@
 }
 
 
-void PrettyPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
-  Print("<enter with> (");
+void PrettyPrinter::VisitWithStatement(WithStatement* node) {
+  Print("with (");
   Visit(node->expression());
   Print(") ");
-}
-
-
-void PrettyPrinter::VisitWithExitStatement(WithExitStatement* node) {
-  Print("<exit with>");
+  Visit(node->statement());
 }
 
 
@@ -201,7 +197,8 @@
   Print("try ");
   Visit(node->try_block());
   Print(" catch (");
-  Visit(node->catch_var());
+  const bool quote = false;
+  PrintLiteral(node->variable()->name(), quote);
   Print(") ");
   Visit(node->catch_block());
 }
@@ -282,37 +279,6 @@
 }
 
 
-void PrettyPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  Print("{ ");
-  Visit(node->key());
-  Print(": ");
-  Visit(node->value());
-  Print(" }");
-}
-
-
-void PrettyPrinter::VisitSlot(Slot* node) {
-  switch (node->type()) {
-    case Slot::PARAMETER:
-      Print("parameter[%d]", node->index());
-      break;
-    case Slot::LOCAL:
-      Print("local[%d]", node->index());
-      break;
-    case Slot::CONTEXT:
-      Print("context[%d]", node->index());
-      break;
-    case Slot::LOOKUP:
-      Print("lookup[");
-      PrintLiteral(node->var()->name(), false);
-      Print("]");
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void PrettyPrinter::VisitVariableProxy(VariableProxy* node) {
   PrintLiteral(node->name(), false);
 }
@@ -370,7 +336,10 @@
 
 
 void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
-  Print("(%s", Token::String(node->op()));
+  Token::Value op = node->op();
+  bool needsSpace =
+      op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID;
+  Print("(%s%s", Token::String(op), needsSpace ? " " : "");
   Visit(node->expression());
   Print(")");
 }
@@ -388,7 +357,7 @@
 void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
   Print("(");
   Visit(node->left());
-  Print("%s", Token::String(node->op()));
+  Print(" %s ", Token::String(node->op()));
   Visit(node->right());
   Print(")");
 }
@@ -397,7 +366,7 @@
 void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
   Print("(");
   Visit(node->left());
-  Print("%s", Token::String(node->op()));
+  Print(" %s ", Token::String(node->op()));
   Visit(node->right());
   Print(")");
 }
@@ -664,17 +633,14 @@
 
 void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
   if (labels != NULL && labels->length() > 0) {
-    if (info == NULL) {
-      PrintIndented("LABELS ");
-    } else {
-      PrintIndented(info);
-      Print(" ");
-    }
+    PrintIndented(info == NULL ? "LABELS" : info);
+    Print(" ");
     PrintLabels(labels);
+    Print("\n");
   } else if (info != NULL) {
     PrintIndented(info);
+    Print("\n");
   }
-  Print("\n");
 }
 
 
@@ -755,7 +721,7 @@
   if (node->fun() == NULL) {
     // var or const declarations
     PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
-                                 node->proxy()->AsVariable(),
+                                 node->proxy()->var(),
                                  node->proxy()->name());
   } else {
     // function declarations
@@ -802,13 +768,10 @@
 }
 
 
-void AstPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
-  PrintIndentedVisit("WITH ENTER", node->expression());
-}
-
-
-void AstPrinter::VisitWithExitStatement(WithExitStatement* node) {
-  PrintIndented("WITH EXIT\n");
+void AstPrinter::VisitWithStatement(WithStatement* node) {
+  IndentedScope indent(this, "WITH");
+  PrintIndentedVisit("OBJECT", node->expression());
+  PrintIndentedVisit("BODY", node->statement());
 }
 
 
@@ -859,7 +822,9 @@
 void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
   IndentedScope indent(this, "TRY CATCH");
   PrintIndentedVisit("TRY", node->try_block());
-  PrintIndentedVisit("CATCHVAR", node->catch_var());
+  PrintLiteralWithModeIndented("CATCHVAR",
+                               node->variable(),
+                               node->variable()->name());
   PrintIndentedVisit("CATCH", node->catch_block());
 }
 
@@ -959,27 +924,27 @@
 }
 
 
-void AstPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  IndentedScope indent(this, "CatchExtensionObject");
-  PrintIndentedVisit("KEY", node->key());
-  PrintIndentedVisit("VALUE", node->value());
-}
-
-
-void AstPrinter::VisitSlot(Slot* node) {
-  PrintIndented("SLOT ");
-  PrettyPrinter::VisitSlot(node);
-  Print("\n");
-}
-
-
 void AstPrinter::VisitVariableProxy(VariableProxy* node) {
-  PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name());
   Variable* var = node->var();
-  if (var != NULL && var->rewrite() != NULL) {
-    IndentedScope indent(this);
-    Visit(var->rewrite());
+  EmbeddedVector<char, 128> buf;
+  int pos = OS::SNPrintF(buf, "VAR PROXY");
+  switch (var->location()) {
+    case Variable::UNALLOCATED:
+      break;
+    case Variable::PARAMETER:
+      OS::SNPrintF(buf + pos, " parameter[%d]", var->index());
+      break;
+    case Variable::LOCAL:
+      OS::SNPrintF(buf + pos, " local[%d]", var->index());
+      break;
+    case Variable::CONTEXT:
+      OS::SNPrintF(buf + pos, " context[%d]", var->index());
+      break;
+    case Variable::LOOKUP:
+      OS::SNPrintF(buf + pos, " lookup");
+      break;
   }
+  PrintLiteralWithModeIndented(buf.start(), var, node->name());
 }
 
 
@@ -1137,7 +1102,7 @@
 
 
 void JsonAstBuilder::AddAttribute(const char* name, Handle<String> value) {
-  SmartPointer<char> value_string = value->ToCString();
+  SmartArrayPointer<char> value_string = value->ToCString();
   AddAttributePrefix(name);
   Print("\"%s\"", *value_string);
 }
@@ -1202,14 +1167,10 @@
 }
 
 
-void JsonAstBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  TagScope tag(this, "WithEnterStatement");
+void JsonAstBuilder::VisitWithStatement(WithStatement* stmt) {
+  TagScope tag(this, "WithStatement");
   Visit(stmt->expression());
-}
-
-
-void JsonAstBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
-  TagScope tag(this, "WithExitStatement");
+  Visit(stmt->statement());
 }
 
 
@@ -1251,8 +1212,10 @@
 
 void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
   TagScope tag(this, "TryCatchStatement");
+  { AttributesScope attributes(this);
+    AddAttribute("variable", stmt->variable()->name());
+  }
   Visit(stmt->try_block());
-  Visit(stmt->catch_var());
   Visit(stmt->catch_block());
 }
 
@@ -1291,39 +1254,32 @@
 }
 
 
-void JsonAstBuilder::VisitSlot(Slot* expr) {
-  TagScope tag(this, "Slot");
+void JsonAstBuilder::VisitVariableProxy(VariableProxy* expr) {
+  TagScope tag(this, "Variable");
   {
     AttributesScope attributes(this);
-    switch (expr->type()) {
-      case Slot::PARAMETER:
-        AddAttribute("type", "PARAMETER");
+    Variable* var = expr->var();
+    AddAttribute("name", var->name());
+    switch (var->location()) {
+      case Variable::UNALLOCATED:
+        AddAttribute("location", "UNALLOCATED");
         break;
-      case Slot::LOCAL:
-        AddAttribute("type", "LOCAL");
+      case Variable::PARAMETER:
+        AddAttribute("location", "PARAMETER");
+        AddAttribute("index", var->index());
         break;
-      case Slot::CONTEXT:
-        AddAttribute("type", "CONTEXT");
+      case Variable::LOCAL:
+        AddAttribute("location", "LOCAL");
+        AddAttribute("index", var->index());
         break;
-      case Slot::LOOKUP:
-        AddAttribute("type", "LOOKUP");
+      case Variable::CONTEXT:
+        AddAttribute("location", "CONTEXT");
+        AddAttribute("index", var->index());
+        break;
+      case Variable::LOOKUP:
+        AddAttribute("location", "LOOKUP");
         break;
     }
-    AddAttribute("index", expr->index());
-  }
-}
-
-
-void JsonAstBuilder::VisitVariableProxy(VariableProxy* expr) {
-  if (expr->var()->rewrite() == NULL) {
-    TagScope tag(this, "VariableProxy");
-    {
-      AttributesScope attributes(this);
-      AddAttribute("name", expr->name());
-      AddAttribute("mode", Variable::Mode2String(expr->var()->mode()));
-    }
-  } else {
-    Visit(expr->var()->rewrite());
   }
 }
 
@@ -1357,13 +1313,6 @@
 }
 
 
-void JsonAstBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
-  TagScope tag(this, "CatchExtensionObject");
-  Visit(expr->key());
-  Visit(expr->value());
-}
-
-
 void JsonAstBuilder::VisitAssignment(Assignment* expr) {
   TagScope tag(this, "Assignment");
   {
@@ -1383,10 +1332,6 @@
 
 void JsonAstBuilder::VisitProperty(Property* expr) {
   TagScope tag(this, "Property");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("type", expr->is_synthetic() ? "SYNTHETIC" : "NORMAL");
-  }
   Visit(expr->obj());
   Visit(expr->key());
 }
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 451b17e..a26c48e 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -28,6 +28,7 @@
 #ifndef V8_PRETTYPRINTER_H_
 #define V8_PRETTYPRINTER_H_
 
+#include "allocation.h"
 #include "ast.h"
 
 namespace v8 {
@@ -51,7 +52,6 @@
   // Print a node to stdout.
   static void PrintOut(AstNode* node);
 
-  virtual void VisitSlot(Slot* node);
   // Individual nodes
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
@@ -86,7 +86,6 @@
   const char* PrintProgram(FunctionLiteral* program);
 
   // Individual nodes
-  virtual void VisitSlot(Slot* node);
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
@@ -162,7 +161,6 @@
   void AddAttribute(const char* name, bool value);
 
   // AST node visit functions.
-  virtual void VisitSlot(Slot* node);
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 747e5c7..88d6e87 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -28,8 +28,6 @@
 #ifndef V8_PROFILE_GENERATOR_INL_H_
 #define V8_PROFILE_GENERATOR_INL_H_
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #include "profile-generator.h"
 
 namespace v8 {
@@ -80,22 +78,6 @@
 }
 
 
-void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
-  CodeTree::Locator locator;
-  tree_.Insert(addr, &locator);
-  locator.set_value(CodeEntryInfo(entry, size));
-}
-
-
-void CodeMap::MoveCode(Address from, Address to) {
-  tree_.Move(from, to);
-}
-
-void CodeMap::DeleteCode(Address addr) {
-  tree_.Remove(addr);
-}
-
-
 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
   switch (tag) {
     case GC:
@@ -123,6 +105,4 @@
 
 } }  // namespace v8::internal
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 #endif  // V8_PROFILE_GENERATOR_INL_H_
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 4cf62e2..e319efb 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,17 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #include "v8.h"
+
+#include "profile-generator-inl.h"
+
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "scopeinfo.h"
 #include "unicode.h"
 #include "zone-inl.h"
 
-#include "profile-generator-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -111,7 +110,8 @@
   Vector<char> dst = Vector<char>::New(len + 1);
   OS::StrNCpy(dst, src, len);
   dst[len] = '\0';
-  uint32_t hash = HashSequentialString(dst.start(), len);
+  uint32_t hash =
+      HashSequentialString(dst.start(), len, HEAP->HashSeed());
   return AddOrDisposeString(dst.start(), hash);
 }
 
@@ -144,7 +144,8 @@
     DeleteArray(str.start());
     return format;
   }
-  uint32_t hash = HashSequentialString(str.start(), len);
+  uint32_t hash = HashSequentialString(
+      str.start(), len, HEAP->HashSeed());
   return AddOrDisposeString(str.start(), hash);
 }
 
@@ -177,18 +178,21 @@
 
 
 uint32_t CodeEntry::GetCallUid() const {
-  uint32_t hash = ComputeIntegerHash(tag_);
+  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
   if (shared_id_ != 0) {
-    hash ^= ComputeIntegerHash(
-        static_cast<uint32_t>(shared_id_));
+    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
+                               v8::internal::kZeroHashSeed);
   } else {
     hash ^= ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
+        v8::internal::kZeroHashSeed);
     hash ^= ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
+        v8::internal::kZeroHashSeed);
     hash ^= ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
-    hash ^= ComputeIntegerHash(line_number_);
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
+        v8::internal::kZeroHashSeed);
+    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
   }
   return hash;
 }
@@ -493,6 +497,28 @@
     CodeMap::CodeEntryInfo(NULL, 0);
 
 
+void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
+  DeleteAllCoveredCode(addr, addr + size);
+  CodeTree::Locator locator;
+  tree_.Insert(addr, &locator);
+  locator.set_value(CodeEntryInfo(entry, size));
+}
+
+
+void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
+  List<Address> to_delete;
+  Address addr = end - 1;
+  while (addr >= start) {
+    CodeTree::Locator locator;
+    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
+    Address start2 = locator.key(), end2 = start2 + locator.value().size;
+    if (start2 < end && start < end2) to_delete.Add(start2);
+    addr = start2 - 1;
+  }
+  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
+}
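// A minimal standalone sketch of the "drop everything the new code range
// overlaps" step above, using std::map keyed by start address instead of
// V8's CodeTree. Assumes existing entries never overlap, an invariant that
// AddCode itself maintains. Intervals are [start, start + size).
#include <cassert>
#include <cstdint>
#include <map>

typedef uintptr_t Address;
struct Entry { const char* name; unsigned size; };

static void AddCode(std::map<Address, Entry>* map, Address addr, Entry entry) {
  Address end = addr + entry.size;
  // Walk backwards from the last entry starting below `end` and erase every
  // entry that overlaps [addr, end).
  auto it = map->lower_bound(end);
  while (it != map->begin()) {
    --it;
    Address start2 = it->first;
    Address end2 = start2 + it->second.size;
    if (end2 <= addr) break;  // no overlap; earlier entries end even earlier
    it = map->erase(it);
  }
  (*map)[addr] = entry;
}

int main() {
  std::map<Address, Entry> code;
  Entry f = {"f", 50}, g = {"g", 50}, h = {"h", 100};
  AddCode(&code, 100, f);  // [100, 150)
  AddCode(&code, 200, g);  // [200, 250)
  AddCode(&code, 120, h);  // [120, 220) overlaps both -> they are removed
  assert(code.size() == 1 && code.begin()->first == 120);
  return 0;
}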
+
+
 CodeEntry* CodeMap::FindEntry(Address addr) {
   CodeTree::Locator locator;
   if (tree_.FindGreatestLessThan(addr, &locator)) {
@@ -521,6 +547,16 @@
 }
 
 
+void CodeMap::MoveCode(Address from, Address to) {
+  if (from == to) return;
+  CodeTree::Locator locator;
+  if (!tree_.Find(from, &locator)) return;
+  CodeEntryInfo entry = locator.value();
+  tree_.Remove(from);
+  AddCode(to, entry.entry, entry.size);
+}
+
+
 void CodeMap::CodeTreePrinter::Call(
     const Address& key, const CodeMap::CodeEntryInfo& value) {
   OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
@@ -984,6 +1020,11 @@
 }
 
 
+Handle<HeapObject> HeapEntry::GetHeapObject() {
+  return snapshot_->collection()->FindHeapObjectById(id());
+}
+
+
 template<class Visitor>
 void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
   List<HeapEntry*> list(10);
@@ -1095,7 +1136,7 @@
       : retained_size_(0) {
   }
 
-  int reained_size() const { return retained_size_; }
+  int retained_size() const { return retained_size_; }
 
   void Apply(HeapEntry** entry_ptr) {
     if ((*entry_ptr)->painted_reachable()) {
@@ -1136,7 +1177,7 @@
 
   RetainedSizeCalculator ret_size_calc;
   snapshot()->IterateEntries(&ret_size_calc);
-  retained_size_ = ret_size_calc.reained_size();
+  retained_size_ = ret_size_calc.retained_size();
   ASSERT((retained_size_ & kExactRetainedSizeTag) == 0);
   retained_size_ |= kExactRetainedSizeTag;
 }
@@ -1175,7 +1216,7 @@
       entries_sorted_(false) {
   STATIC_ASSERT(
       sizeof(HeapGraphEdge) ==
-      SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize);  // NOLINT
+      SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
   STATIC_ASSERT(
       sizeof(HeapEntry) ==
       SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize);  // NOLINT
@@ -1196,12 +1237,9 @@
                                    int children_count,
                                    int retainers_count) {
   ASSERT(raw_entries_ == NULL);
-  raw_entries_ = NewArray<char>(
-      HeapEntry::EntriesSize(entries_count, children_count, retainers_count));
-#ifdef DEBUG
   raw_entries_size_ =
       HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
-#endif
+  raw_entries_ = NewArray<char>(raw_entries_size_);
 }
 
 
@@ -1347,8 +1385,8 @@
 
 
 void HeapObjectsMap::SnapshotGenerationFinished() {
-    initial_fill_mode_ = false;
-    RemoveDeadEntries();
+  initial_fill_mode_ = false;
+  RemoveDeadEntries();
 }
 
 
@@ -1429,10 +1467,13 @@
 uint64_t HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
   uint64_t id = static_cast<uint64_t>(info->GetHash());
   const char* label = info->GetLabel();
-  id ^= HashSequentialString(label, static_cast<int>(strlen(label)));
+  id ^= HashSequentialString(label,
+                             static_cast<int>(strlen(label)),
+                             HEAP->HashSeed());
   intptr_t element_count = info->GetElementCount();
   if (element_count != -1)
-    id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count));
+    id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
+                             v8::internal::kZeroHashSeed);
   return id << 1;
 }
 
@@ -1494,6 +1535,24 @@
 }
 
 
+Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
+  AssertNoAllocation no_allocation;
+  HeapObject* object = NULL;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  // Make sure that object with the given id is still reachable.
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next()) {
+    if (ids_.FindObject(obj->address()) == id) {
+      ASSERT(object == NULL);
+      object = obj;
+      // Can't break -- kFilterUnreachable requires full heap traversal.
+    }
+  }
+  return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
+}
+
+
 HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
     reinterpret_cast<HeapEntry*>(1);
 
@@ -1601,6 +1660,28 @@
 }
 
 
+const char* HeapObjectsSet::GetTag(Object* obj) {
+  HeapObject* object = HeapObject::cast(obj);
+  HashMap::Entry* cache_entry =
+      entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
+  if (cache_entry != NULL
+      && cache_entry->value != HeapEntriesMap::kHeapEntryPlaceholder) {
+    return reinterpret_cast<const char*>(cache_entry->value);
+  } else {
+    return NULL;
+  }
+}
+
+
+void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
+  if (!obj->IsHeapObject()) return;
+  HeapObject* object = HeapObject::cast(obj);
+  HashMap::Entry* cache_entry =
+      entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+  cache_entry->value = const_cast<char*>(tag);
+}
+
+
 HeapObject *const V8HeapExplorer::kInternalRootObject =
     reinterpret_cast<HeapObject*>(
         static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
@@ -1612,7 +1693,8 @@
 V8HeapExplorer::V8HeapExplorer(
     HeapSnapshot* snapshot,
     SnapshottingProgressReportingInterface* progress)
-    : snapshot_(snapshot),
+    : heap_(Isolate::Current()->heap()),
+      snapshot_(snapshot),
       collection_(snapshot_->collection()),
       progress_(progress),
       filler_(NULL) {
@@ -1638,6 +1720,18 @@
     return snapshot_->AddRootEntry(children_count);
   } else if (object == kGcRootsObject) {
     return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+  } else if (object->IsJSGlobalObject()) {
+    const char* tag = objects_tags_.GetTag(object);
+    const char* name = collection_->names()->GetName(
+        GetConstructorName(JSObject::cast(object)));
+    if (tag != NULL) {
+      name = collection_->names()->GetFormatted("%s / %s", name, tag);
+    }
+    return AddEntry(object,
+                    HeapEntry::kObject,
+                    name,
+                    children_count,
+                    retainers_count);
   } else if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
@@ -1657,8 +1751,7 @@
     return AddEntry(object,
                     HeapEntry::kObject,
                     collection_->names()->GetName(
-                        GetConstructorNameForHeapProfile(
-                            JSObject::cast(object))),
+                        GetConstructorName(JSObject::cast(object))),
                     children_count,
                     retainers_count);
   } else if (object->IsString()) {
@@ -1690,10 +1783,14 @@
                         : "",
                     children_count,
                     retainers_count);
-  } else if (object->IsFixedArray() || object->IsByteArray()) {
+  } else if (object->IsFixedArray() ||
+             object->IsFixedDoubleArray() ||
+             object->IsByteArray() ||
+             object->IsExternalArray()) {
+    const char* tag = objects_tags_.GetTag(object);
     return AddEntry(object,
                     HeapEntry::kArray,
-                    "",
+                    tag != NULL ? tag : "",
                     children_count,
                     retainers_count);
   } else if (object->IsHeapNumber()) {
@@ -1735,7 +1832,7 @@
   switch (object->map()->instance_type()) {
     case MAP_TYPE: return "system / Map";
     case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
-    case PROXY_TYPE: return "system / Proxy";
+    case FOREIGN_TYPE: return "system / Foreign";
     case ODDBALL_TYPE: return "system / Oddball";
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
     case NAME##_TYPE: return "system / "#Name;
@@ -1779,6 +1876,7 @@
     ASSERT(Memory::Object_at(field)->IsHeapObject());
     *field |= kFailureTag;
   }
+
  private:
   bool CheckVisitedAndUnmark(Object** field) {
     if ((*field)->IsFailure()) {
@@ -1800,15 +1898,13 @@
   HeapEntry* entry = GetEntry(obj);
   if (entry == NULL) return;  // No interest in this object.
 
+  bool extract_indexed_refs = true;
   if (obj->IsJSGlobalProxy()) {
     // We need to reference JS global objects from snapshot's root.
     // We use JSGlobalProxy because this is what embedder (e.g. browser)
     // uses for the global object.
     JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
     SetRootShortcutReference(proxy->map()->prototype());
-    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
-    IndexedReferencesExtractor refs_extractor(this, obj, entry);
-    obj->Iterate(&refs_extractor);
   } else if (obj->IsJSObject()) {
     JSObject* js_obj = JSObject::cast(obj);
     ExtractClosureReferences(js_obj, entry);
@@ -1816,7 +1912,7 @@
     ExtractElementReferences(js_obj, entry);
     ExtractInternalReferences(js_obj, entry);
     SetPropertyReference(
-        obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
+        obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
     if (obj->IsJSFunction()) {
       JSFunction* js_fun = JSFunction::cast(js_obj);
       Object* proto_or_map = js_fun->prototype_or_initial_map();
@@ -1824,39 +1920,49 @@
         if (!proto_or_map->IsMap()) {
           SetPropertyReference(
               obj, entry,
-              HEAP->prototype_symbol(), proto_or_map,
+              heap_->prototype_symbol(), proto_or_map,
               JSFunction::kPrototypeOrInitialMapOffset);
         } else {
           SetPropertyReference(
               obj, entry,
-              HEAP->prototype_symbol(), js_fun->prototype());
+              heap_->prototype_symbol(), js_fun->prototype());
         }
       }
       SetInternalReference(js_fun, entry,
                            "shared", js_fun->shared(),
                            JSFunction::kSharedFunctionInfoOffset);
+      TagObject(js_fun->unchecked_context(), "(context)");
       SetInternalReference(js_fun, entry,
                            "context", js_fun->unchecked_context(),
                            JSFunction::kContextOffset);
+      TagObject(js_fun->literals(), "(function literals)");
       SetInternalReference(js_fun, entry,
                            "literals", js_fun->literals(),
                            JSFunction::kLiteralsOffset);
     }
+    TagObject(js_obj->properties(), "(object properties)");
     SetInternalReference(obj, entry,
                          "properties", js_obj->properties(),
                          JSObject::kPropertiesOffset);
+    TagObject(js_obj->elements(), "(object elements)");
     SetInternalReference(obj, entry,
                          "elements", js_obj->elements(),
                          JSObject::kElementsOffset);
-    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
-    IndexedReferencesExtractor refs_extractor(this, obj, entry);
-    obj->Iterate(&refs_extractor);
   } else if (obj->IsString()) {
     if (obj->IsConsString()) {
       ConsString* cs = ConsString::cast(obj);
       SetInternalReference(obj, entry, 1, cs->first());
       SetInternalReference(obj, entry, 2, cs->second());
     }
+    extract_indexed_refs = false;
+  } else if (obj->IsGlobalContext()) {
+    Context* context = Context::cast(obj);
+    TagObject(context->jsfunction_result_caches(),
+              "(context func. result caches)");
+    TagObject(context->normalized_map_cache(), "(context norm. map cache)");
+    TagObject(context->runtime_context(), "(runtime context)");
+    TagObject(context->map_cache(), "(context map cache)");
+    TagObject(context->data(), "(context data)");
   } else if (obj->IsMap()) {
     Map* map = Map::cast(obj);
     SetInternalReference(obj, entry,
@@ -1864,15 +1970,15 @@
     SetInternalReference(obj, entry,
                          "constructor", map->constructor(),
                          Map::kConstructorOffset);
-    SetInternalReference(obj, entry,
-                         "descriptors", map->instance_descriptors(),
-                         Map::kInstanceDescriptorsOffset);
+    if (!map->instance_descriptors()->IsEmpty()) {
+      TagObject(map->instance_descriptors(), "(map descriptors)");
+      SetInternalReference(obj, entry,
+                           "descriptors", map->instance_descriptors(),
+                           Map::kInstanceDescriptorsOrBitField3Offset);
+    }
     SetInternalReference(obj, entry,
                          "code_cache", map->code_cache(),
                          Map::kCodeCacheOffset);
-    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
-    IndexedReferencesExtractor refs_extractor(this, obj, entry);
-    obj->Iterate(&refs_extractor);
   } else if (obj->IsSharedFunctionInfo()) {
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
     SetInternalReference(obj, entry,
@@ -1881,16 +1987,61 @@
     SetInternalReference(obj, entry,
                          "code", shared->unchecked_code(),
                          SharedFunctionInfo::kCodeOffset);
+    TagObject(shared->scope_info(), "(function scope info)");
+    SetInternalReference(obj, entry,
+                         "scope_info", shared->scope_info(),
+                         SharedFunctionInfo::kScopeInfoOffset);
     SetInternalReference(obj, entry,
                          "instance_class_name", shared->instance_class_name(),
                          SharedFunctionInfo::kInstanceClassNameOffset);
     SetInternalReference(obj, entry,
                          "script", shared->script(),
                          SharedFunctionInfo::kScriptOffset);
-    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
-    IndexedReferencesExtractor refs_extractor(this, obj, entry);
-    obj->Iterate(&refs_extractor);
-  } else {
+  } else if (obj->IsScript()) {
+    Script* script = Script::cast(obj);
+    SetInternalReference(obj, entry,
+                         "source", script->source(),
+                         Script::kSourceOffset);
+    SetInternalReference(obj, entry,
+                         "name", script->name(),
+                         Script::kNameOffset);
+    SetInternalReference(obj, entry,
+                         "data", script->data(),
+                         Script::kDataOffset);
+    SetInternalReference(obj, entry,
+                         "context_data", script->context_data(),
+                         Script::kContextOffset);
+    TagObject(script->line_ends(), "(script line ends)");
+    SetInternalReference(obj, entry,
+                         "line_ends", script->line_ends(),
+                         Script::kLineEndsOffset);
+  } else if (obj->IsDescriptorArray()) {
+    DescriptorArray* desc_array = DescriptorArray::cast(obj);
+    if (desc_array->length() > DescriptorArray::kContentArrayIndex) {
+      Object* content_array =
+          desc_array->get(DescriptorArray::kContentArrayIndex);
+      TagObject(content_array, "(map descriptor content)");
+      SetInternalReference(obj, entry,
+                           "content", content_array,
+                           FixedArray::OffsetOfElementAt(
+                               DescriptorArray::kContentArrayIndex));
+    }
+  } else if (obj->IsCodeCache()) {
+    CodeCache* code_cache = CodeCache::cast(obj);
+    TagObject(code_cache->default_cache(), "(default code cache)");
+    SetInternalReference(obj, entry,
+                         "default_cache", code_cache->default_cache(),
+                         CodeCache::kDefaultCacheOffset);
+    TagObject(code_cache->normal_type_cache(), "(code type cache)");
+    SetInternalReference(obj, entry,
+                         "type_cache", code_cache->normal_type_cache(),
+                         CodeCache::kNormalTypeCacheOffset);
+  } else if (obj->IsCode()) {
+    Code* code = Code::cast(obj);
+    TagObject(code->unchecked_relocation_info(), "(code relocation info)");
+    TagObject(code->unchecked_deoptimization_data(), "(code deopt data)");
+  }
+  if (extract_indexed_refs) {
     SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
     IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
@@ -1904,7 +2055,7 @@
     HandleScope hs;
     JSFunction* func = JSFunction::cast(js_obj);
     Context* context = func->context();
-    ZoneScope zscope(DELETE_ON_EXIT);
+    ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
     SerializedScopeInfo* serialized_scope_info =
         context->closure()->shared()->scope_info();
     ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
@@ -1984,7 +2135,7 @@
       }
     }
   } else if (js_obj->HasDictionaryElements()) {
-    NumberDictionary* dictionary = js_obj->element_dictionary();
+    SeededNumberDictionary* dictionary = js_obj->element_dictionary();
     int length = dictionary->Capacity();
     for (int i = 0; i < length; ++i) {
       Object* k = dictionary->KeyAt(i);
@@ -2009,6 +2160,31 @@
 }
 
 
+String* V8HeapExplorer::GetConstructorName(JSObject* object) {
+  if (object->IsJSFunction()) return HEAP->closure_symbol();
+  String* constructor_name = object->constructor_name();
+  if (constructor_name == HEAP->Object_symbol()) {
+    // Look up an immediate "constructor" property, if it is a function,
+    // return its name. This is for instances of binding objects, which
+    // have prototype constructor type "Object".
+    Object* constructor_prop = NULL;
+    LookupResult result;
+    object->LocalLookupRealNamedProperty(HEAP->constructor_symbol(), &result);
+    if (result.IsProperty()) {
+      constructor_prop = result.GetLazyValue();
+    }
+    if (constructor_prop->IsJSFunction()) {
+      Object* maybe_name = JSFunction::cast(constructor_prop)->shared()->name();
+      if (maybe_name->IsString()) {
+        String* name = String::cast(maybe_name);
+        if (name->length() > 0) return name;
+      }
+    }
+  }
+  return object->constructor_name();
+}
+
+
 HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
   if (!obj->IsHeapObject()) return NULL;
   return filler_->FindOrAddEntry(obj, this);
@@ -2048,7 +2224,7 @@
   }
   SetRootGcRootsReference();
   RootsReferencesExtractor extractor(this);
-  HEAP->IterateRoots(&extractor, VISIT_ALL);
+  heap_->IterateRoots(&extractor, VISIT_ALL);
   filler_ = NULL;
   return progress_->ProgressReport(false);
 }
@@ -2203,6 +2379,76 @@
 }
 
 
+void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
+  if (obj->IsHeapObject() &&
+      !obj->IsOddball() &&
+      obj != heap_->raw_unchecked_empty_byte_array() &&
+      obj != heap_->raw_unchecked_empty_fixed_array() &&
+      obj != heap_->raw_unchecked_empty_fixed_double_array() &&
+      obj != heap_->raw_unchecked_empty_descriptor_array()) {
+    objects_tags_.SetTag(obj, tag);
+  }
+}
+
+
+class GlobalObjectsEnumerator : public ObjectVisitor {
+ public:
+  virtual void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsGlobalContext()) {
+        Context* context = Context::cast(*p);
+        JSObject* proxy = context->global_proxy();
+        if (proxy->IsJSGlobalProxy()) {
+          Object* global = proxy->map()->prototype();
+          if (global->IsJSGlobalObject()) {
+            objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
+          }
+        }
+      }
+    }
+  }
+  int count() { return objects_.length(); }
+  Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
+
+ private:
+  List<Handle<JSGlobalObject> > objects_;
+};
+
+
+// Modifies heap. Must not be run during heap traversal.
+void V8HeapExplorer::TagGlobalObjects() {
+  Isolate* isolate = Isolate::Current();
+  GlobalObjectsEnumerator enumerator;
+  isolate->global_handles()->IterateAllRoots(&enumerator);
+  Handle<String> document_string =
+      isolate->factory()->NewStringFromAscii(CStrVector("document"));
+  Handle<String> url_string =
+      isolate->factory()->NewStringFromAscii(CStrVector("URL"));
+  const char** urls = NewArray<const char*>(enumerator.count());
+  for (int i = 0, l = enumerator.count(); i < l; ++i) {
+    urls[i] = NULL;
+    Handle<JSGlobalObject> global_obj = enumerator.at(i);
+    Object* obj_document;
+    if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
+        obj_document->IsJSObject()) {
+      JSObject* document = JSObject::cast(obj_document);
+      Object* obj_url;
+      if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
+          obj_url->IsString()) {
+        urls[i] = collection_->names()->GetName(String::cast(obj_url));
+      }
+    }
+  }
+
+  AssertNoAllocation no_allocation;
+  for (int i = 0, l = enumerator.count(); i < l; ++i) {
+    objects_tags_.SetTag(*enumerator.at(i), urls[i]);
+  }
+
+  DeleteArray(urls);
+}
+
+
 class GlobalHandlesExtractor : public ObjectVisitor {
  public:
   explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
@@ -2445,6 +2691,7 @@
                                   HeapEntry*) {
     entries_->CountReference(parent_ptr, child_ptr);
   }
+
  private:
   HeapEntriesMap* entries_;
 };
@@ -2516,6 +2763,7 @@
                               child_entry,
                               retainer_index);
   }
+
  private:
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -2524,6 +2772,8 @@
 
 
 bool HeapSnapshotGenerator::GenerateSnapshot() {
+  v8_heap_explorer_.TagGlobalObjects();
+
   AssertNoAllocation no_alloc;
 
   SetProgressTotal(4);  // 2 passes + dominators + sizes.
@@ -2794,10 +3044,19 @@
   bool aborted_;
 };
 
+const int HeapSnapshotJSONSerializer::kMaxSerializableSnapshotRawSize =
+    256 * MB;
+
 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
   ASSERT(writer_ == NULL);
   writer_ = new OutputStreamWriter(stream);
 
+  HeapSnapshot* original_snapshot = NULL;
+  if (snapshot_->raw_entries_size() >= kMaxSerializableSnapshotRawSize) {
+    // The snapshot is too big. Serialize a fake snapshot.
+    original_snapshot = snapshot_;
+    snapshot_ = CreateFakeSnapshot();
+  }
   // Since nodes graph is cyclic, we need the first pass to enumerate
   // them. Strings can be serialized in one pass.
   EnumerateNodes();
@@ -2805,6 +3064,26 @@
 
   delete writer_;
   writer_ = NULL;
+
+  if (original_snapshot != NULL) {
+    delete snapshot_;
+    snapshot_ = original_snapshot;
+  }
+}
+
+
+HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
+  HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
+                                          HeapSnapshot::kFull,
+                                          snapshot_->title(),
+                                          snapshot_->uid());
+  result->AllocateEntries(2, 1, 0);
+  HeapEntry* root = result->AddRootEntry(1);
+  HeapEntry* message = result->AddEntry(
+      HeapEntry::kString, "The snapshot is too big", 0, 4, 0, 0);
+  root->SetUnidirElementReference(0, 1, message);
+  result->SetDominatorsToSelf();
+  return result;
 }
 
 
@@ -3084,12 +3363,4 @@
   sorted_entries->Sort(SortUsingEntryValue);
 }
 
-
-String* GetConstructorNameForHeapProfile(JSObject* object) {
-  if (object->IsJSFunction()) return HEAP->closure_symbol();
-  return object->constructor_name();
-}
-
 } }  // namespace v8::internal
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
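The new V8HeapExplorer::GetConstructorName above only consults the immediate "constructor" property when the map-level name is the generic "Object". A minimal JavaScript sketch of the case it targets; the function name below is illustrative, not taken from the patch:

// Illustration only: an object whose map reports "Object" but which
// carries an own "constructor" property pointing at a named function,
// as binding objects do.
function BindingCtor() {}
var bound = {};                   // constructor_name() reports "Object"
bound.constructor = BindingCtor;  // immediate "constructor" property
// With the lookup above, a heap snapshot would label `bound` as
// "BindingCtor" instead of the generic "Object", assuming the property
// is still present at snapshot time.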
diff --git a/src/profile-generator.h b/src/profile-generator.h
index bbc9efc..0beb109 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -28,8 +28,7 @@
 #ifndef V8_PROFILE_GENERATOR_H_
 #define V8_PROFILE_GENERATOR_H_
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
+#include "allocation.h"
 #include "hashmap.h"
 #include "../include/v8-profiler.h"
 
@@ -239,9 +238,8 @@
 class CodeMap {
  public:
   CodeMap() : next_shared_id_(1) { }
-  INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
-  INLINE(void MoveCode(Address from, Address to));
-  INLINE(void DeleteCode(Address addr));
+  void AddCode(Address addr, CodeEntry* entry, unsigned size);
+  void MoveCode(Address from, Address to);
   CodeEntry* FindEntry(Address addr);
   int GetSharedId(Address addr);
 
@@ -271,6 +269,8 @@
     void Call(const Address& key, const CodeEntryInfo& value);
   };
 
+  void DeleteAllCoveredCode(Address start, Address end);
+
   // Fake CodeEntry pointer to distinguish shared function entries.
   static CodeEntry* const kSharedFunctionCodeEntry;
 
@@ -585,6 +585,8 @@
 
   void Print(int max_depth, int indent);
 
+  Handle<HeapObject> GetHeapObject();
+
   static int EntriesSize(int entries_count,
                          int children_count,
                          int retainers_count);
@@ -637,8 +639,7 @@
 class HeapSnapshot {
  public:
   enum Type {
-    kFull = v8::HeapSnapshot::kFull,
-    kAggregated = v8::HeapSnapshot::kAggregated
+    kFull = v8::HeapSnapshot::kFull
   };
 
   HeapSnapshot(HeapSnapshotsCollection* collection,
@@ -656,6 +657,7 @@
   HeapEntry* gc_roots() { return gc_roots_entry_; }
   HeapEntry* natives_root() { return natives_root_entry_; }
   List<HeapEntry*>* entries() { return &entries_; }
+  int raw_entries_size() { return raw_entries_size_; }
 
   void AllocateEntries(
       int entries_count, int children_count, int retainers_count);
@@ -691,9 +693,7 @@
   char* raw_entries_;
   List<HeapEntry*> entries_;
   bool entries_sorted_;
-#ifdef DEBUG
   int raw_entries_size_;
-#endif
 
   friend class HeapSnapshotTester;
 
@@ -735,7 +735,8 @@
 
   static uint32_t AddressHash(Address addr) {
     return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
+        v8::internal::kZeroHashSeed);
   }
 
   bool initial_fill_mode_;
@@ -765,6 +766,7 @@
   TokenEnumerator* token_enumerator() { return token_enumerator_; }
 
   uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
+  Handle<HeapObject> FindHeapObjectById(uint64_t id);
   void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
 
  private:
@@ -835,7 +837,8 @@
 
   static uint32_t Hash(HeapThing thing) {
     return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
+        v8::internal::kZeroHashSeed);
   }
   static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
     return key1 == key2;
@@ -858,6 +861,8 @@
   void Clear();
   bool Contains(Object* object);
   void Insert(Object* obj);
+  const char* GetTag(Object* obj);
+  void SetTag(Object* obj, const char* tag);
 
  private:
   HashMap entries_;
@@ -919,6 +924,9 @@
   void AddRootEntries(SnapshotFillerInterface* filler);
   int EstimateObjectsCount();
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+  void TagGlobalObjects();
+
+  static String* GetConstructorName(JSObject* object);
 
   static HeapObject* const kInternalRootObject;
 
@@ -970,13 +978,16 @@
   void SetRootShortcutReference(Object* child);
   void SetRootGcRootsReference();
   void SetGcRootsReference(Object* child);
+  void TagObject(Object* obj, const char* tag);
 
   HeapEntry* GetEntry(Object* obj);
 
+  Heap* heap_;
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
   SnapshottingProgressReportingInterface* progress_;
   SnapshotFillerInterface* filler_;
+  HeapObjectsSet objects_tags_;
 
   static HeapObject* const kGcRootsObject;
 
@@ -1009,7 +1020,8 @@
   void VisitSubtreeWrapper(Object** p, uint16_t class_id);
 
   static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
-    return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()));
+    return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
+                              v8::internal::kZeroHashSeed);
   }
   static bool RetainedInfosMatch(void* key1, void* key2) {
     return key1 == key2 ||
@@ -1087,10 +1099,12 @@
 
   INLINE(static uint32_t ObjectHash(const void* key)) {
     return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)));
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
+        v8::internal::kZeroHashSeed);
   }
 
   void EnumerateNodes();
+  HeapSnapshot* CreateFakeSnapshot();
   int GetNodeId(HeapEntry* entry);
   int GetStringId(const char* s);
   void SerializeEdge(HeapGraphEdge* edge);
@@ -1102,6 +1116,8 @@
   void SerializeStrings();
   void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
 
+  static const int kMaxSerializableSnapshotRawSize;
+
   HeapSnapshot* snapshot_;
   HashMap nodes_;
   HashMap strings_;
@@ -1115,11 +1131,6 @@
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
 };
 
-
-String* GetConstructorNameForHeapProfile(JSObject* object);
-
 } }  // namespace v8::internal
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 #endif  // V8_PROFILE_GENERATOR_H_
diff --git a/src/property.cc b/src/property.cc
index c35fb83..7cc2df5 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,8 +52,8 @@
       GetTransitionMap()->Print(out);
       PrintF(out, "\n");
       break;
-    case EXTERNAL_ARRAY_TRANSITION:
-      PrintF(out, " -type = external array transition\n");
+    case ELEMENTS_TRANSITION:
+      PrintF(out, " -type = elements transition\n");
       PrintF(out, " -map:\n");
       GetTransitionMap()->Print(out);
       PrintF(out, "\n");
@@ -74,6 +74,9 @@
       PrintF(out, " -callback object:\n");
       GetCallbackObject()->Print(out);
       break;
+    case HANDLER:
+      PrintF(out, " -type = lookup proxy\n");
+      break;
     case INTERCEPTOR:
       PrintF(out, " -type = lookup interceptor\n");
       break;
diff --git a/src/property.h b/src/property.h
index ee95ca2..e7d9fc5 100644
--- a/src/property.h
+++ b/src/property.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_PROPERTY_H_
 #define V8_PROPERTY_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -110,14 +112,14 @@
       : Descriptor(key, map, attributes, MAP_TRANSITION) { }
 };
 
-class ExternalArrayTransitionDescriptor: public Descriptor {
+class ElementsTransitionDescriptor: public Descriptor {
  public:
-  ExternalArrayTransitionDescriptor(String* key,
-                                    Map* map,
-                                    ExternalArrayType array_type)
+  ElementsTransitionDescriptor(String* key,
+                               Map* map,
+                               ElementsKind elements_kind)
       : Descriptor(key, map, PropertyDetails(NONE,
-                                             EXTERNAL_ARRAY_TRANSITION,
-                                             array_type)) { }
+                                             ELEMENTS_TRANSITION,
+                                             elements_kind)) { }
 };
 
 // Marks a field name in a map so that adding the field is guaranteed
@@ -155,24 +157,15 @@
 class CallbacksDescriptor:  public Descriptor {
  public:
   CallbacksDescriptor(String* key,
-                      Object* proxy,
+                      Object* foreign,
                       PropertyAttributes attributes,
                       int index = 0)
-      : Descriptor(key, proxy, attributes, CALLBACKS, index) {}
+      : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
 };
 
 
 class LookupResult BASE_EMBEDDED {
  public:
-  // Where did we find the result;
-  enum {
-    NOT_FOUND,
-    DESCRIPTOR_TYPE,
-    DICTIONARY_TYPE,
-    INTERCEPTOR_TYPE,
-    CONSTANT_TYPE
-  } lookup_type_;
-
   LookupResult()
       : lookup_type_(NOT_FOUND),
         cacheable_(true),
@@ -209,6 +202,13 @@
     number_ = entry;
   }
 
+  void HandlerResult() {
+    lookup_type_ = HANDLER_TYPE;
+    holder_ = NULL;
+    details_ = PropertyDetails(NONE, HANDLER);
+    cacheable_ = false;
+  }
+
   void InterceptorResult(JSObject* holder) {
     lookup_type_ = INTERCEPTOR_TYPE;
     holder_ = holder;
@@ -243,6 +243,7 @@
   bool IsDontEnum() { return details_.IsDontEnum(); }
   bool IsDeleted() { return details_.IsDeleted(); }
   bool IsFound() { return lookup_type_ != NOT_FOUND; }
+  bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
 
   // Is the result a property excluding transitions and the null
   // descriptor?
@@ -280,7 +281,7 @@
   Map* GetTransitionMap() {
     ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
     ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
-           type() == EXTERNAL_ARRAY_TRANSITION);
+           type() == ELEMENTS_TRANSITION);
     return Map::cast(GetValue());
   }
 
@@ -343,6 +344,16 @@
   }
 
  private:
+  // Where did we find the result?
+  enum {
+    NOT_FOUND,
+    DESCRIPTOR_TYPE,
+    DICTIONARY_TYPE,
+    HANDLER_TYPE,
+    INTERCEPTOR_TYPE,
+    CONSTANT_TYPE
+  } lookup_type_;
+
   JSObject* holder_;
   int number_;
   bool cacheable_;
diff --git a/src/proxy.js b/src/proxy.js
new file mode 100644
index 0000000..4e44cd4
--- /dev/null
+++ b/src/proxy.js
@@ -0,0 +1,144 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+global.Proxy = new $Object();
+
+var $Proxy = global.Proxy
+
+$Proxy.create = function(handler, proto) {
+  if (!IS_SPEC_OBJECT(handler))
+    throw MakeTypeError("handler_non_object", ["create"])
+  if (!IS_SPEC_OBJECT(proto)) proto = null  // Mozilla does this...
+  return %CreateJSProxy(handler, proto)
+}
+
+$Proxy.createFunction = function(handler, callTrap, constructTrap) {
+  if (!IS_SPEC_OBJECT(handler))
+    throw MakeTypeError("handler_non_object", ["create"])
+  if (!IS_SPEC_FUNCTION(callTrap))
+    throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
+  if (IS_UNDEFINED(constructTrap)) {
+    constructTrap = callTrap
+  } else if (!IS_SPEC_FUNCTION(constructTrap)) {
+    throw MakeTypeError("trap_function_expected",
+                        ["createFunction", "construct"])
+  }
+  return %CreateJSFunctionProxy(
+    handler, callTrap, constructTrap, $Function.prototype)
+}
+
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Builtins
+////////////////////////////////////////////////////////////////////////////////
+
+function DelegateCallAndConstruct(callTrap, constructTrap) {
+  return function() {
+    return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
+                  this, arguments, 0, %_ArgumentsLength())
+  }
+}
+
+function DerivedGetTrap(receiver, name) {
+  var desc = this.getPropertyDescriptor(name)
+  if (IS_UNDEFINED(desc)) { return desc }
+  if ('value' in desc) {
+    return desc.value
+  } else {
+    if (IS_UNDEFINED(desc.get)) { return desc.get }
+    // The proposal says: desc.get.call(receiver)
+    return %_CallFunction(receiver, desc.get)
+  }
+}
+
+function DerivedSetTrap(receiver, name, val) {
+  var desc = this.getOwnPropertyDescriptor(name)
+  if (desc) {
+    if ('writable' in desc) {
+      if (desc.writable) {
+        desc.value = val
+        this.defineProperty(name, desc)
+        return true
+      } else {
+        return false
+      }
+    } else { // accessor
+      if (desc.set) {
+        // The proposal says: desc.set.call(receiver, val)
+        %_CallFunction(receiver, val, desc.set)
+        return true
+      } else {
+        return false
+      }
+    }
+  }
+  desc = this.getPropertyDescriptor(name)
+  if (desc) {
+    if ('writable' in desc) {
+      if (desc.writable) {
+        // fall through
+      } else {
+        return false
+      }
+    } else { // accessor
+      if (desc.set) {
+        // The proposal says: desc.set.call(receiver, val)
+        %_CallFunction(receiver, val, desc.set)
+        return true
+      } else {
+        return false
+      }
+    }
+  }
+  this.defineProperty(name, {
+    value: val,
+    writable: true,
+    enumerable: true,
+    configurable: true});
+  return true;
+}
+
+function DerivedHasTrap(name) {
+  return !!this.getPropertyDescriptor(name)
+}
+
+function DerivedHasOwnTrap(name) {
+  return !!this.getOwnPropertyDescriptor(name)
+}
+
+function DerivedKeysTrap() {
+  var names = this.getOwnPropertyNames()
+  var enumerableNames = []
+  for (var i = 0, count = 0; i < names.length; ++i) {
+    var name = names[i]
+    if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) {
+      enumerableNames[count++] = names[i]
+    }
+  }
+  return enumerableNames
+}
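The new src/proxy.js installs the early Harmony Proxy API (Proxy.create and Proxy.createFunction) plus derived traps that synthesize get, set, has and keys from the fundamental descriptor traps. A minimal usage sketch, assuming the library is enabled and that the derived traps are used when no direct get/set/has trap is supplied:

// Sketch only: the handler implements just the fundamental traps that
// DerivedGetTrap, DerivedSetTrap, DerivedHasTrap and DerivedKeysTrap
// fall back on.
var store = {answer: 42};
var handler = {
  getPropertyDescriptor: function(name) {
    return Object.getOwnPropertyDescriptor(store, name);
  },
  getOwnPropertyDescriptor: function(name) {
    return Object.getOwnPropertyDescriptor(store, name);
  },
  getOwnPropertyNames: function() {
    return Object.getOwnPropertyNames(store);
  },
  defineProperty: function(name, desc) {
    Object.defineProperty(store, name, desc);
  }
};
var p = Proxy.create(handler, Object.prototype);
p.answer;         // 42, via DerivedGetTrap -> getPropertyDescriptor
p.extra = true;   // routed through DerivedSetTrap -> defineProperty
'answer' in p;    // true, via DerivedHasTrap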
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 75cf8bf..262ead2 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -107,6 +107,7 @@
 
   virtual IrregexpImplementation Implementation();
   virtual Handle<HeapObject> GetCode(Handle<String> source);
+
  private:
   void Expand();
   // Code and bitmap emission.
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index 8c6cf3a..1cf0349 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -95,6 +95,7 @@
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);
+
  private:
   RegExpMacroAssembler* assembler_;
 };
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 5578243..f91ea93 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -120,27 +120,31 @@
   String* subject_ptr = *subject;
   // Character offsets into string.
   int start_offset = previous_index;
-  int end_offset = subject_ptr->length();
+  int char_length = subject_ptr->length() - start_offset;
+  int slice_offset = 0;
 
-  // The string has been flattened, so it it is a cons string it contains the
+  // The string has been flattened, so if it is a cons string it contains the
   // full string in the first part.
   if (StringShape(subject_ptr).IsCons()) {
     ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
     subject_ptr = ConsString::cast(subject_ptr)->first();
+  } else if (StringShape(subject_ptr).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(subject_ptr);
+    subject_ptr = slice->parent();
+    slice_offset = slice->offset();
   }
   // Ensure that an underlying string has the same ascii-ness.
   bool is_ascii = subject_ptr->IsAsciiRepresentation();
   ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
   // String is now either Sequential or External
   int char_size_shift = is_ascii ? 0 : 1;
-  int char_length = end_offset - start_offset;
 
   const byte* input_start =
-      StringCharacterPosition(subject_ptr, start_offset);
+      StringCharacterPosition(subject_ptr, start_offset + slice_offset);
   int byte_length = char_length << char_size_shift;
   const byte* input_end = input_start + byte_length;
   Result res = Execute(*regexp_code,
-                       subject_ptr,
+                       *subject,
                        start_offset,
                        input_start,
                        input_end,
@@ -152,7 +156,7 @@
 
 NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
     Code* code,
-    String* input,
+    String* input,  // This needs to be the unpacked (sliced, cons) string.
     int start_offset,
     const byte* input_start,
     const byte* input_end,
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
index 5943206..5684239 100644
--- a/src/regexp-stack.h
+++ b/src/regexp-stack.h
@@ -89,6 +89,7 @@
   char* ArchiveStack(char* to);
   char* RestoreStack(char* from);
   void FreeThreadResources() { thread_local_.Free(); }
+
  private:
   RegExpStack();
   ~RegExpStack();
diff --git a/src/regexp.js b/src/regexp.js
index f68dee6..38d4496 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -50,24 +50,29 @@
   var global = false;
   var ignoreCase = false;
   var multiline = false;
-
   for (var i = 0; i < flags.length; i++) {
     var c = %_CallFunction(flags, i, StringCharAt);
     switch (c) {
       case 'g':
-        // Allow duplicate flags to be consistent with JSC and others.
+        if (global) {
+          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
+        }
         global = true;
         break;
       case 'i':
+        if (ignoreCase) {
+          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
+        }
         ignoreCase = true;
         break;
       case 'm':
+        if (multiline) {
+          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
+        }
         multiline = true;
         break;
       default:
-        // Ignore flags that have no meaning to be consistent with
-        // JSC.
-        break;
+        throw MakeSyntaxError("invalid_regexp_flags", [flags]);
     }
   }
 
@@ -235,7 +240,7 @@
   // Conversion is required by the ES5 specification (RegExp.prototype.exec
   // algorithm, step 5) even if the value is discarded for non-global RegExps.
   var i = TO_INTEGER(lastIndex);
-  
+
   if (this.global) {
     if (i < 0 || i > string.length) {
       this.lastIndex = 0;
@@ -250,11 +255,11 @@
     }
     lastMatchInfoOverride = null;
     this.lastIndex = lastMatchInfo[CAPTURE1];
-    return true;    
+    return true;
   } else {
     // Non-global regexp.
-    // Remove irrelevant preceeding '.*' in a non-global test regexp. 
-    // The expression checks whether this.source starts with '.*' and 
+    // Remove irrelevant preceding '.*' in a non-global test regexp.
+    // The expression checks whether this.source starts with '.*' and
     // that the third char is not a '?'.
     if (%_StringCharCodeAt(this.source, 0) == 46 &&  // '.'
         %_StringCharCodeAt(this.source, 1) == 42 &&  // '*'
@@ -262,14 +267,14 @@
       if (!%_ObjectEquals(regexp_key, this)) {
         regexp_key = this;
         regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
-                                 (!this.ignoreCase 
+                                 (!this.ignoreCase
                                   ? !this.multiline ? "" : "m"
                                   : !this.multiline ? "i" : "im"));
       }
       if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
         return false;
       }
-    }    
+    }
     %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
     // matchIndices is either null or the lastMatchInfo array.
     var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
@@ -400,7 +405,8 @@
 
 // -------------------------------------------------------------------
 
-function SetupRegExp() {
+function SetUpRegExp() {
+  %CheckIsBootstrapping();
   %FunctionSetInstanceClassName($RegExp, 'RegExp');
   %FunctionSetPrototype($RegExp, new $Object());
   %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
@@ -479,5 +485,4 @@
   }
 }
 
-
-SetupRegExp();
+SetUpRegExp();
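The flag-parsing change above drops the old JSC-compatible leniency: duplicate or unknown flags now raise a SyntaxError instead of being silently accepted. A short sketch of the resulting behavior:

new RegExp("a", "gim");  // still valid: each flag appears at most once
new RegExp("a", "gg");   // now throws SyntaxError (invalid_regexp_flags)
new RegExp("a", "gx");   // unknown flag also throws instead of being
                         // silently ignored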
diff --git a/src/rewriter.cc b/src/rewriter.cc
index efe8044..3d4c2dc 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -66,9 +66,13 @@
 
   Expression* SetResult(Expression* value) {
     result_assigned_ = true;
-    VariableProxy* result_proxy = new VariableProxy(result_);
-    return new Assignment(Token::ASSIGN, result_proxy, value,
-                          RelocInfo::kNoPosition);
+    Zone* zone = isolate()->zone();
+    VariableProxy* result_proxy = new(zone) VariableProxy(isolate(), result_);
+    return new(zone) Assignment(isolate(),
+                                Token::ASSIGN,
+                                result_proxy,
+                                value,
+                                RelocInfo::kNoPosition);
   }
 
   // Node visitors.
@@ -193,141 +197,25 @@
 }
 
 
+void Processor::VisitWithStatement(WithStatement* node) {
+  bool set_after_body = is_set_;
+  Visit(node->statement());
+  is_set_ = is_set_ && set_after_body;
+}
+
+
 // Do nothing:
 void Processor::VisitDeclaration(Declaration* node) {}
 void Processor::VisitEmptyStatement(EmptyStatement* node) {}
 void Processor::VisitReturnStatement(ReturnStatement* node) {}
-void Processor::VisitWithEnterStatement(WithEnterStatement* node) {}
-void Processor::VisitWithExitStatement(WithExitStatement* node) {}
 void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
 
 
 // Expressions are never visited yet.
-void Processor::VisitFunctionLiteral(FunctionLiteral* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitConditional(Conditional* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitVariableProxy(VariableProxy* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitLiteral(Literal* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitRegExpLiteral(RegExpLiteral* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitArrayLiteral(ArrayLiteral* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitObjectLiteral(ObjectLiteral* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitAssignment(Assignment* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitThrow(Throw* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitProperty(Property* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitCall(Call* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitCallNew(CallNew* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitCallRuntime(CallRuntime* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitUnaryOperation(UnaryOperation* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitCountOperation(CountOperation* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitBinaryOperation(BinaryOperation* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitCompareOperation(CompareOperation* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitCompareToNull(CompareToNull* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
-void Processor::VisitThisFunction(ThisFunction* node) {
-  USE(node);
-  UNREACHABLE();
-}
+#define DEF_VISIT(type)                                         \
+  void Processor::Visit##type(type* expr) { UNREACHABLE(); }
+EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
 
 
 // Assumes code has been parsed and scopes have been analyzed.  Mutates the
@@ -337,7 +225,7 @@
   ASSERT(function != NULL);
   Scope* scope = function->scope();
   ASSERT(scope != NULL);
-  if (scope->is_function_scope()) return true;
+  if (!scope->is_global_scope() && !scope->is_eval_scope()) return true;
 
   ZoneList<Statement*>* body = function->body();
   if (!body->is_empty()) {
@@ -348,8 +236,10 @@
     if (processor.HasStackOverflow()) return false;
 
     if (processor.result_assigned()) {
-      VariableProxy* result_proxy = new VariableProxy(result);
-      body->Add(new ReturnStatement(result_proxy));
+      Isolate* isolate = info->isolate();
+      Zone* zone = isolate->zone();
+      VariableProxy* result_proxy = new(zone) VariableProxy(isolate, result);
+      body->Add(new(zone) ReturnStatement(result_proxy));
     }
   }
 
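The rewriter assigns the value of the last value-producing statement to a hidden .result variable, which is what gives global and eval code a completion value; the new VisitWithStatement keeps that bookkeeping correct inside with blocks. A sketch of the observable effect:

eval("1 + 2; 'done'");         // 'done' -- the last expression wins
eval("if (false) { 1; }");     // undefined -- no value was assigned
eval("with ({x: 7}) { x; }");  // 7 -- covered by VisitWithStatement above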
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 97f0341..26d8846 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,32 +43,6 @@
 namespace internal {
 
 
-class PendingListNode : public Malloced {
- public:
-  explicit PendingListNode(JSFunction* function);
-  ~PendingListNode() { Destroy(); }
-
-  PendingListNode* next() const { return next_; }
-  void set_next(PendingListNode* node) { next_ = node; }
-  Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }
-
-  // If the function is garbage collected before we've had the chance
-  // to optimize it the weak handle will be null.
-  bool IsValid() { return !function_.is_null(); }
-
-  // Returns the number of microseconds this node has been pending.
-  int Delay() const { return static_cast<int>(OS::Ticks() - start_); }
-
- private:
-  void Destroy();
-  static void WeakCallback(v8::Persistent<v8::Value> object, void* data);
-
-  PendingListNode* next_;
-  Handle<Object> function_;  // Weak handle.
-  int64_t start_;
-};
-
-
 // Optimization sampler constants.
 static const int kSamplerFrameCount = 2;
 static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
@@ -80,39 +54,14 @@
 static const int kSamplerThresholdDelta = 1;
 
 static const int kSamplerThresholdSizeFactorInit = 3;
-static const int kSamplerThresholdSizeFactorMin = 1;
-static const int kSamplerThresholdSizeFactorDelta = 1;
 
 static const int kSizeLimit = 1500;
 
 
-PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
-  GlobalHandles* global_handles = Isolate::Current()->global_handles();
-  function_ = global_handles->Create(function);
-  start_ = OS::Ticks();
-  global_handles->MakeWeak(function_.location(), this, &WeakCallback);
-}
-
-
-void PendingListNode::Destroy() {
-  if (!IsValid()) return;
-  GlobalHandles* global_handles = Isolate::Current()->global_handles();
-  global_handles->Destroy(function_.location());
-  function_= Handle<Object>::null();
-}
-
-
-void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
-  reinterpret_cast<PendingListNode*>(data)->Destroy();
-}
-
-
 Atomic32 RuntimeProfiler::state_ = 0;
 // TODO(isolates): Create the semaphore lazily and clean it up when no
 // longer required.
-#ifdef ENABLE_LOGGING_AND_PROFILING
 Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
-#endif
 
 #ifdef DEBUG
 bool RuntimeProfiler::has_been_globally_setup_ = false;
@@ -125,16 +74,8 @@
       sampler_threshold_(kSamplerThresholdInit),
       sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
       sampler_ticks_until_threshold_adjustment_(
-        kSamplerTicksBetweenThresholdAdjustment),
-      js_ratio_(0),
-      sampler_window_position_(0),
-      optimize_soon_list_(NULL),
-      state_window_position_(0),
-      state_window_ticks_(0) {
-  state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
-  state_counts_[IN_JS_STATE] = 0;
-  STATIC_ASSERT(IN_NON_JS_STATE == 0);
-  memset(state_window_, 0, sizeof(state_window_));
+          kSamplerTicksBetweenThresholdAdjustment),
+      sampler_window_position_(0) {
   ClearSampleBuffer();
 }
 
@@ -148,15 +89,13 @@
 }
 
 
-void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
+void RuntimeProfiler::Optimize(JSFunction* function) {
   ASSERT(function->IsOptimizable());
   if (FLAG_trace_opt) {
-    PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
+    PrintF("[marking ");
     function->PrintName();
+    PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
     PrintF(" for recompilation");
-    if (delay > 0) {
-      PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
-    }
     PrintF("]\n");
   }
 
@@ -170,21 +109,19 @@
   // Debug::has_break_points().
   ASSERT(function->IsMarkedForLazyRecompilation());
   if (!FLAG_use_osr ||
-      isolate_->debug()->has_break_points() ||
+      isolate_->DebuggerHasBreakPoints() ||
       function->IsBuiltin()) {
     return;
   }
 
   SharedFunctionInfo* shared = function->shared();
-  // If the code is not optimizable or references context slots, don't try OSR.
-  if (!shared->code()->optimizable() || !shared->allows_lazy_compilation()) {
-    return;
-  }
+  // If the code is not optimizable, don't try OSR.
+  if (!shared->code()->optimizable()) return;
 
   // We are not prepared to do OSR for a function that already has an
   // allocated arguments object.  The optimized code would bypass it for
   // arguments accesses, which is unsound.  Don't try OSR.
-  if (shared->scope_info()->HasArgumentsShadow()) return;
+  if (shared->uses_arguments()) return;
 
   // We're using on-stack replacement: patch the unoptimized code so that
   // any back edge in any unoptimized frame will trigger on-stack
@@ -242,20 +179,6 @@
 
 void RuntimeProfiler::OptimizeNow() {
   HandleScope scope(isolate_);
-  PendingListNode* current = optimize_soon_list_;
-  while (current != NULL) {
-    PendingListNode* next = current->next();
-    if (current->IsValid()) {
-      Handle<JSFunction> function = current->function();
-      int delay = current->Delay();
-      if (function->IsOptimizable()) {
-        Optimize(*function, true, delay);
-      }
-    }
-    delete current;
-    current = next;
-  }
-  optimize_soon_list_ = NULL;
 
   // Run through the JavaScript frames and collect them. If we already
   // have a sample of the function, we mark it for optimizations
@@ -302,24 +225,9 @@
         : 1;
 
     int threshold = sampler_threshold_ * threshold_size_factor;
-    int current_js_ratio = NoBarrier_Load(&js_ratio_);
-
-    // Adjust threshold depending on the ratio of time spent
-    // in JS code.
-    if (current_js_ratio < 20) {
-      // If we spend less than 20% of the time in JS code,
-      // do not optimize.
-      continue;
-    } else if (current_js_ratio < 75) {
-      // Below 75% of time spent in JS code, only optimize very
-      // frequently used functions.
-      threshold *= 3;
-    }
 
     if (LookupSample(function) >= threshold) {
-      Optimize(function, false, 0);
-      isolate_->compilation_cache()->MarkForEagerOptimizing(
-          Handle<JSFunction>(function));
+      Optimize(function);
     }
   }
 
@@ -332,42 +240,8 @@
 }
 
 
-void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
-  if (!function->IsOptimizable()) return;
-  PendingListNode* node = new PendingListNode(function);
-  node->set_next(optimize_soon_list_);
-  optimize_soon_list_ = node;
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
-  SamplerState old_state = state_window_[state_window_position_];
-  state_counts_[old_state]--;
-  state_window_[state_window_position_] = current_state;
-  state_counts_[current_state]++;
-  ASSERT(IsPowerOf2(kStateWindowSize));
-  state_window_position_ = (state_window_position_ + 1) &
-      (kStateWindowSize - 1);
-  // Note: to calculate correct ratio we have to track how many valid
-  // ticks are actually in the state window, because on profiler
-  // startup this number can be less than the window size.
-  state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
-  NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
-                  state_window_ticks_);
-}
-#endif
-
-
 void RuntimeProfiler::NotifyTick() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  // Record state sample.
-  SamplerState state = IsSomeIsolateInJS()
-      ? IN_JS_STATE
-      : IN_NON_JS_STATE;
-  UpdateStateRatio(state);
   isolate_->stack_guard()->RequestRuntimeProfilerTick();
-#endif
 }
 
 
@@ -415,7 +289,6 @@
 
 
 void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // The profiler thread must still be waiting.
   ASSERT(NoBarrier_Load(&state_) >= 0);
   // In IsolateEnteredJS we have already incremented the counter and
@@ -423,8 +296,6 @@
   // to get the right count of active isolates.
   NoBarrier_AtomicIncrement(&state_, 1);
   semaphore_->Signal();
-  isolate->ResetEagerOptimizingData();
-#endif
 }
 
 
@@ -434,20 +305,33 @@
 
 
 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
   ASSERT(old_state >= -1);
   if (old_state != 0) return false;
   semaphore_->Wait();
-#endif
   return true;
 }
 
 
-void RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  semaphore_->Signal();
-#endif
+void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
+  // Do a fake increment. If the profiler is waiting on the semaphore,
+  // the returned state is 0, which can be left as an initial state in
+  // case profiling is restarted later. If the profiler is not
+  // waiting, the increment will prevent it from waiting, but has to
+  // be undone after the profiler is stopped.
+  Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
+  ASSERT(new_state >= 0);
+  if (new_state == 0) {
+    // The profiler thread is waiting. Wake it up. It must check for
+    // stop conditions before attempting to wait again.
+    semaphore_->Signal();
+  }
+  thread->Join();
+  // The profiler thread is now stopped. Undo the increment in case it
+  // was not waiting.
+  if (new_state != 0) {
+    NoBarrier_AtomicIncrement(&state_, -1);
+  }
 }
 
 
@@ -469,18 +353,9 @@
 
 
 bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  static const int kNonJSTicksThreshold = 100;
-  if (RuntimeProfiler::IsSomeIsolateInJS()) {
-    non_js_ticks_ = 0;
-  } else {
-    if (non_js_ticks_ < kNonJSTicksThreshold) {
-      ++non_js_ticks_;
-    } else {
-      return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
-    }
+  if (!RuntimeProfiler::IsSomeIsolateInJS()) {
+    return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
   }
-#endif
   return false;
 }
 
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 692b4ff..15c2097 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -37,7 +37,6 @@
 class Isolate;
 class JSFunction;
 class Object;
-class PendingListNode;
 class Semaphore;
 
 class RuntimeProfiler {
@@ -52,7 +51,6 @@
   }
 
   void OptimizeNow();
-  void OptimizeSoon(JSFunction* function);
 
   void NotifyTick();
 
@@ -86,10 +84,9 @@
   static bool IsSomeIsolateInJS();
   static bool WaitForSomeIsolateToEnterJS();
 
-  // When shutting down we join the profiler thread. Doing so while
-  // it's waiting on a semaphore will cause a deadlock, so we have to
-  // wake it up first.
-  static void WakeUpRuntimeProfilerThreadBeforeShutdown();
+  // Stops the runtime profiler thread when profiling support is being
+  // turned off.
+  static void StopRuntimeProfilerThreadBeforeShutdown(Thread* thread);
 
   void UpdateSamplesAfterScavenge();
   void RemoveDeadSamples();
@@ -97,16 +94,10 @@
 
  private:
   static const int kSamplerWindowSize = 16;
-  static const int kStateWindowSize = 128;
-
-  enum SamplerState {
-    IN_NON_JS_STATE = 0,
-    IN_JS_STATE = 1
-  };
 
   static void HandleWakeUp(Isolate* isolate);
 
-  void Optimize(JSFunction* function, bool eager, int delay);
+  void Optimize(JSFunction* function);
 
   void AttemptOnStackReplacement(JSFunction* function);
 
@@ -118,31 +109,16 @@
 
   void AddSample(JSFunction* function, int weight);
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  void UpdateStateRatio(SamplerState current_state);
-#endif
-
   Isolate* isolate_;
 
   int sampler_threshold_;
   int sampler_threshold_size_factor_;
   int sampler_ticks_until_threshold_adjustment_;
 
-  // The ratio of ticks spent in JS code in percent.
-  Atomic32 js_ratio_;
-
   Object* sampler_window_[kSamplerWindowSize];
   int sampler_window_position_;
   int sampler_window_weight_[kSamplerWindowSize];
 
-  // Support for pending 'optimize soon' requests.
-  PendingListNode* optimize_soon_list_;
-
-  SamplerState state_window_[kStateWindowSize];
-  int state_window_position_;
-  int state_window_ticks_;
-  int state_counts_[2];
-
   // Possible state values:
   //   -1            => the profiler thread is waiting on the semaphore
   //   0 or positive => the number of isolates running JavaScript code.
@@ -159,7 +135,7 @@
 // Rate limiter intended to be used in the profiler thread.
 class RuntimeProfilerRateLimiter BASE_EMBEDDED {
  public:
-  RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
+  RuntimeProfilerRateLimiter() {}
 
   // Suspends the current thread (which must be the profiler thread)
   // when not executing JavaScript to minimize CPU usage. Returns
@@ -170,8 +146,6 @@
   bool SuspendIfNecessary();
 
  private:
-  int non_js_ticks_;
-
   DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
 };
 
diff --git a/src/runtime.cc b/src/runtime.cc
index 855bd41..b1c4c10 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -32,6 +32,7 @@
 #include "accessors.h"
 #include "api.h"
 #include "arguments.h"
+#include "bootstrapper.h"
 #include "codegen.h"
 #include "compilation-cache.h"
 #include "compiler.h"
@@ -42,17 +43,20 @@
 #include "execution.h"
 #include "global-handles.h"
 #include "jsregexp.h"
+#include "json-parser.h"
 #include "liveedit.h"
 #include "liveobjectlist-inl.h"
+#include "misc-intrinsics.h"
 #include "parser.h"
 #include "platform.h"
-#include "runtime.h"
 #include "runtime-profiler.h"
+#include "runtime.h"
 #include "scopeinfo.h"
-#include "smart-pointer.h"
+#include "smart-array-pointer.h"
+#include "string-search.h"
 #include "stub-cache.h"
 #include "v8threads.h"
-#include "string-search.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -79,19 +83,19 @@
   RUNTIME_ASSERT(obj->IsBoolean());                                   \
   bool name = (obj)->IsTrue();
 
-// Cast the given object to a Smi and store its value in an int variable
-// with the given name.  If the object is not a Smi call IllegalOperation
+// Cast the given argument to a Smi and store its value in an int variable
+// with the given name.  If the argument is not a Smi, call IllegalOperation
 // and return.
-#define CONVERT_SMI_CHECKED(name, obj)                            \
-  RUNTIME_ASSERT(obj->IsSmi());                                   \
-  int name = Smi::cast(obj)->value();
+#define CONVERT_SMI_ARG_CHECKED(name, index)                         \
+  RUNTIME_ASSERT(args[index]->IsSmi());                              \
+  int name = args.smi_at(index);
 
-// Cast the given object to a double and store it in a variable with
-// the given name.  If the object is not a number (as opposed to
+// Cast the given argument to a double and store it in a variable with
+// the given name.  If the argument is not a number (as opposed to
 // the number not-a-number) call IllegalOperation and return.
-#define CONVERT_DOUBLE_CHECKED(name, obj)                            \
-  RUNTIME_ASSERT(obj->IsNumber());                                   \
-  double name = (obj)->Number();
+#define CONVERT_DOUBLE_ARG_CHECKED(name, index)                      \
+  RUNTIME_ASSERT(args[index]->IsNumber());                           \
+  double name = args.number_at(index);
 
 // Call the specified converter on the object *com and store the result in
 // a variable of the specified type with the given name.  If the
@@ -173,7 +177,7 @@
   // Pixel elements cannot be created using an object literal.
   ASSERT(!copy->HasExternalArrayElements());
   switch (copy->GetElementsKind()) {
-    case JSObject::FAST_ELEMENTS: {
+    case FAST_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(copy->elements());
       if (elements->map() == heap->fixed_cow_array_map()) {
         isolate->counters()->cow_arrays_created_runtime()->Increment();
@@ -197,8 +201,8 @@
       }
       break;
     }
-    case JSObject::DICTIONARY_ELEMENTS: {
-      NumberDictionary* element_dictionary = copy->element_dictionary();
+    case DICTIONARY_ELEMENTS: {
+      SeededNumberDictionary* element_dictionary = copy->element_dictionary();
       int capacity = element_dictionary->Capacity();
       for (int i = 0; i < capacity; i++) {
         Object* k = element_dictionary->KeyAt(i);
@@ -216,8 +220,20 @@
       }
       break;
     }
-    default:
-      UNREACHABLE();
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNIMPLEMENTED();
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+      // No contained objects, nothing to do.
       break;
   }
   return copy;
@@ -480,7 +496,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_CHECKED(FixedArray, elements, 2);
 
   Handle<Object> object =
@@ -497,9 +513,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
-  CONVERT_SMI_CHECKED(flags, args[3]);
+  CONVERT_SMI_ARG_CHECKED(flags, 3);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
 
@@ -523,9 +539,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
-  CONVERT_SMI_CHECKED(flags, args[3]);
+  CONVERT_SMI_ARG_CHECKED(flags, 3);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
 
@@ -549,7 +565,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_CHECKED(FixedArray, elements, 2);
 
   // Check if boilerplate exists. If not, create it first.
@@ -568,7 +584,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_CHECKED(FixedArray, elements, 2);
 
   // Check if boilerplate exists. If not, create it first.
@@ -587,27 +603,106 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCatchExtensionObject) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, key, args[0]);
-  Object* value = args[1];
-  // Create a catch context extension object.
-  JSFunction* constructor =
-      isolate->context()->global_context()->
-          context_extension_function();
-  Object* object;
-  { MaybeObject* maybe_object = isolate->heap()->AllocateJSObject(constructor);
-    if (!maybe_object->ToObject(&object)) return maybe_object;
-  }
-  // Assign the exception value to the catch variable and make sure
-  // that the catch variable is DontDelete.
-  { MaybeObject* maybe_value =
-        // Passing non-strict per ECMA-262 5th Ed. 12.14. Catch, bullet #4.
-        JSObject::cast(object)->SetProperty(
-            key, value, DONT_DELETE, kNonStrictMode);
-    if (!maybe_value->ToObject(&value)) return maybe_value;
-  }
-  return object;
+  Object* handler = args[0];
+  Object* prototype = args[1];
+  Object* used_prototype =
+      prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
+  return isolate->heap()->AllocateJSProxy(handler, used_prototype);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
+  ASSERT(args.length() == 4);
+  Object* handler = args[0];
+  Object* call_trap = args[1];
+  Object* construct_trap = args[2];
+  Object* prototype = args[3];
+  Object* used_prototype =
+      prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
+  return isolate->heap()->AllocateJSFunctionProxy(
+      handler, call_trap, construct_trap, used_prototype);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
+  ASSERT(args.length() == 1);
+  Object* obj = args[0];
+  return isolate->heap()->ToBoolean(obj->IsJSProxy());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
+  ASSERT(args.length() == 1);
+  Object* obj = args[0];
+  return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSProxy, proxy, args[0]);
+  return proxy->handler();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]);
+  return proxy->call_trap();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]);
+  return proxy->construct_trap();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSProxy, proxy, args[0]);
+  proxy->Fix();
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
+  ASSERT(weakmap->map()->inobject_properties() == 0);
+  Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
+  weakmap->set_table(*table);
+  weakmap->set_next(Smi::FromInt(0));
+  return *weakmap;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
+  // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys
+  // because they cannot be cast to JSObject to get an identity hash code.
+  CONVERT_ARG_CHECKED(JSObject, key, 1);
+  return weakmap->table()->Lookup(*key);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
+  // TODO(mstarzinger): See Runtime_WeakMapGet above.
+  CONVERT_ARG_CHECKED(JSObject, key, 1);
+  Handle<Object> value(args[2]);
+  Handle<ObjectHashTable> table(weakmap->table());
+  Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+  weakmap->set_table(*new_table);
+  return *value;
 }
 
 
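Runtime_WeakMapInitialize, Runtime_WeakMapGet and Runtime_WeakMapSet above form the runtime half of the experimental WeakMap support; a sketch of the JS-level behavior, assuming the harmony collections library routes WeakMap operations through these runtime calls:

var wm = new WeakMap();      // backed by Runtime_WeakMapInitialize
var key = {};
wm.set(key, "payload");      // Runtime_WeakMapSet
wm.get(key);                 // Runtime_WeakMapGet -> "payload"
wm.get({});                  // undefined: a different object identity
// Per the TODO above, proxy objects cannot be used as keys yet.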
@@ -620,6 +715,28 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSReceiver, input_obj, args[0]);
+  Object* obj = input_obj;
+  // We don't expect access checks to be needed on JSProxy objects.
+  ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
+  do {
+    if (obj->IsAccessCheckNeeded() &&
+        !isolate->MayNamedAccess(JSObject::cast(obj),
+                                 isolate->heap()->Proto_symbol(),
+                                 v8::ACCESS_GET)) {
+      isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+      return isolate->heap()->undefined_value();
+    }
+    obj = obj->GetPrototype();
+  } while (obj->IsJSObject() &&
+           JSObject::cast(obj)->map()->is_hidden_prototype());
+  return obj;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
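Runtime_GetPrototype performs an access check at each step and keeps walking past hidden prototypes; a sketch of what that means from script, assuming Object.getPrototypeOf is routed through %GetPrototype:

var proto = {kind: "base"};
var obj = Object.create(proto);
Object.getPrototypeOf(obj) === proto;  // true
// For API objects with hidden prototypes, the do/while loop above keeps
// walking, so script only observes the first non-hidden prototype; a
// failed access check is reported and undefined is returned instead.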
@@ -860,9 +977,15 @@
           ASSERT(proto->IsJSGlobalObject());
           holder = Handle<JSObject>(JSObject::cast(proto));
         }
-        NumberDictionary* dictionary = holder->element_dictionary();
+        FixedArray* elements = FixedArray::cast(holder->elements());
+        SeededNumberDictionary* dictionary = NULL;
+        if (elements->map() == heap->non_strict_arguments_elements_map()) {
+          dictionary = SeededNumberDictionary::cast(elements->get(1));
+        } else {
+          dictionary = SeededNumberDictionary::cast(elements);
+        }
         int entry = dictionary->FindEntry(index);
-        ASSERT(entry != NumberDictionary::kNotFound);
+        ASSERT(entry != SeededNumberDictionary::kNotFound);
         PropertyDetails details = dictionary->DetailsAt(entry);
         switch (details.type()) {
           case CALLBACKS: {
@@ -959,8 +1082,7 @@
     ASSERT(proto->IsJSGlobalObject());
     obj = JSObject::cast(proto);
   }
-  return obj->map()->is_extensible() ? isolate->heap()->true_value()
-                                     : isolate->heap()->false_value();
+  return isolate->heap()->ToBoolean(obj->map()->is_extensible());
 }
 
 
@@ -1026,8 +1148,7 @@
     Map::cast(new_map)->set_is_access_check_needed(false);
     object->set_map(Map::cast(new_map));
   }
-  return needs_access_checks ? isolate->heap()->true_value()
-                             : isolate->heap()->false_value();
+  return isolate->heap()->ToBoolean(needs_access_checks);
 }
 
 
@@ -1063,23 +1184,14 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
-  ASSERT(args.length() == 4);
+  ASSERT(args.length() == 3);
   HandleScope scope(isolate);
   Handle<GlobalObject> global = Handle<GlobalObject>(
       isolate->context()->global());
 
   Handle<Context> context = args.at<Context>(0);
   CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
-  bool is_eval = Smi::cast(args[2])->value() == 1;
-  StrictModeFlag strict_mode =
-      static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
-  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
-
-  // Compute the property attributes. According to ECMA-262, section
-  // 13, page 71, the property must be read-only and
-  // non-deletable. However, neither SpiderMonkey nor KJS creates the
-  // property as read-only, so we don't either.
-  PropertyAttributes base = is_eval ? NONE : DONT_DELETE;
+  CONVERT_SMI_ARG_CHECKED(flags, 2);
 
   // Traverse the name/value pairs and set the properties.
   int length = pairs->length();
@@ -1092,7 +1204,7 @@
     // assign to it when evaluating the assignment for "const x =
     // <expr>" the initial value is the hole.
     bool is_const_property = value->IsTheHole();
-
+    bool is_function_declaration = false;
     if (value->IsUndefined() || is_const_property) {
       // Lookup the property in the global object, and don't set the
       // value of the variable if the property is already there.
@@ -1141,6 +1253,7 @@
         }
       }
     } else {
+      is_function_declaration = true;
       // Copy the function and update its context. Use it as value.
       Handle<SharedFunctionInfo> shared =
           Handle<SharedFunctionInfo>::cast(value);
@@ -1154,10 +1267,6 @@
     LookupResult lookup;
     global->LocalLookup(*name, &lookup);
 
-    PropertyAttributes attributes = is_const_property
-        ? static_cast<PropertyAttributes>(base | READ_ONLY)
-        : base;
-
     // There's a local property that we need to overwrite because
     // we're either declaring a function or there's an interceptor
     // that claims the property is absent.
@@ -1172,6 +1281,19 @@
       return ThrowRedeclarationError(isolate, type, name);
     }
 
+    // Compute the property attributes. According to ECMA-262, section
+    // 13, page 71, the property must be read-only and
+    // non-deletable. However, neither SpiderMonkey nor KJS creates the
+    // property as read-only, so we don't either.
+    int attr = NONE;
+    if ((flags & kDeclareGlobalsEvalFlag) == 0) {
+      attr |= DONT_DELETE;
+    }
+    bool is_native = (flags & kDeclareGlobalsNativeFlag) != 0;
+    if (is_const_property || (is_native && is_function_declaration)) {
+      attr |= READ_ONLY;
+    }
+
     // Safari does not allow the invocation of callback setters for
     // function declarations. To mimic this behavior, we do not allow
     // the invocation of setters for function values. This makes a
@@ -1182,20 +1304,24 @@
     if (value->IsJSFunction()) {
       // Do not change DONT_DELETE to false from true.
       if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) {
-        attributes = static_cast<PropertyAttributes>(
-            attributes | (lookup.GetAttributes() & DONT_DELETE));
+        attr |= lookup.GetAttributes() & DONT_DELETE;
       }
+      PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
+
       RETURN_IF_EMPTY_HANDLE(isolate,
                              SetLocalPropertyIgnoreAttributes(global,
                                                               name,
                                                               value,
                                                               attributes));
     } else {
+      StrictModeFlag strict_mode =
+          ((flags & kDeclareGlobalsStrictModeFlag) != 0) ? kStrictMode
+                                                         : kNonStrictMode;
       RETURN_IF_EMPTY_HANDLE(isolate,
                              SetProperty(global,
                                          name,
                                          value,
-                                         attributes,
+                                         static_cast<PropertyAttributes>(attr),
                                          strict_mode));
     }
   }
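
A minimal sketch (not part of the patch) of how a caller could build the single packed Smi flags argument that Runtime_DeclareGlobals now decodes above, assuming kDeclareGlobalsEvalFlag, kDeclareGlobalsNativeFlag and kDeclareGlobalsStrictModeFlag are distinct single-bit masks declared alongside the runtime function; PackDeclareGlobalsFlags is a hypothetical name used only for illustration:

    // Sketch only: pack the three DeclareGlobals options into one flags value,
    // mirroring the (flags & kDeclareGlobals*Flag) tests in the hunk above.
    static int PackDeclareGlobalsFlags(bool is_eval,
                                       bool is_native,
                                       bool is_strict) {
      int flags = 0;
      if (is_eval) flags |= kDeclareGlobalsEvalFlag;
      if (is_native) flags |= kDeclareGlobalsNativeFlag;
      if (is_strict) flags |= kDeclareGlobalsStrictModeFlag;
      return flags;
    }
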
@@ -1211,19 +1337,19 @@
 
   CONVERT_ARG_CHECKED(Context, context, 0);
   Handle<String> name(String::cast(args[1]));
-  PropertyAttributes mode =
-      static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
+  PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
   RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3], isolate);
 
-  // Declarations are always done in the function context.
-  context = Handle<Context>(context->fcontext());
+  // Declarations are always done in a function or global context.
+  context = Handle<Context>(context->declaration_context());
 
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
+  BindingFlags binding_flags;
   Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes);
+      context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
   if (attributes != ABSENT) {
     // The name was declared before; check for conflicting
@@ -1271,7 +1397,7 @@
     Handle<JSObject> context_ext;
     if (context->has_extension()) {
       // The function context's extension context exists - use it.
-      context_ext = Handle<JSObject>(context->extension());
+      context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
     } else {
      // The function context's extension context does not exist - allocate
       // it.
@@ -1325,8 +1451,7 @@
   CONVERT_ARG_CHECKED(String, name, 0);
   GlobalObject* global = isolate->context()->global();
   RUNTIME_ASSERT(args[1]->IsSmi());
-  StrictModeFlag strict_mode =
-      static_cast<StrictModeFlag>(Smi::cast(args[1])->value());
+  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
   ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
 
   // According to ECMA-262, section 12.2, page 62, the property must
@@ -1505,14 +1630,15 @@
   CONVERT_ARG_CHECKED(Context, context, 1);
   Handle<String> name(String::cast(args[2]));
 
-  // Initializations are always done in the function context.
-  context = Handle<Context>(context->fcontext());
+  // Initializations are always done in a function or global context.
+  context = Handle<Context>(context->declaration_context());
 
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = FOLLOW_CHAINS;
+  BindingFlags binding_flags;
   Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes);
+      context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
   // In most situations, the property introduced by the const
   // declaration should be present in the context extension object.
@@ -1527,14 +1653,12 @@
   // In that case, the initialization behaves like a normal assignment
   // to property 'x'.
   if (index >= 0) {
-    // Property was found in a context.
     if (holder->IsContext()) {
-      // The holder cannot be the function context.  If it is, there
-      // should have been a const redeclaration error when declaring
-      // the const property.
-      ASSERT(!holder.is_identical_to(context));
-      if ((attributes & READ_ONLY) == 0) {
-        Handle<Context>::cast(holder)->set(index, *value);
+      // Property was found in a context.  Perform the assignment if we
+      // found a non-constant or an uninitialized constant.
+      Handle<Context> context = Handle<Context>::cast(holder);
+      if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+        context->set(index, *value);
       }
     } else {
       // The holder is an arguments object.
@@ -1608,7 +1732,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, object, 0);
-  CONVERT_SMI_CHECKED(properties, args[1]);
+  CONVERT_SMI_ARG_CHECKED(properties, 1);
   if (object->HasFastProperties()) {
     NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
   }
@@ -1623,7 +1747,7 @@
   CONVERT_ARG_CHECKED(String, subject, 1);
   // Due to the way the JS calls are constructed this must be less than the
   // length of a string, i.e. it is always a Smi.  We check anyway for security.
-  CONVERT_SMI_CHECKED(index, args[2]);
+  CONVERT_SMI_ARG_CHECKED(index, 2);
   CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
   RUNTIME_ASSERT(last_match_info->HasFastElements());
   RUNTIME_ASSERT(index >= 0);
@@ -1640,8 +1764,10 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
   ASSERT(args.length() == 3);
-  CONVERT_SMI_CHECKED(elements_count, args[0]);
-  if (elements_count > JSArray::kMaxFastElementsLength) {
+  CONVERT_SMI_ARG_CHECKED(elements_count, 0);
+  if (elements_count < 0 ||
+      elements_count > FixedArray::kMaxLength ||
+      !Smi::IsValid(elements_count)) {
     return isolate->ThrowIllegalOperation();
   }
   Object* new_object;
@@ -1783,10 +1909,19 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetGlobalReceiver) {
-  // Returns a real global receiver, not one of builtins object.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
+  NoHandleAllocation handle_free;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, function, args[0]);
+  // Returns undefined for strict or native functions, or
+  // the associated global receiver for "normal" functions.
+  SharedFunctionInfo* shared = function->shared();
+  if (shared->native() || shared->strict_mode()) {
+    return isolate->heap()->undefined_value();
+  }
+
   Context* global_context =
-      isolate->context()->global()->global_context();
+      function->context()->global()->global_context();
   return global_context->global()->global_receiver();
 }
 
@@ -1795,7 +1930,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  int index = Smi::cast(args[1])->value();
+  int index = args.smi_at(1);
   Handle<String> pattern = args.at<String>(2);
   Handle<String> flags = args.at<String>(3);
 
@@ -1841,6 +1976,33 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  return isolate->heap()->ToBoolean(
+      f->shared()->name_should_print_as_anonymous());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  f->shared()->set_name_should_print_as_anonymous(true);
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetBound) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  fun->shared()->set_bound(true);
+  return isolate->heap()->undefined_value();
+}
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
@@ -1919,6 +2081,24 @@
 }
 
 
+// Creates a local, read-only property called length with the correct
+// length (when read by the user). This effectively overrides the
+// interceptor that normally provides the length.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionSetLength) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(Smi, length, args[1]);
+  MaybeObject* maybe_name =
+      isolate->heap()->AllocateStringFromAscii(CStrVector("length"));
+  String* name;
+  if (!maybe_name->To(&name)) return maybe_name;
+  PropertyAttributes attr =
+      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+  return fun->AddProperty(name, length, attr, kNonStrictMode);
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -1934,13 +2114,67 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, function, args[0]);
+
+  MaybeObject* maybe_name =
+      isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
+  String* name;
+  if (!maybe_name->To(&name)) return maybe_name;
+
+  if (function->HasFastProperties()) {
+    // Construct a new field descriptor with updated attributes.
+    DescriptorArray* instance_desc = function->map()->instance_descriptors();
+    int index = instance_desc->Search(name);
+    ASSERT(index != DescriptorArray::kNotFound);
+    PropertyDetails details(instance_desc->GetDetails(index));
+    CallbacksDescriptor new_desc(name,
+        instance_desc->GetValue(index),
+        static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
+        details.index());
+    // Construct a new field descriptors array containing the new descriptor.
+    Object* descriptors_unchecked;
+    { MaybeObject* maybe_descriptors_unchecked =
+        instance_desc->CopyInsert(&new_desc, REMOVE_TRANSITIONS);
+      if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
+        return maybe_descriptors_unchecked;
+      }
+    }
+    DescriptorArray* new_descriptors =
+        DescriptorArray::cast(descriptors_unchecked);
+    // Create a new map featuring the new field descriptors array.
+    Object* map_unchecked;
+    { MaybeObject* maybe_map_unchecked = function->map()->CopyDropDescriptors();
+      if (!maybe_map_unchecked->ToObject(&map_unchecked)) {
+        return maybe_map_unchecked;
+      }
+    }
+    Map* new_map = Map::cast(map_unchecked);
+    new_map->set_instance_descriptors(new_descriptors);
+    function->set_map(new_map);
+  } else {  // Dictionary properties.
+    // Directly manipulate the property details.
+    int entry = function->property_dictionary()->FindEntry(name);
+    ASSERT(entry != StringDictionary::kNotFound);
+    PropertyDetails details = function->property_dictionary()->DetailsAt(entry);
+    PropertyDetails new_details(
+        static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
+        details.type(),
+        details.index());
+    function->property_dictionary()->DetailsAtPut(entry, new_details);
+  }
+  return function;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSFunction, f, args[0]);
-  return f->shared()->IsApiFunction() ? isolate->heap()->true_value()
-                                      : isolate->heap()->false_value();
+  return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
 }
 
 
@@ -1949,8 +2183,7 @@
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSFunction, f, args[0]);
-  return f->IsBuiltin() ? isolate->heap()->true_value() :
-                          isolate->heap()->false_value();
+  return isolate->heap()->ToBoolean(f->IsBuiltin());
 }
 
 
@@ -2010,6 +2243,11 @@
     // are guaranteed to be in old space.
     target->set_literals(*literals, SKIP_WRITE_BARRIER);
     target->set_next_function_link(isolate->heap()->undefined_value());
+
+    if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
+      isolate->logger()->LogExistingFunction(
+          shared, Handle<Code>(shared->code()));
+    }
   }
 
   target->set_context(*context);
@@ -2021,7 +2259,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
-  CONVERT_SMI_CHECKED(num, args[1]);
+  CONVERT_SMI_ARG_CHECKED(num, 1);
   RUNTIME_ASSERT(num >= 0);
   SetExpectedNofProperties(function, num);
   return isolate->heap()->undefined_value();
@@ -2245,24 +2483,24 @@
 
     Handle<String> joined_string;
     if (is_ascii_) {
-      joined_string = NewRawAsciiString(character_count_);
+      Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_);
       AssertNoAllocation no_alloc;
-      SeqAsciiString* seq = SeqAsciiString::cast(*joined_string);
       char* char_buffer = seq->GetChars();
       StringBuilderConcatHelper(*subject_,
                                 char_buffer,
                                 *array_builder_.array(),
                                 array_builder_.length());
+      joined_string = Handle<String>::cast(seq);
     } else {
       // Non-ASCII.
-      joined_string = NewRawTwoByteString(character_count_);
+      Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
       AssertNoAllocation no_alloc;
-      SeqTwoByteString* seq = SeqTwoByteString::cast(*joined_string);
       uc16* char_buffer = seq->GetChars();
       StringBuilderConcatHelper(*subject_,
                                 char_buffer,
                                 *array_builder_.array(),
                                 array_builder_.length());
+      joined_string = Handle<String>::cast(seq);
     }
     return joined_string;
   }
@@ -2280,15 +2518,13 @@
   }
 
  private:
-  Handle<String> NewRawAsciiString(int size) {
-    CALL_HEAP_FUNCTION(heap_->isolate(),
-                       heap_->AllocateRawAsciiString(size), String);
+  Handle<SeqAsciiString> NewRawAsciiString(int length) {
+    return heap_->isolate()->factory()->NewRawAsciiString(length);
   }
 
 
-  Handle<String> NewRawTwoByteString(int size) {
-    CALL_HEAP_FUNCTION(heap_->isolate(),
-                       heap_->AllocateRawTwoByteString(size), String);
+  Handle<SeqTwoByteString> NewRawTwoByteString(int length) {
+    return heap_->isolate()->factory()->NewRawTwoByteString(length);
   }
 
 
@@ -2309,7 +2545,7 @@
 class CompiledReplacement {
  public:
   CompiledReplacement()
-      : parts_(1), replacement_substrings_(0) {}
+      : parts_(1), replacement_substrings_(0), simple_hint_(false) {}
 
   void Compile(Handle<String> replacement,
                int capture_count,
@@ -2324,6 +2560,11 @@
   int parts() {
     return parts_.length();
   }
+
+  bool simple_hint() {
+    return simple_hint_;
+  }
+
  private:
   enum PartType {
     SUBJECT_PREFIX = 1,
@@ -2382,7 +2623,7 @@
   };
 
   template<typename Char>
-  static void ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
+  static bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
                                       Vector<Char> characters,
                                       int capture_count,
                                       int subject_length) {
@@ -2479,35 +2720,39 @@
     if (length > last) {
       if (last == 0) {
         parts->Add(ReplacementPart::ReplacementString());
+        return true;
       } else {
         parts->Add(ReplacementPart::ReplacementSubString(last, length));
       }
     }
+    return false;
   }
 
   ZoneList<ReplacementPart> parts_;
   ZoneList<Handle<String> > replacement_substrings_;
+  bool simple_hint_;
 };
 
 
 void CompiledReplacement::Compile(Handle<String> replacement,
                                   int capture_count,
                                   int subject_length) {
-  ASSERT(replacement->IsFlat());
-  if (replacement->IsAsciiRepresentation()) {
+  {
     AssertNoAllocation no_alloc;
-    ParseReplacementPattern(&parts_,
-                            replacement->ToAsciiVector(),
-                            capture_count,
-                            subject_length);
-  } else {
-    ASSERT(replacement->IsTwoByteRepresentation());
-    AssertNoAllocation no_alloc;
-
-    ParseReplacementPattern(&parts_,
-                            replacement->ToUC16Vector(),
-                            capture_count,
-                            subject_length);
+    String::FlatContent content = replacement->GetFlatContent();
+    ASSERT(content.IsFlat());
+    if (content.IsAscii()) {
+      simple_hint_ = ParseReplacementPattern(&parts_,
+                                             content.ToAsciiVector(),
+                                             capture_count,
+                                             subject_length);
+    } else {
+      ASSERT(content.IsTwoByte());
+      simple_hint_ = ParseReplacementPattern(&parts_,
+                                             content.ToUC16Vector(),
+                                             capture_count,
+                                             subject_length);
+    }
   }
   Isolate* isolate = replacement->GetIsolate();
   // Find substrings of replacement string and create them as String objects.
@@ -2569,6 +2814,170 @@
 }
 
 
+void FindAsciiStringIndices(Vector<const char> subject,
+                            char pattern,
+                            ZoneList<int>* indices,
+                            unsigned int limit) {
+  ASSERT(limit > 0);
+  // Collect indices of pattern in subject using memchr.
+  // Stop after finding at most limit values.
+  const char* subject_start = reinterpret_cast<const char*>(subject.start());
+  const char* subject_end = subject_start + subject.length();
+  const char* pos = subject_start;
+  while (limit > 0) {
+    pos = reinterpret_cast<const char*>(
+        memchr(pos, pattern, subject_end - pos));
+    if (pos == NULL) return;
+    indices->Add(static_cast<int>(pos - subject_start));
+    pos++;
+    limit--;
+  }
+}
+
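
A minimal sketch (not part of the patch) of the memchr scan used by FindAsciiStringIndices above, rewritten over a plain char buffer; FindCharIndices is a hypothetical name used only for illustration:

    // Sketch only: collect at most `limit` positions of `pattern` in `subject`,
    // using the same loop shape as FindAsciiStringIndices above.
    #include <cstddef>
    #include <cstring>
    #include <vector>
    static std::vector<int> FindCharIndices(const char* subject,
                                            std::size_t length,
                                            char pattern,
                                            unsigned limit) {
      std::vector<int> indices;
      const char* end = subject + length;
      const char* pos = subject;
      while (limit > 0) {
        pos = static_cast<const char*>(std::memchr(pos, pattern, end - pos));
        if (pos == NULL) break;
        indices.push_back(static_cast<int>(pos - subject));
        pos++;
        limit--;
      }
      return indices;
    }
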
+
+template <typename SubjectChar, typename PatternChar>
+void FindStringIndices(Isolate* isolate,
+                       Vector<const SubjectChar> subject,
+                       Vector<const PatternChar> pattern,
+                       ZoneList<int>* indices,
+                       unsigned int limit) {
+  ASSERT(limit > 0);
+  // Collect indices of pattern in subject.
+  // Stop after finding at most limit values.
+  int pattern_length = pattern.length();
+  int index = 0;
+  StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
+  while (limit > 0) {
+    index = search.Search(subject, index);
+    if (index < 0) return;
+    indices->Add(index);
+    index += pattern_length;
+    limit--;
+  }
+}
+
+
+void FindStringIndicesDispatch(Isolate* isolate,
+                               String* subject,
+                               String* pattern,
+                               ZoneList<int>* indices,
+                               unsigned int limit) {
+  {
+    AssertNoAllocation no_gc;
+    String::FlatContent subject_content = subject->GetFlatContent();
+    String::FlatContent pattern_content = pattern->GetFlatContent();
+    ASSERT(subject_content.IsFlat());
+    ASSERT(pattern_content.IsFlat());
+    if (subject_content.IsAscii()) {
+      Vector<const char> subject_vector = subject_content.ToAsciiVector();
+      if (pattern_content.IsAscii()) {
+        Vector<const char> pattern_vector = pattern_content.ToAsciiVector();
+        if (pattern_vector.length() == 1) {
+          FindAsciiStringIndices(subject_vector,
+                                 pattern_vector[0],
+                                 indices,
+                                 limit);
+        } else {
+          FindStringIndices(isolate,
+                            subject_vector,
+                            pattern_vector,
+                            indices,
+                            limit);
+        }
+      } else {
+        FindStringIndices(isolate,
+                          subject_vector,
+                          pattern_content.ToUC16Vector(),
+                          indices,
+                          limit);
+      }
+    } else {
+      Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
+      if (pattern->IsAsciiRepresentation()) {
+        FindStringIndices(isolate,
+                          subject_vector,
+                          pattern_content.ToAsciiVector(),
+                          indices,
+                          limit);
+      } else {
+        FindStringIndices(isolate,
+                          subject_vector,
+                          pattern_content.ToUC16Vector(),
+                          indices,
+                          limit);
+      }
+    }
+  }
+}
+
+
+template<typename ResultSeqString>
+MUST_USE_RESULT static MaybeObject* StringReplaceStringWithString(
+    Isolate* isolate,
+    Handle<String> subject,
+    Handle<JSRegExp> pattern_regexp,
+    Handle<String> replacement) {
+  ASSERT(subject->IsFlat());
+  ASSERT(replacement->IsFlat());
+
+  ZoneScope zone_space(isolate, DELETE_ON_EXIT);
+  ZoneList<int> indices(8);
+  ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
+  String* pattern =
+      String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
+  int subject_len = subject->length();
+  int pattern_len = pattern->length();
+  int replacement_len = replacement->length();
+
+  FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff);
+
+  int matches = indices.length();
+  if (matches == 0) return *subject;
+
+  int result_len = (replacement_len - pattern_len) * matches + subject_len;
+  int subject_pos = 0;
+  int result_pos = 0;
+
+  Handle<ResultSeqString> result;
+  if (ResultSeqString::kHasAsciiEncoding) {
+    result = Handle<ResultSeqString>::cast(
+        isolate->factory()->NewRawAsciiString(result_len));
+  } else {
+    result = Handle<ResultSeqString>::cast(
+        isolate->factory()->NewRawTwoByteString(result_len));
+  }
+
+  for (int i = 0; i < matches; i++) {
+    // Copy non-matched subject content.
+    if (subject_pos < indices.at(i)) {
+      String::WriteToFlat(*subject,
+                          result->GetChars() + result_pos,
+                          subject_pos,
+                          indices.at(i));
+      result_pos += indices.at(i) - subject_pos;
+    }
+
+    // Replace match.
+    if (replacement_len > 0) {
+      String::WriteToFlat(*replacement,
+                          result->GetChars() + result_pos,
+                          0,
+                          replacement_len);
+      result_pos += replacement_len;
+    }
+
+    subject_pos = indices.at(i) + pattern_len;
+  }
+  // Add remaining subject content at the end.
+  if (subject_pos < subject_len) {
+    String::WriteToFlat(*subject,
+                        result->GetChars() + result_pos,
+                        subject_pos,
+                        subject_len);
+  }
+  return *result;
+}
+
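
The result length in StringReplaceStringWithString above is (replacement_len - pattern_len) * matches + subject_len; for subject "aXbXc", pattern "X", replacement "YY" this gives (2 - 1) * 2 + 5 = 7, i.e. "aYYbYYc". Below is a minimal std::string sketch (not part of the patch) of the same non-overlapping segment-copy loop, assuming a non-empty pattern; ReplaceAll is a hypothetical helper for illustration only:

    // Sketch only: same three-phase copy as above -- unmatched prefix,
    // replacement, remaining tail -- with the result length precomputed
    // from the number of matches.
    #include <cstddef>
    #include <string>
    #include <vector>
    static std::string ReplaceAll(const std::string& subject,
                                  const std::string& pattern,  // assumed non-empty
                                  const std::string& replacement) {
      std::vector<std::size_t> indices;
      for (std::size_t pos = subject.find(pattern); pos != std::string::npos;
           pos = subject.find(pattern, pos + pattern.size())) {
        indices.push_back(pos);
      }
      if (indices.empty()) return subject;
      std::ptrdiff_t result_len =
          (static_cast<std::ptrdiff_t>(replacement.size()) -
           static_cast<std::ptrdiff_t>(pattern.size())) *
              static_cast<std::ptrdiff_t>(indices.size()) +
          static_cast<std::ptrdiff_t>(subject.size());
      std::string result;
      result.reserve(static_cast<std::size_t>(result_len));
      std::size_t subject_pos = 0;
      for (std::size_t i = 0; i < indices.size(); i++) {
        result.append(subject, subject_pos, indices[i] - subject_pos);  // prefix
        result.append(replacement);                                     // match
        subject_pos = indices[i] + pattern.size();
      }
      result.append(subject, subject_pos, std::string::npos);           // tail
      return result;
    }
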
 
 MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
     Isolate* isolate,
@@ -2600,7 +3009,7 @@
   int capture_count = regexp_handle->CaptureCount();
 
   // CompiledReplacement uses zone allocation.
-  CompilationZoneScope zone(DELETE_ON_EXIT);
+  ZoneScope zone(isolate, DELETE_ON_EXIT);
   CompiledReplacement compiled_replacement;
   compiled_replacement.Compile(replacement_handle,
                                capture_count,
@@ -2608,6 +3017,20 @@
 
   bool is_global = regexp_handle->GetFlags().is_global();
 
+  // Shortcut for simple non-regexp global replacements
+  if (is_global &&
+      regexp->TypeTag() == JSRegExp::ATOM &&
+      compiled_replacement.simple_hint()) {
+    if (subject_handle->HasOnlyAsciiChars() &&
+        replacement_handle->HasOnlyAsciiChars()) {
+      return StringReplaceStringWithString<SeqAsciiString>(
+          isolate, subject_handle, regexp_handle, replacement_handle);
+    } else {
+      return StringReplaceStringWithString<SeqTwoByteString>(
+          isolate, subject_handle, regexp_handle, replacement_handle);
+    }
+  }
+
   // Guessing the number of parts that the final result string is built
   // from. Global regexps can match any number of times, so we guess
   // conservatively.
@@ -2693,6 +3116,20 @@
 
   Handle<String> subject_handle(subject);
   Handle<JSRegExp> regexp_handle(regexp);
+
+  // Shortcut for simple non-regexp global replacements
+  if (regexp_handle->GetFlags().is_global() &&
+      regexp_handle->TypeTag() == JSRegExp::ATOM) {
+    Handle<String> empty_string_handle(HEAP->empty_string());
+    if (subject_handle->HasOnlyAsciiChars()) {
+      return StringReplaceStringWithString<SeqAsciiString>(
+          isolate, subject_handle, regexp_handle, empty_string_handle);
+    } else {
+      return StringReplaceStringWithString<SeqTwoByteString>(
+          isolate, subject_handle, regexp_handle, empty_string_handle);
+    }
+  }
+
   Handle<JSArray> last_match_info_handle(last_match_info);
   Handle<Object> match = RegExpImpl::Exec(regexp_handle,
                                           subject_handle,
@@ -2879,34 +3316,32 @@
 
   AssertNoAllocation no_heap_allocation;  // ensure vectors stay valid
   // Extract flattened substrings of cons strings before determining asciiness.
-  String* seq_sub = *sub;
-  if (seq_sub->IsConsString()) seq_sub = ConsString::cast(seq_sub)->first();
-  String* seq_pat = *pat;
-  if (seq_pat->IsConsString()) seq_pat = ConsString::cast(seq_pat)->first();
+  String::FlatContent seq_sub = sub->GetFlatContent();
+  String::FlatContent seq_pat = pat->GetFlatContent();
 
   // dispatch on type of strings
-  if (seq_pat->IsAsciiRepresentation()) {
-    Vector<const char> pat_vector = seq_pat->ToAsciiVector();
-    if (seq_sub->IsAsciiRepresentation()) {
+  if (seq_pat.IsAscii()) {
+    Vector<const char> pat_vector = seq_pat.ToAsciiVector();
+    if (seq_sub.IsAscii()) {
       return SearchString(isolate,
-                          seq_sub->ToAsciiVector(),
+                          seq_sub.ToAsciiVector(),
                           pat_vector,
                           start_index);
     }
     return SearchString(isolate,
-                        seq_sub->ToUC16Vector(),
+                        seq_sub.ToUC16Vector(),
                         pat_vector,
                         start_index);
   }
-  Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
-  if (seq_sub->IsAsciiRepresentation()) {
+  Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
+  if (seq_sub.IsAscii()) {
     return SearchString(isolate,
-                        seq_sub->ToAsciiVector(),
+                        seq_sub.ToAsciiVector(),
                         pat_vector,
                         start_index);
   }
   return SearchString(isolate,
-                      seq_sub->ToUC16Vector(),
+                      seq_sub.ToUC16Vector(),
                       pat_vector,
                       start_index);
 }
@@ -2989,29 +3424,31 @@
   if (!sub->IsFlat()) FlattenString(sub);
   if (!pat->IsFlat()) FlattenString(pat);
 
+  int position = -1;
   AssertNoAllocation no_heap_allocation;  // ensure vectors stay valid
 
-  int position = -1;
+  String::FlatContent sub_content = sub->GetFlatContent();
+  String::FlatContent pat_content = pat->GetFlatContent();
 
-  if (pat->IsAsciiRepresentation()) {
-    Vector<const char> pat_vector = pat->ToAsciiVector();
-    if (sub->IsAsciiRepresentation()) {
-      position = StringMatchBackwards(sub->ToAsciiVector(),
+  if (pat_content.IsAscii()) {
+    Vector<const char> pat_vector = pat_content.ToAsciiVector();
+    if (sub_content.IsAscii()) {
+      position = StringMatchBackwards(sub_content.ToAsciiVector(),
                                       pat_vector,
                                       start_index);
     } else {
-      position = StringMatchBackwards(sub->ToUC16Vector(),
+      position = StringMatchBackwards(sub_content.ToUC16Vector(),
                                       pat_vector,
                                       start_index);
     }
   } else {
-    Vector<const uc16> pat_vector = pat->ToUC16Vector();
-    if (sub->IsAsciiRepresentation()) {
-      position = StringMatchBackwards(sub->ToAsciiVector(),
+    Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
+    if (sub_content.IsAscii()) {
+      position = StringMatchBackwards(sub_content.ToAsciiVector(),
                                       pat_vector,
                                       start_index);
     } else {
-      position = StringMatchBackwards(sub->ToUC16Vector(),
+      position = StringMatchBackwards(sub_content.ToUC16Vector(),
                                       pat_vector,
                                       start_index);
     }
@@ -3074,17 +3511,17 @@
   ASSERT(args.length() == 3);
 
   CONVERT_CHECKED(String, value, args[0]);
-  Object* from = args[1];
-  Object* to = args[2];
   int start, end;
   // We have a fast integer-only case here to avoid a conversion to double in
   // the common case where from and to are Smis.
-  if (from->IsSmi() && to->IsSmi()) {
-    start = Smi::cast(from)->value();
-    end = Smi::cast(to)->value();
+  if (args[1]->IsSmi() && args[2]->IsSmi()) {
+    CONVERT_SMI_ARG_CHECKED(from_number, 1);
+    CONVERT_SMI_ARG_CHECKED(to_number, 2);
+    start = from_number;
+    end = to_number;
   } else {
-    CONVERT_DOUBLE_CHECKED(from_number, from);
-    CONVERT_DOUBLE_CHECKED(to_number, to);
+    CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
+    CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
     start = FastD2I(from_number);
     end = FastD2I(to_number);
   }
@@ -3114,11 +3551,11 @@
   }
   int length = subject->length();
 
-  CompilationZoneScope zone_space(DELETE_ON_EXIT);
+  ZoneScope zone_space(isolate, DELETE_ON_EXIT);
   ZoneList<int> offsets(8);
+  int start;
+  int end;
   do {
-    int start;
-    int end;
     {
       AssertNoAllocation no_alloc;
       FixedArray* elements = FixedArray::cast(regexp_info->elements());
@@ -3127,20 +3564,23 @@
     }
     offsets.Add(start);
     offsets.Add(end);
-    int index = start < end ? end : end + 1;
-    if (index > length) break;
-    match = RegExpImpl::Exec(regexp, subject, index, regexp_info);
+    if (start == end) if (++end > length) break;
+    match = RegExpImpl::Exec(regexp, subject, end, regexp_info);
     if (match.is_null()) {
       return Failure::Exception();
     }
   } while (!match->IsNull());
   int matches = offsets.length() / 2;
   Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
-  for (int i = 0; i < matches ; i++) {
+  Handle<String> substring = isolate->factory()->
+    NewSubString(subject, offsets.at(0), offsets.at(1));
+  elements->set(0, *substring);
+  for (int i = 1; i < matches ; i++) {
     int from = offsets.at(i * 2);
     int to = offsets.at(i * 2 + 1);
-    Handle<String> match = isolate->factory()->NewSubString(subject, from, to);
-    elements->set(i, *match);
+    Handle<String> substring = isolate->factory()->
+        NewProperSubString(subject, from, to);
+    elements->set(i, *substring);
   }
   Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
   result->set_length(Smi::FromInt(matches));
@@ -3226,36 +3666,38 @@
   for (;;) {  // Break when search complete.
     builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
     AssertNoAllocation no_gc;
-    if (subject->IsAsciiRepresentation()) {
-      Vector<const char> subject_vector = subject->ToAsciiVector();
-      if (pattern->IsAsciiRepresentation()) {
+    String::FlatContent subject_content = subject->GetFlatContent();
+    String::FlatContent pattern_content = pattern->GetFlatContent();
+    if (subject_content.IsAscii()) {
+      Vector<const char> subject_vector = subject_content.ToAsciiVector();
+      if (pattern_content.IsAscii()) {
         if (SearchStringMultiple(isolate,
                                  subject_vector,
-                                 pattern->ToAsciiVector(),
+                                 pattern_content.ToAsciiVector(),
                                  *pattern,
                                  builder,
                                  &match_pos)) break;
       } else {
         if (SearchStringMultiple(isolate,
                                  subject_vector,
-                                 pattern->ToUC16Vector(),
+                                 pattern_content.ToUC16Vector(),
                                  *pattern,
                                  builder,
                                  &match_pos)) break;
       }
     } else {
-      Vector<const uc16> subject_vector = subject->ToUC16Vector();
-      if (pattern->IsAsciiRepresentation()) {
+      Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
+      if (pattern_content.IsAscii()) {
         if (SearchStringMultiple(isolate,
                                  subject_vector,
-                                 pattern->ToAsciiVector(),
+                                 pattern_content.ToAsciiVector(),
                                  *pattern,
                                  builder,
                                  &match_pos)) break;
       } else {
         if (SearchStringMultiple(isolate,
                                  subject_vector,
-                                 pattern->ToUC16Vector(),
+                                 pattern_content.ToUC16Vector(),
                                  *pattern,
                                  builder,
                                  &match_pos)) break;
@@ -3290,6 +3732,7 @@
   OffsetsVector registers(required_registers);
   Vector<int32_t> register_vector(registers.vector(), registers.length());
   int subject_length = subject->length();
+  bool first = true;
 
   for (;;) {  // Break on failure, return on exception.
     RegExpImpl::IrregexpResult result =
@@ -3307,9 +3750,15 @@
       }
       match_end = register_vector[1];
       HandleScope loop_scope(isolate);
-      builder->Add(*isolate->factory()->NewSubString(subject,
-                                                     match_start,
-                                                     match_end));
+      if (!first) {
+        builder->Add(*isolate->factory()->NewProperSubString(subject,
+                                                             match_start,
+                                                             match_end));
+      } else {
+        builder->Add(*isolate->factory()->NewSubString(subject,
+                                                       match_start,
+                                                       match_end));
+      }
       if (match_start != match_end) {
         pos = match_end;
       } else {
@@ -3322,6 +3771,7 @@
       ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
       return result;
     }
+    first = false;
   }
 
   if (match_start >= 0) {
@@ -3373,7 +3823,7 @@
     // at the end, so we have two vectors that we swap between.
     OffsetsVector registers2(required_registers);
     Vector<int> prev_register_vector(registers2.vector(), registers2.length());
-
+    bool first = true;
     do {
       int match_start = register_vector[0];
       builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
@@ -3391,18 +3841,30 @@
         // subject, i.e., 3 + capture count in total.
         Handle<FixedArray> elements =
             isolate->factory()->NewFixedArray(3 + capture_count);
-        Handle<String> match = isolate->factory()->NewSubString(subject,
-                                                                match_start,
-                                                                match_end);
+        Handle<String> match;
+        if (!first) {
+          match = isolate->factory()->NewProperSubString(subject,
+                                                         match_start,
+                                                         match_end);
+        } else {
+          match = isolate->factory()->NewSubString(subject,
+                                                   match_start,
+                                                   match_end);
+        }
         elements->set(0, *match);
         for (int i = 1; i <= capture_count; i++) {
           int start = register_vector[i * 2];
           if (start >= 0) {
             int end = register_vector[i * 2 + 1];
             ASSERT(start <= end);
-            Handle<String> substring = isolate->factory()->NewSubString(subject,
-                                                                        start,
-                                                                        end);
+            Handle<String> substring;
+            if (!first) {
+              substring = isolate->factory()->NewProperSubString(subject,
+                                                                 start,
+                                                                 end);
+            } else {
+              substring = isolate->factory()->NewSubString(subject, start, end);
+            }
             elements->set(i, *substring);
           } else {
             ASSERT(register_vector[i * 2 + 1] < 0);
@@ -3432,6 +3894,7 @@
                                             subject,
                                             pos,
                                             register_vector);
+      first = false;
     } while (result == RegExpImpl::RE_SUCCESS);
 
     if (result != RegExpImpl::RE_EXCEPTION) {
@@ -3467,7 +3930,7 @@
   HandleScope handles(isolate);
 
   CONVERT_ARG_CHECKED(String, subject, 1);
-  if (!subject->IsFlat()) { FlattenString(subject); }
+  if (!subject->IsFlat()) FlattenString(subject);
   CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
   CONVERT_ARG_CHECKED(JSArray, result_array, 3);
@@ -3478,7 +3941,8 @@
   if (result_array->HasFastElements()) {
     result_elements =
         Handle<FixedArray>(FixedArray::cast(result_array->elements()));
-  } else {
+  }
+  if (result_elements.is_null() || result_elements->length() < 16) {
     result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
   }
   FixedArrayBuilder builder(result_elements);
@@ -3520,13 +3984,13 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
+  CONVERT_SMI_ARG_CHECKED(radix, 1);
+  RUNTIME_ASSERT(2 <= radix && radix <= 36);
 
   // Fast case where the result is a one character string.
-  if (args[0]->IsSmi() && args[1]->IsSmi()) {
-    int value = Smi::cast(args[0])->value();
-    int radix = Smi::cast(args[1])->value();
+  if (args[0]->IsSmi()) {
+    int value = args.smi_at(0);
     if (value >= 0 && value < radix) {
-      RUNTIME_ASSERT(radix <= 36);
       // Character array used for conversion.
       static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
       return isolate->heap()->
@@ -3535,7 +3999,7 @@
   }
 
   // Slow case.
-  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
     return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
@@ -3545,9 +4009,6 @@
     }
     return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
-  CONVERT_DOUBLE_CHECKED(radix_number, args[1]);
-  int radix = FastD2I(radix_number);
-  RUNTIME_ASSERT(2 <= radix && radix <= 36);
   char* str = DoubleToRadixCString(value, radix);
   MaybeObject* result =
       isolate->heap()->AllocateStringFromAscii(CStrVector(str));
@@ -3560,7 +4021,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
     return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
@@ -3570,7 +4031,7 @@
     }
     return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
-  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
   RUNTIME_ASSERT(f >= 0);
   char* str = DoubleToFixedCString(value, f);
@@ -3585,7 +4046,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
     return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
@@ -3595,7 +4056,7 @@
     }
     return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
-  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
   RUNTIME_ASSERT(f >= -1 && f <= 20);
   char* str = DoubleToExponentialCString(value, f);
@@ -3610,7 +4071,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
     return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
@@ -3620,7 +4081,7 @@
     }
     return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
-  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
   RUNTIME_ASSERT(f >= 1 && f <= 21);
   char* str = DoubleToPrecisionCString(value, f);
@@ -3710,8 +4171,7 @@
   if (name->AsArrayIndex(&index)) {
     return GetElementOrCharAt(isolate, object, index);
   } else {
-    PropertyAttributes attr;
-    return object->GetProperty(*name, &attr);
+    return object->GetProperty(*name);
   }
 }
 
@@ -3783,7 +4243,7 @@
     // Fast case for string indexing using [] with a smi index.
     HandleScope scope(isolate);
     Handle<String> str = args.at<String>(0);
-    int index = Smi::cast(args[1])->value();
+    int index = args.smi_at(1);
     if (index >= 0 && index < str->length()) {
       Handle<Object> result = GetCharAt(str, index);
       return *result;
@@ -3826,7 +4286,7 @@
        || result.type() == CONSTANT_FUNCTION)) {
     Object* ok;
     { MaybeObject* maybe_ok =
-          obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+          obj->DeleteProperty(name, JSReceiver::NORMAL_DELETION);
       if (!maybe_ok->ToObject(&ok)) return maybe_ok;
     }
   }
@@ -3871,17 +4331,35 @@
       if (proto->IsNull()) return *obj_value;
       js_object = Handle<JSObject>::cast(proto);
     }
-    NormalizeElements(js_object);
-    Handle<NumberDictionary> dictionary(js_object->element_dictionary());
+
+    // Don't allow element properties to be redefined on objects with external
+    // array elements.
+    if (js_object->HasExternalArrayElements()) {
+      Handle<Object> args[2] = { js_object, name };
+      Handle<Object> error =
+          isolate->factory()->NewTypeError("redef_external_array_element",
+                                           HandleVector(args, 2));
+      return isolate->Throw(*error);
+    }
+
+    Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
     // Make sure that we never go back to fast case.
     dictionary->set_requires_slow_elements();
     PropertyDetails details = PropertyDetails(attr, NORMAL);
-    NumberDictionarySet(dictionary, index, obj_value, details);
+    Handle<SeededNumberDictionary> extended_dictionary =
+        SeededNumberDictionarySet(dictionary, index, obj_value, details);
+    if (*extended_dictionary != *dictionary) {
+      if (js_object->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+        FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
+      } else {
+        js_object->set_elements(*extended_dictionary);
+      }
+    }
     return *obj_value;
   }
 
   LookupResult result;
-  js_object->LookupRealNamedProperty(*name, &result);
+  js_object->LocalLookupRealNamedProperty(*name, &result);
 
   // To be compatible with safari we do not change the value on API objects
   // in defineProperty. Firefox disagrees here, and actually changes the value.
@@ -3921,6 +4399,28 @@
 }
 
 
+// Special case for elements if any of the attribute flags are set.
+// If elements are in the fast case we always implicitly assume that:
+// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
+static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
+                                              Handle<JSObject> js_object,
+                                              uint32_t index,
+                                              Handle<Object> value,
+                                              PropertyAttributes attr) {
+  // Normalize the elements to enable attributes on the property.
+  Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
+  // Make sure that we never go back to fast case.
+  dictionary->set_requires_slow_elements();
+  PropertyDetails details = PropertyDetails(attr, NORMAL);
+  Handle<SeededNumberDictionary> extended_dictionary =
+      SeededNumberDictionarySet(dictionary, index, value, details);
+  if (*extended_dictionary != *dictionary) {
+    js_object->set_elements(*extended_dictionary);
+  }
+  return *value;
+}
+
+
 MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
                                         Handle<Object> object,
                                         Handle<Object> key,
@@ -3956,6 +4456,10 @@
       return *value;
     }
 
+    if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
+      return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
+    }
+
     Handle<Object> result = SetElement(js_object, index, value, strict_mode);
     if (result.is_null()) return Failure::Exception();
     return *value;
@@ -3964,6 +4468,13 @@
   if (key->IsString()) {
     Handle<Object> result;
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
+      if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
+        return NormalizeObjectSetElement(isolate,
+                                         js_object,
+                                         index,
+                                         value,
+                                         attr);
+      }
       result = SetElement(js_object, index, value, strict_mode);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
@@ -3981,7 +4492,7 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(index, *value, strict_mode);
+    return js_object->SetElement(index, *value, strict_mode, true);
   } else {
     return js_object->SetProperty(*name, *value, attr, strict_mode);
   }
@@ -4009,12 +4520,12 @@
       return *value;
     }
 
-    return js_object->SetElement(index, *value, kNonStrictMode);
+    return js_object->SetElement(index, *value, kNonStrictMode, true);
   }
 
   if (key->IsString()) {
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
-      return js_object->SetElement(index, *value, kNonStrictMode);
+      return js_object->SetElement(index, *value, kNonStrictMode, true);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
       key_string->TryFlatten();
@@ -4031,7 +4542,7 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(index, *value, kNonStrictMode);
+    return js_object->SetElement(index, *value, kNonStrictMode, true);
   } else {
     return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
   }
@@ -4039,24 +4550,25 @@
 
 
 MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
-                                                Handle<JSObject> js_object,
+                                                Handle<JSReceiver> receiver,
                                                 Handle<Object> key) {
   HandleScope scope(isolate);
 
   // Check if the given key is an array index.
   uint32_t index;
-  if (key->ToArrayIndex(&index)) {
+  if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
     // In Firefox/SpiderMonkey, Safari and Opera you can access the
     // characters of a string using [] notation.  In the case of a
     // String object we just need to redirect the deletion to the
     // underlying string if the index is in range.  Since the
     // underlying string does nothing with the deletion, we can ignore
     // such deletions.
-    if (js_object->IsStringObjectWithCharacterAt(index)) {
+    if (receiver->IsStringObjectWithCharacterAt(index)) {
       return isolate->heap()->true_value();
     }
 
-    return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
+    return JSObject::cast(*receiver)->DeleteElement(
+        index, JSReceiver::FORCE_DELETION);
   }
 
   Handle<String> key_string;
@@ -4071,7 +4583,7 @@
   }
 
   key_string->TryFlatten();
-  return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
+  return receiver->DeleteProperty(*key_string, JSReceiver::FORCE_DELETION);
 }
 
 
@@ -4082,7 +4594,7 @@
   Handle<Object> object = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
   Handle<Object> value = args.at<Object>(2);
-  CONVERT_SMI_CHECKED(unchecked_attributes, args[3]);
+  CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
   RUNTIME_ASSERT(
       (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   // Compute attributes.
@@ -4091,7 +4603,7 @@
 
   StrictModeFlag strict_mode = kNonStrictMode;
   if (args.length() == 5) {
-    CONVERT_SMI_CHECKED(strict_unchecked, args[4]);
+    CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
     RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
                    strict_unchecked == kNonStrictMode);
     strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
@@ -4106,6 +4618,23 @@
 }
 
 
+// Set the native flag on the function.
+// This is used to decide whether we should transform null and undefined
+// into the global object when doing call and apply.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 1);
+
+  Handle<Object> object = args.at<Object>(0);
+
+  if (object->IsJSFunction()) {
+    JSFunction* func = JSFunction::cast(*object);
+    func->shared()->set_native(true);
+  }
+  return isolate->heap()->undefined_value();
+}
+
+
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
@@ -4133,12 +4662,12 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(JSReceiver, object, args[0]);
   CONVERT_CHECKED(String, key, args[1]);
-  CONVERT_SMI_CHECKED(strict, args[2]);
+  CONVERT_SMI_ARG_CHECKED(strict, 2);
   return object->DeleteProperty(key, (strict == kStrictMode)
-                                      ? JSObject::STRICT_DELETION
-                                      : JSObject::NORMAL_DELETION);
+                                      ? JSReceiver::STRICT_DELETION
+                                      : JSReceiver::NORMAL_DELETION);
 }
 
 
@@ -4165,25 +4694,33 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[1]);
 
+  uint32_t index;
+  const bool key_is_array_index = key->AsArrayIndex(&index);
+
   Object* obj = args[0];
   // Only JS objects can have properties.
   if (obj->IsJSObject()) {
     JSObject* object = JSObject::cast(obj);
-    // Fast case - no interceptors.
+    // Fast case: either the key is a real named property or it is not
+    // an array index and there are no interceptors or hidden
+    // prototypes.
     if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
-    // Slow case.  Either it's not there or we have an interceptor.  We should
-    // have handles for this kind of deal.
+    Map* map = object->map();
+    if (!key_is_array_index &&
+        !map->has_named_interceptor() &&
+        !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
+      return isolate->heap()->false_value();
+    }
+    // Slow case.
     HandleScope scope(isolate);
     return HasLocalPropertyImplementation(isolate,
                                           Handle<JSObject>(object),
                                           Handle<String>(key));
-  } else if (obj->IsString()) {
+  } else if (obj->IsString() && key_is_array_index) {
     // Well, there is one exception:  Handle [] on strings.
-    uint32_t index;
-    if (key->AsArrayIndex(&index)) {
-      String* string = String::cast(obj);
-      if (index < static_cast<uint32_t>(string->length()))
-        return isolate->heap()->true_value();
+    String* string = String::cast(obj);
+    if (index < static_cast<uint32_t>(string->length())) {
+      return isolate->heap()->true_value();
     }
   }
   return isolate->heap()->false_value();
@@ -4194,11 +4731,11 @@
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
 
-  // Only JS objects can have properties.
-  if (args[0]->IsJSObject()) {
-    JSObject* object = JSObject::cast(args[0]);
+  // Only JS receivers can have properties.
+  if (args[0]->IsJSReceiver()) {
+    JSReceiver* receiver = JSReceiver::cast(args[0]);
     CONVERT_CHECKED(String, key, args[1]);
-    if (object->HasProperty(key)) return isolate->heap()->true_value();
+    if (receiver->HasProperty(key)) return isolate->heap()->true_value();
   }
   return isolate->heap()->false_value();
 }
@@ -4343,7 +4880,7 @@
   for (int i = 0; i < length; i++) {
     jsproto->GetLocalPropertyNames(*names, next_copy_index);
     next_copy_index += local_property_count[i];
-    if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
+    if (jsproto->HasHiddenProperties()) {
       proto_with_hidden_properties++;
     }
     if (i < length - 1) {
@@ -4595,11 +5132,14 @@
         return isolate->heap()->boolean_symbol();
       }
       if (heap_obj->IsNull()) {
-        return isolate->heap()->object_symbol();
+        return FLAG_harmony_typeof
+            ? isolate->heap()->null_symbol()
+            : isolate->heap()->object_symbol();
       }
       ASSERT(heap_obj->IsUndefined());
       return isolate->heap()->undefined_symbol();
-    case JS_FUNCTION_TYPE: case JS_REGEXP_TYPE:
+    case JS_FUNCTION_TYPE:
+    case JS_FUNCTION_PROXY_TYPE:
       return isolate->heap()->function_symbol();
     default:
       // For any kind of object not handled above, the spec rule for
@@ -4973,6 +5513,8 @@
 // Doing JSON quoting cannot make the string more than this many times larger.
 static const int kJsonQuoteWorstCaseBlowup = 6;
 
+static const int kSpaceForQuotesAndComma = 3;
+static const int kSpaceForBrackets = 2;
 
 // Covers the entire ASCII range (all other characters are unchanged by JSON
 // quoting).
@@ -5060,13 +5602,51 @@
 }
 
 
+template <typename SinkChar, typename SourceChar>
+static inline SinkChar* WriteQuoteJsonString(
+    Isolate* isolate,
+    SinkChar* write_cursor,
+    Vector<const SourceChar> characters) {
+  // SinkChar is only char if SourceChar is guaranteed to be char.
+  ASSERT(sizeof(SinkChar) >= sizeof(SourceChar));
+  const SourceChar* read_cursor = characters.start();
+  const SourceChar* end = read_cursor + characters.length();
+  *(write_cursor++) = '"';
+  while (read_cursor < end) {
+    SourceChar c = *(read_cursor++);
+    if (sizeof(SourceChar) > 1u &&
+        static_cast<unsigned>(c) >= kQuoteTableLength) {
+      *(write_cursor++) = static_cast<SinkChar>(c);
+    } else {
+      int len = JsonQuoteLengths[static_cast<unsigned>(c)];
+      const char* replacement = JsonQuotes +
+          static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
+      write_cursor[0] = replacement[0];
+      if (len > 1) {
+        write_cursor[1] = replacement[1];
+        if (len > 2) {
+          ASSERT(len == 6);
+          write_cursor[2] = replacement[2];
+          write_cursor[3] = replacement[3];
+          write_cursor[4] = replacement[4];
+          write_cursor[5] = replacement[5];
+        }
+      }
+      write_cursor += len;
+    }
+  }
+  *(write_cursor++) = '"';
+  return write_cursor;
+}
+
+
 template <typename Char, typename StringType, bool comma>
 static MaybeObject* QuoteJsonString(Isolate* isolate,
                                     Vector<const Char> characters) {
   int length = characters.length();
   isolate->counters()->quote_json_char_count()->Increment(length);
-  const int kSpaceForQuotes = 2 + (comma ? 1 :0);
-  int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
+  int worst_case_length =
+        length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotesAndComma;
   if (worst_case_length > kMaxGuaranteedNewSpaceString) {
     return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
   }
@@ -5091,34 +5671,9 @@
   Char* write_cursor = reinterpret_cast<Char*>(
       new_string->address() + SeqAsciiString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
-  *(write_cursor++) = '"';
-
-  const Char* read_cursor = characters.start();
-  const Char* end = read_cursor + length;
-  while (read_cursor < end) {
-    Char c = *(read_cursor++);
-    if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
-      *(write_cursor++) = c;
-    } else {
-      int len = JsonQuoteLengths[static_cast<unsigned>(c)];
-      const char* replacement = JsonQuotes +
-          static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
-      write_cursor[0] = replacement[0];
-      if (len > 1) {
-        write_cursor[1] = replacement[1];
-        if (len > 2) {
-          ASSERT(len == 6);
-          write_cursor[2] = replacement[2];
-          write_cursor[3] = replacement[3];
-          write_cursor[4] = replacement[4];
-          write_cursor[5] = replacement[5];
-        }
-      }
-      write_cursor += len;
-    }
-  }
-  *(write_cursor++) = '"';
-
+  write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
+                                                  write_cursor,
+                                                  characters);
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
           new_string->address() + SeqAsciiString::kHeaderSize));
@@ -5141,12 +5696,14 @@
     str = String::cast(flat);
     ASSERT(str->IsFlat());
   }
-  if (str->IsTwoByteRepresentation()) {
+  String::FlatContent flat = str->GetFlatContent();
+  ASSERT(flat.IsFlat());
+  if (flat.IsTwoByte()) {
     return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
-                                                          str->ToUC16Vector());
+                                                          flat.ToUC16Vector());
   } else {
     return QuoteJsonString<char, SeqAsciiString, false>(isolate,
-                                                        str->ToAsciiVector());
+                                                        flat.ToAsciiVector());
   }
 }
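
The refactoring above pulls the per-character escaping loop out into WriteQuoteJsonString so the single-string and array paths can share it: characters below kQuoteTableLength are replaced by a precomputed escape of known length, everything else is copied through, and the value is wrapped in double quotes. A minimal standalone sketch of the same escaping rules, written with a switch instead of V8's JsonQuotes table (all names below are illustrative, not V8 API):

  #include <cstdio>
  #include <string>

  // Escape one string for embedding in JSON output.  Control characters and
  // the two characters that must always be escaped ('"' and backslash) get a
  // replacement; everything else is copied through unchanged.
  static std::string QuoteJson(const std::string& in) {
    std::string out;
    out.reserve(in.size() + 2);  // at least room for the surrounding quotes
    out.push_back('"');
    for (unsigned char c : in) {
      switch (c) {
        case '"':  out += "\\\""; break;
        case '\\': out += "\\\\"; break;
        case '\b': out += "\\b";  break;
        case '\f': out += "\\f";  break;
        case '\n': out += "\\n";  break;
        case '\r': out += "\\r";  break;
        case '\t': out += "\\t";  break;
        default:
          if (c < 0x20) {  // remaining control characters become \u00XX
            char buf[7];
            std::snprintf(buf, sizeof(buf), "\\u%04x", static_cast<unsigned>(c));
            out += buf;    // worst case: six output characters for one input
          } else {
            out.push_back(static_cast<char>(c));
          }
      }
    }
    out.push_back('"');
    return out;
  }

  int main() {
    std::printf("%s\n", QuoteJson("line1\n\"two\"").c_str());
    return 0;
  }

The worst case is a control character expanding to "\u00XX", six output characters per input character, which is where the factor kJsonQuoteWorstCaseBlowup = 6 in the length estimates comes from.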
 
@@ -5163,20 +5720,118 @@
     str = String::cast(flat);
     ASSERT(str->IsFlat());
   }
-  if (str->IsTwoByteRepresentation()) {
+  String::FlatContent flat = str->GetFlatContent();
+  if (flat.IsTwoByte()) {
     return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
-                                                         str->ToUC16Vector());
+                                                         flat.ToUC16Vector());
   } else {
     return QuoteJsonString<char, SeqAsciiString, true>(isolate,
-                                                       str->ToAsciiVector());
+                                                       flat.ToAsciiVector());
   }
 }
 
+
+template <typename Char, typename StringType>
+static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
+                                         FixedArray* array,
+                                         int worst_case_length) {
+  int length = array->length();
+
+  MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
+                                                         worst_case_length);
+  Object* new_object;
+  if (!new_alloc->ToObject(&new_object)) {
+    return new_alloc;
+  }
+  if (!isolate->heap()->new_space()->Contains(new_object)) {
+    // Even if our string is small enough to fit in new space we still have to
+    // handle it being allocated in old space as may happen in the third
+    // attempt.  See CALL_AND_RETRY in heap-inl.h and similar code in
+    // CEntryStub::GenerateCore.
+    return isolate->heap()->undefined_value();
+  }
+  AssertNoAllocation no_gc;
+  StringType* new_string = StringType::cast(new_object);
+  ASSERT(isolate->heap()->new_space()->Contains(new_string));
+
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  Char* write_cursor = reinterpret_cast<Char*>(
+      new_string->address() + SeqAsciiString::kHeaderSize);
+  *(write_cursor++) = '[';
+  for (int i = 0; i < length; i++) {
+    if (i != 0) *(write_cursor++) = ',';
+    String* str = String::cast(array->get(i));
+    String::FlatContent content = str->GetFlatContent();
+    ASSERT(content.IsFlat());
+    if (content.IsTwoByte()) {
+      write_cursor = WriteQuoteJsonString<Char, uc16>(isolate,
+                                                      write_cursor,
+                                                      content.ToUC16Vector());
+    } else {
+      write_cursor = WriteQuoteJsonString<Char, char>(isolate,
+                                                      write_cursor,
+                                                      content.ToAsciiVector());
+    }
+  }
+  *(write_cursor++) = ']';
+
+  int final_length = static_cast<int>(
+      write_cursor - reinterpret_cast<Char*>(
+          new_string->address() + SeqAsciiString::kHeaderSize));
+  isolate->heap()->new_space()->
+      template ShrinkStringAtAllocationBoundary<StringType>(
+          new_string, final_length);
+  return new_string;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSArray, array, args[0]);
+
+  if (!array->HasFastElements()) return isolate->heap()->undefined_value();
+  FixedArray* elements = FixedArray::cast(array->elements());
+  int n = elements->length();
+  bool ascii = true;
+  int total_length = 0;
+
+  for (int i = 0; i < n; i++) {
+    Object* elt = elements->get(i);
+    if (!elt->IsString()) return isolate->heap()->undefined_value();
+    String* element = String::cast(elt);
+    if (!element->IsFlat()) return isolate->heap()->undefined_value();
+    total_length += element->length();
+    if (ascii && element->IsTwoByteRepresentation()) {
+      ascii = false;
+    }
+  }
+
+  int worst_case_length =
+      kSpaceForBrackets + n * kSpaceForQuotesAndComma
+      + total_length * kJsonQuoteWorstCaseBlowup;
+
+  if (worst_case_length > kMaxGuaranteedNewSpaceString) {
+    return isolate->heap()->undefined_value();
+  }
+
+  if (ascii) {
+    return QuoteJsonStringArray<char, SeqAsciiString>(isolate,
+                                                      elements,
+                                                      worst_case_length);
+  } else {
+    return QuoteJsonStringArray<uc16, SeqTwoByteString>(isolate,
+                                                        elements,
+                                                        worst_case_length);
+  }
+}
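
Runtime_QuoteJSONStringArray above only takes the fast path when a conservative size estimate fits in the guaranteed new-space allocation; otherwise it returns undefined and the JavaScript caller falls back to quoting element by element. A small sketch of that preflight bound, reusing the same constants (2 bytes for the brackets, 3 per element for quotes plus comma, and a worst-case blowup of 6 per character); the helper name is illustrative:

  #include <cassert>
  #include <cstdint>
  #include <string>
  #include <vector>

  // Upper bound on the length of ["a","b",...] built from the given elements.
  static int64_t WorstCaseJoinedLength(const std::vector<std::string>& elems) {
    const int64_t kSpaceForBrackets = 2;        // '[' and ']'
    const int64_t kSpaceForQuotesAndComma = 3;  // '"', '"' and ',' per element
    const int64_t kWorstCaseBlowup = 6;         // "\u00XX" per escaped character
    int64_t total_chars = 0;
    for (const std::string& e : elems) total_chars += e.size();
    return kSpaceForBrackets +
           static_cast<int64_t>(elems.size()) * kSpaceForQuotesAndComma +
           total_chars * kWorstCaseBlowup;
  }

  int main() {
    // Three one-character strings: 2 + 3*3 + 3*6 = 29 bytes always suffice.
    assert(WorstCaseJoinedLength({"a", "b", "c"}) == 29);
    return 0;
  }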
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
   NoHandleAllocation ha;
 
   CONVERT_CHECKED(String, s, args[0]);
-  CONVERT_SMI_CHECKED(radix, args[1]);
+  CONVERT_SMI_ARG_CHECKED(radix, 1);
 
   s->TryFlatten();
 
@@ -5523,28 +6178,6 @@
 }
 
 
-template <typename SubjectChar, typename PatternChar>
-void FindStringIndices(Isolate* isolate,
-                       Vector<const SubjectChar> subject,
-                       Vector<const PatternChar> pattern,
-                       ZoneList<int>* indices,
-                       unsigned int limit) {
-  ASSERT(limit > 0);
-  // Collect indices of pattern in subject, and the end-of-string index.
-  // Stop after finding at most limit values.
-  StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
-  int pattern_length = pattern.length();
-  int index = 0;
-  while (limit > 0) {
-    index = search.Search(subject, index);
-    if (index < 0) return;
-    indices->Add(index);
-    index += pattern_length;
-    limit--;
-  }
-}
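
The FindStringIndices template removed here is replaced at the call site by FindStringIndicesDispatch (used in Runtime_StringSplit below), which selects the ASCII/two-byte combination in one place instead of at every caller. Its contract is simple: collect the start indices of at most limit non-overlapping occurrences of the pattern. A standalone sketch of that contract on std::string (illustrative, not the V8 helper):

  #include <cstdio>
  #include <string>
  #include <vector>

  // Collect the start indices of up to `limit` non-overlapping occurrences of
  // `pattern` in `subject`, advancing past each match.  Assumes a non-empty
  // pattern, like the runtime function below.
  static std::vector<size_t> FindIndices(const std::string& subject,
                                         const std::string& pattern,
                                         size_t limit) {
    std::vector<size_t> indices;
    size_t pos = 0;
    while (indices.size() < limit) {
      pos = subject.find(pattern, pos);
      if (pos == std::string::npos) break;
      indices.push_back(pos);
      pos += pattern.size();
    }
    return indices;
  }

  int main() {
    for (size_t i : FindIndices("a,b,,c", ",", 10)) std::printf("%zu ", i);
    std::printf("\n");  // prints: 1 3 4
    return 0;
  }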
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
   ASSERT(args.length() == 3);
   HandleScope handle_scope(isolate);
@@ -5556,6 +6189,19 @@
   int pattern_length = pattern->length();
   RUNTIME_ASSERT(pattern_length > 0);
 
+  if (limit == 0xffffffffu) {
+    Handle<Object> cached_answer(StringSplitCache::Lookup(
+        isolate->heap()->string_split_cache(),
+        *subject,
+        *pattern));
+    if (*cached_answer != Smi::FromInt(0)) {
+      Handle<JSArray> result =
+          isolate->factory()->NewJSArrayWithElements(
+              Handle<FixedArray>::cast(cached_answer));
+      return *result;
+    }
+  }
+
   // The limit can be very large (0xffffffffu), but since the pattern
   // isn't empty, we can never create more parts than ~half the length
   // of the subject.
@@ -5564,48 +6210,14 @@
 
   static const int kMaxInitialListCapacity = 16;
 
-  ZoneScope scope(DELETE_ON_EXIT);
+  ZoneScope scope(isolate, DELETE_ON_EXIT);
 
   // Find (up to limit) indices of separator and end-of-string in subject
   int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
   ZoneList<int> indices(initial_capacity);
   if (!pattern->IsFlat()) FlattenString(pattern);
 
-  // No allocation block.
-  {
-    AssertNoAllocation nogc;
-    if (subject->IsAsciiRepresentation()) {
-      Vector<const char> subject_vector = subject->ToAsciiVector();
-      if (pattern->IsAsciiRepresentation()) {
-        FindStringIndices(isolate,
-                          subject_vector,
-                          pattern->ToAsciiVector(),
-                          &indices,
-                          limit);
-      } else {
-        FindStringIndices(isolate,
-                          subject_vector,
-                          pattern->ToUC16Vector(),
-                          &indices,
-                          limit);
-      }
-    } else {
-      Vector<const uc16> subject_vector = subject->ToUC16Vector();
-      if (pattern->IsAsciiRepresentation()) {
-        FindStringIndices(isolate,
-                          subject_vector,
-                          pattern->ToAsciiVector(),
-                          &indices,
-                          limit);
-      } else {
-        FindStringIndices(isolate,
-                          subject_vector,
-                          pattern->ToUC16Vector(),
-                          &indices,
-                          limit);
-      }
-    }
-  }
+  FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit);
 
   if (static_cast<uint32_t>(indices.length()) < limit) {
     indices.Add(subject_length);
@@ -5632,11 +6244,21 @@
     HandleScope local_loop_handle;
     int part_end = indices.at(i);
     Handle<String> substring =
-        isolate->factory()->NewSubString(subject, part_start, part_end);
+        isolate->factory()->NewProperSubString(subject, part_start, part_end);
     elements->set(i, *substring);
     part_start = part_end + pattern_length;
   }
 
+  if (limit == 0xffffffffu) {
+    if (result->HasFastElements()) {
+      StringSplitCache::Enter(isolate->heap(),
+                              isolate->heap()->string_split_cache(),
+                              *subject,
+                              *pattern,
+                              *elements);
+    }
+  }
+
   return *result;
 }
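
The two new blocks in Runtime_StringSplit above memoize unlimited splits: when the limit is the 0xffffffff sentinel, the (subject, pattern) pair is looked up in string_split_cache first, and a freshly computed element array is entered into the cache on the way out. A toy analogue of that caching shape (V8 keys a fixed-size table by string objects; the map and key encoding here are stand-ins):

  #include <cstdio>
  #include <string>
  #include <unordered_map>
  #include <vector>

  // Full split results keyed by (subject, separator).  Assumes a non-empty
  // separator and no embedded NUL characters in the inputs.
  static std::unordered_map<std::string, std::vector<std::string>> split_cache;

  static std::vector<std::string> SplitAll(const std::string& subject,
                                           const std::string& sep) {
    std::string key = subject + '\0' + sep;
    auto it = split_cache.find(key);
    if (it != split_cache.end()) return it->second;  // cache hit

    std::vector<std::string> parts;
    size_t start = 0;
    for (size_t pos; (pos = subject.find(sep, start)) != std::string::npos;
         start = pos + sep.size()) {
      parts.push_back(subject.substr(start, pos - start));
    }
    parts.push_back(subject.substr(start));
    split_cache.emplace(key, parts);
    return parts;
  }

  int main() {
    SplitAll("a,b,c", ",");                               // computed and cached
    std::printf("%zu\n", SplitAll("a,b,c", ",").size());  // served from cache: 3
    return 0;
  }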
 
@@ -5649,7 +6271,7 @@
                                        const char* chars,
                                        FixedArray* elements,
                                        int length) {
-  AssertNoAllocation nogc;
+  AssertNoAllocation no_gc;
   FixedArray* ascii_cache = heap->single_character_string_cache();
   Object* undefined = heap->undefined_value();
   int i;
@@ -5682,36 +6304,39 @@
   CONVERT_ARG_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
 
-  s->TryFlatten();
+  s = FlattenGetString(s);
   const int length = static_cast<int>(Min<uint32_t>(s->length(), limit));
 
   Handle<FixedArray> elements;
+  int position = 0;
   if (s->IsFlat() && s->IsAsciiRepresentation()) {
+    // Try using cached chars where possible.
     Object* obj;
     { MaybeObject* maybe_obj =
           isolate->heap()->AllocateUninitializedFixedArray(length);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
-
-    Vector<const char> chars = s->ToAsciiVector();
-    // Note, this will initialize all elements (not only the prefix)
-    // to prevent GC from seeing partially initialized array.
-    int num_copied_from_cache = CopyCachedAsciiCharsToArray(isolate->heap(),
-                                                            chars.start(),
-                                                            *elements,
-                                                            length);
-
-    for (int i = num_copied_from_cache; i < length; ++i) {
-      Handle<Object> str = LookupSingleCharacterStringFromCode(chars[i]);
-      elements->set(i, *str);
+    String::FlatContent content = s->GetFlatContent();
+    if (content.IsAscii()) {
+      Vector<const char> chars = content.ToAsciiVector();
+      // Note, this will initialize all elements (not only the prefix)
+      // to prevent GC from seeing partially initialized array.
+      position = CopyCachedAsciiCharsToArray(isolate->heap(),
+                                             chars.start(),
+                                             *elements,
+                                             length);
+    } else {
+      MemsetPointer(elements->data_start(),
+                    isolate->heap()->undefined_value(),
+                    length);
     }
   } else {
     elements = isolate->factory()->NewFixedArray(length);
-    for (int i = 0; i < length; ++i) {
-      Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
-      elements->set(i, *str);
-    }
+  }
+  for (int i = position; i < length; ++i) {
+    Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
+    elements->set(i, *str);
   }
 
 #ifdef DEBUG
@@ -5765,7 +6390,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_DOUBLE_CHECKED(number, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(number, 0);
 
   // We do not include 0 so that we don't have to treat +0 / -0 cases.
   if (number > 0 && number <= Smi::kMaxValue) {
@@ -5779,7 +6404,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_DOUBLE_CHECKED(number, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(number, 0);
 
   // We do not include 0 so that we don't have to treat +0 / -0 cases.
   if (number > 0 && number <= Smi::kMaxValue) {
@@ -5807,7 +6432,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_DOUBLE_CHECKED(number, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(number, 0);
 
   // We do not include 0 so that we don't have to treat +0 / -0 cases.
   if (number > 0 && number <= Smi::kMaxValue) {
@@ -5849,8 +6474,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   return isolate->heap()->NumberFromDouble(x + y);
 }
 
@@ -5859,8 +6484,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   return isolate->heap()->NumberFromDouble(x - y);
 }
 
@@ -5869,8 +6494,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   return isolate->heap()->NumberFromDouble(x * y);
 }
 
@@ -5879,7 +6504,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->heap()->NumberFromDouble(-x);
 }
 
@@ -5896,8 +6521,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   return isolate->heap()->NumberFromDouble(x / y);
 }
 
@@ -5906,8 +6531,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
 
   x = modulo(x, y);
   // NumberFromDouble may return a Smi instead of a Number object
@@ -5972,7 +6597,7 @@
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
-  int array_length = Smi::cast(args[1])->value();
+  int array_length = args.smi_at(1);
   CONVERT_CHECKED(String, special, args[2]);
 
   // This assumption is used by the slice encoding in one or two smis.
@@ -6085,7 +6710,7 @@
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
-  int array_length = Smi::cast(args[1])->value();
+  int array_length = args.smi_at(1);
   CONVERT_CHECKED(String, separator, args[2]);
 
   if (!array->HasFastElements()) {
@@ -6160,6 +6785,140 @@
   return answer;
 }
 
+template <typename Char>
+static void JoinSparseArrayWithSeparator(FixedArray* elements,
+                                         int elements_length,
+                                         uint32_t array_length,
+                                         String* separator,
+                                         Vector<Char> buffer) {
+  int previous_separator_position = 0;
+  int separator_length = separator->length();
+  int cursor = 0;
+  for (int i = 0; i < elements_length; i += 2) {
+    int position = NumberToInt32(elements->get(i));
+    String* string = String::cast(elements->get(i + 1));
+    int string_length = string->length();
+    if (string->length() > 0) {
+      while (previous_separator_position < position) {
+        String::WriteToFlat<Char>(separator, &buffer[cursor],
+                                  0, separator_length);
+        cursor += separator_length;
+        previous_separator_position++;
+      }
+      String::WriteToFlat<Char>(string, &buffer[cursor],
+                                0, string_length);
+      cursor += string->length();
+    }
+  }
+  if (separator_length > 0) {
+    // Array length must be representable as a signed 32-bit number,
+    // otherwise the total string length would have been too large.
+    ASSERT(array_length <= 0x7fffffff);  // Is int32_t.
+    int last_array_index = static_cast<int>(array_length - 1);
+    while (previous_separator_position < last_array_index) {
+      String::WriteToFlat<Char>(separator, &buffer[cursor],
+                                0, separator_length);
+      cursor += separator_length;
+      previous_separator_position++;
+    }
+  }
+  ASSERT(cursor <= buffer.length());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+  CONVERT_CHECKED(JSArray, elements_array, args[0]);
+  RUNTIME_ASSERT(elements_array->HasFastElements());
+  CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
+  CONVERT_CHECKED(String, separator, args[2]);
+  // elements_array is a fast-mode JSArray of alternating positions
+  // (increasing order) and strings.
+  // array_length is the length of the original array (used for separators);
+  // separator is the string to put between elements. Assumed to be non-empty.
+
+  // Find total length of join result.
+  int string_length = 0;
+  bool is_ascii = separator->IsAsciiRepresentation();
+  int max_string_length;
+  if (is_ascii) {
+    max_string_length = SeqAsciiString::kMaxLength;
+  } else {
+    max_string_length = SeqTwoByteString::kMaxLength;
+  }
+  bool overflow = false;
+  CONVERT_NUMBER_CHECKED(int, elements_length,
+                         Int32, elements_array->length());
+  RUNTIME_ASSERT((elements_length & 1) == 0);  // Even length.
+  FixedArray* elements = FixedArray::cast(elements_array->elements());
+  for (int i = 0; i < elements_length; i += 2) {
+    RUNTIME_ASSERT(elements->get(i)->IsNumber());
+    CONVERT_CHECKED(String, string, elements->get(i + 1));
+    int length = string->length();
+    if (is_ascii && !string->IsAsciiRepresentation()) {
+      is_ascii = false;
+      max_string_length = SeqTwoByteString::kMaxLength;
+    }
+    if (length > max_string_length ||
+        max_string_length - length < string_length) {
+      overflow = true;
+      break;
+    }
+    string_length += length;
+  }
+  int separator_length = separator->length();
+  if (!overflow && separator_length > 0) {
+    if (array_length <= 0x7fffffffu) {
+      int separator_count = static_cast<int>(array_length) - 1;
+      int remaining_length = max_string_length - string_length;
+      if ((remaining_length / separator_length) >= separator_count) {
+        string_length += separator_length * (array_length - 1);
+      } else {
+        // Not room for the separators within the maximal string length.
+        overflow = true;
+      }
+    } else {
+      // Nonempty separator and at least 2^31-1 separators necessary
+      // means that the string is too large to create.
+      STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
+      overflow = true;
+    }
+  }
+  if (overflow) {
+    // Throw OutOfMemory exception for creating too large a string.
+    V8::FatalProcessOutOfMemory("Array join result too large.");
+  }
+
+  if (is_ascii) {
+    MaybeObject* result_allocation =
+        isolate->heap()->AllocateRawAsciiString(string_length);
+    if (result_allocation->IsFailure()) return result_allocation;
+    SeqAsciiString* result_string =
+        SeqAsciiString::cast(result_allocation->ToObjectUnchecked());
+    JoinSparseArrayWithSeparator<char>(elements,
+                                       elements_length,
+                                       array_length,
+                                       separator,
+                                       Vector<char>(result_string->GetChars(),
+                                                    string_length));
+    return result_string;
+  } else {
+    MaybeObject* result_allocation =
+        isolate->heap()->AllocateRawTwoByteString(string_length);
+    if (result_allocation->IsFailure()) return result_allocation;
+    SeqTwoByteString* result_string =
+        SeqTwoByteString::cast(result_allocation->ToObjectUnchecked());
+    JoinSparseArrayWithSeparator<uc16>(elements,
+                                       elements_length,
+                                       array_length,
+                                       separator,
+                                       Vector<uc16>(result_string->GetChars(),
+                                                    string_length));
+    return result_string;
+  }
+}
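
Runtime_SparseJoinWithSeparator is the sparse-array fast path for Array.prototype.join: the caller passes only the interesting elements as alternating (position, string) pairs plus the original array length, and JoinSparseArrayWithSeparator writes each string at its slot and fills every remaining gap with the separator, for array_length - 1 separators in total. The same filling logic on std::string, as an illustrative sketch:

  #include <cstdint>
  #include <cstdio>
  #include <string>
  #include <utility>
  #include <vector>

  // Join a sparse array given only its interesting entries, supplied as
  // (index, value) pairs in increasing index order, putting `separator`
  // between every pair of adjacent slots of the original array.
  static std::string SparseJoin(
      const std::vector<std::pair<uint32_t, std::string> >& entries,
      uint32_t array_length,
      const std::string& separator) {
    std::string result;
    uint32_t previous_separator_position = 0;
    for (const std::pair<uint32_t, std::string>& entry : entries) {
      // Emit one separator for every slot skipped before this entry.
      while (previous_separator_position < entry.first) {
        result += separator;
        ++previous_separator_position;
      }
      result += entry.second;
    }
    // Trailing holes still contribute separators: array_length - 1 in total.
    while (previous_separator_position + 1 < array_length) {
      result += separator;
      ++previous_separator_position;
    }
    return result;
  }

  int main() {
    // Length 5 with entries at indices 1 and 3: [ ,"b", ,"d", ] joins to ",b,,d,"
    std::printf("%s\n", SparseJoin({{1, "b"}, {3, "d"}}, 5, ",").c_str());
    return 0;
  }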
+
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
   NoHandleAllocation ha;
@@ -6234,8 +6993,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
   if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
   if (x == y) return Smi::FromInt(EQUAL);
@@ -6271,8 +7030,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   if (isnan(x) || isnan(y)) return args[2];
   if (x == y) return Smi::FromInt(EQUAL);
   if (isless(x, y)) return Smi::FromInt(LESS);
@@ -6295,50 +7054,69 @@
   // If the integers are equal so are the string representations.
   if (x_value == y_value) return Smi::FromInt(EQUAL);
 
-  // If one of the integers are zero the normal integer order is the
+  // If one of the integers is zero the normal integer order is the
   // same as the lexicographic order of the string representations.
-  if (x_value == 0 || y_value == 0) return Smi::FromInt(x_value - y_value);
+  if (x_value == 0 || y_value == 0)
+    return Smi::FromInt(x_value < y_value ? LESS : GREATER);
 
   // If only one of the integers is negative the negative number is
   // smallest because the char code of '-' is less than the char code
   // of any digit.  Otherwise, we make both values positive.
+
+  // Use unsigned values otherwise the logic is incorrect for -MIN_INT on
+  // architectures using 32-bit Smis.
+  uint32_t x_scaled = x_value;
+  uint32_t y_scaled = y_value;
   if (x_value < 0 || y_value < 0) {
     if (y_value >= 0) return Smi::FromInt(LESS);
     if (x_value >= 0) return Smi::FromInt(GREATER);
-    x_value = -x_value;
-    y_value = -y_value;
+    x_scaled = -x_value;
+    y_scaled = -y_value;
   }
 
-  // Arrays for the individual characters of the two Smis.  Smis are
-  // 31 bit integers and 10 decimal digits are therefore enough.
-  // TODO(isolates): maybe we should simply allocate 20 bytes on the stack.
-  int* x_elms = isolate->runtime_state()->smi_lexicographic_compare_x_elms();
-  int* y_elms = isolate->runtime_state()->smi_lexicographic_compare_y_elms();
+  static const uint32_t kPowersOf10[] = {
+    1, 10, 100, 1000, 10*1000, 100*1000,
+    1000*1000, 10*1000*1000, 100*1000*1000,
+    1000*1000*1000
+  };
 
+  // If the integers have the same number of decimal digits they can be
+  // compared directly as the numeric order is the same as the
+  // lexicographic order.  If one integer has fewer digits, it is scaled
+  // by some power of 10 to have the same number of digits as the longer
+  // integer.  If the scaled integers are equal it means the shorter
+  // integer comes first in the lexicographic order.
 
-  // Convert the integers to arrays of their decimal digits.
-  int x_index = 0;
-  int y_index = 0;
-  while (x_value > 0) {
-    x_elms[x_index++] = x_value % 10;
-    x_value /= 10;
-  }
-  while (y_value > 0) {
-    y_elms[y_index++] = y_value % 10;
-    y_value /= 10;
+  // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+  int x_log2 = IntegerLog2(x_scaled);
+  int x_log10 = ((x_log2 + 1) * 1233) >> 12;
+  x_log10 -= x_scaled < kPowersOf10[x_log10];
+
+  int y_log2 = IntegerLog2(y_scaled);
+  int y_log10 = ((y_log2 + 1) * 1233) >> 12;
+  y_log10 -= y_scaled < kPowersOf10[y_log10];
+
+  int tie = EQUAL;
+
+  if (x_log10 < y_log10) {
+    // X has fewer digits.  We would like to simply scale up X but that
+    // might overflow, e.g. when comparing 9 with 1_000_000_000, 9 would
+    // be scaled up to 9_000_000_000. So we scale up by the next
+    // smallest power and scale down Y to drop one digit. It is OK to
+    // drop one digit from the longer integer since the final digit is
+    // past the length of the shorter integer.
+    x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
+    y_scaled /= 10;
+    tie = LESS;
+  } else if (y_log10 < x_log10) {
+    y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
+    x_scaled /= 10;
+    tie = GREATER;
   }
 
-  // Loop through the arrays of decimal digits finding the first place
-  // where they differ.
-  while (--x_index >= 0 && --y_index >= 0) {
-    int diff = x_elms[x_index] - y_elms[y_index];
-    if (diff != 0) return Smi::FromInt(diff);
-  }
-
-  // If one array is a suffix of the other array, the longest array is
-  // the representation of the largest of the Smis in the
-  // lexicographic ordering.
-  return Smi::FromInt(x_index - y_index);
+  if (x_scaled < y_scaled) return Smi::FromInt(LESS);
+  if (x_scaled > y_scaled) return Smi::FromInt(GREATER);
+  return Smi::FromInt(tie);
 }
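
The rewritten SmiLexicographicCompare never materializes the digit arrays the old code built: it takes the decimal digit count of each magnitude (IntegerLog2 plus the *1233 >> 12 approximation of log10, corrected against the powers-of-ten table), scales the shorter number up and the longer one down by one digit so both have the same width, compares once, and breaks ties in favor of the shorter representation, since it is a prefix of the longer one. The same idea as a standalone comparison of two non-negative integers by their decimal-string order (digit counting done naively here for clarity):

  #include <cassert>
  #include <cstdint>
  #include <string>

  // Compare x and y by the ordering of their decimal string representations
  // without building the strings.  Returns <0, 0 or >0 like strcmp.
  static int LexCompare(uint32_t x, uint32_t y) {
    static const uint32_t kPow10[] = {1, 10, 100, 1000, 10000, 100000,
                                      1000000, 10000000, 100000000, 1000000000};
    int dx = 1, dy = 1;
    for (uint32_t v = x; v >= 10; v /= 10) ++dx;  // digit counts
    for (uint32_t v = y; v >= 10; v /= 10) ++dy;
    int tie = 0;
    if (dx < dy) {
      // Scale x up to dy - 1 digits and drop y's last digit; if the scaled
      // values are equal, x is a prefix of y and sorts first.
      x *= kPow10[dy - dx - 1];
      y /= 10;
      tie = -1;
    } else if (dy < dx) {
      y *= kPow10[dx - dy - 1];
      x /= 10;
      tie = 1;
    }
    if (x < y) return -1;
    if (x > y) return 1;
    return tie;
  }

  int main() {
    assert(LexCompare(10, 9) < 0);    // "10" < "9"
    assert(LexCompare(1, 10) < 0);    // "1" is a prefix of "10"
    assert(LexCompare(25, 3) < 0);    // "25" < "3"
    assert(LexCompare(123, 123) == 0);
    assert((std::to_string(25) < std::to_string(3)) == (LexCompare(25, 3) < 0));
    return 0;
  }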
 
 
@@ -6374,22 +7152,24 @@
     equal_prefix_result = Smi::FromInt(LESS);
   }
   int r;
-  if (x->IsAsciiRepresentation()) {
-    Vector<const char> x_chars = x->ToAsciiVector();
-    if (y->IsAsciiRepresentation()) {
-      Vector<const char> y_chars = y->ToAsciiVector();
+  String::FlatContent x_content = x->GetFlatContent();
+  String::FlatContent y_content = y->GetFlatContent();
+  if (x_content.IsAscii()) {
+    Vector<const char> x_chars = x_content.ToAsciiVector();
+    if (y_content.IsAscii()) {
+      Vector<const char> y_chars = y_content.ToAsciiVector();
       r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
     } else {
-      Vector<const uc16> y_chars = y->ToUC16Vector();
+      Vector<const uc16> y_chars = y_content.ToUC16Vector();
       r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
     }
   } else {
-    Vector<const uc16> x_chars = x->ToUC16Vector();
-    if (y->IsAsciiRepresentation()) {
-      Vector<const char> y_chars = y->ToAsciiVector();
+    Vector<const uc16> x_chars = x_content.ToUC16Vector();
+    if (y_content.IsAscii()) {
+      Vector<const char> y_chars = y_content.ToAsciiVector();
       r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
     } else {
-      Vector<const uc16> y_chars = y->ToUC16Vector();
+      Vector<const uc16> y_chars = y_content.ToUC16Vector();
       r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
     }
   }
@@ -6445,7 +7225,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_acos()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
 }
 
@@ -6455,7 +7235,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_asin()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
 }
 
@@ -6465,7 +7245,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_atan()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
 }
 
@@ -6478,8 +7258,8 @@
   ASSERT(args.length() == 2);
   isolate->counters()->math_atan2()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   double result;
   if (isinf(x) && isinf(y)) {
     // Make sure that the result in case of two infinite arguments
@@ -6501,7 +7281,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_ceil()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->heap()->NumberFromDouble(ceiling(x));
 }
 
@@ -6511,7 +7291,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_cos()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
 }
 
@@ -6521,7 +7301,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_exp()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
 }
 
@@ -6531,7 +7311,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_floor()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->heap()->NumberFromDouble(floor(x));
 }
 
@@ -6541,7 +7321,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_log()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
 }
 
@@ -6551,16 +7331,16 @@
   ASSERT(args.length() == 2);
   isolate->counters()->math_pow()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
 
   // If the second argument is a smi, it is much faster to call the
   // custom powi() function than the generic pow().
   if (args[1]->IsSmi()) {
-    int y = Smi::cast(args[1])->value();
+    int y = args.smi_at(1);
     return isolate->heap()->NumberFromDouble(power_double_int(x, y));
   }
 
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
 }
 
@@ -6569,8 +7349,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   if (y == 0) {
     return Smi::FromInt(1);
   } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
@@ -6598,9 +7378,16 @@
   int exponent = number->get_exponent();
   int sign = number->get_sign();
 
-  // We compare with kSmiValueSize - 3 because (2^30 - 0.1) has exponent 29 and
-  // should be rounded to 2^30, which is not smi.
-  if (!sign && exponent <= kSmiValueSize - 3) {
+  if (exponent < -1) {
+    // Number in range ]-0.5..0.5[. These always round to +/-zero.
+    if (sign) return isolate->heap()->minus_zero_value();
+    return Smi::FromInt(0);
+  }
+
+  // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
+  // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
+  // argument holds for 32-bit smis).
+  if (!sign && exponent < kSmiValueSize - 2) {
     return Smi::FromInt(static_cast<int>(value + 0.5));
   }
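
The new early return for exponents below -1 handles every value in the open interval (-0.5, 0.5) before the add-0.5-and-truncate shortcut runs. One standard reason to treat that range separately, assuming ordinary IEEE-754 double rounding, is that the shortcut mis-rounds the largest double below 0.5:

  #include <cassert>
  #include <cmath>

  int main() {
    // The largest double strictly less than 0.5.  Math.round must map it to 0,
    // but adding 0.5 rounds the sum up to exactly 1.0 (ties-to-even), so the
    // truncate-after-adding shortcut would answer 1.
    double just_below_half = std::nextafter(0.5, 0.0);  // 0.49999999999999994
    double sum = just_below_half + 0.5;
    assert(just_below_half < 0.5);
    assert(sum == 1.0);
    assert(static_cast<int>(sum) == 1);  // the wrong rounding result
    return 0;
  }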
 
@@ -6622,7 +7409,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_sin()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
 }
 
@@ -6632,7 +7419,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_sqrt()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->heap()->AllocateHeapNumber(sqrt(x));
 }
 
@@ -6642,7 +7429,7 @@
   ASSERT(args.length() == 1);
   isolate->counters()->math_tan()->Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
 }
 
@@ -6696,9 +7483,9 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_SMI_CHECKED(year, args[0]);
-  CONVERT_SMI_CHECKED(month, args[1]);
-  CONVERT_SMI_CHECKED(date, args[2]);
+  CONVERT_SMI_ARG_CHECKED(year, 0);
+  CONVERT_SMI_ARG_CHECKED(month, 1);
+  CONVERT_SMI_ARG_CHECKED(date, 2);
 
   return Smi::FromInt(MakeDay(year, month, date));
 }
@@ -6995,7 +7782,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_CHECKED(t, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(t, 0);
   CONVERT_CHECKED(JSArray, res_array, args[1]);
 
   int year, month, day;
@@ -7015,12 +7802,109 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+
+  Handle<JSFunction> callee = args.at<JSFunction>(0);
+  Object** parameters = reinterpret_cast<Object**>(args[1]);
+  const int argument_count = Smi::cast(args[2])->value();
+
+  Handle<JSObject> result =
+      isolate->factory()->NewArgumentsObject(callee, argument_count);
+  // Allocate the elements if needed.
+  int parameter_count = callee->shared()->formal_parameter_count();
+  if (argument_count > 0) {
+    if (parameter_count > 0) {
+      int mapped_count = Min(argument_count, parameter_count);
+      Handle<FixedArray> parameter_map =
+          isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
+      parameter_map->set_map(
+          isolate->heap()->non_strict_arguments_elements_map());
+
+      Handle<Map> old_map(result->map());
+      Handle<Map> new_map =
+          isolate->factory()->CopyMapDropTransitions(old_map);
+      new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+
+      result->set_map(*new_map);
+      result->set_elements(*parameter_map);
+
+      // Store the context and the arguments array at the beginning of the
+      // parameter map.
+      Handle<Context> context(isolate->context());
+      Handle<FixedArray> arguments =
+          isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
+      parameter_map->set(0, *context);
+      parameter_map->set(1, *arguments);
+
+      // Loop over the actual parameters backwards.
+      int index = argument_count - 1;
+      while (index >= mapped_count) {
+        // These go directly in the arguments array and have no
+        // corresponding slot in the parameter map.
+        arguments->set(index, *(parameters - index - 1));
+        --index;
+      }
+
+      ScopeInfo<> scope_info(callee->shared()->scope_info());
+      while (index >= 0) {
+        // Detect duplicate names to the right in the parameter list.
+        Handle<String> name = scope_info.parameter_name(index);
+        int context_slot_count = scope_info.number_of_context_slots();
+        bool duplicate = false;
+        for (int j = index + 1; j < parameter_count; ++j) {
+          if (scope_info.parameter_name(j).is_identical_to(name)) {
+            duplicate = true;
+            break;
+          }
+        }
+
+        if (duplicate) {
+          // This goes directly in the arguments array with a hole in the
+          // parameter map.
+          arguments->set(index, *(parameters - index - 1));
+          parameter_map->set_the_hole(index + 2);
+        } else {
+          // The context index goes in the parameter map with a hole in the
+          // arguments array.
+          int context_index = -1;
+          for (int j = Context::MIN_CONTEXT_SLOTS;
+               j < context_slot_count;
+               ++j) {
+            if (scope_info.context_slot_name(j).is_identical_to(name)) {
+              context_index = j;
+              break;
+            }
+          }
+          ASSERT(context_index >= 0);
+          arguments->set_the_hole(index);
+          parameter_map->set(index + 2, Smi::FromInt(context_index));
+        }
+
+        --index;
+      }
+    } else {
+      // If there is no aliasing, the arguments object elements are not
+      // special in any way.
+      Handle<FixedArray> elements =
+          isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
+      result->set_elements(*elements);
+      for (int i = 0; i < argument_count; ++i) {
+        elements->set(i, *(parameters - i - 1));
+      }
+    }
+  }
+  return *result;
+}
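
The rewritten Runtime_NewArgumentsFast builds the aliased (non-strict) arguments object: the first min(argument_count, parameter_count) slots are routed through the parameter map to the corresponding context slots, so arguments[i] and the i-th named parameter stay in sync, while duplicated parameter names and the remaining arguments live in the plain arguments array. A conceptual model of that aliasing, not V8's object layout (class, fields and indices below are invented for illustration):

  #include <cstdio>
  #include <vector>

  // Slots with a non-negative mapping entry read and write a shared context
  // slot; slots mapped to -1 use the local backing store.
  struct ArgumentsObject {
    std::vector<int>* context_slots;  // shared with the "function"
    std::vector<int> mapping;         // context slot index, or -1 if unmapped
    std::vector<int> backing;         // values for unmapped slots

    int Get(int i) const {
      return mapping[i] >= 0 ? (*context_slots)[mapping[i]] : backing[i];
    }
    void Set(int i, int value) {
      if (mapping[i] >= 0) (*context_slots)[mapping[i]] = value;
      else backing[i] = value;
    }
  };

  int main() {
    // function f(a, b) called as f(1, 2, 3): two mapped slots, one unmapped.
    std::vector<int> context = {1, 2};  // pretend context slots for a and b
    ArgumentsObject args = {&context, {0, 1, -1}, {0, 0, 3}};

    args.Set(0, 42);  // writing arguments[0] also changes parameter 'a'
    std::printf("%d %d %d\n", context[0], args.Get(1), args.Get(2));  // 42 2 3
    return 0;
  }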
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
   JSFunction* callee = JSFunction::cast(args[0]);
   Object** parameters = reinterpret_cast<Object**>(args[1]);
-  const int length = Smi::cast(args[2])->value();
+  const int length = args.smi_at(2);
 
   Object* result;
   { MaybeObject* maybe_result =
@@ -7057,10 +7941,8 @@
   CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
   CONVERT_BOOLEAN_CHECKED(pretenure, args[2]);
 
-  // Allocate global closures in old space and allocate local closures
-  // in new space. Additionally pretenure closures that are assigned
+  // The caller ensures that we pretenure closures that are assigned
   // directly to properties.
-  pretenure = pretenure || (context->global_context() == *context);
   PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
   Handle<JSFunction> result =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -7070,8 +7952,8 @@
 }
 
 
-static SmartPointer<Object**> GetNonBoundArguments(int bound_argc,
-                                                   int* total_argc) {
+static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
+                                                        int* total_argc) {
   // Find frame containing arguments passed to the caller.
   JavaScriptFrameIterator it;
   JavaScriptFrame* frame = it.frame();
@@ -7087,7 +7969,7 @@
                                             &args_slots);
 
     *total_argc = bound_argc + args_count;
-    SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = args_slots[i].GetValue();
       param_data[bound_argc + i] = val.location();
@@ -7099,7 +7981,7 @@
     int args_count = frame->ComputeParametersCount();
 
     *total_argc = bound_argc + args_count;
-    SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = Handle<Object>(frame->GetParameter(i));
       param_data[bound_argc + i] = val.location();
@@ -7126,7 +8008,7 @@
   }
 
   int total_argc = 0;
-  SmartPointer<Object**> param_data =
+  SmartArrayPointer<Object**> param_data =
       GetNonBoundArguments(bound_argc, &total_argc);
   for (int i = 0; i < bound_argc; i++) {
     Handle<Object> val = Handle<Object>(bound_args->get(i));
@@ -7180,7 +8062,7 @@
 
   // If function should not have prototype, construction is not allowed. In this
   // case generated code bailouts here, since function has no initial_map.
-  if (!function->should_have_prototype()) {
+  if (!function->should_have_prototype() && !function->shared()->bound()) {
     Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
     Handle<Object> type_error =
         isolate->factory()->NewTypeError("not_constructor", arguments);
@@ -7267,15 +8149,9 @@
   }
 #endif
 
-  // Compile the target function.  Here we compile using CompileLazyInLoop in
-  // order to get the optimized version.  This helps code like delta-blue
-  // that calls performance-critical routines through constructors.  A
-  // constructor call doesn't use a CallIC, it uses a LoadIC followed by a
-  // direct call.  Since the in-loop tracking takes place through CallICs
-  // this means that things called through constructors are never known to
-  // be in loops.  We compile them as if they are in loops here just in case.
+  // Compile the target function.
   ASSERT(!function->is_compiled());
-  if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
+  if (!CompileLazy(function, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
 
@@ -7289,16 +8165,25 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   Handle<JSFunction> function = args.at<JSFunction>(0);
+
+  // If the function is not compiled ignore the lazy
+  // recompilation. This can happen if the debugger is activated and
+  // the function is returned to the not compiled state.
+  if (!function->shared()->is_compiled()) {
+    function->ReplaceCode(function->shared()->code());
+    return function->code();
+  }
+
   // If the function is not optimizable or debugger is active continue using the
   // code from the full compiler.
   if (!function->shared()->code()->optimizable() ||
-      isolate->debug()->has_break_points()) {
+      isolate->DebuggerHasBreakPoints()) {
     if (FLAG_trace_opt) {
       PrintF("[failed to optimize ");
       function->PrintName();
       PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
           function->shared()->code()->optimizable() ? "T" : "F",
-          isolate->debug()->has_break_points() ? "T" : "F");
+          isolate->DebuggerHasBreakPoints() ? "T" : "F");
     }
     function->ReplaceCode(function->shared()->code());
     return function->code();
@@ -7321,7 +8206,7 @@
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(args[0]->IsSmi());
   Deoptimizer::BailoutType type =
-      static_cast<Deoptimizer::BailoutType>(Smi::cast(args[0])->value());
+      static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   ASSERT(isolate->heap()->IsAllocationAllowed());
   int frames = deoptimizer->output_count();
@@ -7352,11 +8237,8 @@
     }
   }
 
-  isolate->compilation_cache()->MarkForLazyOptimizing(function);
   if (type == Deoptimizer::EAGER) {
     RUNTIME_ASSERT(function->IsOptimized());
-  } else {
-    RUNTIME_ASSERT(!function->IsOptimized());
   }
 
   // Avoid doing too much work when running with --always-opt and keep
@@ -7375,8 +8257,6 @@
     it.Advance();
   }
 
-  // TODO(kasperl): For now, we cannot support removing the optimized
-  // code when we have recursive invocations of the same function.
   if (activations == 0) {
     if (FLAG_trace_deopt) {
       PrintF("[removing optimized code for: ");
@@ -7384,6 +8264,8 @@
       PrintF("]\n");
     }
     function->ReplaceCode(function->shared()->code());
+  } else {
+    Deoptimizer::DeoptimizeFunction(*function);
   }
   return isolate->heap()->undefined_value();
 }
@@ -7408,6 +8290,15 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
+#if defined(USE_SIMULATOR)
+  return isolate->heap()->true_value();
+#else
+  return isolate->heap()->false_value();
+#endif
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -7418,13 +8309,36 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  if (!V8::UseCrankshaft()) {
+    return Smi::FromInt(4);  // 4 == "never".
+  }
+  if (FLAG_always_opt) {
+    return Smi::FromInt(3);  // 3 == "always".
+  }
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
+                                 : Smi::FromInt(2);  // 2 == "no".
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  return Smi::FromInt(function->shared()->opt_count());
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
 
   // We're not prepared to handle a function with arguments object.
-  ASSERT(!function->shared()->scope_info()->HasArgumentsShadow());
+  ASSERT(!function->shared()->uses_arguments());
 
   // We have hit a back edge in an unoptimized frame for a function that was
   // selected for on-stack replacement.  Find the unoptimized code object.
@@ -7532,6 +8446,55 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
+  RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 5);
+  CONVERT_CHECKED(JSReceiver, fun, args[0]);
+  Object* receiver = args[1];
+  CONVERT_CHECKED(JSObject, arguments, args[2]);
+  CONVERT_CHECKED(Smi, shift, args[3]);
+  CONVERT_CHECKED(Smi, arity, args[4]);
+
+  int offset = shift->value();
+  int argc = arity->value();
+  ASSERT(offset >= 0);
+  ASSERT(argc >= 0);
+
+  // If there are too many arguments, allocate argv via malloc.
+  const int argv_small_size = 10;
+  Handle<Object> argv_small_buffer[argv_small_size];
+  SmartArrayPointer<Handle<Object> > argv_large_buffer;
+  Handle<Object>* argv = argv_small_buffer;
+  if (argc > argv_small_size) {
+    argv = new Handle<Object>[argc];
+    if (argv == NULL) return isolate->StackOverflow();
+    argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+  }
+
+  for (int i = 0; i < argc; ++i) {
+     MaybeObject* maybe = arguments->GetElement(offset + i);
+     Object* object;
+     if (!maybe->To<Object>(&object)) return maybe;
+     argv[i] = Handle<Object>(object);
+  }
+
+  bool threw = false;
+  Handle<JSReceiver> hfun(fun);
+  Handle<Object> hreceiver(receiver);
+  Handle<Object> result = Execution::Call(
+      hfun, hreceiver, argc, reinterpret_cast<Object***>(argv), &threw, true);
+
+  if (threw) return Failure::Exception();
+  return *result;
+}
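
Runtime_Apply stages the arguments in a fixed ten-element stack buffer and only allocates argv on the heap, owned by a SmartArrayPointer, when the call site passes more than that. The pattern in isolation (buffer sizes and names here are illustrative; std::unique_ptr plays the SmartArrayPointer role):

  #include <cstdio>
  #include <memory>

  // Stage `count` values into a contiguous buffer before "making the call":
  // a small stack buffer covers the common case, a heap array the rest.
  static long SumStaged(const long* args, int count) {
    const int kSmallSize = 10;
    long small_buffer[kSmallSize];
    std::unique_ptr<long[]> large_buffer;  // owns the heap buffer, if any
    long* buffer = small_buffer;
    if (count > kSmallSize) {
      large_buffer.reset(new long[count]);
      buffer = large_buffer.get();
    }
    long sum = 0;
    for (int i = 0; i < count; ++i) {
      buffer[i] = args[i];  // copy arguments into the staging buffer
      sum += buffer[i];
    }
    return sum;  // any heap buffer is released automatically
  }

  int main() {
    long a[3] = {1, 2, 3};
    std::printf("%ld\n", SumStaged(a, 3));  // 6, served from the stack buffer
    return 0;
  }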
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -7548,7 +8511,7 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewContext) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -7566,50 +8529,97 @@
 }
 
 
-MUST_USE_RESULT static MaybeObject* PushContextHelper(Isolate* isolate,
-                                                      Object* object,
-                                                      bool is_catch_context) {
-  // Convert the object to a proper JavaScript object.
-  Object* js_object = object;
-  if (!js_object->IsJSObject()) {
-    MaybeObject* maybe_js_object = js_object->ToObject();
-    if (!maybe_js_object->ToObject(&js_object)) {
-      if (!Failure::cast(maybe_js_object)->IsInternalError()) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  JSObject* extension_object;
+  if (args[0]->IsJSObject()) {
+    extension_object = JSObject::cast(args[0]);
+  } else {
+    // Convert the object to a proper JavaScript object.
+    MaybeObject* maybe_js_object = args[0]->ToObject();
+    if (!maybe_js_object->To(&extension_object)) {
+      if (Failure::cast(maybe_js_object)->IsInternalError()) {
+        HandleScope scope(isolate);
+        Handle<Object> handle = args.at<Object>(0);
+        Handle<Object> result =
+            isolate->factory()->NewTypeError("with_expression",
+                                             HandleVector(&handle, 1));
+        return isolate->Throw(*result);
+      } else {
         return maybe_js_object;
       }
-      HandleScope scope(isolate);
-      Handle<Object> handle(object, isolate);
-      Handle<Object> result =
-          isolate->factory()->NewTypeError("with_expression",
-                                           HandleVector(&handle, 1));
-      return isolate->Throw(*result);
     }
   }
 
-  Object* result;
-  { MaybeObject* maybe_result = isolate->heap()->AllocateWithContext(
-      isolate->context(), JSObject::cast(js_object), is_catch_context);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+  JSFunction* function;
+  if (args[1]->IsSmi()) {
+    // A smi sentinel indicates a context nested inside global code rather
+    // than some function.  There is a canonical empty function that can be
+    // gotten from the global context.
+    function = isolate->context()->global_context()->closure();
+  } else {
+    function = JSFunction::cast(args[1]);
   }
 
-  Context* context = Context::cast(result);
+  Context* context;
+  MaybeObject* maybe_context =
+      isolate->heap()->AllocateWithContext(function,
+                                           isolate->context(),
+                                           extension_object);
+  if (!maybe_context->To(&context)) return maybe_context;
   isolate->set_context(context);
-
-  return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushContext) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 1);
-  return PushContextHelper(isolate, args[0], false);
+  return context;
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 1);
-  return PushContextHelper(isolate, args[0], true);
+  ASSERT(args.length() == 3);
+  String* name = String::cast(args[0]);
+  Object* thrown_object = args[1];
+  JSFunction* function;
+  if (args[2]->IsSmi()) {
+    // A smi sentinel indicates a context nested inside global code rather
+    // than some function.  There is a canonical empty function that can be
+    // gotten from the global context.
+    function = isolate->context()->global_context()->closure();
+  } else {
+    function = JSFunction::cast(args[2]);
+  }
+  Context* context;
+  MaybeObject* maybe_context =
+      isolate->heap()->AllocateCatchContext(function,
+                                            isolate->context(),
+                                            name,
+                                            thrown_object);
+  if (!maybe_context->To(&context)) return maybe_context;
+  isolate->set_context(context);
+  return context;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  SerializedScopeInfo* scope_info = SerializedScopeInfo::cast(args[0]);
+  JSFunction* function;
+  if (args[1]->IsSmi()) {
+    // A smi sentinel indicates a context nested inside global code rather
+    // than some function.  There is a canonical empty function that can be
+    // gotten from the global context.
+    function = isolate->context()->global_context()->closure();
+  } else {
+    function = JSFunction::cast(args[1]);
+  }
+  Context* context;
+  MaybeObject* maybe_context =
+      isolate->heap()->AllocateBlockContext(function,
+                                            isolate->context(),
+                                            scope_info);
+  if (!maybe_context->To(&context)) return maybe_context;
+  isolate->set_context(context);
+  return context;
 }
 
 
@@ -7623,7 +8633,12 @@
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = FOLLOW_CHAINS;
-  Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
+  BindingFlags binding_flags;
+  Handle<Object> holder = context->Lookup(name,
+                                          flags,
+                                          &index,
+                                          &attributes,
+                                          &binding_flags);
 
   // If the slot was not found the result is true.
   if (holder.is_null()) {
@@ -7644,9 +8659,9 @@
   // index is non-negative.
   Handle<JSObject> object = Handle<JSObject>::cast(holder);
   if (index >= 0) {
-    return object->DeleteElement(index, JSObject::NORMAL_DELETION);
+    return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
   } else {
-    return object->DeleteProperty(*name, JSObject::NORMAL_DELETION);
+    return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
   }
 }
 
@@ -7689,8 +8704,8 @@
 }
 
 
-static JSObject* ComputeReceiverForNonGlobal(Isolate* isolate,
-                                             JSObject* holder) {
+static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
+                                           JSObject* holder) {
   ASSERT(!holder->IsGlobalObject());
   Context* top = isolate->context();
   // Get the context extension function.
@@ -7702,10 +8717,11 @@
   // explicitly via a with-statement.
   Object* constructor = holder->map()->constructor();
   if (constructor != context_extension_function) return holder;
-  // Fall back to using the global object as the receiver if the
-  // property turns out to be a local variable allocated in a context
-  // extension object - introduced via eval.
-  return top->global()->global_receiver();
+  // Fall back to using the global object as the implicit receiver if
+  // the property turns out to be a local variable allocated in a
+  // context extension object - introduced via eval. Implicit global
+  // receivers are indicated with the hole value.
+  return isolate->heap()->the_hole_value();
 }
 
 
@@ -7724,7 +8740,12 @@
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = FOLLOW_CHAINS;
-  Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
+  BindingFlags binding_flags;
+  Handle<Object> holder = context->Lookup(name,
+                                          flags,
+                                          &index,
+                                          &attributes,
+                                          &binding_flags);
 
   // If the index is non-negative, the slot has been found in a local
   // variable or a parameter. Read it from the context object or the
@@ -7733,30 +8754,48 @@
     // If the "property" we were looking for is a local variable or an
     // argument in a context, the receiver is the global object; see
     // ECMA-262, 3rd., 10.1.6 and 10.2.3.
-    JSObject* receiver =
-        isolate->context()->global()->global_receiver();
+    //
+    // Use the hole as the receiver to signal that the receiver is
+    // implicit and that the global receiver should be used.
+    Handle<Object> receiver = isolate->factory()->the_hole_value();
     MaybeObject* value = (holder->IsContext())
         ? Context::cast(*holder)->get(index)
         : JSObject::cast(*holder)->GetElement(index);
-    return MakePair(Unhole(isolate->heap(), value, attributes), receiver);
+    // Check for uninitialized bindings.
+    if (holder->IsContext() &&
+        binding_flags == MUTABLE_CHECK_INITIALIZED &&
+        value->IsTheHole()) {
+      Handle<Object> reference_error =
+          isolate->factory()->NewReferenceError("not_defined",
+                                                HandleVector(&name, 1));
+      return MakePair(isolate->Throw(*reference_error), NULL);
+    } else {
+      return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+    }
   }
 
   // If the holder is found, we read the property from it.
   if (!holder.is_null() && holder->IsJSObject()) {
     ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
     JSObject* object = JSObject::cast(*holder);
-    JSObject* receiver;
+    Object* receiver;
     if (object->IsGlobalObject()) {
       receiver = GlobalObject::cast(object)->global_receiver();
     } else if (context->is_exception_holder(*holder)) {
-      receiver = isolate->context()->global()->global_receiver();
+      // Use the hole as the receiver to signal that the receiver is
+      // implicit and that the global receiver should be used.
+      receiver = isolate->heap()->the_hole_value();
     } else {
       receiver = ComputeReceiverForNonGlobal(isolate, object);
     }
+
+    // GetProperty below can cause GC.
+    Handle<Object> receiver_handle(receiver);
+
     // No need to unhole the value here. This is taken care of by the
     // GetProperty function.
     MaybeObject* value = object->GetProperty(*name);
-    return MakePair(value, receiver);
+    return MakePair(value, *receiver_handle);
   }
 
   if (throw_error) {
@@ -7766,7 +8805,7 @@
                                               HandleVector(&name, 1));
     return MakePair(isolate->Throw(*reference_error), NULL);
   } else {
-    // The property doesn't exist - return undefined
+    // The property doesn't exist - return undefined.
     return MakePair(isolate->heap()->undefined_value(),
                     isolate->heap()->undefined_value());
   }
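
The rewritten slot load above returns a (value, receiver) pair in which the hole value now stands for "no explicit receiver, use the global receiver", instead of eagerly materializing the global receiver. A standalone model of that sentinel convention, with illustrative stand-ins (kTheHole, kGlobalReceiver and LoadSlot are not V8 names):

#include <cassert>
#include <string>
#include <utility>

// "The hole" is modeled here as an empty string; the real code uses a
// dedicated oddball object.
static const std::string kTheHole = "";
static const std::string kGlobalReceiver = "<global receiver>";

// Returns {value, receiver}; a hole receiver means "implicit receiver".
static std::pair<int, std::string> LoadSlot(bool found_in_context) {
  if (found_in_context) return {1, kTheHole};       // local or parameter
  return {2, std::string("holder object")};         // property on a holder
}

// Call sites resolve the sentinel lazily, only when a receiver is needed.
static std::string ResolveReceiver(const std::string& receiver) {
  return receiver == kTheHole ? kGlobalReceiver : receiver;
}

int main() {
  assert(ResolveReceiver(LoadSlot(true).second) == kGlobalReceiver);
  assert(ResolveReceiver(LoadSlot(false).second) == "holder object");
  return 0;
}
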
@@ -7790,7 +8829,7 @@
   Handle<Object> value(args[0], isolate);
   CONVERT_ARG_CHECKED(Context, context, 1);
   CONVERT_ARG_CHECKED(String, name, 2);
-  CONVERT_SMI_CHECKED(strict_unchecked, args[3]);
+  CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
   RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
                  strict_unchecked == kNonStrictMode);
   StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
@@ -7798,14 +8837,27 @@
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = FOLLOW_CHAINS;
-  Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
+  BindingFlags binding_flags;
+  Handle<Object> holder = context->Lookup(name,
+                                          flags,
+                                          &index,
+                                          &attributes,
+                                          &binding_flags);
 
   if (index >= 0) {
     if (holder->IsContext()) {
+      Handle<Context> context = Handle<Context>::cast(holder);
+      if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
+          context->get(index)->IsTheHole()) {
+        Handle<Object> error =
+            isolate->factory()->NewReferenceError("not_defined",
+                                                  HandleVector(&name, 1));
+        return isolate->Throw(*error);
+      }
       // Ignore if read_only variable.
       if ((attributes & READ_ONLY) == 0) {
         // Context is a fixed array and set cannot fail.
-        Context::cast(*holder)->set(index, *value);
+        context->set(index, *value);
       } else if (strict_mode == kStrictMode) {
         // Setting read only property in strict mode.
         Handle<Object> error =
@@ -7919,7 +8971,7 @@
 static void PrintString(String* str) {
   // It is not uncommon to have empty strings.
   if (str->length() > 0) {
-    SmartPointer<char> s =
+    SmartArrayPointer<char> s =
         str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     PrintF("%s", *s);
   }
@@ -8081,13 +9133,14 @@
   FixedArray* output_array = FixedArray::cast(output->elements());
   RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
   bool result;
-  if (str->IsAsciiRepresentation()) {
-    result = DateParser::Parse(str->ToAsciiVector(),
+  String::FlatContent str_content = str->GetFlatContent();
+  if (str_content.IsAscii()) {
+    result = DateParser::Parse(str_content.ToAsciiVector(),
                                output_array,
                                isolate->unicode_cache());
   } else {
-    ASSERT(str->IsTwoByteRepresentation());
-    result = DateParser::Parse(str->ToUC16Vector(),
+    ASSERT(str_content.IsTwoByte());
+    result = DateParser::Parse(str_content.ToUC16Vector(),
                                output_array,
                                isolate->unicode_cache());
   }
@@ -8104,7 +9157,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   const char* zone = OS::LocalTimezone(x);
   return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
 }
@@ -8122,7 +9175,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return isolate->heap()->NumberFromDouble(OS::DaylightSavingsOffset(x));
 }
 
@@ -8140,7 +9193,14 @@
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
 
-  Handle<Object> result = JsonParser::Parse(source);
+  source = Handle<String>(source->TryFlattenGetString());
+  // Optimized fast case where we only have ASCII characters.
+  Handle<Object> result;
+  if (source->IsSeqAsciiString()) {
+    result = JsonParser<true>::Parse(source);
+  } else {
+    result = JsonParser<false>::Parse(source);
+  }
   if (result.is_null()) {
     // Syntax error or stack overflow in scanner.
     ASSERT(isolate->has_pending_exception());
@@ -8150,13 +9210,41 @@
 }
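
The JSON change above flattens the source string once and then dispatches to JsonParser<true> or JsonParser<false>, so the sequential-ASCII case gets its own instantiation with no per-character width checks in the hot loop. A standalone sketch of that compile-time dispatch pattern (TinyScanner is illustrative, not the V8 parser):

#include <cstddef>
#include <cstdio>
#include <type_traits>

// One instantiation per character width; the width is fixed at compile time.
template <bool seq_ascii>
struct TinyScanner {
  using Char = typename std::conditional<seq_ascii, char, char16_t>::type;
  static std::size_t CountDigits(const Char* chars, std::size_t length) {
    std::size_t n = 0;
    for (std::size_t i = 0; i < length; i++) {
      if (chars[i] >= '0' && chars[i] <= '9') n++;
    }
    return n;
  }
};

int main() {
  const char ascii[] = "[1,2,3]";
  const char16_t two_byte[] = u"[1,2,3]";
  // A runtime check picks the instantiation, as IsSeqAsciiString() does above.
  std::printf("%zu %zu\n",
              TinyScanner<true>::CountDigits(ascii, sizeof(ascii) - 1),
              TinyScanner<false>::CountDigits(
                  two_byte, sizeof(two_byte) / sizeof(two_byte[0]) - 1));
  return 0;
}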
 
 
+bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+                                      Handle<Context> context) {
+  if (context->allow_code_gen_from_strings()->IsFalse()) {
+    // Check with callback if set.
+    AllowCodeGenerationFromStringsCallback callback =
+        isolate->allow_code_gen_callback();
+    if (callback == NULL) {
+      // No callback set and code generation disallowed.
+      return false;
+    } else {
+      // Callback set. Let it decide if code generation is allowed.
+      VMState state(isolate, EXTERNAL);
+      return callback(v8::Utils::ToLocal(context));
+    }
+  }
+  return true;
+}
+
+
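
CodeGenerationFromStringsAllowed above implements a two-level gate: a per-context flag, and, when the flag is off, an optional embedder callback that gets the final say. A standalone model of just that decision logic (FakeContext and the callback wiring are illustrative, not the V8 embedding API):

#include <cstdio>
#include <cstring>

typedef bool (*AllowCodeGenCallback)(const char* context_name);

struct FakeContext {
  bool allow_code_gen_from_strings;
  const char* name;
};

// Mirrors the shape of the helper above: flag first, then callback, then deny.
static bool CodeGenAllowed(const FakeContext& context,
                           AllowCodeGenCallback callback) {
  if (!context.allow_code_gen_from_strings) {
    if (callback == nullptr) return false;  // no callback: disallowed outright
    return callback(context.name);          // callback decides
  }
  return true;                              // flag on: always allowed
}

static bool AllowOnlyTrusted(const char* context_name) {
  return std::strcmp(context_name, "trusted") == 0;
}

int main() {
  FakeContext untrusted{false, "untrusted"};
  FakeContext trusted{false, "trusted"};
  std::printf("%d %d %d\n",
              CodeGenAllowed(untrusted, nullptr),
              CodeGenAllowed(untrusted, AllowOnlyTrusted),
              CodeGenAllowed(trusted, AllowOnlyTrusted));  // 0 0 1
  return 0;
}
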
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
 
-  // Compile source string in the global context.
+  // Extract global context.
   Handle<Context> context(isolate->context()->global_context());
+
+  // Check if global context allows code generation from
+  // strings. Throw an exception if it doesn't.
+  if (!CodeGenerationFromStringsAllowed(isolate, context)) {
+    return isolate->Throw(*isolate->factory()->NewError(
+        "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+  }
+
+  // Compile source string in the global context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
                                                             context,
                                                             true,
@@ -8174,17 +9262,28 @@
                                     Handle<String> source,
                                     Handle<Object> receiver,
                                     StrictModeFlag strict_mode) {
+  Handle<Context> context = Handle<Context>(isolate->context());
+  Handle<Context> global_context = Handle<Context>(context->global_context());
+
+  // Check if global context allows code generation from
+  // strings. Throw an exception if it doesn't.
+  if (!CodeGenerationFromStringsAllowed(isolate, global_context)) {
+    isolate->Throw(*isolate->factory()->NewError(
+        "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+    return MakePair(Failure::Exception(), NULL);
+  }
+
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
       source,
       Handle<Context>(isolate->context()),
-      isolate->context()->IsGlobalContext(),
+      context->IsGlobalContext(),
       strict_mode);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          shared, Handle<Context>(isolate->context()), NOT_TENURED);
+          shared, context, NOT_TENURED);
   return MakePair(*compiled, *receiver);
 }
 
@@ -8210,19 +9309,17 @@
   // it is bound in the global context.
   int index = -1;
   PropertyAttributes attributes = ABSENT;
+  BindingFlags binding_flags;
   while (true) {
     receiver = context->Lookup(isolate->factory()->eval_symbol(),
                                FOLLOW_PROTOTYPE_CHAIN,
-                               &index, &attributes);
+                               &index,
+                               &attributes,
+                               &binding_flags);
     // Stop search when eval is found or when the global context is
     // reached.
     if (attributes != ABSENT || context->IsGlobalContext()) break;
-    if (context->is_function_context()) {
-      context = Handle<Context>(Context::cast(context->closure()->context()),
-                                isolate);
-    } else {
-      context = Handle<Context>(context->previous(), isolate);
-    }
+    context = Handle<Context>(context->previous(), isolate);
   }
 
   // If eval could not be resolved, it has been deleted and we need to
@@ -8238,12 +9335,8 @@
   if (!context->IsGlobalContext()) {
     // 'eval' is not bound in the global context. Just call the function
     // with the given arguments. This is not necessarily the global eval.
-    if (receiver->IsContext()) {
-      context = Handle<Context>::cast(receiver);
-      receiver = Handle<Object>(context->get(index), isolate);
-    } else if (receiver->IsJSContextExtensionObject()) {
-      receiver = Handle<JSObject>(
-          isolate->context()->global()->global_receiver(), isolate);
+    if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
+      receiver = isolate->factory()->the_hole_value();
     }
     return MakePair(*callee, *receiver);
   }
@@ -8252,16 +9345,14 @@
   // Compare it to the builtin 'GlobalEval' function to make sure.
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
-    return MakePair(*callee,
-                    isolate->context()->global()->global_receiver());
+    return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
   ASSERT(args[3]->IsSmi());
   return CompileGlobalEval(isolate,
                            args.at<String>(1),
                            args.at<Object>(2),
-                           static_cast<StrictModeFlag>(
-                                Smi::cast(args[3])->value()));
+                           static_cast<StrictModeFlag>(args.smi_at(3)));
 }
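
With function contexts now chained through previous() like every other context, resolving 'eval' above is a plain walk up the chain that stops when the binding is found or the global context is reached. A standalone sketch of that walk over a toy context chain (Ctx and Resolve are illustrative, not V8 types):

#include <cstdio>
#include <map>
#include <string>

// Toy linked context chain; a null previous marks the global context.
struct Ctx {
  std::map<std::string, int> bindings;
  Ctx* previous;
};

// Walk outward until the name is bound or the global context is reached.
static const Ctx* Resolve(const Ctx* context, const std::string& name) {
  while (true) {
    if (context->bindings.count(name) || context->previous == nullptr) {
      return context;
    }
    context = context->previous;
  }
}

int main() {
  Ctx global{{{"eval", 1}}, nullptr};
  Ctx function{{}, &global};
  Ctx with_scope{{}, &function};
  std::printf("%s\n", Resolve(&with_scope, "eval") == &global
                          ? "eval bound in the global context"
                          : "eval shadowed locally");
  return 0;
}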
 
 
@@ -8275,16 +9366,14 @@
   // Compare it to the builtin 'GlobalEval' function to make sure.
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
-    return MakePair(*callee,
-                    isolate->context()->global()->global_receiver());
+    return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
   ASSERT(args[3]->IsSmi());
   return CompileGlobalEval(isolate,
                            args.at<String>(1),
                            args.at<Object>(2),
-                           static_cast<StrictModeFlag>(
-                                Smi::cast(args[3])->value()));
+                           static_cast<StrictModeFlag>(args.smi_at(3)));
 }
 
 
@@ -8345,8 +9434,8 @@
   }
   Object* obj;
   // Strict not needed. Used for cycle detection in Array join implementation.
-  { MaybeObject* maybe_obj = array->SetFastElement(length, element,
-                                                   kNonStrictMode);
+  { MaybeObject* maybe_obj =
+        array->SetFastElement(length, element, kNonStrictMode, true);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   return isolate->heap()->true_value();
@@ -8396,8 +9485,9 @@
       // Fall-through to dictionary mode.
     }
     ASSERT(!fast_elements_);
-    Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
-    Handle<NumberDictionary> result =
+    Handle<SeededNumberDictionary> dict(
+        SeededNumberDictionary::cast(*storage_));
+    Handle<SeededNumberDictionary> result =
         isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
     if (!result.is_identical_to(dict)) {
       // Dictionary needed to grow.
@@ -8435,14 +9525,15 @@
   void SetDictionaryMode(uint32_t index) {
     ASSERT(fast_elements_);
     Handle<FixedArray> current_storage(*storage_);
-    Handle<NumberDictionary> slow_storage(
-        isolate_->factory()->NewNumberDictionary(current_storage->length()));
+    Handle<SeededNumberDictionary> slow_storage(
+        isolate_->factory()->NewSeededNumberDictionary(
+            current_storage->length()));
     uint32_t current_length = static_cast<uint32_t>(current_storage->length());
     for (uint32_t i = 0; i < current_length; i++) {
       HandleScope loop_scope;
       Handle<Object> element(current_storage->get(i));
       if (!element->IsTheHole()) {
-        Handle<NumberDictionary> new_storage =
+        Handle<SeededNumberDictionary> new_storage =
           isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
         if (!new_storage.is_identical_to(slow_storage)) {
           slow_storage = loop_scope.CloseAndEscape(new_storage);
@@ -8477,7 +9568,7 @@
   uint32_t length = static_cast<uint32_t>(array->length()->Number());
   int element_count = 0;
   switch (array->GetElementsKind()) {
-    case JSObject::FAST_ELEMENTS: {
+    case FAST_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
       ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
@@ -8488,9 +9579,9 @@
       }
       break;
     }
-    case JSObject::DICTIONARY_ELEMENTS: {
-      Handle<NumberDictionary> dictionary(
-          NumberDictionary::cast(array->elements()));
+    case DICTIONARY_ELEMENTS: {
+      Handle<SeededNumberDictionary> dictionary(
+          SeededNumberDictionary::cast(array->elements()));
       int capacity = dictionary->Capacity();
       for (int i = 0; i < capacity; i++) {
         Handle<Object> key(dictionary->KeyAt(i));
@@ -8526,13 +9617,13 @@
     if (elements_are_guaranteed_smis) {
       for (uint32_t j = 0; j < len; j++) {
         HandleScope loop_scope;
-        Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j))));
+        Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))));
         visitor->visit(j, e);
       }
     } else {
       for (uint32_t j = 0; j < len; j++) {
         HandleScope loop_scope;
-        int64_t val = static_cast<int64_t>(array->get(j));
+        int64_t val = static_cast<int64_t>(array->get_scalar(j));
         if (Smi::IsValid(static_cast<intptr_t>(val))) {
           Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
           visitor->visit(j, e);
@@ -8546,7 +9637,7 @@
   } else {
     for (uint32_t j = 0; j < len; j++) {
       HandleScope loop_scope(isolate);
-      Handle<Object> e = isolate->factory()->NewNumber(array->get(j));
+      Handle<Object> e = isolate->factory()->NewNumber(array->get_scalar(j));
       visitor->visit(j, e);
     }
   }
@@ -8564,9 +9655,9 @@
 static void CollectElementIndices(Handle<JSObject> object,
                                   uint32_t range,
                                   List<uint32_t>* indices) {
-  JSObject::ElementsKind kind = object->GetElementsKind();
+  ElementsKind kind = object->GetElementsKind();
   switch (kind) {
-    case JSObject::FAST_ELEMENTS: {
+    case FAST_ELEMENTS: {
       Handle<FixedArray> elements(FixedArray::cast(object->elements()));
       uint32_t length = static_cast<uint32_t>(elements->length());
       if (range < length) length = range;
@@ -8577,8 +9668,9 @@
       }
       break;
     }
-    case JSObject::DICTIONARY_ELEMENTS: {
-      Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements()));
+    case DICTIONARY_ELEMENTS: {
+      Handle<SeededNumberDictionary> dict(
+          SeededNumberDictionary::cast(object->elements()));
       uint32_t capacity = dict->Capacity();
       for (uint32_t j = 0; j < capacity; j++) {
         HandleScope loop_scope;
@@ -8596,44 +9688,49 @@
     default: {
       int dense_elements_length;
       switch (kind) {
-        case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
-        dense_elements_length =
-            ExternalPixelArray::cast(object->elements())->length();
+        case EXTERNAL_PIXEL_ELEMENTS: {
+          dense_elements_length =
+              ExternalPixelArray::cast(object->elements())->length();
           break;
         }
-        case JSObject::EXTERNAL_BYTE_ELEMENTS: {
-        dense_elements_length =
-            ExternalByteArray::cast(object->elements())->length();
+        case EXTERNAL_BYTE_ELEMENTS: {
+          dense_elements_length =
+              ExternalByteArray::cast(object->elements())->length();
           break;
         }
-        case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
-        dense_elements_length =
-            ExternalUnsignedByteArray::cast(object->elements())->length();
+        case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+          dense_elements_length =
+              ExternalUnsignedByteArray::cast(object->elements())->length();
           break;
         }
-        case JSObject::EXTERNAL_SHORT_ELEMENTS: {
-        dense_elements_length =
-            ExternalShortArray::cast(object->elements())->length();
+        case EXTERNAL_SHORT_ELEMENTS: {
+          dense_elements_length =
+              ExternalShortArray::cast(object->elements())->length();
           break;
         }
-        case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
-        dense_elements_length =
-            ExternalUnsignedShortArray::cast(object->elements())->length();
+        case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+          dense_elements_length =
+              ExternalUnsignedShortArray::cast(object->elements())->length();
           break;
         }
-        case JSObject::EXTERNAL_INT_ELEMENTS: {
-        dense_elements_length =
-            ExternalIntArray::cast(object->elements())->length();
+        case EXTERNAL_INT_ELEMENTS: {
+          dense_elements_length =
+              ExternalIntArray::cast(object->elements())->length();
           break;
         }
-        case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-        dense_elements_length =
-            ExternalUnsignedIntArray::cast(object->elements())->length();
+        case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+          dense_elements_length =
+              ExternalUnsignedIntArray::cast(object->elements())->length();
           break;
         }
-        case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
-        dense_elements_length =
-            ExternalFloatArray::cast(object->elements())->length();
+        case EXTERNAL_FLOAT_ELEMENTS: {
+          dense_elements_length =
+              ExternalFloatArray::cast(object->elements())->length();
+          break;
+        }
+        case EXTERNAL_DOUBLE_ELEMENTS: {
+          dense_elements_length =
+              ExternalDoubleArray::cast(object->elements())->length();
           break;
         }
         default:
@@ -8680,7 +9777,7 @@
                             ArrayConcatVisitor* visitor) {
   uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
   switch (receiver->GetElementsKind()) {
-    case JSObject::FAST_ELEMENTS: {
+    case FAST_ELEMENTS: {
       // Run through the elements FixedArray and use HasElement and GetElement
       // to check the prototype for missing elements.
       Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
@@ -8701,8 +9798,8 @@
       }
       break;
     }
-    case JSObject::DICTIONARY_ELEMENTS: {
-      Handle<NumberDictionary> dict(receiver->element_dictionary());
+    case DICTIONARY_ELEMENTS: {
+      Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
       List<uint32_t> indices(dict->Capacity() / 2);
       // Collect all indices in the object and the prototypes less
       // than length. This might introduce duplicates in the indices list.
@@ -8723,50 +9820,55 @@
       }
       break;
     }
-    case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
+    case EXTERNAL_PIXEL_ELEMENTS: {
       Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
           receiver->elements()));
       for (uint32_t j = 0; j < length; j++) {
-        Handle<Smi> e(Smi::FromInt(pixels->get(j)));
+        Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)));
         visitor->visit(j, e);
       }
       break;
     }
-    case JSObject::EXTERNAL_BYTE_ELEMENTS: {
+    case EXTERNAL_BYTE_ELEMENTS: {
       IterateExternalArrayElements<ExternalByteArray, int8_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
-    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
       IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
-    case JSObject::EXTERNAL_SHORT_ELEMENTS: {
+    case EXTERNAL_SHORT_ELEMENTS: {
       IterateExternalArrayElements<ExternalShortArray, int16_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
-    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
       IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
-    case JSObject::EXTERNAL_INT_ELEMENTS: {
+    case EXTERNAL_INT_ELEMENTS: {
       IterateExternalArrayElements<ExternalIntArray, int32_t>(
           isolate, receiver, true, false, visitor);
       break;
     }
-    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
       IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
           isolate, receiver, true, false, visitor);
       break;
     }
-    case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS: {
       IterateExternalArrayElements<ExternalFloatArray, float>(
           isolate, receiver, false, false, visitor);
       break;
     }
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      IterateExternalArrayElements<ExternalDoubleArray, double>(
+          isolate, receiver, false, false, visitor);
+      break;
+    }
     default:
       UNREACHABLE();
       break;
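
The element iteration above now switches on the top-level ElementsKind enum and gains an EXTERNAL_DOUBLE_ELEMENTS arm. The essence of that dispatch is a per-kind property table; a standalone sketch mapping each external kind to its element width (the enum here is an illustrative subset, not copied from V8's headers):

#include <cstdio>

enum ElementsKind {
  EXTERNAL_BYTE_ELEMENTS,
  EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
  EXTERNAL_SHORT_ELEMENTS,
  EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
  EXTERNAL_INT_ELEMENTS,
  EXTERNAL_UNSIGNED_INT_ELEMENTS,
  EXTERNAL_FLOAT_ELEMENTS,
  EXTERNAL_DOUBLE_ELEMENTS
};

static int ElementSizeInBytes(ElementsKind kind) {
  switch (kind) {
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:  return 1;
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: return 2;
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_FLOAT_ELEMENTS:          return 4;
    case EXTERNAL_DOUBLE_ELEMENTS:         return 8;  // the newly handled kind
  }
  return 0;
}

int main() {
  std::printf("double elements: %d bytes each\n",
              ElementSizeInBytes(EXTERNAL_DOUBLE_ELEMENTS));
  return 0;
}
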
@@ -8846,7 +9948,7 @@
     uint32_t at_least_space_for = estimate_nof_elements +
                                   (estimate_nof_elements >> 2);
     storage = Handle<FixedArray>::cast(
-        isolate->factory()->NewNumberDictionary(at_least_space_for));
+        isolate->factory()->NewSeededNumberDictionary(at_least_space_for));
   }
 
   ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -8901,11 +10003,14 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, from, args[0]);
   CONVERT_CHECKED(JSArray, to, args[1]);
-  HeapObject* new_elements = from->elements();
+  FixedArrayBase* new_elements = from->elements();
   MaybeObject* maybe_new_map;
   if (new_elements->map() == isolate->heap()->fixed_array_map() ||
       new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
     maybe_new_map = to->map()->GetFastElementsMap();
+  } else if (new_elements->map() ==
+             isolate->heap()->fixed_double_array_map()) {
+    maybe_new_map = to->map()->GetFastDoubleElementsMap();
   } else {
     maybe_new_map = to->map()->GetSlowElementsMap();
   }
@@ -8929,7 +10034,8 @@
   CONVERT_CHECKED(JSObject, object, args[0]);
   HeapObject* elements = object->elements();
   if (elements->IsDictionary()) {
-    return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
+    int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
+    return Smi::FromInt(result);
   } else if (object->IsJSArray()) {
     return JSArray::cast(object)->length();
   } else {
@@ -8993,12 +10099,13 @@
     }
     return *isolate->factory()->NewJSArrayWithElements(keys);
   } else {
-    ASSERT(array->HasFastElements());
+    ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
     Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
     // -1 means start of array.
     single_interval->set(0, Smi::FromInt(-1));
+    FixedArrayBase* elements = FixedArrayBase::cast(array->elements());
     uint32_t actual_length =
-        static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
+        static_cast<uint32_t>(elements->length());
     uint32_t min_length = actual_length < length ? actual_length : length;
     Handle<Object> length_object =
         isolate->factory()->NewNumber(static_cast<double>(min_length));
@@ -9110,7 +10217,7 @@
       return result->GetConstantFunction();
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
-      if (structure->IsProxy() || structure->IsAccessorInfo()) {
+      if (structure->IsForeign() || structure->IsAccessorInfo()) {
         MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
             receiver, structure, name, result->holder());
         if (!maybe_value->ToObject(&value)) {
@@ -9130,7 +10237,7 @@
     }
     case INTERCEPTOR:
     case MAP_TRANSITION:
-    case EXTERNAL_ARRAY_TRANSITION:
+    case ELEMENTS_TRANSITION:
     case CONSTANT_TRANSITION:
     case NULL_DESCRIPTOR:
       return heap->undefined_value();
@@ -9236,9 +10343,7 @@
       details->set(0, *value);
       details->set(1, property_details);
       if (hasJavaScriptAccessors) {
-        details->set(2,
-                     caught_exception ? isolate->heap()->true_value()
-                                      : isolate->heap()->false_value());
+        details->set(2, isolate->heap()->ToBoolean(caught_exception));
         details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
         details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
       }
@@ -9362,11 +10467,79 @@
     // If there is no JavaScript stack frame count is 0.
     return Smi::FromInt(0);
   }
-  for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) n++;
+
+  for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
+    n += it.frame()->GetInlineCount();
+  }
   return Smi::FromInt(n);
 }
 
 
+class FrameInspector {
+ public:
+  FrameInspector(JavaScriptFrame* frame,
+                 int inlined_frame_index,
+                 Isolate* isolate)
+      : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
+    // Calculate the deoptimized frame.
+    if (frame->is_optimized()) {
+      deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
+          frame, inlined_frame_index, isolate);
+    }
+    has_adapted_arguments_ = frame_->has_adapted_arguments();
+    is_optimized_ = frame_->is_optimized();
+  }
+
+  ~FrameInspector() {
+    // Get rid of the calculated deoptimized frame if any.
+    if (deoptimized_frame_ != NULL) {
+      Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_,
+                                                  isolate_);
+    }
+  }
+
+  int GetParametersCount() {
+    return is_optimized_
+        ? deoptimized_frame_->parameters_count()
+        : frame_->ComputeParametersCount();
+  }
+  int expression_count() { return deoptimized_frame_->expression_count(); }
+  Object* GetFunction() {
+    return is_optimized_
+        ? deoptimized_frame_->GetFunction()
+        : frame_->function();
+  }
+  Object* GetParameter(int index) {
+    return is_optimized_
+        ? deoptimized_frame_->GetParameter(index)
+        : frame_->GetParameter(index);
+  }
+  Object* GetExpression(int index) {
+    return is_optimized_
+        ? deoptimized_frame_->GetExpression(index)
+        : frame_->GetExpression(index);
+  }
+
+  // To inspect all the provided arguments the frame might need to be
+  // replaced with the arguments frame.
+  void SetArgumentsFrame(JavaScriptFrame* frame) {
+    ASSERT(has_adapted_arguments_);
+    frame_ = frame;
+    is_optimized_ = frame_->is_optimized();
+    ASSERT(!is_optimized_);
+  }
+
+ private:
+  JavaScriptFrame* frame_;
+  DeoptimizedFrameInfo* deoptimized_frame_;
+  Isolate* isolate_;
+  bool is_optimized_;
+  bool has_adapted_arguments_;
+
+  DISALLOW_COPY_AND_ASSIGN(FrameInspector);
+};
+
+
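
FrameInspector above owns an optional materialized view of an optimized frame (built by the deoptimizer) and routes every accessor to either that view or the physical frame, releasing the view in its destructor. A standalone model of that shape, with illustrative types (PhysicalFrame and MaterializedFrame are stand-ins, not V8 classes):

#include <cstdio>
#include <vector>

struct PhysicalFrame {
  std::vector<int> params;
  bool optimized;
};

// Plays the role of DeoptimizedFrameInfo: values recovered for inspection.
struct MaterializedFrame {
  std::vector<int> params;
};

class Inspector {
 public:
  explicit Inspector(const PhysicalFrame* frame)
      : frame_(frame),
        materialized_(nullptr),
        is_optimized_(frame->optimized) {
    // For optimized frames, build a (here: fake) materialized view up front.
    if (is_optimized_) materialized_ = new MaterializedFrame{{10, 20, 30}};
  }
  ~Inspector() { delete materialized_; }  // release the inspectable view

  Inspector(const Inspector&) = delete;             // mirrors
  Inspector& operator=(const Inspector&) = delete;  // DISALLOW_COPY_AND_ASSIGN

  int GetParametersCount() const {
    return is_optimized_ ? static_cast<int>(materialized_->params.size())
                         : static_cast<int>(frame_->params.size());
  }
  int GetParameter(int index) const {
    return is_optimized_ ? materialized_->params[index] : frame_->params[index];
  }

 private:
  const PhysicalFrame* frame_;
  MaterializedFrame* materialized_;
  bool is_optimized_;
};

int main() {
  PhysicalFrame optimized{{1, 2}, true};
  Inspector inspector(&optimized);
  std::printf("%d %d\n", inspector.GetParametersCount(),
              inspector.GetParameter(0));  // reads from the materialized view
  return 0;
}
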
 static const int kFrameDetailsFrameIdIndex = 0;
 static const int kFrameDetailsReceiverIndex = 1;
 static const int kFrameDetailsFunctionIndex = 2;
@@ -9375,7 +10548,7 @@
 static const int kFrameDetailsSourcePositionIndex = 5;
 static const int kFrameDetailsConstructCallIndex = 6;
 static const int kFrameDetailsAtReturnIndex = 7;
-static const int kFrameDetailsDebuggerFrameIndex = 8;
+static const int kFrameDetailsFlagsIndex = 8;
 static const int kFrameDetailsFirstDynamicIndex = 9;
 
 // Return an array with frame details
@@ -9391,7 +10564,7 @@
 // 5: Source position
 // 6: Constructor call
 // 7: Is at return
-// 8: Debugger frame
+// 8: Flags
 // Arguments name, value
 // Locals name, value
 // Return value if any
@@ -9414,16 +10587,22 @@
     // If there are no JavaScript stack frames return undefined.
     return heap->undefined_value();
   }
+
+  int inlined_frame_index = 0;  // Inlined frame index in optimized frame.
+
   int count = 0;
   JavaScriptFrameIterator it(isolate, id);
   for (; !it.done(); it.Advance()) {
-    if (count == index) break;
-    count++;
+    if (index < count + it.frame()->GetInlineCount()) break;
+    count += it.frame()->GetInlineCount();
   }
   if (it.done()) return heap->undefined_value();
 
-  bool is_optimized_frame =
-      it.frame()->LookupCode()->kind() == Code::OPTIMIZED_FUNCTION;
+  if (it.frame()->is_optimized()) {
+    inlined_frame_index =
+        it.frame()->GetInlineCount() - (index - count) - 1;
+  }
+  FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
@@ -9440,17 +10619,18 @@
   int position =
       it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
 
-  // Check for constructor frame.
-  bool constructor = it.frame()->IsConstructor();
+  // Check for constructor frame. Inlined frames cannot be construct calls.
+  bool inlined_frame =
+      it.frame()->is_optimized() && inlined_frame_index != 0;
+  bool constructor = !inlined_frame && it.frame()->IsConstructor();
 
   // Get scope info and read from it for local variable information.
   Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
-  Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+  Handle<SharedFunctionInfo> shared(function->shared());
+  Handle<SerializedScopeInfo> scope_info(shared->scope_info());
+  ASSERT(*scope_info != SerializedScopeInfo::Empty());
   ScopeInfo<> info(*scope_info);
 
-  // Get the context.
-  Handle<Context> context(Context::cast(it.frame()->context()));
-
   // Get the locals names and values into a temporary array.
   //
   // TODO(1240907): Hide compiler-introduced stack variables
@@ -9459,31 +10639,20 @@
   Handle<FixedArray> locals =
       isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
 
-  // Fill in the names of the locals.
-  for (int i = 0; i < info.NumberOfLocals(); i++) {
-    locals->set(i * 2, *info.LocalName(i));
-  }
-
   // Fill in the values of the locals.
-  for (int i = 0; i < info.NumberOfLocals(); i++) {
-    if (is_optimized_frame) {
-      // If we are inspecting an optimized frame use undefined as the
-      // value for all locals.
-      //
-      // TODO(1140): We should be able to get the correct values
-      // for locals in optimized frames.
-      locals->set(i * 2 + 1, isolate->heap()->undefined_value());
-    } else if (i < info.number_of_stack_slots()) {
-      // Get the value from the stack.
-      locals->set(i * 2 + 1, it.frame()->GetExpression(i));
-    } else {
-      // Traverse the context chain to the function context as all local
-      // variables stored in the context will be on the function context.
+  int i = 0;
+  for (; i < info.number_of_stack_slots(); ++i) {
+    // Use the value from the stack.
+    locals->set(i * 2, *info.LocalName(i));
+    locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
+  }
+  if (i < info.NumberOfLocals()) {
+    // Get the context containing declarations.
+    Handle<Context> context(
+        Context::cast(it.frame()->context())->declaration_context());
+    for (; i < info.NumberOfLocals(); ++i) {
       Handle<String> name = info.LocalName(i);
-      while (!context->is_function_context()) {
-        context = Handle<Context>(context->previous());
-      }
-      ASSERT(context->is_function_context());
+      locals->set(i * 2, *name);
       locals->set(i * 2 + 1,
                   context->get(scope_info->ContextSlotIndex(*name, NULL)));
     }
@@ -9492,7 +10661,7 @@
   // Check whether this frame is positioned at return. If not top
   // frame or if the frame is optimized it cannot be at a return.
   bool at_return = false;
-  if (!is_optimized_frame && index == 0) {
+  if (!it.frame()->is_optimized() && index == 0) {
     at_return = isolate->debug()->IsBreakAtReturn(it.frame());
   }
 
@@ -9532,14 +10701,22 @@
   // the provided parameters whereas the function frame always has the number
   // of arguments matching the function's parameters. The rest of the
   // information (except for what is collected above) is the same.
-  it.AdvanceToArgumentsFrame();
+  if (it.frame()->has_adapted_arguments()) {
+    it.AdvanceToArgumentsFrame();
+    frame_inspector.SetArgumentsFrame(it.frame());
+  }
 
   // Find the number of arguments to fill. At least fill the number of
   // parameters for the function and fill more if more parameters are provided.
   int argument_count = info.number_of_parameters();
-  if (argument_count < it.frame()->ComputeParametersCount()) {
-    argument_count = it.frame()->ComputeParametersCount();
+  if (argument_count < frame_inspector.GetParametersCount()) {
+    argument_count = frame_inspector.GetParametersCount();
   }
+#ifdef DEBUG
+  if (it.frame()->is_optimized()) {
+    ASSERT_EQ(argument_count, frame_inspector.GetParametersCount());
+  }
+#endif
 
   // Calculate the size of the result.
   int details_size = kFrameDetailsFirstDynamicIndex +
@@ -9551,7 +10728,7 @@
   details->set(kFrameDetailsFrameIdIndex, *frame_id);
 
   // Add the function (same as in function frame).
-  details->set(kFrameDetailsFunctionIndex, it.frame()->function());
+  details->set(kFrameDetailsFunctionIndex, frame_inspector.GetFunction());
 
   // Add the arguments count.
   details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
@@ -9573,10 +10750,19 @@
   // Add the at return information.
   details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
 
-  // Add information on whether this frame is invoked in the debugger context.
-  details->set(kFrameDetailsDebuggerFrameIndex,
-               heap->ToBoolean(*save->context() ==
-                   *isolate->debug()->debug_context()));
+  // Add flags to indicate whether this frame is
+  //   bit 0: invoked in the debugger context.
+  //   bit 1: optimized frame.
+  //   bit 2: inlined in an optimized frame.
+  int flags = 0;
+  if (*save->context() == *isolate->debug()->debug_context()) {
+    flags |= 1 << 0;
+  }
+  if (it.frame()->is_optimized()) {
+    flags |= 1 << 1;
+    flags |= inlined_frame_index << 2;
+  }
+  details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
 
   // Fill the dynamic part.
   int details_index = kFrameDetailsFirstDynamicIndex;
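
The frame-details array now packs three pieces of information into one flags smi, replacing the old single "debugger frame" boolean: bit 0 for the debugger context, bit 1 for an optimized frame, and the remaining bits for the inlined frame index. A small standalone encode/decode sketch of that layout:

#include <cassert>

static int EncodeFrameFlags(bool debugger_context, bool optimized,
                            int inlined_index) {
  int flags = 0;
  if (debugger_context) flags |= 1 << 0;
  if (optimized) {
    flags |= 1 << 1;
    flags |= inlined_index << 2;
  }
  return flags;
}

int main() {
  int flags = EncodeFrameFlags(false, true, 3);
  assert((flags & (1 << 0)) == 0);  // not executing in the debugger context
  assert((flags & (1 << 1)) != 0);  // optimized frame
  assert((flags >> 2) == 3);        // inlined frame index
  return (flags >> 2) == 3 ? 0 : 1;
}
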
@@ -9590,14 +10776,10 @@
       details->set(details_index++, heap->undefined_value());
     }
 
-    // Parameter value. If we are inspecting an optimized frame, use
-    // undefined as the value.
-    //
-    // TODO(3141533): We should be able to get the actual parameter
-    // value for optimized frames.
-    if (!is_optimized_frame &&
-        (i < it.frame()->ComputeParametersCount())) {
-      details->set(details_index++, it.frame()->GetParameter(i));
+    // Parameter value.
+    if (i < it.frame()->ComputeParametersCount()) {
+      // Get the value from the stack.
+      details->set(details_index++, frame_inspector.GetParameter(i));
     } else {
       details->set(details_index++, heap->undefined_value());
     }
@@ -9617,10 +10799,11 @@
   // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
   // THE FRAME ITERATOR TO WRAP THE RECEIVER.
   Handle<Object> receiver(it.frame()->receiver(), isolate);
-  if (!receiver->IsJSObject()) {
-    // If the receiver is NOT a JSObject we have hit an optimization
-    // where a value object is not converted into a wrapped JS objects.
-    // To hide this optimization from the debugger, we wrap the receiver
+  if (!receiver->IsJSObject() && !shared->strict_mode() && !shared->native()) {
+    // If the receiver is not a JSObject and the function is not a
+    // builtin or strict-mode function, we have hit an optimization where
+    // a value object is not converted into a wrapped JS object. To
+    // hide this optimization from the debugger, we wrap the receiver
     // by creating correct wrapper object based on the calling frame's
     // global context.
     it.Advance();
@@ -9650,18 +10833,14 @@
     int context_index = serialized_scope_info->ContextSlotIndex(
         *scope_info.context_slot_name(i), NULL);
 
-    // Don't include the arguments shadow (.arguments) context variable.
-    if (*scope_info.context_slot_name(i) !=
-        isolate->heap()->arguments_shadow_symbol()) {
-      RETURN_IF_EMPTY_HANDLE_VALUE(
-          isolate,
-          SetProperty(scope_object,
-                      scope_info.context_slot_name(i),
-                      Handle<Object>(context->get(context_index), isolate),
-                      NONE,
-                      kNonStrictMode),
-          false);
-    }
+    RETURN_IF_EMPTY_HANDLE_VALUE(
+        isolate,
+        SetProperty(scope_object,
+                    scope_info.context_slot_name(i),
+                    Handle<Object>(context->get(context_index), isolate),
+                    NONE,
+                    kNonStrictMode),
+        false);
   }
 
   return true;
@@ -9670,12 +10849,15 @@
 
 // Create a plain JSObject which materializes the local scope for the specified
 // frame.
-static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
-                                              JavaScriptFrame* frame) {
+static Handle<JSObject> MaterializeLocalScope(
+    Isolate* isolate,
+    JavaScriptFrame* frame,
+    int inlined_frame_index) {
   Handle<JSFunction> function(JSFunction::cast(frame->function()));
   Handle<SharedFunctionInfo> shared(function->shared());
   Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
   ScopeInfo<> scope_info(*serialized_scope_info);
+  FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
   // heap locals and extension properties of the debugged function.
@@ -9688,55 +10870,58 @@
         isolate,
         SetProperty(local_scope,
                     scope_info.parameter_name(i),
-                    Handle<Object>(frame->GetParameter(i), isolate),
+                    Handle<Object>(frame_inspector.GetParameter(i)),
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
   // Second fill all stack locals.
-  for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
+  for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
                     scope_info.stack_slot_name(i),
-                    Handle<Object>(frame->GetExpression(i), isolate),
+                    Handle<Object>(frame_inspector.GetExpression(i)),
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
-  // Third fill all context locals.
-  Handle<Context> frame_context(Context::cast(frame->context()));
-  Handle<Context> function_context(frame_context->fcontext());
-  if (!CopyContextLocalsToScopeObject(isolate,
-                                      serialized_scope_info, scope_info,
-                                      function_context, local_scope)) {
-    return Handle<JSObject>();
-  }
+  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+    // Third fill all context locals.
+    Handle<Context> frame_context(Context::cast(frame->context()));
+    Handle<Context> function_context(frame_context->declaration_context());
+    if (!CopyContextLocalsToScopeObject(isolate,
+                                        serialized_scope_info, scope_info,
+                                        function_context, local_scope)) {
+      return Handle<JSObject>();
+    }
 
-  // Finally copy any properties from the function context extension. This will
-  // be variables introduced by eval.
-  if (function_context->closure() == *function) {
-    if (function_context->has_extension() &&
-        !function_context->IsGlobalContext()) {
-      Handle<JSObject> ext(JSObject::cast(function_context->extension()));
-      Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
-      for (int i = 0; i < keys->length(); i++) {
-        // Names of variables introduced by eval are strings.
-        ASSERT(keys->get(i)->IsString());
-        Handle<String> key(String::cast(keys->get(i)));
-        RETURN_IF_EMPTY_HANDLE_VALUE(
-            isolate,
-            SetProperty(local_scope,
-                        key,
-                        GetProperty(ext, key),
-                        NONE,
-                        kNonStrictMode),
-            Handle<JSObject>());
+    // Finally copy any properties from the function context extension.
+    // These will be variables introduced by eval.
+    if (function_context->closure() == *function) {
+      if (function_context->has_extension() &&
+          !function_context->IsGlobalContext()) {
+        Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+        Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+        for (int i = 0; i < keys->length(); i++) {
+          // Names of variables introduced by eval are strings.
+          ASSERT(keys->get(i)->IsString());
+          Handle<String> key(String::cast(keys->get(i)));
+          RETURN_IF_EMPTY_HANDLE_VALUE(
+              isolate,
+              SetProperty(local_scope,
+                          key,
+                          GetProperty(ext, key),
+                          NONE,
+                          kNonStrictMode),
+              Handle<JSObject>());
+        }
       }
     }
   }
+
   return local_scope;
 }
 
@@ -9745,7 +10930,7 @@
 // context.
 static Handle<JSObject> MaterializeClosure(Isolate* isolate,
                                            Handle<Context> context) {
-  ASSERT(context->is_function_context());
+  ASSERT(context->IsFunctionContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
@@ -9756,29 +10941,6 @@
   Handle<JSObject> closure_scope =
       isolate->factory()->NewJSObject(isolate->object_function());
 
-  // Check whether the arguments shadow object exists.
-  int arguments_shadow_index =
-      shared->scope_info()->ContextSlotIndex(
-          isolate->heap()->arguments_shadow_symbol(), NULL);
-  if (arguments_shadow_index >= 0) {
-    // In this case all the arguments are available in the arguments shadow
-    // object.
-    Handle<JSObject> arguments_shadow(
-        JSObject::cast(context->get(arguments_shadow_index)));
-    for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
-      // We don't expect exception-throwing getters on the arguments shadow.
-      Object* element = arguments_shadow->GetElement(i)->ToObjectUnchecked();
-      RETURN_IF_EMPTY_HANDLE_VALUE(
-          isolate,
-          SetProperty(closure_scope,
-                      scope_info.parameter_name(i),
-                      Handle<Object>(element, isolate),
-                      NONE,
-                      kNonStrictMode),
-          Handle<JSObject>());
-    }
-  }
-
   // Fill all context locals to the context extension.
   if (!CopyContextLocalsToScopeObject(isolate,
                                       serialized_scope_info, scope_info,
@@ -9810,6 +10972,51 @@
 }
 
 
+// Create a plain JSObject which materializes the scope for the specified
+// catch context.
+static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
+                                              Handle<Context> context) {
+  ASSERT(context->IsCatchContext());
+  Handle<String> name(String::cast(context->extension()));
+  Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX));
+  Handle<JSObject> catch_scope =
+      isolate->factory()->NewJSObject(isolate->object_function());
+  RETURN_IF_EMPTY_HANDLE_VALUE(
+      isolate,
+      SetProperty(catch_scope, name, thrown_object, NONE, kNonStrictMode),
+      Handle<JSObject>());
+  return catch_scope;
+}
+
+
+// Create a plain JSObject which materializes the block scope for the specified
+// block context.
+static Handle<JSObject> MaterializeBlockScope(
+    Isolate* isolate,
+    Handle<Context> context) {
+  ASSERT(context->IsBlockContext());
+  Handle<SerializedScopeInfo> serialized_scope_info(
+      SerializedScopeInfo::cast(context->extension()));
+  ScopeInfo<> scope_info(*serialized_scope_info);
+
+  // Allocate and initialize a JSObject with all the arguments, stack locals
+  // heap locals and extension properties of the debugged function.
+  Handle<JSObject> block_scope =
+      isolate->factory()->NewJSObject(isolate->object_function());
+
+  // Fill all context locals.
+  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+    if (!CopyContextLocalsToScopeObject(isolate,
+                                        serialized_scope_info, scope_info,
+                                        context, block_scope)) {
+      return Handle<JSObject>();
+    }
+  }
+
+  return block_scope;
+}
+
+
 // Iterate over the actual scopes visible from a stack frame. All scopes are
 // backed by an actual context except the local scope, which is inserted
 // "artifically" in the context chain.
@@ -9820,35 +11027,38 @@
     ScopeTypeLocal,
     ScopeTypeWith,
     ScopeTypeClosure,
-    // Every catch block contains an implicit with block (its parameter is
-    // a JSContextExtensionObject) that extends current scope with a variable
-    // holding exception object. Such with blocks are treated as scopes of their
-    // own type.
-    ScopeTypeCatch
+    ScopeTypeCatch,
+    ScopeTypeBlock
   };
 
-  ScopeIterator(Isolate* isolate, JavaScriptFrame* frame)
+  ScopeIterator(Isolate* isolate,
+                JavaScriptFrame* frame,
+                int inlined_frame_index)
     : isolate_(isolate),
       frame_(frame),
+      inlined_frame_index_(inlined_frame_index),
       function_(JSFunction::cast(frame->function())),
       context_(Context::cast(frame->context())),
       local_done_(false),
       at_local_(false) {
 
     // Check whether the first scope is actually a local scope.
-    if (context_->IsGlobalContext()) {
-      // If there is a stack slot for .result then this local scope has been
-      // created for evaluating top level code and it is not a real local scope.
-      // Checking for the existence of .result seems fragile, but the scope info
-      // saved with the code object does not otherwise have that information.
-      int index = function_->shared()->scope_info()->
-          StackSlotIndex(isolate_->heap()->result_symbol());
-      at_local_ = index < 0;
-    } else if (context_->is_function_context()) {
+    // If there is a stack slot for .result then this local scope has been
+    // created for evaluating top level code and it is not a real local scope.
+    // Checking for the existence of .result seems fragile, but the scope info
+    // saved with the code object does not otherwise have that information.
+    int index = function_->shared()->scope_info()->
+        StackSlotIndex(isolate_->heap()->result_symbol());
+    if (index >= 0) {
+      local_done_ = true;
+    } else if (context_->IsGlobalContext() ||
+               context_->IsFunctionContext()) {
       at_local_ = true;
     } else if (context_->closure() != *function_) {
-      // The context_ is a with block from the outer function.
-      ASSERT(context_->has_extension());
+      // The context_ is a with, catch, or block context from the outer function.
+      ASSERT(context_->IsWithContext() ||
+             context_->IsCatchContext() ||
+             context_->IsBlockContext());
       at_local_ = true;
     }
   }
@@ -9878,22 +11088,18 @@
     }
 
     // Move to the next context.
-    if (context_->is_function_context()) {
-      context_ = Handle<Context>(Context::cast(context_->closure()->context()));
-    } else {
-      context_ = Handle<Context>(context_->previous());
-    }
+    context_ = Handle<Context>(context_->previous(), isolate_);
 
     // If passing the local scope indicate that the current scope is now the
     // local scope.
     if (!local_done_ &&
-        (context_->IsGlobalContext() || (context_->is_function_context()))) {
+        (context_->IsGlobalContext() || context_->IsFunctionContext())) {
       at_local_ = true;
     }
   }
 
   // Return the type of the current scope.
-  int Type() {
+  ScopeType Type() {
     if (at_local_) {
       return ScopeTypeLocal;
     }
@@ -9901,18 +11107,16 @@
       ASSERT(context_->global()->IsGlobalObject());
       return ScopeTypeGlobal;
     }
-    if (context_->is_function_context()) {
+    if (context_->IsFunctionContext()) {
       return ScopeTypeClosure;
     }
-    ASSERT(context_->has_extension());
-    // Current scope is either an explicit with statement or a with statement
-    // implicitely generated for a catch block.
-    // If the extension object here is a JSContextExtensionObject then
-    // current with statement is one frome a catch block otherwise it's a
-    // regular with statement.
-    if (context_->extension()->IsJSContextExtensionObject()) {
+    if (context_->IsCatchContext()) {
       return ScopeTypeCatch;
     }
+    if (context_->IsBlockContext()) {
+      return ScopeTypeBlock;
+    }
+    ASSERT(context_->IsWithContext());
     return ScopeTypeWith;
   }
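
Type() above can now classify a scope directly from the context's kind (function, catch, block, with) instead of sniffing the extension object for a JSContextExtensionObject. A standalone model of that classification order (the enums are illustrative):

#include <cstdio>

enum ContextKind { GLOBAL_CTX, FUNCTION_CTX, CATCH_CTX, BLOCK_CTX, WITH_CTX };
enum ScopeType {
  ScopeTypeGlobal,
  ScopeTypeClosure,
  ScopeTypeCatch,
  ScopeTypeBlock,
  ScopeTypeWith
};

// Mirrors the dispatch in Type() above, minus the synthetic "local" scope.
static ScopeType Classify(ContextKind kind) {
  switch (kind) {
    case GLOBAL_CTX:   return ScopeTypeGlobal;
    case FUNCTION_CTX: return ScopeTypeClosure;
    case CATCH_CTX:    return ScopeTypeCatch;
    case BLOCK_CTX:    return ScopeTypeBlock;
    case WITH_CTX:     return ScopeTypeWith;
  }
  return ScopeTypeGlobal;
}

int main() {
  std::printf("%d %d\n", Classify(CATCH_CTX) == ScopeTypeCatch,
              Classify(BLOCK_CTX) == ScopeTypeBlock);  // 1 1
  return 0;
}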
 
@@ -9921,20 +11125,19 @@
     switch (Type()) {
       case ScopeIterator::ScopeTypeGlobal:
         return Handle<JSObject>(CurrentContext()->global());
-        break;
       case ScopeIterator::ScopeTypeLocal:
         // Materialize the content of the local scope into a JSObject.
-        return MaterializeLocalScope(isolate_, frame_);
-        break;
+        return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
       case ScopeIterator::ScopeTypeWith:
-      case ScopeIterator::ScopeTypeCatch:
         // Return the with object.
-        return Handle<JSObject>(CurrentContext()->extension());
-        break;
+        return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
+      case ScopeIterator::ScopeTypeCatch:
+        return MaterializeCatchScope(isolate_, CurrentContext());
       case ScopeIterator::ScopeTypeClosure:
         // Materialize the content of the closure scope into a JSObject.
         return MaterializeClosure(isolate_, CurrentContext());
-        break;
+      case ScopeIterator::ScopeTypeBlock:
+        return MaterializeBlockScope(isolate_, CurrentContext());
     }
     UNREACHABLE();
     return Handle<JSObject>();
@@ -9965,8 +11168,7 @@
         if (!CurrentContext().is_null()) {
           CurrentContext()->Print();
           if (CurrentContext()->has_extension()) {
-            Handle<JSObject> extension =
-                Handle<JSObject>(CurrentContext()->extension());
+            Handle<Object> extension(CurrentContext()->extension());
             if (extension->IsJSContextExtensionObject()) {
               extension->Print();
             }
@@ -9975,34 +11177,27 @@
         break;
       }
 
-      case ScopeIterator::ScopeTypeWith: {
+      case ScopeIterator::ScopeTypeWith:
         PrintF("With:\n");
-        Handle<JSObject> extension =
-            Handle<JSObject>(CurrentContext()->extension());
-        extension->Print();
+        CurrentContext()->extension()->Print();
         break;
-      }
 
-      case ScopeIterator::ScopeTypeCatch: {
+      case ScopeIterator::ScopeTypeCatch:
         PrintF("Catch:\n");
-        Handle<JSObject> extension =
-            Handle<JSObject>(CurrentContext()->extension());
-        extension->Print();
+        CurrentContext()->extension()->Print();
+        CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print();
         break;
-      }
 
-      case ScopeIterator::ScopeTypeClosure: {
+      case ScopeIterator::ScopeTypeClosure:
         PrintF("Closure:\n");
         CurrentContext()->Print();
         if (CurrentContext()->has_extension()) {
-          Handle<JSObject> extension =
-              Handle<JSObject>(CurrentContext()->extension());
+          Handle<Object> extension(CurrentContext()->extension());
           if (extension->IsJSContextExtensionObject()) {
             extension->Print();
           }
         }
         break;
-      }
 
       default:
         UNREACHABLE();
@@ -10014,6 +11209,7 @@
  private:
   Isolate* isolate_;
   JavaScriptFrame* frame_;
+  int inlined_frame_index_;
   Handle<JSFunction> function_;
   Handle<Context> context_;
   bool local_done_;
@@ -10042,7 +11238,9 @@
 
   // Count the visible scopes.
   int n = 0;
-  for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
+  for (ScopeIterator it(isolate, frame, 0);
+       !it.Done();
+       it.Next()) {
     n++;
   }
 
@@ -10057,14 +11255,15 @@
 // Return an array with scope details
 // args[0]: number: break id
 // args[1]: number: frame index
-// args[2]: number: scope index
+// args[2]: number: inlined frame index
+// args[3]: number: scope index
 //
 // The array returned contains the following information:
 // 0: Scope type
 // 1: Scope object
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
 
   // Check arguments.
   Object* check;
@@ -10073,7 +11272,8 @@
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_CHECKED(Smi, wrapped_id, args[1]);
-  CONVERT_NUMBER_CHECKED(int, index, Int32, args[2]);
+  CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+  CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -10082,7 +11282,7 @@
 
   // Find the requested scope.
   int n = 0;
-  ScopeIterator it(isolate, frame);
+  ScopeIterator it(isolate, frame, inlined_frame_index);
   for (; !it.Done() && n < index; it.Next()) {
     n++;
   }
@@ -10112,7 +11312,9 @@
   // Print the scopes for the top frame.
   StackFrameLocator locator;
   JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
-  for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
+  for (ScopeIterator it(isolate, frame, 0);
+       !it.Done();
+       it.Next()) {
     it.DebugPrint();
   }
 #endif
@@ -10469,19 +11671,46 @@
 
 // Creates a copy of the with context chain. The copy of the context chain
 // is linked to the function context supplied.
-static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
-                                            Handle<Context> function_context) {
-  // At the bottom of the chain. Return the function context to link to.
-  if (context_chain->is_function_context()) {
-    return function_context;
+static Handle<Context> CopyWithContextChain(Isolate* isolate,
+                                            Handle<JSFunction> function,
+                                            Handle<Context> current,
+                                            Handle<Context> base) {
+  // At the end of the chain. Return the base context to link to.
+  if (current->IsFunctionContext() || current->IsGlobalContext()) {
+    return base;
   }
 
-  // Recursively copy the with contexts.
-  Handle<Context> previous(context_chain->previous());
-  Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
-  Handle<Context> context = CopyWithContextChain(function_context, previous);
-  return context->GetIsolate()->factory()->NewWithContext(
-      context, extension, context_chain->IsCatchContext());
+  // Recursively copy the with and catch contexts.
+  HandleScope scope(isolate);
+  Handle<Context> previous(current->previous());
+  Handle<Context> new_previous =
+      CopyWithContextChain(isolate, function, previous, base);
+  Handle<Context> new_current;
+  if (current->IsCatchContext()) {
+    Handle<String> name(String::cast(current->extension()));
+    Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+    new_current =
+        isolate->factory()->NewCatchContext(function,
+                                            new_previous,
+                                            name,
+                                            thrown_object);
+  } else if (current->IsBlockContext()) {
+    Handle<SerializedScopeInfo> scope_info(
+        SerializedScopeInfo::cast(current->extension()));
+    new_current =
+        isolate->factory()->NewBlockContext(function, new_previous, scope_info);
+    // Copy context slots.
+    int num_context_slots = scope_info->NumberOfContextSlots();
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < num_context_slots; ++i) {
+      new_current->set(i, current->get(i));
+    }
+  } else {
+    ASSERT(current->IsWithContext());
+    Handle<JSObject> extension(JSObject::cast(current->extension()));
+    new_current =
+        isolate->factory()->NewWithContext(function, new_previous, extension);
+  }
+  return scope.CloseAndEscape(new_current);
 }
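
The rewritten CopyWithContextChain above recurses down to the first function or global context, then rebuilds each with, catch, and block context on the way back, relinking every copy onto the freshly built tail. The following is a minimal standalone C++ sketch of just that recursion shape; ContextNode, Kind, and copy_chain are invented stand-ins, not V8 types.

// Minimal sketch of the recursive chain-copy shape used above.
// ContextNode, Kind, and copy_chain are illustrative stand-ins, not V8 types.
#include <memory>
#include <string>

enum class Kind { Function, With, Catch, Block };

struct ContextNode {
  Kind kind;
  std::string payload;                   // stands in for extension / thrown object
  std::shared_ptr<ContextNode> previous;
};

// Copy every With/Catch/Block node until a Function node is reached,
// then link the copies onto `base` instead of the original tail.
std::shared_ptr<ContextNode> copy_chain(
    const std::shared_ptr<ContextNode>& current,
    const std::shared_ptr<ContextNode>& base) {
  if (!current || current->kind == Kind::Function) {
    return base;                         // bottom of the chain: reuse the base
  }
  auto new_previous = copy_chain(current->previous, base);
  auto copy = std::make_shared<ContextNode>(*current);  // shallow copy of slots
  copy->previous = new_previous;         // relink onto the copied tail
  return copy;
}
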
 
 
@@ -10489,6 +11718,7 @@
 // Runtime_DebugEvaluate.
 static Handle<Object> GetArgumentsObject(Isolate* isolate,
                                          JavaScriptFrame* frame,
+                                         int inlined_frame_index,
                                          Handle<JSFunction> function,
                                          Handle<SerializedScopeInfo> scope_info,
                                          const ScopeInfo<>* sinfo,
@@ -10512,7 +11742,9 @@
     }
   }
 
-  const int length = frame->ComputeParametersCount();
+  FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
+
+  int length = frame_inspector.GetParametersCount();
   Handle<JSObject> arguments =
       isolate->factory()->NewArgumentsObject(function, length);
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
@@ -10520,7 +11752,7 @@
   AssertNoAllocation no_gc;
   WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < length; i++) {
-    array->set(i, frame->GetParameter(i), mode);
+    array->set(i, frame_inspector.GetParameter(i), mode);
   }
   arguments->set_elements(*array);
   return arguments;
@@ -10547,7 +11779,7 @@
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
-  ASSERT(args.length() == 5);
+  ASSERT(args.length() == 6);
   Object* check_result;
   { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
       RUNTIME_ARGUMENTS(isolate, args));
@@ -10556,9 +11788,10 @@
     }
   }
   CONVERT_CHECKED(Smi, wrapped_id, args[1]);
-  CONVERT_ARG_CHECKED(String, source, 2);
-  CONVERT_BOOLEAN_CHECKED(disable_break, args[3]);
-  Handle<Object> additional_context(args[4]);
+  CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+  CONVERT_ARG_CHECKED(String, source, 3);
+  CONVERT_BOOLEAN_CHECKED(disable_break, args[4]);
+  Handle<Object> additional_context(args[5]);
 
   // Handle the processing of break.
   DisableBreak disable_break_save(disable_break);
@@ -10598,7 +11831,8 @@
 #endif
 
   // Materialize the content of the local scope into a JSObject.
-  Handle<JSObject> local_scope = MaterializeLocalScope(isolate, frame);
+  Handle<JSObject> local_scope = MaterializeLocalScope(
+      isolate, frame, inlined_frame_index);
   RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
 
   // Allocate a new context for the debug evaluation and set the extension
@@ -10609,12 +11843,17 @@
   context->set_extension(*local_scope);
   // Copy any with contexts present and chain them in front of this context.
   Handle<Context> frame_context(Context::cast(frame->context()));
-  Handle<Context> function_context(frame_context->fcontext());
-  context = CopyWithContextChain(frame_context, context);
+  Handle<Context> function_context;
+  // Get the function's context if it has one.
+  if (scope_info->HasHeapAllocatedLocals()) {
+    function_context = Handle<Context>(frame_context->declaration_context());
+  }
+  context = CopyWithContextChain(isolate, go_between, frame_context, context);
 
   if (additional_context->IsJSObject()) {
-    context = isolate->factory()->NewWithContext(context,
-        Handle<JSObject>::cast(additional_context), false);
+    Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
+    context =
+        isolate->factory()->NewWithContext(go_between, context, extension);
   }
 
   // Wrap the evaluation statement in a new function compiled in the newly
@@ -10646,7 +11885,8 @@
                       &has_pending_exception);
   if (has_pending_exception) return Failure::Exception();
 
-  Handle<Object> arguments = GetArgumentsObject(isolate, frame,
+  Handle<Object> arguments = GetArgumentsObject(isolate,
+                                                frame, inlined_frame_index,
                                                 function, scope_info,
                                                 &sinfo, function_context);
 
@@ -11288,7 +12528,7 @@
 // Sets a v8 flag.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
   CONVERT_CHECKED(String, arg, args[0]);
-  SmartPointer<char> flags =
+  SmartArrayPointer<char> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   FlagList::SetFlagsFromString(*flags, StrLength(*flags));
   return isolate->heap()->undefined_value();
@@ -11336,10 +12576,9 @@
 // Deletes the specified live object list.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
 #ifdef LIVE_OBJECT_LIST
-  CONVERT_SMI_CHECKED(id, args[0]);
+  CONVERT_SMI_ARG_CHECKED(id, 0);
   bool success = LiveObjectList::Delete(id);
-  return success ? isolate->heap()->true_value() :
-                   isolate->heap()->false_value();
+  return isolate->heap()->ToBoolean(success);
 #else
   return isolate->heap()->undefined_value();
 #endif
@@ -11354,10 +12593,10 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
-  CONVERT_SMI_CHECKED(id1, args[0]);
-  CONVERT_SMI_CHECKED(id2, args[1]);
-  CONVERT_SMI_CHECKED(start, args[2]);
-  CONVERT_SMI_CHECKED(count, args[3]);
+  CONVERT_SMI_ARG_CHECKED(id1, 0);
+  CONVERT_SMI_ARG_CHECKED(id2, 1);
+  CONVERT_SMI_ARG_CHECKED(start, 2);
+  CONVERT_SMI_ARG_CHECKED(count, 3);
   CONVERT_ARG_CHECKED(JSObject, filter_obj, 4);
   EnterDebugger enter_debugger;
   return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
@@ -11371,7 +12610,7 @@
 // This is only used for obj ids shown in live object lists.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
 #ifdef LIVE_OBJECT_LIST
-  CONVERT_SMI_CHECKED(obj_id, args[0]);
+  CONVERT_SMI_ARG_CHECKED(obj_id, 0);
   Object* result = LiveObjectList::GetObj(obj_id);
   return result;
 #else
@@ -11398,7 +12637,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
-  CONVERT_SMI_CHECKED(obj_id, args[0]);
+  CONVERT_SMI_ARG_CHECKED(obj_id, 0);
   RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject());
   RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
   RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
@@ -11415,11 +12654,11 @@
   }
   int start = 0;
   if (args[3]->IsSmi()) {
-    start = Smi::cast(args[3])->value();
+    start = args.smi_at(3);
   }
   int limit = Smi::kMaxValue;
   if (args[4]->IsSmi()) {
-    limit = Smi::cast(args[4])->value();
+    limit = args.smi_at(4);
   }
 
   return LiveObjectList::GetObjRetainers(obj_id,
@@ -11438,8 +12677,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
-  CONVERT_SMI_CHECKED(obj_id1, args[0]);
-  CONVERT_SMI_CHECKED(obj_id2, args[1]);
+  CONVERT_SMI_ARG_CHECKED(obj_id1, 0);
+  CONVERT_SMI_ARG_CHECKED(obj_id2, 1);
   RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject());
 
   Handle<JSObject> instance_filter;
@@ -11460,8 +12699,8 @@
 // previously captured live object lists.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
 #ifdef LIVE_OBJECT_LIST
-  CONVERT_SMI_CHECKED(start, args[0]);
-  CONVERT_SMI_CHECKED(count, args[1]);
+  CONVERT_SMI_ARG_CHECKED(start, 0);
+  CONVERT_SMI_ARG_CHECKED(count, 1);
   return LiveObjectList::Info(start, count);
 #else
   return isolate->heap()->undefined_value();
@@ -11474,7 +12713,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
-  CONVERT_SMI_CHECKED(obj_id, args[0]);
+  CONVERT_SMI_ARG_CHECKED(obj_id, 0);
   Object* result = LiveObjectList::PrintObj(obj_id);
   return result;
 #else
@@ -11502,8 +12741,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
-  CONVERT_SMI_CHECKED(id1, args[0]);
-  CONVERT_SMI_CHECKED(id2, args[1]);
+  CONVERT_SMI_ARG_CHECKED(id1, 0);
+  CONVERT_SMI_ARG_CHECKED(id2, 1);
   CONVERT_ARG_CHECKED(JSObject, filter_obj, 2);
 
   EnterDebugger enter_debugger;
@@ -11516,29 +12755,19 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-
-  CONVERT_CHECKED(Smi, smi_modules, args[0]);
-  CONVERT_CHECKED(Smi, smi_tag, args[1]);
-  v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
+  v8::V8::ResumeProfiler();
   return isolate->heap()->undefined_value();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-
-  CONVERT_CHECKED(Smi, smi_modules, args[0]);
-  CONVERT_CHECKED(Smi, smi_tag, args[1]);
-  v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
+  v8::V8::PauseProfiler();
   return isolate->heap()->undefined_value();
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 // Finds the script object from the script data. NOTE: This operation uses
 // heap traversal to find the function generated for the source position
@@ -11595,8 +12824,9 @@
 // call to this function is encountered it is skipped.  The seen_caller
 // in/out parameter is used to remember if the caller has been seen
 // yet.
-static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
-    bool* seen_caller) {
+static bool ShowFrameInStackTrace(StackFrame* raw_frame,
+                                  Object* caller,
+                                  bool* seen_caller) {
   // Only display JS frames.
   if (!raw_frame->is_java_script())
     return false;
@@ -11609,11 +12839,25 @@
     *seen_caller = true;
     return false;
   }
-  // Skip all frames until we've seen the caller.  Also, skip the most
-  // obvious builtin calls.  Some builtin calls (such as Number.ADD
-  // which is invoked using 'call') are very difficult to recognize
-  // so we're leaving them in for now.
-  return *seen_caller && !frame->receiver()->IsJSBuiltinsObject();
+  // Skip all frames until we've seen the caller.
+  if (!(*seen_caller)) return false;
+  // Also, skip the most obvious builtin calls. We recognize builtins
+  // as (1) functions called with the builtins object as the receiver and
+  // as (2) functions from native scripts called with undefined as the
+  // receiver (direct calls to helper functions in the builtins
+  // code). Some builtin calls (such as Number.ADD which is invoked
+  // using 'call') are very difficult to recognize so we're leaving
+  // them in for now.
+  if (frame->receiver()->IsJSBuiltinsObject()) {
+    return false;
+  }
+  JSFunction* fun = JSFunction::cast(raw_fun);
+  Object* raw_script = fun->shared()->script();
+  if (frame->receiver()->IsUndefined() && raw_script->IsScript()) {
+    int script_type = Script::cast(raw_script)->type()->value();
+    return script_type != Script::TYPE_NATIVE;
+  }
+  return true;
 }
 
 
@@ -11693,8 +12937,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
   ASSERT(args.length() == 2);
-  OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
-                                    Smi::cast(args[1])->value());
+  OS::PrintError("abort: %s\n",
+                 reinterpret_cast<char*>(args[0]) + args.smi_at(1));
   isolate->PrintStack();
   OS::Abort();
   UNREACHABLE();
@@ -11884,7 +13128,9 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, format, args[0]);
   CONVERT_CHECKED(JSArray, elms, args[1]);
-  Vector<const char> chars = format->ToAsciiVector();
+  String::FlatContent format_content = format->GetFlatContent();
+  RUNTIME_ASSERT(format_content.IsAscii());
+  Vector<const char> chars = format_content.ToAsciiVector();
   LOGGER->LogRuntime(chars, elms);
   return isolate->heap()->undefined_value();
 }
@@ -11896,6 +13142,28 @@
 }
 
 
+#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name)        \
+  RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) {     \
+    CONVERT_CHECKED(JSObject, obj, args[0]);              \
+    return isolate->heap()->ToBoolean(obj->Has##Name());  \
+  }
+
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedByteElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalShortElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedShortElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
+
+#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
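
The ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION block is the usual define/instantiate/undef macro pattern: one macro stamps out a family of near-identical boolean runtime functions. A reduced, compilable sketch of the same pattern on a toy type (ToyObject and its predicates are invented for illustration):

// Reduced sketch of the define/instantiate/undef pattern used above.
// ToyObject and its predicates are illustrative only.
#include <cstdio>

struct ToyObject {
  bool HasFastElements() const { return true; }
  bool HasDictionaryElements() const { return false; }
};

#define ELEMENTS_KIND_CHECK_FUNCTION(Name)      \
  bool Check##Name(const ToyObject& obj) {      \
    return obj.Has##Name();                     \
  }

ELEMENTS_KIND_CHECK_FUNCTION(FastElements)
ELEMENTS_KIND_CHECK_FUNCTION(DictionaryElements)

#undef ELEMENTS_KIND_CHECK_FUNCTION

int main() {
  ToyObject obj;
  std::printf("fast=%d dict=%d\n", CheckFastElements(obj), CheckDictionaryElements(obj));
}
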
+
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
diff --git a/src/runtime.h b/src/runtime.h
index bf1ba68..1538b7d 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_RUNTIME_H_
 #define V8_RUNTIME_H_
 
+#include "allocation.h"
 #include "zone.h"
 
 namespace v8 {
@@ -64,8 +65,9 @@
   F(ToSlowProperties, 1, 1) \
   F(FinishArrayPrototypeSetup, 1, 1) \
   F(SpecialArrayFunctions, 1, 1) \
-  F(GetGlobalReceiver, 0, 1) \
+  F(GetDefaultReceiver, 1, 1) \
   \
+  F(GetPrototype, 1, 1) \
   F(IsInPrototypeChain, 2, 1) \
   F(SetHiddenPrototype, 2, 1) \
   \
@@ -77,18 +79,25 @@
   F(PreventExtensions, 1, 1)\
   \
   /* Utilities */ \
+  F(CheckIsBootstrapping, 0, 1) \
+  F(Apply, 5, 1) \
   F(GetFunctionDelegate, 1, 1) \
   F(GetConstructorDelegate, 1, 1) \
   F(NewArgumentsFast, 3, 1) \
+  F(NewStrictArgumentsFast, 3, 1) \
   F(LazyCompile, 1, 1) \
   F(LazyRecompile, 1, 1) \
   F(NotifyDeoptimized, 1, 1) \
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
+  F(RunningInSimulator, 0, 1) \
   F(OptimizeFunctionOnNextCall, 1, 1) \
+  F(GetOptimizationStatus, 1, 1) \
+  F(GetOptimizationCount, 1, 1) \
   F(CompileForOnStackReplacement, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
+  F(SetNativeFlag, 1, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -110,6 +119,7 @@
   F(URIUnescape, 1, 1) \
   F(QuoteJSONString, 1, 1) \
   F(QuoteJSONStringComma, 1, 1) \
+  F(QuoteJSONStringArray, 1, 1) \
   \
   F(NumberToString, 1, 1) \
   F(NumberToStringSkipCache, 1, 1) \
@@ -132,6 +142,7 @@
   F(StringAdd, 2, 1) \
   F(StringBuilderConcat, 3, 1) \
   F(StringBuilderJoin, 3, 1) \
+  F(SparseJoinWithSeparator, 3, 1)            \
   \
   /* Bit operations */ \
   F(NumberOr, 2, 1) \
@@ -200,9 +211,14 @@
   /* Reflection */ \
   F(FunctionSetInstanceClassName, 2, 1) \
   F(FunctionSetLength, 2, 1) \
+  F(BoundFunctionSetLength, 2, 1)    \
   F(FunctionSetPrototype, 2, 1) \
+  F(FunctionSetReadOnlyPrototype, 1, 1) \
   F(FunctionGetName, 1, 1) \
   F(FunctionSetName, 2, 1) \
+  F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
+  F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
+  F(FunctionSetBound, 1, 1) \
   F(FunctionRemovePrototype, 1, 1) \
   F(FunctionGetSourceCode, 1, 1) \
   F(FunctionGetScript, 1, 1) \
@@ -270,8 +286,20 @@
   F(CreateArrayLiteral, 3, 1) \
   F(CreateArrayLiteralShallow, 3, 1) \
   \
-  /* Catch context extension objects */ \
-  F(CreateCatchExtensionObject, 2, 1) \
+  /* Harmony proxies */ \
+  F(CreateJSProxy, 2, 1) \
+  F(CreateJSFunctionProxy, 4, 1) \
+  F(IsJSProxy, 1, 1) \
+  F(IsJSFunctionProxy, 1, 1) \
+  F(GetHandler, 1, 1) \
+  F(GetCallTrap, 1, 1) \
+  F(GetConstructTrap, 1, 1) \
+  F(Fix, 1, 1) \
+  \
+  /* Harmony weakmaps */ \
+  F(WeakMapInitialize, 1, 1) \
+  F(WeakMapGet, 2, 1) \
+  F(WeakMapSet, 3, 1) \
   \
   /* Statements */ \
   F(NewClosure, 3, 1) \
@@ -285,16 +313,17 @@
   F(PromoteScheduledException, 0, 1) \
   \
   /* Contexts */ \
-  F(NewContext, 1, 1) \
-  F(PushContext, 1, 1) \
-  F(PushCatchContext, 1, 1) \
+  F(NewFunctionContext, 1, 1) \
+  F(PushWithContext, 2, 1) \
+  F(PushCatchContext, 3, 1) \
+  F(PushBlockContext, 2, 1) \
   F(DeleteContextSlot, 2, 1) \
   F(LoadContextSlot, 2, 2) \
   F(LoadContextSlotNoReferenceError, 2, 2) \
   F(StoreContextSlot, 4, 1) \
   \
   /* Declarations and initialization */ \
-  F(DeclareGlobals, 4, 1) \
+  F(DeclareGlobals, 3, 1) \
   F(DeclareContextSlot, 4, 1) \
   F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
   F(InitializeConstGlobal, 2, 1) \
@@ -322,7 +351,26 @@
   F(MessageGetScript, 1, 1) \
   \
   /* Pseudo functions - handled as macros by parser */ \
-  F(IS_VAR, 1, 1)
+  F(IS_VAR, 1, 1) \
+  \
+  /* expose boolean functions from objects-inl.h */ \
+  F(HasFastElements, 1, 1) \
+  F(HasFastDoubleElements, 1, 1) \
+  F(HasDictionaryElements, 1, 1) \
+  F(HasExternalPixelElements, 1, 1) \
+  F(HasExternalArrayElements, 1, 1) \
+  F(HasExternalByteElements, 1, 1) \
+  F(HasExternalUnsignedByteElements, 1, 1) \
+  F(HasExternalShortElements, 1, 1) \
+  F(HasExternalUnsignedShortElements, 1, 1) \
+  F(HasExternalIntElements, 1, 1) \
+  F(HasExternalUnsignedIntElements, 1, 1) \
+  F(HasExternalFloatElements, 1, 1) \
+  F(HasExternalDoubleElements, 1, 1) \
+  /* profiler */ \
+  F(ProfilerResume, 0, 1) \
+  F(ProfilerPause, 0, 1)
+
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
@@ -341,7 +389,7 @@
   F(GetFrameCount, 1, 1) \
   F(GetFrameDetails, 2, 1) \
   F(GetScopeCount, 2, 1) \
-  F(GetScopeDetails, 3, 1) \
+  F(GetScopeDetails, 4, 1) \
   F(DebugPrintScopes, 0, 1) \
   F(GetThreadCount, 1, 1) \
   F(GetThreadDetails, 2, 1) \
@@ -354,7 +402,7 @@
   F(IsBreakOnException, 1, 1) \
   F(PrepareStep, 3, 1) \
   F(ClearStepping, 0, 1) \
-  F(DebugEvaluate, 5, 1) \
+  F(DebugEvaluate, 6, 1) \
   F(DebugEvaluateGlobal, 4, 1) \
   F(DebugGetLoadedScripts, 0, 1) \
   F(DebugReferencedBy, 3, 1) \
@@ -399,14 +447,6 @@
 #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
 #endif
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
-  F(ProfilerResume, 2, 1) \
-  F(ProfilerPause, 2, 1)
-#else
-#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
-#endif
-
 #ifdef DEBUG
 #define RUNTIME_FUNCTION_LIST_DEBUG(F) \
   /* Testing */ \
@@ -424,8 +464,7 @@
   RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
   RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
   RUNTIME_FUNCTION_LIST_DEBUG(F) \
-  RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
-  RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
+  RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
 
 // ----------------------------------------------------------------------------
 // INLINE_FUNCTION_LIST defines all inlined functions accessed
@@ -486,7 +525,6 @@
 
 class RuntimeState {
  public:
-
   StaticResource<StringInputBuffer>* string_input_buffer() {
     return &string_input_buffer_;
   }
@@ -508,12 +546,6 @@
   StringInputBuffer* string_locale_compare_buf2() {
     return &string_locale_compare_buf2_;
   }
-  int* smi_lexicographic_compare_x_elms() {
-    return smi_lexicographic_compare_x_elms_;
-  }
-  int* smi_lexicographic_compare_y_elms() {
-    return smi_lexicographic_compare_y_elms_;
-  }
 
  private:
   RuntimeState() {}
@@ -525,8 +557,6 @@
   StringInputBuffer string_input_buffer_compare_bufy_;
   StringInputBuffer string_locale_compare_buf1_;
   StringInputBuffer string_locale_compare_buf2_;
-  int smi_lexicographic_compare_x_elms_[10];
-  int smi_lexicographic_compare_y_elms_[10];
 
   friend class Isolate;
   friend class Runtime;
@@ -622,7 +652,7 @@
 
   MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
       Isolate* isolate,
-      Handle<JSObject> object,
+      Handle<JSReceiver> object,
       Handle<Object> key);
 
   MUST_USE_RESULT static MaybeObject* GetObjectProperty(
@@ -639,6 +669,16 @@
   static void PerformGC(Object* result);
 };
 
+
+//---------------------------------------------------------------------------
+// Constants used by interface to runtime functions.
+
+enum kDeclareGlobalsFlags {
+  kDeclareGlobalsEvalFlag = 1 << 0,
+  kDeclareGlobalsStrictModeFlag = 1 << 1,
+  kDeclareGlobalsNativeFlag = 1 << 2
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_RUNTIME_H_
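
The new kDeclareGlobalsFlags values pack three independent booleans into one small integer: callers OR the bits together and the runtime tests them with AND. A small sketch of that convention follows; encode_flags and has_flag are illustrative helpers, not part of the header.

// Sketch of combining and testing the declare-globals flag bits shown above.
// The enum values mirror the header; encode_flags/has_flag are illustrative helpers.
enum DeclareGlobalsFlags {
  kDeclareGlobalsEvalFlag       = 1 << 0,
  kDeclareGlobalsStrictModeFlag = 1 << 1,
  kDeclareGlobalsNativeFlag     = 1 << 2
};

inline int encode_flags(bool is_eval, bool strict_mode, bool is_native) {
  return (is_eval ? kDeclareGlobalsEvalFlag : 0) |
         (strict_mode ? kDeclareGlobalsStrictModeFlag : 0) |
         (is_native ? kDeclareGlobalsNativeFlag : 0);
}

inline bool has_flag(int flags, DeclareGlobalsFlags flag) {
  return (flags & flag) != 0;
}
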
diff --git a/src/runtime.js b/src/runtime.js
index 66d839b..14ff1b6 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -48,42 +48,49 @@
 const $Function = global.Function;
 const $Boolean = global.Boolean;
 const $NaN = 0/0;
+const builtins = this;
 
-
-// ECMA-262, section 11.9.1, page 55.
+// ECMA-262 Section 11.9.3.
 function EQUALS(y) {
   if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
   var x = this;
 
-  // NOTE: We use iteration instead of recursion, because it is
-  // difficult to call EQUALS with the correct setting of 'this' in
-  // an efficient way.
   while (true) {
     if (IS_NUMBER(x)) {
-      if (y == null) return 1;  // not equal
-      return %NumberEquals(x, %ToNumber(y));
-    } else if (IS_STRING(x)) {
-      if (IS_STRING(y)) return %StringEquals(x, y);
-      if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
-      if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
-      if (y == null) return 1;  // not equal
-      y = %ToPrimitive(y, NO_HINT);
-    } else if (IS_BOOLEAN(x)) {
-      if (IS_BOOLEAN(y)) {
-        return %_ObjectEquals(x, y) ? 0 : 1;
+      while (true) {
+        if (IS_NUMBER(y)) return %NumberEquals(x, y);
+        if (IS_NULL_OR_UNDEFINED(y)) return 1;  // not equal
+        if (!IS_SPEC_OBJECT(y)) {
+          // String or boolean.
+          return %NumberEquals(x, %ToNumber(y));
+        }
+        y = %ToPrimitive(y, NO_HINT);
       }
-      if (y == null) return 1;  // not equal
-      return %NumberEquals(%ToNumber(x), %ToNumber(y));
-    } else if (x == null) {
-      // NOTE: This checks for both null and undefined.
-      return (y == null) ? 0 : 1;
+    } else if (IS_STRING(x)) {
+      while (true) {
+        if (IS_STRING(y)) return %StringEquals(x, y);
+        if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
+        if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+        if (IS_NULL_OR_UNDEFINED(y)) return 1;  // not equal
+        y = %ToPrimitive(y, NO_HINT);
+      }
+    } else if (IS_BOOLEAN(x)) {
+      if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
+      if (IS_NULL_OR_UNDEFINED(y)) return 1;
+      if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
+      if (IS_STRING(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+      // y is object.
+      x = %ToNumber(x);
+      y = %ToPrimitive(y, NO_HINT);
+    } else if (IS_NULL_OR_UNDEFINED(x)) {
+      return IS_NULL_OR_UNDEFINED(y) ? 0 : 1;
     } else {
-      // x is not a number, boolean, null or undefined.
-      if (y == null) return 1;  // not equal
+      // x is an object.
       if (IS_SPEC_OBJECT(y)) {
         return %_ObjectEquals(x, y) ? 0 : 1;
       }
-
+      if (IS_NULL_OR_UNDEFINED(y)) return 1;  // not equal
+      if (IS_BOOLEAN(y)) y = %ToNumber(y);
       x = %ToPrimitive(x, NO_HINT);
     }
   }
@@ -348,7 +355,8 @@
   if (!IS_SPEC_OBJECT(x)) {
     throw %MakeTypeError('invalid_in_operator_use', [this, x]);
   }
-  return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
+  return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
+    %HasElement(x, this) : %HasProperty(x, %ToString(this));
 }
 
 
@@ -358,7 +366,7 @@
 // an expensive ToBoolean conversion in the generated code.
 function INSTANCE_OF(F) {
   var V = this;
-  if (!IS_FUNCTION(F)) {
+  if (!IS_SPEC_FUNCTION(F)) {
     throw %MakeTypeError('instanceof_function_expected', [V]);
   }
 
@@ -400,7 +408,7 @@
   if (!IS_FUNCTION(delegate)) {
     throw %MakeTypeError('called_non_callable', [typeof this]);
   }
-  return delegate.apply(this, arguments);
+  return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
 }
 
 
@@ -409,7 +417,32 @@
   if (!IS_FUNCTION(delegate)) {
     throw %MakeTypeError('called_non_callable', [typeof this]);
   }
-  return delegate.apply(this, arguments);
+  return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
+}
+
+
+function CALL_FUNCTION_PROXY() {
+  var arity = %_ArgumentsLength() - 1;
+  var proxy = %_Arguments(arity);  // The proxy comes in as an additional arg.
+  var trap = %GetCallTrap(proxy);
+  return %Apply(trap, this, arguments, 0, arity);
+}
+
+
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) {
+  var arity = %_ArgumentsLength() - 1;
+  var trap = %GetConstructTrap(proxy);
+  var receiver = void 0;
+  if (!IS_UNDEFINED(trap)) {
+    trap = %GetCallTrap(proxy);
+    var proto = proxy.prototype;
+    if (!IS_SPEC_OBJECT(proto) && proto !== null) {
+      throw MakeTypeError("proto_object_or_null", [proto]);
+    }
+    receiver = new global.Object();
+    receiver.__proto__ = proto;
+  }
+  return %Apply(trap, this, arguments, 1, arity);
 }
 
 
@@ -420,7 +453,8 @@
   // that takes care of more eventualities.
   if (IS_ARRAY(args)) {
     length = args.length;
-    if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {
+    if (%_IsSmi(length) && length >= 0 && length < 0x800000 &&
+        IS_SPEC_FUNCTION(this)) {
       return length;
     }
   }
@@ -434,7 +468,7 @@
     throw %MakeRangeError('stack_overflow', []);
   }
 
-  if (!IS_FUNCTION(this)) {
+  if (!IS_SPEC_FUNCTION(this)) {
     throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
   }
 
@@ -602,13 +636,13 @@
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultNumber(x) {
   var valueOf = x.valueOf;
-  if (IS_FUNCTION(valueOf)) {
+  if (IS_SPEC_FUNCTION(valueOf)) {
     var v = %_CallFunction(x, valueOf);
     if (%IsPrimitive(v)) return v;
   }
 
   var toString = x.toString;
-  if (IS_FUNCTION(toString)) {
+  if (IS_SPEC_FUNCTION(toString)) {
     var s = %_CallFunction(x, toString);
     if (%IsPrimitive(s)) return s;
   }
@@ -620,13 +654,13 @@
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultString(x) {
   var toString = x.toString;
-  if (IS_FUNCTION(toString)) {
+  if (IS_SPEC_FUNCTION(toString)) {
     var s = %_CallFunction(x, toString);
     if (%IsPrimitive(s)) return s;
   }
 
   var valueOf = x.valueOf;
-  if (IS_FUNCTION(valueOf)) {
+  if (IS_SPEC_FUNCTION(valueOf)) {
     var v = %_CallFunction(x, valueOf);
     if (%IsPrimitive(v)) return v;
   }
@@ -638,6 +672,6 @@
 // NOTE: Setting the prototype for Array must take place as early as
 // possible due to code generation for array literals.  When
 // generating code for an array literal, a boilerplate array is created
-// that is cloned when running the code.  It is essiential that the
+// that is cloned when running the code.  It is essential that the
 // boilerplate gets the right prototype.
 %FunctionSetPrototype($Array, new $Array(0));
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index 28cf6e6..89ad8af 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -68,8 +68,8 @@
   entries_ = pc_and_deoptimization_indexes_ +
             (length_ * kPcAndDeoptimizationIndexSize);
   ASSERT(entry_size_ > 0);
-  ASSERT_EQ(SafepointEntry::DeoptimizationIndexField::max(),
-            Safepoint::kNoDeoptimizationIndex);
+  STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
+                Safepoint::kNoDeoptimizationIndex);
 }
 
 
@@ -122,17 +122,20 @@
 
 
 Safepoint SafepointTableBuilder::DefineSafepoint(
-    Assembler* assembler, Safepoint::Kind kind, int arguments,
-    int deoptimization_index) {
-  ASSERT(deoptimization_index != -1);
+    Assembler* assembler,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
   ASSERT(arguments >= 0);
-  DeoptimizationInfo pc_and_deoptimization_index;
-  pc_and_deoptimization_index.pc = assembler->pc_offset();
-  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
-  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
-  pc_and_deoptimization_index.arguments = arguments;
-  pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
-  deoptimization_info_.Add(pc_and_deoptimization_index);
+  DeoptimizationInfo info;
+  info.pc = assembler->pc_offset();
+  info.arguments = arguments;
+  info.has_doubles = (kind & Safepoint::kWithDoubles);
+  deoptimization_info_.Add(info);
+  deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex);
+  if (deopt_mode == Safepoint::kNoLazyDeopt) {
+    last_lazy_safepoint_ = deopt_index_list_.length();
+  }
   indexes_.Add(new ZoneList<int>(8));
   registers_.Add((kind & Safepoint::kWithRegisters)
       ? new ZoneList<int>(4)
@@ -141,6 +144,12 @@
 }
 
 
+void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
+  while (last_lazy_safepoint_ < deopt_index_list_.length()) {
+    deopt_index_list_[last_lazy_safepoint_++] = index;
+  }
+}
+
 unsigned SafepointTableBuilder::GetCodeOffset() const {
   ASSERT(emitted_);
   return offset_;
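
With the new bookkeeping above, every safepoint is initially recorded with the sentinel deoptimization index; DefineSafepoint called with kNoLazyDeopt advances last_lazy_safepoint_ past it, and RecordLazyDeoptimizationIndex later back-fills the same index into all still-outstanding entries. The sketch below models only that bookkeeping; LazyDeoptTracker is an invented stand-in for SafepointTableBuilder.

// Sketch of the lazy-deopt bookkeeping introduced above: safepoints start with a
// sentinel index, and RecordLazyDeoptimizationIndex back-fills all outstanding ones.
// LazyDeoptTracker is an invented stand-in for SafepointTableBuilder.
#include <cstddef>
#include <vector>

class LazyDeoptTracker {
 public:
  static constexpr unsigned kNoIndex = 0x0FFFFFFFu;  // sentinel, like kNoDeoptimizationIndex

  void DefineSafepoint(bool allows_lazy_deopt) {
    deopt_index_list_.push_back(kNoIndex);
    if (!allows_lazy_deopt) {
      // Nothing up to and including this safepoint may be patched later.
      last_lazy_safepoint_ = deopt_index_list_.size();
    }
  }

  void RecordLazyDeoptimizationIndex(unsigned index) {
    while (last_lazy_safepoint_ < deopt_index_list_.size()) {
      deopt_index_list_[last_lazy_safepoint_++] = index;
    }
  }

 private:
  std::vector<unsigned> deopt_index_list_;
  std::size_t last_lazy_safepoint_ = 0;
};
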
@@ -173,11 +182,11 @@
   assembler->dd(length);
   assembler->dd(bytes_per_entry);
 
-  // Emit sorted table of pc offsets together with deoptimization indexes and
-  // pc after gap information.
+  // Emit sorted table of pc offsets together with deoptimization indexes.
   for (int i = 0; i < length; i++) {
     assembler->dd(deoptimization_info_[i].pc);
-    assembler->dd(EncodeExceptPC(deoptimization_info_[i]));
+    assembler->dd(EncodeExceptPC(deoptimization_info_[i],
+                                 deopt_index_list_[i]));
   }
 
   // Emit table of bitmaps.
@@ -222,35 +231,14 @@
 }
 
 
-uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
-  unsigned index = info.deoptimization_index;
-  unsigned gap_size = info.pc_after_gap - info.pc;
+uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
+                                               unsigned index) {
   uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
-  encoding |= SafepointEntry::GapCodeSizeField::encode(gap_size);
   encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
   encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
   return encoding;
 }
 
 
-int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
-  int result = 0;
-  if (!deoptimization_info_.is_empty()) {
-    unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
-    for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
-      DeoptimizationInfo info = deoptimization_info_[i];
-      if (static_cast<int>(info.deoptimization_index) !=
-          Safepoint::kNoDeoptimizationIndex) {
-        if (previous_gap_end + limit > info.pc) {
-          result++;
-        }
-        previous_gap_end = info.pc_after_gap;
-      }
-    }
-  }
-  return result;
-}
-
-
 
 } }  // namespace v8::internal
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index 084a0b4..57fceec 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -28,6 +28,7 @@
 #ifndef V8_SAFEPOINT_TABLE_H_
 #define V8_SAFEPOINT_TABLE_H_
 
+#include "allocation.h"
 #include "heap.h"
 #include "v8memory.h"
 #include "zone.h"
@@ -61,10 +62,20 @@
     return DeoptimizationIndexField::decode(info_);
   }
 
-  int gap_code_size() const {
-    ASSERT(is_valid());
-    return GapCodeSizeField::decode(info_);
-  }
+  static const int kArgumentsFieldBits = 3;
+  static const int kSaveDoublesFieldBits = 1;
+  static const int kDeoptIndexBits =
+      32 - kArgumentsFieldBits - kSaveDoublesFieldBits;
+  class DeoptimizationIndexField:
+    public BitField<int, 0, kDeoptIndexBits> {};  // NOLINT
+  class ArgumentsField:
+    public BitField<unsigned,
+                    kDeoptIndexBits,
+                    kArgumentsFieldBits> {};  // NOLINT
+  class SaveDoublesField:
+    public BitField<bool,
+                    kDeoptIndexBits + kArgumentsFieldBits,
+                    kSaveDoublesFieldBits> { }; // NOLINT
 
   int argument_count() const {
     ASSERT(is_valid());
@@ -84,27 +95,6 @@
   bool HasRegisters() const;
   bool HasRegisterAt(int reg_index) const;
 
-  // Reserve 13 bits for the gap code size. On ARM a constant pool can be
-  // emitted when generating the gap code. The size of the const pool is less
-  // than what can be represented in 12 bits, so 13 bits gives room for having
-  // instructions before potentially emitting a constant pool.
-  static const int kGapCodeSizeBits = 13;
-  static const int kArgumentsFieldBits = 3;
-  static const int kSaveDoublesFieldBits = 1;
-  static const int kDeoptIndexBits =
-      32 - kGapCodeSizeBits - kArgumentsFieldBits - kSaveDoublesFieldBits;
-  class GapCodeSizeField: public BitField<unsigned, 0, kGapCodeSizeBits> {};
-  class DeoptimizationIndexField: public BitField<int,
-                                                  kGapCodeSizeBits,
-                                                  kDeoptIndexBits> {};  // NOLINT
-  class ArgumentsField: public BitField<unsigned,
-                                        kGapCodeSizeBits + kDeoptIndexBits,
-                                        kArgumentsFieldBits> {};  // NOLINT
-  class SaveDoublesField: public BitField<bool,
-                                          kGapCodeSizeBits + kDeoptIndexBits +
-                                          kArgumentsFieldBits,
-                                          kSaveDoublesFieldBits> { }; // NOLINT
-
  private:
   unsigned info_;
   uint8_t* bits_;
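
After dropping the gap-code-size field, a SafepointEntry word splits into a 28-bit deoptimization index, a 3-bit argument count, and a 1-bit save-doubles flag (28 = 32 - 3 - 1). The sketch below shows that packing with plain shifts rather than V8's BitField templates; the encode/decode helper names are illustrative only.

// Sketch of the 28/3/1-bit safepoint entry layout described above, using plain
// shifts instead of V8's BitField templates. Names here are illustrative only.
#include <cstdint>

constexpr int kDeoptIndexBits = 28;   // 32 - 3 - 1
constexpr int kArgumentsBits  = 3;

constexpr std::uint32_t encode_entry(std::uint32_t deopt_index,
                                     std::uint32_t arguments,
                                     bool save_doubles) {
  return (deopt_index & ((1u << kDeoptIndexBits) - 1)) |
         ((arguments & ((1u << kArgumentsBits) - 1)) << kDeoptIndexBits) |
         (static_cast<std::uint32_t>(save_doubles)
              << (kDeoptIndexBits + kArgumentsBits));
}

constexpr std::uint32_t deopt_index_of(std::uint32_t entry) {
  return entry & ((1u << kDeoptIndexBits) - 1);
}

constexpr std::uint32_t arguments_of(std::uint32_t entry) {
  return (entry >> kDeoptIndexBits) & ((1u << kArgumentsBits) - 1);
}

constexpr bool save_doubles_of(std::uint32_t entry) {
  return ((entry >> (kDeoptIndexBits + kArgumentsBits)) & 1u) != 0;
}
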
@@ -185,6 +175,11 @@
     kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
   } Kind;
 
+  enum DeoptMode {
+    kNoLazyDeopt,
+    kLazyDeopt
+  };
+
   static const int kNoDeoptimizationIndex =
       (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
 
@@ -205,9 +200,11 @@
  public:
   SafepointTableBuilder()
       : deoptimization_info_(32),
+        deopt_index_list_(32),
         indexes_(32),
         registers_(32),
-        emitted_(false) { }
+        emitted_(false),
+        last_lazy_safepoint_(0) { }
 
   // Get the offset of the emitted safepoint table in the code.
   unsigned GetCodeOffset() const;
@@ -216,50 +213,34 @@
   Safepoint DefineSafepoint(Assembler* assembler,
                             Safepoint::Kind kind,
                             int arguments,
-                            int deoptimization_index);
+                            Safepoint::DeoptMode mode);
 
-  // Update the last safepoint with the size of the code generated until the
-  // end of the gap following it.
-  void SetPcAfterGap(int pc) {
-    ASSERT(!deoptimization_info_.is_empty());
-    int index = deoptimization_info_.length() - 1;
-    deoptimization_info_[index].pc_after_gap = pc;
-  }
-
-  // Get the end pc offset of the last safepoint, including the code generated
-  // until the end of the gap following it.
-  unsigned GetPcAfterGap() {
-    int index = deoptimization_info_.length();
-    if (index == 0) return 0;
-    return deoptimization_info_[index - 1].pc_after_gap;
-  }
+  // Record deoptimization index for lazy deoptimization for the last
+  // outstanding safepoints.
+  void RecordLazyDeoptimizationIndex(int index);
 
   // Emit the safepoint table after the body. The number of bits per
   // entry must be enough to hold all the pointer indexes.
   void Emit(Assembler* assembler, int bits_per_entry);
 
-  // Count the number of deoptimization points where the next
-  // following deoptimization point comes less than limit bytes
-  // after the end of this point's gap.
-  int CountShortDeoptimizationIntervals(unsigned limit);
 
  private:
   struct DeoptimizationInfo {
     unsigned pc;
-    unsigned deoptimization_index;
-    unsigned pc_after_gap;
     unsigned arguments;
     bool has_doubles;
   };
 
-  uint32_t EncodeExceptPC(const DeoptimizationInfo& info);
+  uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
 
   ZoneList<DeoptimizationInfo> deoptimization_info_;
+  ZoneList<unsigned> deopt_index_list_;
   ZoneList<ZoneList<int>*> indexes_;
   ZoneList<ZoneList<int>*> registers_;
 
   unsigned offset_;
   bool emitted_;
+  int last_lazy_safepoint_;
 
   DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
 };
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
deleted file mode 100644
index 9715ca9..0000000
--- a/src/scanner-base.cc
+++ /dev/null
@@ -1,946 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Features shared by parsing and pre-parsing scanners.
-
-#include "../include/v8stdint.h"
-#include "scanner-base.h"
-#include "char-predicates-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Scanner
-
-Scanner::Scanner(UnicodeCache* unicode_cache)
-    : unicode_cache_(unicode_cache),
-      octal_pos_(kNoOctalLocation) { }
-
-
-uc32 Scanner::ScanHexEscape(uc32 c, int length) {
-  ASSERT(length <= 4);  // prevent overflow
-
-  uc32 digits[4];
-  uc32 x = 0;
-  for (int i = 0; i < length; i++) {
-    digits[i] = c0_;
-    int d = HexValue(c0_);
-    if (d < 0) {
-      // According to ECMA-262, 3rd, 7.8.4, page 18, these hex escapes
-      // should be illegal, but other JS VMs just return the
-      // non-escaped version of the original character.
-
-      // Push back digits read, except the last one (in c0_).
-      for (int j = i-1; j >= 0; j--) {
-        PushBack(digits[j]);
-      }
-      // Notice: No handling of error - treat it as "\u"->"u".
-      return c;
-    }
-    x = x * 16 + d;
-    Advance();
-  }
-
-  return x;
-}
-
-
-// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
-// ECMA-262. Other JS VMs support them.
-uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
-  uc32 x = c - '0';
-  int i = 0;
-  for (; i < length; i++) {
-    int d = c0_ - '0';
-    if (d < 0 || d > 7) break;
-    int nx = x * 8 + d;
-    if (nx >= 256) break;
-    x = nx;
-    Advance();
-  }
-  // Anything except '\0' is an octal escape sequence, illegal in strict mode.
-  // Remember the position of octal escape sequences so that a better error
-  // can be reported later (in strict mode).
-  if (c != '0' || i > 0) {
-    octal_pos_ = source_pos() - i - 1;     // Already advanced
-  }
-  return x;
-}
-
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner
-
-JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
-    : Scanner(scanner_contants) { }
-
-
-Token::Value JavaScriptScanner::Next() {
-  current_ = next_;
-  has_line_terminator_before_next_ = false;
-  Scan();
-  return current_.token;
-}
-
-
-static inline bool IsByteOrderMark(uc32 c) {
-  // The Unicode value U+FFFE is guaranteed never to be assigned as a
-  // Unicode character; this implies that in a Unicode context the
-  // 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
-  // character expressed in little-endian byte order (since it could
-  // not be a U+FFFE character expressed in big-endian byte
-  // order). Nevertheless, we check for it to be compatible with
-  // Spidermonkey.
-  return c == 0xFEFF || c == 0xFFFE;
-}
-
-
-bool JavaScriptScanner::SkipWhiteSpace() {
-  int start_position = source_pos();
-
-  while (true) {
-    // We treat byte-order marks (BOMs) as whitespace for better
-    // compatibility with Spidermonkey and other JavaScript engines.
-    while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
-      // IsWhiteSpace() includes line terminators!
-      if (unicode_cache_->IsLineTerminator(c0_)) {
-        // Ignore line terminators, but remember them. This is necessary
-        // for automatic semicolon insertion.
-        has_line_terminator_before_next_ = true;
-      }
-      Advance();
-    }
-
-    // If there is an HTML comment end '-->' at the beginning of a
-    // line (with only whitespace in front of it), we treat the rest
-    // of the line as a comment. This is in line with the way
-    // SpiderMonkey handles it.
-    if (c0_ == '-' && has_line_terminator_before_next_) {
-      Advance();
-      if (c0_ == '-') {
-        Advance();
-        if (c0_ == '>') {
-          // Treat the rest of the line as a comment.
-          SkipSingleLineComment();
-          // Continue skipping white space after the comment.
-          continue;
-        }
-        PushBack('-');  // undo Advance()
-      }
-      PushBack('-');  // undo Advance()
-    }
-    // Return whether or not we skipped any characters.
-    return source_pos() != start_position;
-  }
-}
-
-
-Token::Value JavaScriptScanner::SkipSingleLineComment() {
-  Advance();
-
-  // The line terminator at the end of the line is not considered
-  // to be part of the single-line comment; it is recognized
-  // separately by the lexical grammar and becomes part of the
-  // stream of input elements for the syntactic grammar (see
-  // ECMA-262, section 7.4, page 12).
-  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
-    Advance();
-  }
-
-  return Token::WHITESPACE;
-}
-
-
-Token::Value JavaScriptScanner::SkipMultiLineComment() {
-  ASSERT(c0_ == '*');
-  Advance();
-
-  while (c0_ >= 0) {
-    char ch = c0_;
-    Advance();
-    // If we have reached the end of the multi-line comment, we
-    // consume the '/' and insert a whitespace. This way all
-    // multi-line comments are treated as whitespace - even the ones
-    // containing line terminators. This contradicts ECMA-262, section
-    // 7.4, page 12, that says that multi-line comments containing
-    // line terminators should be treated as a line terminator, but it
-    // matches the behaviour of SpiderMonkey and KJS.
-    if (ch == '*' && c0_ == '/') {
-      c0_ = ' ';
-      return Token::WHITESPACE;
-    }
-  }
-
-  // Unterminated multi-line comment.
-  return Token::ILLEGAL;
-}
-
-
-Token::Value JavaScriptScanner::ScanHtmlComment() {
-  // Check for <!-- comments.
-  ASSERT(c0_ == '!');
-  Advance();
-  if (c0_ == '-') {
-    Advance();
-    if (c0_ == '-') return SkipSingleLineComment();
-    PushBack('-');  // undo Advance()
-  }
-  PushBack('!');  // undo Advance()
-  ASSERT(c0_ == '!');
-  return Token::LT;
-}
-
-
-void JavaScriptScanner::Scan() {
-  next_.literal_chars = NULL;
-  Token::Value token;
-  do {
-    // Remember the position of the next token
-    next_.location.beg_pos = source_pos();
-
-    switch (c0_) {
-      case ' ':
-      case '\t':
-        Advance();
-        token = Token::WHITESPACE;
-        break;
-
-      case '\n':
-        Advance();
-        has_line_terminator_before_next_ = true;
-        token = Token::WHITESPACE;
-        break;
-
-      case '"': case '\'':
-        token = ScanString();
-        break;
-
-      case '<':
-        // < <= << <<= <!--
-        Advance();
-        if (c0_ == '=') {
-          token = Select(Token::LTE);
-        } else if (c0_ == '<') {
-          token = Select('=', Token::ASSIGN_SHL, Token::SHL);
-        } else if (c0_ == '!') {
-          token = ScanHtmlComment();
-        } else {
-          token = Token::LT;
-        }
-        break;
-
-      case '>':
-        // > >= >> >>= >>> >>>=
-        Advance();
-        if (c0_ == '=') {
-          token = Select(Token::GTE);
-        } else if (c0_ == '>') {
-          // >> >>= >>> >>>=
-          Advance();
-          if (c0_ == '=') {
-            token = Select(Token::ASSIGN_SAR);
-          } else if (c0_ == '>') {
-            token = Select('=', Token::ASSIGN_SHR, Token::SHR);
-          } else {
-            token = Token::SAR;
-          }
-        } else {
-          token = Token::GT;
-        }
-        break;
-
-      case '=':
-        // = == ===
-        Advance();
-        if (c0_ == '=') {
-          token = Select('=', Token::EQ_STRICT, Token::EQ);
-        } else {
-          token = Token::ASSIGN;
-        }
-        break;
-
-      case '!':
-        // ! != !==
-        Advance();
-        if (c0_ == '=') {
-          token = Select('=', Token::NE_STRICT, Token::NE);
-        } else {
-          token = Token::NOT;
-        }
-        break;
-
-      case '+':
-        // + ++ +=
-        Advance();
-        if (c0_ == '+') {
-          token = Select(Token::INC);
-        } else if (c0_ == '=') {
-          token = Select(Token::ASSIGN_ADD);
-        } else {
-          token = Token::ADD;
-        }
-        break;
-
-      case '-':
-        // - -- --> -=
-        Advance();
-        if (c0_ == '-') {
-          Advance();
-          if (c0_ == '>' && has_line_terminator_before_next_) {
-            // For compatibility with SpiderMonkey, we skip lines that
-            // start with an HTML comment end '-->'.
-            token = SkipSingleLineComment();
-          } else {
-            token = Token::DEC;
-          }
-        } else if (c0_ == '=') {
-          token = Select(Token::ASSIGN_SUB);
-        } else {
-          token = Token::SUB;
-        }
-        break;
-
-      case '*':
-        // * *=
-        token = Select('=', Token::ASSIGN_MUL, Token::MUL);
-        break;
-
-      case '%':
-        // % %=
-        token = Select('=', Token::ASSIGN_MOD, Token::MOD);
-        break;
-
-      case '/':
-        // /  // /* /=
-        Advance();
-        if (c0_ == '/') {
-          token = SkipSingleLineComment();
-        } else if (c0_ == '*') {
-          token = SkipMultiLineComment();
-        } else if (c0_ == '=') {
-          token = Select(Token::ASSIGN_DIV);
-        } else {
-          token = Token::DIV;
-        }
-        break;
-
-      case '&':
-        // & && &=
-        Advance();
-        if (c0_ == '&') {
-          token = Select(Token::AND);
-        } else if (c0_ == '=') {
-          token = Select(Token::ASSIGN_BIT_AND);
-        } else {
-          token = Token::BIT_AND;
-        }
-        break;
-
-      case '|':
-        // | || |=
-        Advance();
-        if (c0_ == '|') {
-          token = Select(Token::OR);
-        } else if (c0_ == '=') {
-          token = Select(Token::ASSIGN_BIT_OR);
-        } else {
-          token = Token::BIT_OR;
-        }
-        break;
-
-      case '^':
-        // ^ ^=
-        token = Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
-        break;
-
-      case '.':
-        // . Number
-        Advance();
-        if (IsDecimalDigit(c0_)) {
-          token = ScanNumber(true);
-        } else {
-          token = Token::PERIOD;
-        }
-        break;
-
-      case ':':
-        token = Select(Token::COLON);
-        break;
-
-      case ';':
-        token = Select(Token::SEMICOLON);
-        break;
-
-      case ',':
-        token = Select(Token::COMMA);
-        break;
-
-      case '(':
-        token = Select(Token::LPAREN);
-        break;
-
-      case ')':
-        token = Select(Token::RPAREN);
-        break;
-
-      case '[':
-        token = Select(Token::LBRACK);
-        break;
-
-      case ']':
-        token = Select(Token::RBRACK);
-        break;
-
-      case '{':
-        token = Select(Token::LBRACE);
-        break;
-
-      case '}':
-        token = Select(Token::RBRACE);
-        break;
-
-      case '?':
-        token = Select(Token::CONDITIONAL);
-        break;
-
-      case '~':
-        token = Select(Token::BIT_NOT);
-        break;
-
-      default:
-        if (unicode_cache_->IsIdentifierStart(c0_)) {
-          token = ScanIdentifierOrKeyword();
-        } else if (IsDecimalDigit(c0_)) {
-          token = ScanNumber(false);
-        } else if (SkipWhiteSpace()) {
-          token = Token::WHITESPACE;
-        } else if (c0_ < 0) {
-          token = Token::EOS;
-        } else {
-          token = Select(Token::ILLEGAL);
-        }
-        break;
-    }
-
-    // Continue scanning for tokens as long as we're just skipping
-    // whitespace.
-  } while (token == Token::WHITESPACE);
-
-  next_.location.end_pos = source_pos();
-  next_.token = token;
-}
-
-
-void JavaScriptScanner::SeekForward(int pos) {
-  // After this call, we will have the token at the given position as
-  // the "next" token. The "current" token will be invalid.
-  if (pos == next_.location.beg_pos) return;
-  int current_pos = source_pos();
-  ASSERT_EQ(next_.location.end_pos, current_pos);
-  // Positions inside the lookahead token aren't supported.
-  ASSERT(pos >= current_pos);
-  if (pos != current_pos) {
-    source_->SeekForward(pos - source_->pos());
-    Advance();
-    // This function is only called to seek to the location
-    // of the end of a function (at the "}" token). It doesn't matter
-    // whether there was a line terminator in the part we skip.
-    has_line_terminator_before_next_ = false;
-  }
-  Scan();
-}
-
-
-void JavaScriptScanner::ScanEscape() {
-  uc32 c = c0_;
-  Advance();
-
-  // Skip escaped newlines.
-  if (unicode_cache_->IsLineTerminator(c)) {
-    // Allow CR+LF newlines in multiline string literals.
-    if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
-    // Allow LF+CR newlines in multiline string literals.
-    if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
-    return;
-  }
-
-  switch (c) {
-    case '\'':  // fall through
-    case '"' :  // fall through
-    case '\\': break;
-    case 'b' : c = '\b'; break;
-    case 'f' : c = '\f'; break;
-    case 'n' : c = '\n'; break;
-    case 'r' : c = '\r'; break;
-    case 't' : c = '\t'; break;
-    case 'u' : c = ScanHexEscape(c, 4); break;
-    case 'v' : c = '\v'; break;
-    case 'x' : c = ScanHexEscape(c, 2); break;
-    case '0' :  // fall through
-    case '1' :  // fall through
-    case '2' :  // fall through
-    case '3' :  // fall through
-    case '4' :  // fall through
-    case '5' :  // fall through
-    case '6' :  // fall through
-    case '7' : c = ScanOctalEscape(c, 2); break;
-  }
-
-  // According to ECMA-262, 3rd, 7.8.4 (p 18ff) these
-  // should be illegal, but they are commonly handled
-  // as non-escaped characters by JS VMs.
-  AddLiteralChar(c);
-}
-
-
-Token::Value JavaScriptScanner::ScanString() {
-  uc32 quote = c0_;
-  Advance();  // consume quote
-
-  LiteralScope literal(this);
-  while (c0_ != quote && c0_ >= 0
-         && !unicode_cache_->IsLineTerminator(c0_)) {
-    uc32 c = c0_;
-    Advance();
-    if (c == '\\') {
-      if (c0_ < 0) return Token::ILLEGAL;
-      ScanEscape();
-    } else {
-      AddLiteralChar(c);
-    }
-  }
-  if (c0_ != quote) return Token::ILLEGAL;
-  literal.Complete();
-
-  Advance();  // consume quote
-  return Token::STRING;
-}
-
-
-void JavaScriptScanner::ScanDecimalDigits() {
-  while (IsDecimalDigit(c0_))
-    AddLiteralCharAdvance();
-}
-
-
-Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
-  ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
-
-  enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
-
-  LiteralScope literal(this);
-  if (seen_period) {
-    // we have already seen a decimal point of the float
-    AddLiteralChar('.');
-    ScanDecimalDigits();  // we know we have at least one digit
-
-  } else {
-    // if the first character is '0' we must check for octals and hex
-    if (c0_ == '0') {
-      AddLiteralCharAdvance();
-
-      // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
-      if (c0_ == 'x' || c0_ == 'X') {
-        // hex number
-        kind = HEX;
-        AddLiteralCharAdvance();
-        if (!IsHexDigit(c0_)) {
-          // we must have at least one hex digit after 'x'/'X'
-          return Token::ILLEGAL;
-        }
-        while (IsHexDigit(c0_)) {
-          AddLiteralCharAdvance();
-        }
-      } else if ('0' <= c0_ && c0_ <= '7') {
-        // (possible) octal number
-        kind = OCTAL;
-        while (true) {
-          if (c0_ == '8' || c0_ == '9') {
-            kind = DECIMAL;
-            break;
-          }
-          if (c0_  < '0' || '7'  < c0_) {
-            // Octal literal finished.
-            octal_pos_ = next_.location.beg_pos;
-            break;
-          }
-          AddLiteralCharAdvance();
-        }
-      }
-    }
-
-    // Parse decimal digits and allow trailing fractional part.
-    if (kind == DECIMAL) {
-      ScanDecimalDigits();  // optional
-      if (c0_ == '.') {
-        AddLiteralCharAdvance();
-        ScanDecimalDigits();  // optional
-      }
-    }
-  }
-
-  // scan exponent, if any
-  if (c0_ == 'e' || c0_ == 'E') {
-    ASSERT(kind != HEX);  // 'e'/'E' must be scanned as part of the hex number
-    if (kind == OCTAL) return Token::ILLEGAL;  // no exponent for octals allowed
-    // scan exponent
-    AddLiteralCharAdvance();
-    if (c0_ == '+' || c0_ == '-')
-      AddLiteralCharAdvance();
-    if (!IsDecimalDigit(c0_)) {
-      // we must have at least one decimal digit after 'e'/'E'
-      return Token::ILLEGAL;
-    }
-    ScanDecimalDigits();
-  }
-
-  // The source character immediately following a numeric literal must
-  // not be an identifier start or a decimal digit; see ECMA-262
-  // section 7.8.3, page 17 (note that we read only one decimal digit
-  // if the value is 0).
-  if (IsDecimalDigit(c0_) || unicode_cache_->IsIdentifierStart(c0_))
-    return Token::ILLEGAL;
-
-  literal.Complete();
-
-  return Token::NUMBER;
-}
-
-
-uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
-  Advance();
-  if (c0_ != 'u') return unibrow::Utf8::kBadChar;
-  Advance();
-  uc32 c = ScanHexEscape('u', 4);
-  // We do not allow a unicode escape sequence to start another
-  // unicode escape sequence.
-  if (c == '\\') return unibrow::Utf8::kBadChar;
-  return c;
-}
-
-
-Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
-  ASSERT(unicode_cache_->IsIdentifierStart(c0_));
-  LiteralScope literal(this);
-  KeywordMatcher keyword_match;
-  // Scan identifier start character.
-  if (c0_ == '\\') {
-    uc32 c = ScanIdentifierUnicodeEscape();
-    // Only allow legal identifier start characters.
-    if (!unicode_cache_->IsIdentifierStart(c)) return Token::ILLEGAL;
-    AddLiteralChar(c);
-    return ScanIdentifierSuffix(&literal);
-  }
-
-  uc32 first_char = c0_;
-  Advance();
-  AddLiteralChar(first_char);
-  if (!keyword_match.AddChar(first_char)) {
-    return ScanIdentifierSuffix(&literal);
-  }
-
-  // Scan the rest of the identifier characters.
-  while (unicode_cache_->IsIdentifierPart(c0_)) {
-    if (c0_ != '\\') {
-      uc32 next_char = c0_;
-      Advance();
-      AddLiteralChar(next_char);
-      if (keyword_match.AddChar(next_char)) continue;
-    }
-    // Fallthrough if no longer able to complete keyword.
-    return ScanIdentifierSuffix(&literal);
-  }
-  literal.Complete();
-
-  return keyword_match.token();
-}
-
-
-Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
-  // Scan the rest of the identifier characters.
-  while (unicode_cache_->IsIdentifierPart(c0_)) {
-    if (c0_ == '\\') {
-      uc32 c = ScanIdentifierUnicodeEscape();
-      // Only allow legal identifier part characters.
-      if (!unicode_cache_->IsIdentifierPart(c)) return Token::ILLEGAL;
-      AddLiteralChar(c);
-    } else {
-      AddLiteralChar(c0_);
-      Advance();
-    }
-  }
-  literal->Complete();
-
-  return Token::IDENTIFIER;
-}
-
-
-bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
-  // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
-  bool in_character_class = false;
-
-  // Previous token is either '/' or '/='; in the second case, the
-  // pattern starts at '='.
-  next_.location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
-  next_.location.end_pos = source_pos() - (seen_equal ? 1 : 0);
-
-  // Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
-  // the scanner should pass uninterpreted bodies to the RegExp
-  // constructor.
-  LiteralScope literal(this);
-  if (seen_equal)
-    AddLiteralChar('=');
-
-  while (c0_ != '/' || in_character_class) {
-    if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
-    if (c0_ == '\\') {  // Escape sequence.
-      AddLiteralCharAdvance();
-      if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
-      AddLiteralCharAdvance();
-      // If the escape allows more characters, i.e., \x??, \u????, or \c?,
-      // only "safe" characters are allowed (letters, digits, underscore),
-      // otherwise the escape isn't valid and the invalid character has
-      // its normal meaning. I.e., we can just continue scanning without
-      // worrying whether the following characters are part of the escape
-      // or not, since any '/', '\\' or '[' is guaranteed to not be part
-      // of the escape sequence.
-    } else {  // Unescaped character.
-      if (c0_ == '[') in_character_class = true;
-      if (c0_ == ']') in_character_class = false;
-      AddLiteralCharAdvance();
-    }
-  }
-  Advance();  // consume '/'
-
-  literal.Complete();
-
-  return true;
-}
-
-
-bool JavaScriptScanner::ScanRegExpFlags() {
-  // Scan regular expression flags.
-  LiteralScope literal(this);
-  while (unicode_cache_->IsIdentifierPart(c0_)) {
-    if (c0_ == '\\') {
-      uc32 c = ScanIdentifierUnicodeEscape();
-      if (c != static_cast<uc32>(unibrow::Utf8::kBadChar)) {
-        // We allow any escaped character, unlike the restriction on
-        // IdentifierPart when it is used to build an IdentifierName.
-        AddLiteralChar(c);
-        continue;
-      }
-    }
-    AddLiteralCharAdvance();
-  }
-  literal.Complete();
-
-  next_.location.end_pos = source_pos() - 1;
-  return true;
-}
-
-// ----------------------------------------------------------------------------
-// Keyword Matcher
-
-KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
-  { "break",  KEYWORD_PREFIX, Token::BREAK },
-  { NULL,     C,              Token::ILLEGAL },
-  { NULL,     D,              Token::ILLEGAL },
-  { NULL,     E,              Token::ILLEGAL },
-  { NULL,     F,              Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     I,              Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { "let",    KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     N,              Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     P,              Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { "return", KEYWORD_PREFIX, Token::RETURN },
-  { NULL,     S,              Token::ILLEGAL },
-  { NULL,     T,              Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     V,              Token::ILLEGAL },
-  { NULL,     W,              Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { "yield",  KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
-};
-
-
-void KeywordMatcher::Step(unibrow::uchar input) {
-  switch (state_) {
-    case INITIAL: {
-      // Matching the first character is the only state with significant fanout.
-      // Match only lower-case letters in range 'b'..'y'.
-      unsigned int offset = input - kFirstCharRangeMin;
-      if (offset < kFirstCharRangeLength) {
-        state_ = first_states_[offset].state;
-        if (state_ == KEYWORD_PREFIX) {
-          keyword_ = first_states_[offset].keyword;
-          counter_ = 1;
-          keyword_token_ = first_states_[offset].token;
-        }
-        return;
-      }
-      break;
-    }
-    case KEYWORD_PREFIX:
-      if (static_cast<unibrow::uchar>(keyword_[counter_]) == input) {
-        counter_++;
-        if (keyword_[counter_] == '\0') {
-          state_ = KEYWORD_MATCHED;
-          token_ = keyword_token_;
-        }
-        return;
-      }
-      break;
-    case KEYWORD_MATCHED:
-      token_ = Token::IDENTIFIER;
-      break;
-    case C:
-      if (MatchState(input, 'a', CA)) return;
-      if (MatchKeywordStart(input, "class", 1,
-          Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchState(input, 'o', CO)) return;
-      break;
-    case CA:
-      if (MatchKeywordStart(input, "case", 2, Token::CASE)) return;
-      if (MatchKeywordStart(input, "catch", 2, Token::CATCH)) return;
-      break;
-    case CO:
-      if (MatchState(input, 'n', CON)) return;
-      break;
-    case CON:
-      if (MatchKeywordStart(input, "const", 3, Token::CONST)) return;
-      if (MatchKeywordStart(input, "continue", 3, Token::CONTINUE)) return;
-      break;
-    case D:
-      if (MatchState(input, 'e', DE)) return;
-      if (MatchKeyword(input, 'o', KEYWORD_MATCHED, Token::DO)) return;
-      break;
-    case DE:
-      if (MatchKeywordStart(input, "debugger", 2, Token::DEBUGGER)) return;
-      if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
-      if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
-      break;
-    case E:
-      if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
-      if (MatchKeywordStart(input, "enum", 1,
-          Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchState(input, 'x', EX)) return;
-      break;
-    case EX:
-      if (MatchKeywordStart(input, "export", 2,
-          Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchKeywordStart(input, "extends", 2,
-          Token::FUTURE_RESERVED_WORD)) return;
-      break;
-    case F:
-      if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
-      if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
-      if (MatchKeywordStart(input, "for", 1, Token::FOR)) return;
-      if (MatchKeywordStart(input, "function", 1, Token::FUNCTION)) return;
-      break;
-    case I:
-      if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
-      if (MatchState(input, 'm', IM)) return;
-      if (MatchKeyword(input, 'n', IN, Token::IN)) return;
-      break;
-    case IM:
-      if (MatchState(input, 'p', IMP)) return;
-      break;
-    case IMP:
-      if (MatchKeywordStart(input, "implements", 3,
-         Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchKeywordStart(input, "import", 3,
-         Token::FUTURE_RESERVED_WORD)) return;
-      break;
-    case IN:
-      token_ = Token::IDENTIFIER;
-      if (MatchKeywordStart(input, "interface", 2,
-         Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
-      break;
-    case N:
-      if (MatchKeywordStart(input, "native", 1, Token::NATIVE)) return;
-      if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
-      if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
-      break;
-    case P:
-      if (MatchKeywordStart(input, "package", 1,
-          Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchState(input, 'r', PR)) return;
-      if (MatchKeywordStart(input, "public", 1,
-          Token::FUTURE_RESERVED_WORD)) return;
-      break;
-    case PR:
-      if (MatchKeywordStart(input, "private", 2,
-          Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchKeywordStart(input, "protected", 2,
-          Token::FUTURE_RESERVED_WORD)) return;
-      break;
-    case S:
-      if (MatchKeywordStart(input, "static", 1,
-          Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchKeywordStart(input, "super", 1,
-          Token::FUTURE_RESERVED_WORD)) return;
-      if (MatchKeywordStart(input, "switch", 1,
-          Token::SWITCH)) return;
-      break;
-    case T:
-      if (MatchState(input, 'h', TH)) return;
-      if (MatchState(input, 'r', TR)) return;
-      if (MatchKeywordStart(input, "typeof", 1, Token::TYPEOF)) return;
-      break;
-    case TH:
-      if (MatchKeywordStart(input, "this", 2, Token::THIS)) return;
-      if (MatchKeywordStart(input, "throw", 2, Token::THROW)) return;
-      break;
-    case TR:
-      if (MatchKeywordStart(input, "true", 2, Token::TRUE_LITERAL)) return;
-      if (MatchKeyword(input, 'y', KEYWORD_MATCHED, Token::TRY)) return;
-      break;
-    case V:
-      if (MatchKeywordStart(input, "var", 1, Token::VAR)) return;
-      if (MatchKeywordStart(input, "void", 1, Token::VOID)) return;
-      break;
-    case W:
-      if (MatchKeywordStart(input, "while", 1, Token::WHILE)) return;
-      if (MatchKeywordStart(input, "with", 1, Token::WITH)) return;
-      break;
-    case UNMATCHABLE:
-      break;
-  }
-  // On fallthrough, it's a failure.
-  state_ = UNMATCHABLE;
-}
-
-} }  // namespace v8::internal
diff --git a/src/scanner-base.h b/src/scanner-base.h
deleted file mode 100644
index 60b97d2..0000000
--- a/src/scanner-base.h
+++ /dev/null
@@ -1,663 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Features shared by parsing and pre-parsing scanners.
-
-#ifndef V8_SCANNER_BASE_H_
-#define V8_SCANNER_BASE_H_
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "token.h"
-#include "unicode-inl.h"
-#include "char-predicates.h"
-#include "utils.h"
-#include "list-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Returns the value (0 .. 15) of a hexadecimal character c.
-// If c is not a legal hexadecimal character, returns a value < 0.
-inline int HexValue(uc32 c) {
-  c -= '0';
-  if (static_cast<unsigned>(c) <= 9) return c;
-  c = (c | 0x20) - ('a' - '0');  // detect 0x11..0x16 and 0x31..0x36.
-  if (static_cast<unsigned>(c) <= 5) return c + 10;
-  return -1;
-}
-
-
-// ---------------------------------------------------------------------
-// Buffered stream of characters, using an internal UC16 buffer.
-
-class UC16CharacterStream {
- public:
-  UC16CharacterStream() : pos_(0) { }
-  virtual ~UC16CharacterStream() { }
-
-  // Returns and advances past the next UC16 character in the input
-  // stream. If there are no more characters, it returns a negative
-  // value.
-  inline uc32 Advance() {
-    if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
-      pos_++;
-      return static_cast<uc32>(*(buffer_cursor_++));
-    }
-    // Note: currently the following increment is necessary to avoid a
-    // parser problem! The scanner treats the final kEndOfInput as
-    // a character with a position, and does math relative to that
-    // position.
-    pos_++;
-
-    return kEndOfInput;
-  }
-
-  // Return the current position in the character stream.
-  // Starts at zero.
-  inline unsigned pos() const { return pos_; }
-
-  // Skips forward past the next character_count UC16 characters
-  // in the input, or until the end of input if that comes sooner.
-  // Returns the number of characters actually skipped. If less
-  // than character_count, the end of the input was reached.
-  inline unsigned SeekForward(unsigned character_count) {
-    unsigned buffered_chars =
-        static_cast<unsigned>(buffer_end_ - buffer_cursor_);
-    if (character_count <= buffered_chars) {
-      buffer_cursor_ += character_count;
-      pos_ += character_count;
-      return character_count;
-    }
-    return SlowSeekForward(character_count);
-  }
-
-  // Pushes back the most recently read UC16 character (or negative
-  // value if at end of input), i.e., the value returned by the most recent
-  // call to Advance.
-  // Must not be used right after calling SeekForward.
-  virtual void PushBack(int32_t character) = 0;
-
- protected:
-  static const uc32 kEndOfInput = -1;
-
-  // Ensures that the buffer_cursor_ points to the character at
-  // position pos_ of the input, if possible. If the position
-  // is at or after the end of the input, return false. If there
-  // are more characters available, return true.
-  virtual bool ReadBlock() = 0;
-  virtual unsigned SlowSeekForward(unsigned character_count) = 0;
-
-  const uc16* buffer_cursor_;
-  const uc16* buffer_end_;
-  unsigned pos_;
-};
-
-
-// ---------------------------------------------------------------------
-// Caching predicates used by scanners.
-class UnicodeCache {
- public:
-  UnicodeCache() {}
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
-  StaticResource<Utf8Decoder>* utf8_decoder() {
-    return &utf8_decoder_;
-  }
-
-  bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
-  bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
-  bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
-  bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
-
- private:
-
-  unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
-  unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
-  unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
-  unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
-  StaticResource<Utf8Decoder> utf8_decoder_;
-
-  DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
-};
-
-
-// ----------------------------------------------------------------------------
-// LiteralBuffer -  Collector of chars of literals.
-
-class LiteralBuffer {
- public:
-  LiteralBuffer() : is_ascii_(true), position_(0), backing_store_() { }
-
-  ~LiteralBuffer() {
-    if (backing_store_.length() > 0) {
-      backing_store_.Dispose();
-    }
-  }
-
-  inline void AddChar(uc16 character) {
-    if (position_ >= backing_store_.length()) ExpandBuffer();
-    if (is_ascii_) {
-      if (character < kMaxAsciiCharCodeU) {
-        backing_store_[position_] = static_cast<byte>(character);
-        position_ += kASCIISize;
-        return;
-      }
-      ConvertToUC16();
-    }
-    *reinterpret_cast<uc16*>(&backing_store_[position_]) = character;
-    position_ += kUC16Size;
-  }
-
-  bool is_ascii() { return is_ascii_; }
-
-  Vector<const uc16> uc16_literal() {
-    ASSERT(!is_ascii_);
-    ASSERT((position_ & 0x1) == 0);
-    return Vector<const uc16>(
-        reinterpret_cast<const uc16*>(backing_store_.start()),
-        position_ >> 1);
-  }
-
-  Vector<const char> ascii_literal() {
-    ASSERT(is_ascii_);
-    return Vector<const char>(
-        reinterpret_cast<const char*>(backing_store_.start()),
-        position_);
-  }
-
-  int length() {
-    return is_ascii_ ? position_ : (position_ >> 1);
-  }
-
-  void Reset() {
-    position_ = 0;
-    is_ascii_ = true;
-  }
- private:
-  static const int kInitialCapacity = 16;
-  static const int kGrowthFactor = 4;
-  static const int kMinConversionSlack = 256;
-  static const int kMaxGrowth = 1 * MB;
-  inline int NewCapacity(int min_capacity) {
-    int capacity = Max(min_capacity, backing_store_.length());
-    int new_capacity = Min(capacity * kGrowthFactor, capacity + kMaxGrowth);
-    return new_capacity;
-  }
-
-  void ExpandBuffer() {
-    Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
-    memcpy(new_store.start(), backing_store_.start(), position_);
-    backing_store_.Dispose();
-    backing_store_ = new_store;
-  }
-
-  void ConvertToUC16() {
-    ASSERT(is_ascii_);
-    Vector<byte> new_store;
-    int new_content_size = position_ * kUC16Size;
-    if (new_content_size >= backing_store_.length()) {
-      // Ensure room for all currently read characters as UC16 as well
-      // as the character about to be stored.
-      new_store = Vector<byte>::New(NewCapacity(new_content_size));
-    } else {
-      new_store = backing_store_;
-    }
-    char* src = reinterpret_cast<char*>(backing_store_.start());
-    uc16* dst = reinterpret_cast<uc16*>(new_store.start());
-    for (int i = position_ - 1; i >= 0; i--) {
-      dst[i] = src[i];
-    }
-    if (new_store.start() != backing_store_.start()) {
-      backing_store_.Dispose();
-      backing_store_ = new_store;
-    }
-    position_ = new_content_size;
-    is_ascii_ = false;
-  }
-
-  bool is_ascii_;
-  int position_;
-  Vector<byte> backing_store_;
-
-  DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
-};
-
-
-// ----------------------------------------------------------------------------
-// Scanner base-class.
-
-// Generic functionality used by both JSON and JavaScript scanners.
-class Scanner {
- public:
-  // -1 is outside of the range of any real source code.
-  static const int kNoOctalLocation = -1;
-
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
-  class LiteralScope {
-   public:
-    explicit LiteralScope(Scanner* self);
-    ~LiteralScope();
-    void Complete();
-
-   private:
-    Scanner* scanner_;
-    bool complete_;
-  };
-
-  explicit Scanner(UnicodeCache* scanner_constants);
-
-  // Returns the current token again.
-  Token::Value current_token() { return current_.token; }
-
-  // One token look-ahead (past the token returned by Next()).
-  Token::Value peek() const { return next_.token; }
-
-  struct Location {
-    Location(int b, int e) : beg_pos(b), end_pos(e) { }
-    Location() : beg_pos(0), end_pos(0) { }
-
-    bool IsValid() const {
-      return beg_pos >= 0 && end_pos >= beg_pos;
-    }
-
-    int beg_pos;
-    int end_pos;
-  };
-
-  static Location NoLocation() {
-    return Location(-1, -1);
-  }
-
-  // Returns the location information for the current token
-  // (the token returned by Next()).
-  Location location() const { return current_.location; }
-  Location peek_location() const { return next_.location; }
-
-  // Returns the location of the last seen octal literal
-  int octal_position() const { return octal_pos_; }
-  void clear_octal_position() { octal_pos_ = -1; }
-
-  // Returns the literal string, if any, for the current token (the
-  // token returned by Next()). The string is 0-terminated and in
-  // UTF-8 format; it may contain 0-characters. Literal strings are
-  // collected for identifiers, strings, and numbers.
-  // These functions only give the correct result if the literal
-  // was scanned between calls to StartLiteral() and TerminateLiteral().
-  bool is_literal_ascii() {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->is_ascii();
-  }
-  Vector<const char> literal_ascii_string() {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->ascii_literal();
-  }
-  Vector<const uc16> literal_uc16_string() {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->uc16_literal();
-  }
-  int literal_length() const {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->length();
-  }
-
-  // Returns the literal string for the next token (the token that
-  // would be returned if Next() were called).
-  bool is_next_literal_ascii() {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->is_ascii();
-  }
-  Vector<const char> next_literal_ascii_string() {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->ascii_literal();
-  }
-  Vector<const uc16> next_literal_uc16_string() {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->uc16_literal();
-  }
-  int next_literal_length() const {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->length();
-  }
-
-  static const int kCharacterLookaheadBufferSize = 1;
-
- protected:
-  // The current and look-ahead token.
-  struct TokenDesc {
-    Token::Value token;
-    Location location;
-    LiteralBuffer* literal_chars;
-  };
-
-  // Call this after setting source_ to the input.
-  void Init() {
-    // Set c0_ (one character ahead)
-    ASSERT(kCharacterLookaheadBufferSize == 1);
-    Advance();
-    // Initialize current_ to not refer to a literal.
-    current_.literal_chars = NULL;
-  }
-
-  // Literal buffer support
-  inline void StartLiteral() {
-    LiteralBuffer* free_buffer = (current_.literal_chars == &literal_buffer1_) ?
-            &literal_buffer2_ : &literal_buffer1_;
-    free_buffer->Reset();
-    next_.literal_chars = free_buffer;
-  }
-
-  inline void AddLiteralChar(uc32 c) {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    next_.literal_chars->AddChar(c);
-  }
-
-  // Complete scanning of a literal.
-  inline void TerminateLiteral() {
-    // Does nothing in the current implementation.
-  }
-
-  // Stops scanning of a literal and drops the collected characters,
-  // e.g., due to an encountered error.
-  inline void DropLiteral() {
-    next_.literal_chars = NULL;
-  }
-
-  inline void AddLiteralCharAdvance() {
-    AddLiteralChar(c0_);
-    Advance();
-  }
-
-  // Low-level scanning support.
-  void Advance() { c0_ = source_->Advance(); }
-  void PushBack(uc32 ch) {
-    source_->PushBack(c0_);
-    c0_ = ch;
-  }
-
-  inline Token::Value Select(Token::Value tok) {
-    Advance();
-    return tok;
-  }
-
-  inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_) {
-    Advance();
-    if (c0_ == next) {
-      Advance();
-      return then;
-    } else {
-      return else_;
-    }
-  }
-
-  uc32 ScanHexEscape(uc32 c, int length);
-
-  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
-  uc32 ScanOctalEscape(uc32 c, int length);
-
-  // Return the current source position.
-  int source_pos() {
-    return source_->pos() - kCharacterLookaheadBufferSize;
-  }
-
-  UnicodeCache* unicode_cache_;
-
-  // Buffers collecting literal strings, numbers, etc.
-  LiteralBuffer literal_buffer1_;
-  LiteralBuffer literal_buffer2_;
-
-  TokenDesc current_;  // desc for current token (as returned by Next())
-  TokenDesc next_;     // desc for next token (one token look-ahead)
-
-  // Input stream. Must be initialized to an UC16CharacterStream.
-  UC16CharacterStream* source_;
-
-  // Start position of the octal literal last scanned.
-  int octal_pos_;
-
-  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
-  uc32 c0_;
-};
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner - base logic for JavaScript scanning.
-
-class JavaScriptScanner : public Scanner {
- public:
-  // A LiteralScope that disables recording of some types of JavaScript
-  // literals. If the scanner is configured to not record the specific
-  // type of literal, the scope will not call StartLiteral.
-  class LiteralScope {
-   public:
-    explicit LiteralScope(JavaScriptScanner* self)
-        : scanner_(self), complete_(false) {
-      scanner_->StartLiteral();
-    }
-    ~LiteralScope() {
-      if (!complete_) scanner_->DropLiteral();
-    }
-    void Complete() {
-      scanner_->TerminateLiteral();
-      complete_ = true;
-    }
-
-   private:
-    JavaScriptScanner* scanner_;
-    bool complete_;
-  };
-
-  explicit JavaScriptScanner(UnicodeCache* scanner_constants);
-
-  // Returns the next token.
-  Token::Value Next();
-
-  // Returns true if there was a line terminator before the peek'ed token.
-  bool has_line_terminator_before_next() const {
-    return has_line_terminator_before_next_;
-  }
-
-  // Scans the input as a regular expression pattern; the previous
-  // character(s) must be /(=). Returns true if a pattern is scanned.
-  bool ScanRegExpPattern(bool seen_equal);
-  // Returns true if regexp flags are scanned (always since flags can
-  // be empty).
-  bool ScanRegExpFlags();
-
-  // Tells whether the buffer contains an identifier (no escapes).
-  // Used for checking if a property name is an identifier.
-  static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
-  // Seek forward to the given position.  This operation does not
-  // work in general, for instance when there are pushed back
-  // characters, but works for seeking forward until simple delimiter
-  // tokens, which is what it is used for.
-  void SeekForward(int pos);
-
- protected:
-  bool SkipWhiteSpace();
-  Token::Value SkipSingleLineComment();
-  Token::Value SkipMultiLineComment();
-
-  // Scans a single JavaScript token.
-  void Scan();
-
-  void ScanDecimalDigits();
-  Token::Value ScanNumber(bool seen_period);
-  Token::Value ScanIdentifierOrKeyword();
-  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
-
-  void ScanEscape();
-  Token::Value ScanString();
-
-  // Scans a possible HTML comment -- begins with '<!'.
-  Token::Value ScanHtmlComment();
-
-  // Decodes a unicode escape-sequence which is part of an identifier.
-  // If the escape sequence cannot be decoded the result is kBadChar.
-  uc32 ScanIdentifierUnicodeEscape();
-
-  bool has_line_terminator_before_next_;
-};
-
-
-// ----------------------------------------------------------------------------
-// Keyword matching state machine.
-
-//  Incrementally recognize keywords.
-//
-//  Recognized keywords:
-//      break case catch const* continue debugger* default delete do else
-//      finally false for function if in instanceof native* new null
-//      return switch this throw true try typeof var void while with
-//
-//  *: Actually "future reserved keywords". These are the only ones we
-//     recognize, the remaining are allowed as identifiers.
-//     In ES5 strict mode, we should disallow all reserved keywords.
-class KeywordMatcher {
- public:
-  KeywordMatcher()
-      : state_(INITIAL),
-        token_(Token::IDENTIFIER),
-        keyword_(NULL),
-        counter_(0),
-        keyword_token_(Token::ILLEGAL) {}
-
-  Token::Value token() { return token_; }
-
-  inline bool AddChar(unibrow::uchar input) {
-    if (state_ != UNMATCHABLE) {
-      Step(input);
-    }
-    return state_ != UNMATCHABLE;
-  }
-
-  void Fail() {
-    token_ = Token::IDENTIFIER;
-    state_ = UNMATCHABLE;
-  }
-
- private:
-  enum State {
-    UNMATCHABLE,
-    INITIAL,
-    KEYWORD_PREFIX,
-    KEYWORD_MATCHED,
-    C,
-    CA,
-    CO,
-    CON,
-    D,
-    DE,
-    E,
-    EX,
-    F,
-    I,
-    IM,
-    IMP,
-    IN,
-    N,
-    P,
-    PR,
-    S,
-    T,
-    TH,
-    TR,
-    V,
-    W
-  };
-
-  struct FirstState {
-    const char* keyword;
-    State state;
-    Token::Value token;
-  };
-
-  // Range of possible first characters of a keyword.
-  static const unsigned int kFirstCharRangeMin = 'b';
-  static const unsigned int kFirstCharRangeMax = 'y';
-  static const unsigned int kFirstCharRangeLength =
-      kFirstCharRangeMax - kFirstCharRangeMin + 1;
-  // State map for first keyword character range.
-  static FirstState first_states_[kFirstCharRangeLength];
-
-  // If input equals keyword's character at position, continue matching keyword
-  // from that position.
-  inline bool MatchKeywordStart(unibrow::uchar input,
-                                const char* keyword,
-                                int position,
-                                Token::Value token_if_match) {
-    if (input != static_cast<unibrow::uchar>(keyword[position])) {
-      return false;
-    }
-    state_ = KEYWORD_PREFIX;
-    this->keyword_ = keyword;
-    this->counter_ = position + 1;
-    this->keyword_token_ = token_if_match;
-    return true;
-  }
-
-  // If input equals match character, transition to new state and return true.
-  inline bool MatchState(unibrow::uchar input, char match, State new_state) {
-    if (input != static_cast<unibrow::uchar>(match)) {
-      return false;
-    }
-    state_ = new_state;
-    return true;
-  }
-
-  inline bool MatchKeyword(unibrow::uchar input,
-                           char match,
-                           State new_state,
-                           Token::Value keyword_token) {
-    if (input != static_cast<unibrow::uchar>(match)) {
-      return false;
-    }
-    state_ = new_state;
-    token_ = keyword_token;
-    return true;
-  }
-
-  void Step(unibrow::uchar input);
-
-  // Current state.
-  State state_;
-  // Token for currently added characters.
-  Token::Value token_;
-
-  // Matching a specific keyword string (there is only one possible valid
-  // keyword with the current prefix).
-  const char* keyword_;
-  int counter_;
-  Token::Value keyword_token_;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_SCANNER_BASE_H_
diff --git a/src/scanner-character-streams.cc b/src/scanner-character-streams.cc
new file mode 100644
index 0000000..ee10703
--- /dev/null
+++ b/src/scanner-character-streams.cc
@@ -0,0 +1,307 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "scanner-character-streams.h"
+
+#include "handles.h"
+#include "unicode-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// BufferedUC16CharacterStreams
+
+BufferedUC16CharacterStream::BufferedUC16CharacterStream()
+    : UC16CharacterStream(),
+      pushback_limit_(NULL) {
+  // Initialize buffer as being empty. First read will fill the buffer.
+  buffer_cursor_ = buffer_;
+  buffer_end_ = buffer_;
+}
+
+BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
+
+void BufferedUC16CharacterStream::PushBack(uc32 character) {
+  if (character == kEndOfInput) {
+    pos_--;
+    return;
+  }
+  if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
+    // buffer_ is writable, buffer_cursor_ is const pointer.
+    buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
+    pos_--;
+    return;
+  }
+  SlowPushBack(static_cast<uc16>(character));
+}
+
+
+void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
+  // In pushback mode, the end of the buffer contains pushback,
+  // and the start of the buffer (from buffer start to pushback_limit_)
+  // contains valid data that comes just after the pushback.
+  // We NULL the pushback_limit_ if pushing all the way back to the
+  // start of the buffer.
+
+  if (pushback_limit_ == NULL) {
+    // Enter pushback mode.
+    pushback_limit_ = buffer_end_;
+    buffer_end_ = buffer_ + kBufferSize;
+    buffer_cursor_ = buffer_end_;
+  }
+  // Ensure that there is room for at least one pushback.
+  ASSERT(buffer_cursor_ > buffer_);
+  ASSERT(pos_ > 0);
+  buffer_[--buffer_cursor_ - buffer_] = character;
+  if (buffer_cursor_ == buffer_) {
+    pushback_limit_ = NULL;
+  } else if (buffer_cursor_ < pushback_limit_) {
+    pushback_limit_ = buffer_cursor_;
+  }
+  pos_--;
+}
+
+
+bool BufferedUC16CharacterStream::ReadBlock() {
+  buffer_cursor_ = buffer_;
+  if (pushback_limit_ != NULL) {
+    // Leave pushback mode.
+    buffer_end_ = pushback_limit_;
+    pushback_limit_ = NULL;
+    // If there were any valid characters left at the
+    // start of the buffer, use those.
+    if (buffer_cursor_ < buffer_end_) return true;
+    // Otherwise read a new block.
+  }
+  unsigned length = FillBuffer(pos_, kBufferSize);
+  buffer_end_ = buffer_ + length;
+  return length > 0;
+}
+
+
+unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
+  // Leave pushback mode (i.e., ignore that there might be valid data
+  // in the buffer before the pushback_limit_ point).
+  pushback_limit_ = NULL;
+  return BufferSeekForward(delta);
+}
+
+// ----------------------------------------------------------------------------
+// GenericStringUC16CharacterStream
+
+
+GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
+    Handle<String> data,
+    unsigned start_position,
+    unsigned end_position)
+    : string_(data),
+      length_(end_position) {
+  ASSERT(end_position >= start_position);
+  buffer_cursor_ = buffer_;
+  buffer_end_ = buffer_;
+  pos_ = start_position;
+}
+
+
+GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
+
+
+unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
+  unsigned old_pos = pos_;
+  pos_ = Min(pos_ + delta, length_);
+  ReadBlock();
+  return pos_ - old_pos;
+}
+
+
+unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
+                                                      unsigned length) {
+  if (from_pos >= length_) return 0;
+  if (from_pos + length > length_) {
+    length = length_ - from_pos;
+  }
+  String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
+  return length;
+}
+
+
+// ----------------------------------------------------------------------------
+// Utf8ToUC16CharacterStream
+Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
+                                                     unsigned length)
+    : BufferedUC16CharacterStream(),
+      raw_data_(data),
+      raw_data_length_(length),
+      raw_data_pos_(0),
+      raw_character_position_(0) {
+  ReadBlock();
+}
+
+
+Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
+
+
+unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
+  unsigned old_pos = pos_;
+  unsigned target_pos = pos_ + delta;
+  SetRawPosition(target_pos);
+  pos_ = raw_character_position_;
+  ReadBlock();
+  return pos_ - old_pos;
+}
+
+
+unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
+                                               unsigned length) {
+  static const unibrow::uchar kMaxUC16Character = 0xffff;
+  SetRawPosition(char_position);
+  if (raw_character_position_ != char_position) {
+    // char_position was not a valid position in the stream (hit the end
+    // while spooling to it).
+    return 0u;
+  }
+  unsigned i = 0;
+  while (i < length) {
+    if (raw_data_pos_ == raw_data_length_) break;
+    unibrow::uchar c = raw_data_[raw_data_pos_];
+    if (c <= unibrow::Utf8::kMaxOneByteChar) {
+      raw_data_pos_++;
+    } else {
+      c =  unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
+                                         raw_data_length_ - raw_data_pos_,
+                                         &raw_data_pos_);
+      // Don't allow characters outside of the BMP.
+      if (c > kMaxUC16Character) {
+        c = unibrow::Utf8::kBadChar;
+      }
+    }
+    buffer_[i++] = static_cast<uc16>(c);
+  }
+  raw_character_position_ = char_position + i;
+  return i;
+}
+
+
+static const byte kUtf8MultiByteMask = 0xC0;
+static const byte kUtf8MultiByteCharStart = 0xC0;
+static const byte kUtf8MultiByteCharFollower = 0x80;
+
+
+#ifdef DEBUG
+static bool IsUtf8MultiCharacterStart(byte first_byte) {
+  return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
+}
+#endif
+
+
+static bool IsUtf8MultiCharacterFollower(byte later_byte) {
+  return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
+}
+
+
+// Move the cursor back to point at the preceding UTF-8 character start
+// in the buffer.
+static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
+  byte character = buffer[--*cursor];
+  if (character > unibrow::Utf8::kMaxOneByteChar) {
+    ASSERT(IsUtf8MultiCharacterFollower(character));
+    // Last byte of a multi-byte character encoding. Step backwards until
+    // pointing to the first byte of the encoding, recognized by having the
+    // top two bits set.
+    while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
+    ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
+  }
+}
+
+
+// Move the cursor forward to point at the next following UTF-8 character start
+// in the buffer.
+static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
+  byte character = buffer[(*cursor)++];
+  if (character > unibrow::Utf8::kMaxOneByteChar) {
+    // First character of a multi-byte character encoding.
+    // The number of most-significant one-bits determines the length of the
+    // encoding:
+    //  110..... - (0xCx, 0xDx) one additional byte (minimum).
+    //  1110.... - (0xEx) two additional bytes.
+    //  11110... - (0xFx) three additional bytes (maximum).
+    ASSERT(IsUtf8MultiCharacterStart(character));
+    // Additional bytes is:
+    // 1 if value in range 0xC0 .. 0xDF.
+    // 2 if value in range 0xE0 .. 0xEF.
+    // 3 if value in range 0xF0 .. 0xF7.
+    // Encode that in a single value.
+    unsigned additional_bytes =
+        ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
+    *cursor += additional_bytes;
+    ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
+  }
+}
+
+
+void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
+  if (raw_character_position_ > target_position) {
+    // Spool backwards in utf8 buffer.
+    do {
+      Utf8CharacterBack(raw_data_, &raw_data_pos_);
+      raw_character_position_--;
+    } while (raw_character_position_ > target_position);
+    return;
+  }
+  // Spool forwards in the utf8 buffer.
+  while (raw_character_position_ < target_position) {
+    if (raw_data_pos_ == raw_data_length_) return;
+    Utf8CharacterForward(raw_data_, &raw_data_pos_);
+    raw_character_position_++;
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+// ExternalTwoByteStringUC16CharacterStream
+
+ExternalTwoByteStringUC16CharacterStream::
+    ~ExternalTwoByteStringUC16CharacterStream() { }
+
+
+ExternalTwoByteStringUC16CharacterStream
+    ::ExternalTwoByteStringUC16CharacterStream(
+        Handle<ExternalTwoByteString> data,
+        int start_position,
+        int end_position)
+    : UC16CharacterStream(),
+      source_(data),
+      raw_data_(data->GetTwoByteData(start_position)) {
+  buffer_cursor_ = raw_data_;
+  buffer_end_ = raw_data_ + (end_position - start_position);
+  pos_ = start_position;
+}
+
+} }  // namespace v8::internal
diff --git a/src/scanner-character-streams.h b/src/scanner-character-streams.h
new file mode 100644
index 0000000..5c4ea2c
--- /dev/null
+++ b/src/scanner-character-streams.h
@@ -0,0 +1,129 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SCANNER_CHARACTER_STREAMS_H_
+#define V8_SCANNER_CHARACTER_STREAMS_H_
+
+#include "scanner.h"
+
+namespace v8 {
+namespace internal {
+
+// A buffered character stream based on a random access character
+// source (ReadBlock can be called with pos_ pointing to any position,
+// even positions before the current).
+class BufferedUC16CharacterStream: public UC16CharacterStream {
+ public:
+  BufferedUC16CharacterStream();
+  virtual ~BufferedUC16CharacterStream();
+
+  virtual void PushBack(uc32 character);
+
+ protected:
+  static const unsigned kBufferSize = 512;
+  static const unsigned kPushBackStepSize = 16;
+
+  virtual unsigned SlowSeekForward(unsigned delta);
+  virtual bool ReadBlock();
+  virtual void SlowPushBack(uc16 character);
+
+  virtual unsigned BufferSeekForward(unsigned delta) = 0;
+  virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
+
+  const uc16* pushback_limit_;
+  uc16 buffer_[kBufferSize];
+};
+
+
+// Generic string stream.
+class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
+ public:
+  GenericStringUC16CharacterStream(Handle<String> data,
+                                   unsigned start_position,
+                                   unsigned end_position);
+  virtual ~GenericStringUC16CharacterStream();
+
+ protected:
+  virtual unsigned BufferSeekForward(unsigned delta);
+  virtual unsigned FillBuffer(unsigned position, unsigned length);
+
+  Handle<String> string_;
+  unsigned start_position_;
+  unsigned length_;
+};
+
+
+// UC16 stream based on a literal UTF-8 string.
+class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
+ public:
+  Utf8ToUC16CharacterStream(const byte* data, unsigned length);
+  virtual ~Utf8ToUC16CharacterStream();
+
+ protected:
+  virtual unsigned BufferSeekForward(unsigned delta);
+  virtual unsigned FillBuffer(unsigned char_position, unsigned length);
+  void SetRawPosition(unsigned char_position);
+
+  const byte* raw_data_;
+  unsigned raw_data_length_;  // Measured in bytes, not characters.
+  unsigned raw_data_pos_;
+  // The character position of the character at raw_data[raw_data_pos_].
+  // Not necessarily the same as pos_.
+  unsigned raw_character_position_;
+};
+
+
+// UTF16 buffer to read characters from an external string.
+class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
+ public:
+  ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
+                                           int start_position,
+                                           int end_position);
+  virtual ~ExternalTwoByteStringUC16CharacterStream();
+
+  virtual void PushBack(uc32 character) {
+    ASSERT(buffer_cursor_ > raw_data_);
+    buffer_cursor_--;
+    pos_--;
+  }
+
+ protected:
+  virtual unsigned SlowSeekForward(unsigned delta) {
+    // Fast case always handles seeking.
+    return 0;
+  }
+  virtual bool ReadBlock() {
+    // Entire string is read at start.
+    return false;
+  }
+  Handle<ExternalTwoByteString> source_;
+  const uc16* raw_data_;  // Pointer to the actual array of characters.
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SCANNER_CHARACTER_STREAMS_H_
diff --git a/src/scanner.cc b/src/scanner.cc
old mode 100755
new mode 100644
index 666818e..69ea8ae
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -25,287 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+// Features shared by parsing and pre-parsing scanners.
 
-#include "ast.h"
-#include "handles.h"
 #include "scanner.h"
-#include "unicode-inl.h"
+
+#include "../include/v8stdint.h"
+#include "char-predicates-inl.h"
 
 namespace v8 {
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// BufferedUC16CharacterStreams
-
-BufferedUC16CharacterStream::BufferedUC16CharacterStream()
-    : UC16CharacterStream(),
-      pushback_limit_(NULL) {
-  // Initialize buffer as being empty. First read will fill the buffer.
-  buffer_cursor_ = buffer_;
-  buffer_end_ = buffer_;
-}
-
-BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
-
-void BufferedUC16CharacterStream::PushBack(uc32 character) {
-  if (character == kEndOfInput) {
-    pos_--;
-    return;
-  }
-  if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
-    // buffer_ is writable, buffer_cursor_ is const pointer.
-    buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
-    pos_--;
-    return;
-  }
-  SlowPushBack(static_cast<uc16>(character));
-}
-
-
-void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
-  // In pushback mode, the end of the buffer contains pushback,
-  // and the start of the buffer (from buffer start to pushback_limit_)
-  // contains valid data that comes just after the pushback.
-  // We NULL the pushback_limit_ if pushing all the way back to the
-  // start of the buffer.
-
-  if (pushback_limit_ == NULL) {
-    // Enter pushback mode.
-    pushback_limit_ = buffer_end_;
-    buffer_end_ = buffer_ + kBufferSize;
-    buffer_cursor_ = buffer_end_;
-  }
-  // Ensure that there is room for at least one pushback.
-  ASSERT(buffer_cursor_ > buffer_);
-  ASSERT(pos_ > 0);
-  buffer_[--buffer_cursor_ - buffer_] = character;
-  if (buffer_cursor_ == buffer_) {
-    pushback_limit_ = NULL;
-  } else if (buffer_cursor_ < pushback_limit_) {
-    pushback_limit_ = buffer_cursor_;
-  }
-  pos_--;
-}
-
-
-bool BufferedUC16CharacterStream::ReadBlock() {
-  buffer_cursor_ = buffer_;
-  if (pushback_limit_ != NULL) {
-    // Leave pushback mode.
-    buffer_end_ = pushback_limit_;
-    pushback_limit_ = NULL;
-    // If there were any valid characters left at the
-    // start of the buffer, use those.
-    if (buffer_cursor_ < buffer_end_) return true;
-    // Otherwise read a new block.
-  }
-  unsigned length = FillBuffer(pos_, kBufferSize);
-  buffer_end_ = buffer_ + length;
-  return length > 0;
-}
-
-
-unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
-  // Leave pushback mode (i.e., ignore that there might be valid data
-  // in the buffer before the pushback_limit_ point).
-  pushback_limit_ = NULL;
-  return BufferSeekForward(delta);
-}
-
-// ----------------------------------------------------------------------------
-// GenericStringUC16CharacterStream
-
-
-GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
-    Handle<String> data,
-    unsigned start_position,
-    unsigned end_position)
-    : string_(data),
-      length_(end_position) {
-  ASSERT(end_position >= start_position);
-  buffer_cursor_ = buffer_;
-  buffer_end_ = buffer_;
-  pos_ = start_position;
-}
-
-
-GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
-
-
-unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
-  unsigned old_pos = pos_;
-  pos_ = Min(pos_ + delta, length_);
-  ReadBlock();
-  return pos_ - old_pos;
-}
-
-
-unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
-                                                      unsigned length) {
-  if (from_pos >= length_) return 0;
-  if (from_pos + length > length_) {
-    length = length_ - from_pos;
-  }
-  String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
-  return length;
-}
-
-
-// ----------------------------------------------------------------------------
-// Utf8ToUC16CharacterStream
-Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
-                                                     unsigned length)
-    : BufferedUC16CharacterStream(),
-      raw_data_(data),
-      raw_data_length_(length),
-      raw_data_pos_(0),
-      raw_character_position_(0) {
-  ReadBlock();
-}
-
-
-Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
-
-
-unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
-  unsigned old_pos = pos_;
-  unsigned target_pos = pos_ + delta;
-  SetRawPosition(target_pos);
-  pos_ = raw_character_position_;
-  ReadBlock();
-  return pos_ - old_pos;
-}
-
-
-unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
-                                               unsigned length) {
-  static const unibrow::uchar kMaxUC16Character = 0xffff;
-  SetRawPosition(char_position);
-  if (raw_character_position_ != char_position) {
-    // char_position was not a valid position in the stream (hit the end
-    // while spooling to it).
-    return 0u;
-  }
-  unsigned i = 0;
-  while (i < length) {
-    if (raw_data_pos_ == raw_data_length_) break;
-    unibrow::uchar c = raw_data_[raw_data_pos_];
-    if (c <= unibrow::Utf8::kMaxOneByteChar) {
-      raw_data_pos_++;
-    } else {
-      c =  unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
-                                         raw_data_length_ - raw_data_pos_,
-                                         &raw_data_pos_);
-      // Don't allow characters outside of the BMP.
-      if (c > kMaxUC16Character) {
-        c = unibrow::Utf8::kBadChar;
-      }
-    }
-    buffer_[i++] = static_cast<uc16>(c);
-  }
-  raw_character_position_ = char_position + i;
-  return i;
-}
-
-
-static const byte kUtf8MultiByteMask = 0xC0;
-static const byte kUtf8MultiByteCharStart = 0xC0;
-static const byte kUtf8MultiByteCharFollower = 0x80;
-
-
-#ifdef DEBUG
-static bool IsUtf8MultiCharacterStart(byte first_byte) {
-  return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
-}
-#endif
-
-
-static bool IsUtf8MultiCharacterFollower(byte later_byte) {
-  return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
-}
-
-
-// Move the cursor back to point at the preceding UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
-  byte character = buffer[--*cursor];
-  if (character > unibrow::Utf8::kMaxOneByteChar) {
-    ASSERT(IsUtf8MultiCharacterFollower(character));
-    // Last byte of a multi-byte character encoding. Step backwards until
-    // pointing to the first byte of the encoding, recognized by having the
-    // top two bits set.
-    while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
-    ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
-  }
-}
-
-
-// Move the cursor forward to point at the next following UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
-  byte character = buffer[(*cursor)++];
-  if (character > unibrow::Utf8::kMaxOneByteChar) {
-    // First character of a multi-byte character encoding.
-    // The number of most-significant one-bits determines the length of the
-    // encoding:
-    //  110..... - (0xCx, 0xDx) one additional byte (minimum).
-    //  1110.... - (0xEx) two additional bytes.
-    //  11110... - (0xFx) three additional bytes (maximum).
-    ASSERT(IsUtf8MultiCharacterStart(character));
-    // Additional bytes is:
-    // 1 if value in range 0xC0 .. 0xDF.
-    // 2 if value in range 0xE0 .. 0xEF.
-    // 3 if value in range 0xF0 .. 0xF7.
-    // Encode that in a single value.
-    unsigned additional_bytes =
-        ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
-    *cursor += additional_bytes;
-    ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
-  }
-}
-
-
-void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
-  if (raw_character_position_ > target_position) {
-    // Spool backwards in utf8 buffer.
-    do {
-      Utf8CharacterBack(raw_data_, &raw_data_pos_);
-      raw_character_position_--;
-    } while (raw_character_position_ > target_position);
-    return;
-  }
-  // Spool forwards in the utf8 buffer.
-  while (raw_character_position_ < target_position) {
-    if (raw_data_pos_ == raw_data_length_) return;
-    Utf8CharacterForward(raw_data_, &raw_data_pos_);
-    raw_character_position_++;
-  }
-}
-
-
-// ----------------------------------------------------------------------------
-// ExternalTwoByteStringUC16CharacterStream
-
-ExternalTwoByteStringUC16CharacterStream::
-    ~ExternalTwoByteStringUC16CharacterStream() { }
-
-
-ExternalTwoByteStringUC16CharacterStream
-    ::ExternalTwoByteStringUC16CharacterStream(
-        Handle<ExternalTwoByteString> data,
-        int start_position,
-        int end_position)
-    : UC16CharacterStream(),
-      source_(data),
-      raw_data_(data->GetTwoByteData(start_position)) {
-  buffer_cursor_ = raw_data_,
-  buffer_end_ = raw_data_ + (end_position - start_position);
-  pos_ = start_position;
-}
-
-
-// ----------------------------------------------------------------------------
 // Scanner::LiteralScope
 
 Scanner::LiteralScope::LiteralScope(Scanner* self)
@@ -324,12 +54,51 @@
   complete_ = true;
 }
 
+// ----------------------------------------------------------------------------
+// Scanner
+
+Scanner::Scanner(UnicodeCache* unicode_cache)
+    : unicode_cache_(unicode_cache) { }
+
+
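+// Scans up to |expected_length| hex digits; e.g. scanning "Bad1" with an
+// expected_length of 4 yields 0xBAD1. On the first non-hex character the
+// digits consumed so far are pushed back and -1 is returned.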
+uc32 Scanner::ScanHexNumber(int expected_length) {
+  ASSERT(expected_length <= 4);  // prevent overflow
+
+  uc32 digits[4] = { 0, 0, 0, 0 };
+  uc32 x = 0;
+  for (int i = 0; i < expected_length; i++) {
+    digits[i] = c0_;
+    int d = HexValue(c0_);
+    if (d < 0) {
+      // According to ECMA-262, 3rd, 7.8.4, page 18, these hex escapes
+      // should be illegal, but other JS VMs just return the
+      // non-escaped version of the original character.
+
+      // Push back digits that we have advanced past.
+      for (int j = i-1; j >= 0; j--) {
+        PushBack(digits[j]);
+      }
+      return -1;
+    }
+    x = x * 16 + d;
+    Advance();
+  }
+
+  return x;
+}
+
+
 
 // ----------------------------------------------------------------------------
-// V8JavaScriptScanner
+// JavaScriptScanner
+
+JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_constants)
+    : Scanner(scanner_constants),
+      octal_pos_(Location::invalid()),
+      harmony_block_scoping_(false) { }
 
 
-void V8JavaScriptScanner::Initialize(UC16CharacterStream* source) {
+void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
   source_ = source;
   // Need to capture identifiers in order to recognize "get" and "set"
   // in object literals.
@@ -342,116 +111,504 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// JsonScanner
+// Ensure that tokens can be stored in a byte.
+STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
 
-JsonScanner::JsonScanner(UnicodeCache* unicode_cache)
-    : Scanner(unicode_cache) { }
+// Table of one-character tokens, by character (0x00..0x7f only).
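+// Only characters that always form a complete token on their own (e.g. '(',
+// ';' or '}') map to a real token here; all other entries are Token::ILLEGAL,
+// which makes Next() fall back to the full Scan() path.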
+static const byte one_char_tokens[] = {
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::LPAREN,       // 0x28
+  Token::RPAREN,       // 0x29
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::COMMA,        // 0x2c
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::COLON,        // 0x3a
+  Token::SEMICOLON,    // 0x3b
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::CONDITIONAL,  // 0x3f
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::LBRACK,     // 0x5b
+  Token::ILLEGAL,
+  Token::RBRACK,     // 0x5d
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::ILLEGAL,
+  Token::LBRACE,       // 0x7b
+  Token::ILLEGAL,
+  Token::RBRACE,       // 0x7d
+  Token::BIT_NOT,      // 0x7e
+  Token::ILLEGAL
+};
 
 
-void JsonScanner::Initialize(UC16CharacterStream* source) {
-  source_ = source;
-  Init();
-  // Skip initial whitespace.
-  SkipJsonWhiteSpace();
-  // Preload first token as look-ahead.
-  ScanJson();
-}
-
-
-Token::Value JsonScanner::Next() {
-  // BUG 1215673: Find a thread safe way to set a stack limit in
-  // pre-parse mode. Otherwise, we cannot safely pre-parse from other
-  // threads.
+Token::Value JavaScriptScanner::Next() {
   current_ = next_;
-  // Check for stack-overflow before returning any tokens.
-  ScanJson();
+  has_line_terminator_before_next_ = false;
+  has_multiline_comment_before_next_ = false;
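+  // Fast path: ASCII characters that can only form a one-character token are
+  // looked up in one_char_tokens directly, bypassing Scan().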
+  if (static_cast<unsigned>(c0_) <= 0x7f) {
+    Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
+    if (token != Token::ILLEGAL) {
+      int pos = source_pos();
+      next_.token = token;
+      next_.location.beg_pos = pos;
+      next_.location.end_pos = pos + 1;
+      Advance();
+      return current_.token;
+    }
+  }
+  Scan();
   return current_.token;
 }
 
 
-bool JsonScanner::SkipJsonWhiteSpace() {
-  int start_position = source_pos();
-  // JSON WhiteSpace is tab, carrige-return, newline and space.
-  while (c0_ == ' ' || c0_ == '\n' || c0_ == '\r' || c0_ == '\t') {
-    Advance();
-  }
-  return source_pos() != start_position;
+static inline bool IsByteOrderMark(uc32 c) {
+  // The Unicode value U+FFFE is guaranteed never to be assigned as a
+  // Unicode character; this implies that in a Unicode context the
+  // 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
+  // character expressed in little-endian byte order (since it could
+  // not be a U+FFFE character expressed in big-endian byte
+  // order). Nevertheless, we check for it to be compatible with
+  // Spidermonkey.
+  return c == 0xFEFF || c == 0xFFFE;
 }
 
 
-void JsonScanner::ScanJson() {
+bool JavaScriptScanner::SkipWhiteSpace() {
+  int start_position = source_pos();
+
+  while (true) {
+    // We treat byte-order marks (BOMs) as whitespace for better
+    // compatibility with Spidermonkey and other JavaScript engines.
+    while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
+      // IsWhiteSpace() includes line terminators!
+      if (unicode_cache_->IsLineTerminator(c0_)) {
+        // Ignore line terminators, but remember them. This is necessary
+        // for automatic semicolon insertion.
+        has_line_terminator_before_next_ = true;
+      }
+      Advance();
+    }
+
+    // If there is an HTML comment end '-->' at the beginning of a
+    // line (with only whitespace in front of it), we treat the rest
+    // of the line as a comment. This is in line with the way
+    // SpiderMonkey handles it.
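+    // For example, a line containing only "  --> trailing text" is consumed
+    // in its entirety, just like a // comment.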
+    if (c0_ == '-' && has_line_terminator_before_next_) {
+      Advance();
+      if (c0_ == '-') {
+        Advance();
+        if (c0_ == '>') {
+          // Treat the rest of the line as a comment.
+          SkipSingleLineComment();
+          // Continue skipping white space after the comment.
+          continue;
+        }
+        PushBack('-');  // undo Advance()
+      }
+      PushBack('-');  // undo Advance()
+    }
+    // Return whether or not we skipped any characters.
+    return source_pos() != start_position;
+  }
+}
+
+
+Token::Value JavaScriptScanner::SkipSingleLineComment() {
+  Advance();
+
+  // The line terminator at the end of the line is not considered
+  // to be part of the single-line comment; it is recognized
+  // separately by the lexical grammar and becomes part of the
+  // stream of input elements for the syntactic grammar (see
+  // ECMA-262, section 7.4).
+  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+    Advance();
+  }
+
+  return Token::WHITESPACE;
+}
+
+
+Token::Value JavaScriptScanner::SkipMultiLineComment() {
+  ASSERT(c0_ == '*');
+  Advance();
+
+  while (c0_ >= 0) {
+    uc32 ch = c0_;
+    Advance();
+    if (unicode_cache_->IsLineTerminator(ch)) {
+      // Following ECMA-262, section 7.4, a comment containing
+      // a newline will make the comment count as a line-terminator.
+      has_multiline_comment_before_next_ = true;
+    }
+    // If we have reached the end of the multi-line comment, we
+    // consume the '/' and insert a whitespace. This way all
+    // multi-line comments are treated as whitespace.
+    if (ch == '*' && c0_ == '/') {
+      c0_ = ' ';
+      return Token::WHITESPACE;
+    }
+  }
+
+  // Unterminated multi-line comment.
+  return Token::ILLEGAL;
+}
+
+
+Token::Value JavaScriptScanner::ScanHtmlComment() {
+  // Check for <!-- comments.
+  ASSERT(c0_ == '!');
+  Advance();
+  if (c0_ == '-') {
+    Advance();
+    if (c0_ == '-') return SkipSingleLineComment();
+    PushBack('-');  // undo Advance()
+  }
+  PushBack('!');  // undo Advance()
+  ASSERT(c0_ == '!');
+  return Token::LT;
+}
+
+
+void JavaScriptScanner::Scan() {
   next_.literal_chars = NULL;
   Token::Value token;
   do {
     // Remember the position of the next token
     next_.location.beg_pos = source_pos();
+
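+    // String quotes and ASCII punctuation are dispatched on directly below;
+    // identifiers, numbers, remaining whitespace and non-ASCII input are
+    // handled in the default case.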
     switch (c0_) {
-      case '\t':
-      case '\r':
-      case '\n':
       case ' ':
+      case '\t':
         Advance();
         token = Token::WHITESPACE;
         break;
-      case '{':
+
+      case '\n':
         Advance();
-        token = Token::LBRACE;
+        has_line_terminator_before_next_ = true;
+        token = Token::WHITESPACE;
         break;
-      case '}':
+
+      case '"': case '\'':
+        token = ScanString();
+        break;
+
+      case '<':
+        // < <= << <<= <!--
         Advance();
-        token = Token::RBRACE;
+        if (c0_ == '=') {
+          token = Select(Token::LTE);
+        } else if (c0_ == '<') {
+          token = Select('=', Token::ASSIGN_SHL, Token::SHL);
+        } else if (c0_ == '!') {
+          token = ScanHtmlComment();
+        } else {
+          token = Token::LT;
+        }
         break;
-      case '[':
+
+      case '>':
+        // > >= >> >>= >>> >>>=
         Advance();
-        token = Token::LBRACK;
-        break;
-      case ']':
-        Advance();
-        token = Token::RBRACK;
-        break;
-      case ':':
-        Advance();
-        token = Token::COLON;
-        break;
-      case ',':
-        Advance();
-        token = Token::COMMA;
-        break;
-      case '"':
-        token = ScanJsonString();
-        break;
-      case '-':
-      case '0':
-      case '1':
-      case '2':
-      case '3':
-      case '4':
-      case '5':
-      case '6':
-      case '7':
-      case '8':
-      case '9':
-        token = ScanJsonNumber();
-        break;
-      case 't':
-        token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
-        break;
-      case 'f':
-        token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
-        break;
-      case 'n':
-        token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
-        break;
-      default:
-        if (c0_ < 0) {
+        if (c0_ == '=') {
+          token = Select(Token::GTE);
+        } else if (c0_ == '>') {
+          // >> >>= >>> >>>=
           Advance();
+          if (c0_ == '=') {
+            token = Select(Token::ASSIGN_SAR);
+          } else if (c0_ == '>') {
+            token = Select('=', Token::ASSIGN_SHR, Token::SHR);
+          } else {
+            token = Token::SAR;
+          }
+        } else {
+          token = Token::GT;
+        }
+        break;
+
+      case '=':
+        // = == ===
+        Advance();
+        if (c0_ == '=') {
+          token = Select('=', Token::EQ_STRICT, Token::EQ);
+        } else {
+          token = Token::ASSIGN;
+        }
+        break;
+
+      case '!':
+        // ! != !==
+        Advance();
+        if (c0_ == '=') {
+          token = Select('=', Token::NE_STRICT, Token::NE);
+        } else {
+          token = Token::NOT;
+        }
+        break;
+
+      case '+':
+        // + ++ +=
+        Advance();
+        if (c0_ == '+') {
+          token = Select(Token::INC);
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_ADD);
+        } else {
+          token = Token::ADD;
+        }
+        break;
+
+      case '-':
+        // - -- --> -=
+        Advance();
+        if (c0_ == '-') {
+          Advance();
+          if (c0_ == '>' && has_line_terminator_before_next_) {
+            // For compatibility with SpiderMonkey, we skip lines that
+            // start with an HTML comment end '-->'.
+            token = SkipSingleLineComment();
+          } else {
+            token = Token::DEC;
+          }
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_SUB);
+        } else {
+          token = Token::SUB;
+        }
+        break;
+
+      case '*':
+        // * *=
+        token = Select('=', Token::ASSIGN_MUL, Token::MUL);
+        break;
+
+      case '%':
+        // % %=
+        token = Select('=', Token::ASSIGN_MOD, Token::MOD);
+        break;
+
+      case '/':
+        // /  // /* /=
+        Advance();
+        if (c0_ == '/') {
+          token = SkipSingleLineComment();
+        } else if (c0_ == '*') {
+          token = SkipMultiLineComment();
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_DIV);
+        } else {
+          token = Token::DIV;
+        }
+        break;
+
+      case '&':
+        // & && &=
+        Advance();
+        if (c0_ == '&') {
+          token = Select(Token::AND);
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_BIT_AND);
+        } else {
+          token = Token::BIT_AND;
+        }
+        break;
+
+      case '|':
+        // | || |=
+        Advance();
+        if (c0_ == '|') {
+          token = Select(Token::OR);
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_BIT_OR);
+        } else {
+          token = Token::BIT_OR;
+        }
+        break;
+
+      case '^':
+        // ^ ^=
+        token = Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
+        break;
+
+      case '.':
+        // . Number
+        Advance();
+        if (IsDecimalDigit(c0_)) {
+          token = ScanNumber(true);
+        } else {
+          token = Token::PERIOD;
+        }
+        break;
+
+      case ':':
+        token = Select(Token::COLON);
+        break;
+
+      case ';':
+        token = Select(Token::SEMICOLON);
+        break;
+
+      case ',':
+        token = Select(Token::COMMA);
+        break;
+
+      case '(':
+        token = Select(Token::LPAREN);
+        break;
+
+      case ')':
+        token = Select(Token::RPAREN);
+        break;
+
+      case '[':
+        token = Select(Token::LBRACK);
+        break;
+
+      case ']':
+        token = Select(Token::RBRACK);
+        break;
+
+      case '{':
+        token = Select(Token::LBRACE);
+        break;
+
+      case '}':
+        token = Select(Token::RBRACE);
+        break;
+
+      case '?':
+        token = Select(Token::CONDITIONAL);
+        break;
+
+      case '~':
+        token = Select(Token::BIT_NOT);
+        break;
+
+      default:
+        if (unicode_cache_->IsIdentifierStart(c0_)) {
+          token = ScanIdentifierOrKeyword();
+        } else if (IsDecimalDigit(c0_)) {
+          token = ScanNumber(false);
+        } else if (SkipWhiteSpace()) {
+          token = Token::WHITESPACE;
+        } else if (c0_ < 0) {
           token = Token::EOS;
         } else {
-          Advance();
           token = Select(Token::ILLEGAL);
         }
+        break;
     }
+
+    // Continue scanning for tokens as long as we're just skipping
+    // whitespace.
   } while (token == Token::WHITESPACE);
 
   next_.location.end_pos = source_pos();
@@ -459,127 +616,495 @@
 }
 
 
-Token::Value JsonScanner::ScanJsonString() {
-  ASSERT_EQ('"', c0_);
+void JavaScriptScanner::SeekForward(int pos) {
+  // After this call, we will have the token at the given position as
+  // the "next" token. The "current" token will be invalid.
+  if (pos == next_.location.beg_pos) return;
+  int current_pos = source_pos();
+  ASSERT_EQ(next_.location.end_pos, current_pos);
+  // Positions inside the lookahead token aren't supported.
+  ASSERT(pos >= current_pos);
+  if (pos != current_pos) {
+    source_->SeekForward(pos - source_->pos());
+    Advance();
+    // This function is only called to seek to the location
+    // of the end of a function (at the "}" token). It doesn't matter
+    // whether there was a line terminator in the part we skip.
+    has_line_terminator_before_next_ = false;
+    has_multiline_comment_before_next_ = false;
+  }
+  Scan();
+}
+
+
+void JavaScriptScanner::ScanEscape() {
+  uc32 c = c0_;
   Advance();
+
+  // Skip escaped newlines.
+  if (unicode_cache_->IsLineTerminator(c)) {
+    // Allow CR+LF newlines in multiline string literals.
+    if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
+    // Allow LF+CR newlines in multiline string literals.
+    if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
+    return;
+  }
+
+  switch (c) {
+    case '\'':  // fall through
+    case '"' :  // fall through
+    case '\\': break;
+    case 'b' : c = '\b'; break;
+    case 'f' : c = '\f'; break;
+    case 'n' : c = '\n'; break;
+    case 'r' : c = '\r'; break;
+    case 't' : c = '\t'; break;
+    case 'u' : {
+      c = ScanHexNumber(4);
+      if (c < 0) c = 'u';
+      break;
+    }
+    case 'v' : c = '\v'; break;
+    case 'x' : {
+      c = ScanHexNumber(2);
+      if (c < 0) c = 'x';
+      break;
+    }
+    case '0' :  // fall through
+    case '1' :  // fall through
+    case '2' :  // fall through
+    case '3' :  // fall through
+    case '4' :  // fall through
+    case '5' :  // fall through
+    case '6' :  // fall through
+    case '7' : c = ScanOctalEscape(c, 2); break;
+  }
+
+  // According to ECMA-262, 3rd, 7.8.4 (p 18ff) these
+  // should be illegal, but they are commonly handled
+  // as non-escaped characters by JS VMs.
+  AddLiteralChar(c);
+}
+
+
+// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
+// ECMA-262. Other JS VMs support them.
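+// For example, "\101" scans to 65 ('A'), while "\400" stops after "\40" (32)
+// because adding the final '0' would push the value past 255.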
+uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+  uc32 x = c - '0';
+  int i = 0;
+  for (; i < length; i++) {
+    int d = c0_ - '0';
+    if (d < 0 || d > 7) break;
+    int nx = x * 8 + d;
+    if (nx >= 256) break;
+    x = nx;
+    Advance();
+  }
+  // Anything except '\0' is an octal escape sequence, illegal in strict mode.
+  // Remember the position of octal escape sequences so that an error
+  // can be reported later (in strict mode).
+  // We don't report the error immediately, because the octal escape can
+  // occur before the "use strict" directive.
+  if (c != '0' || i > 0) {
+    octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
+  }
+  return x;
+}
+
+
+Token::Value JavaScriptScanner::ScanString() {
+  uc32 quote = c0_;
+  Advance();  // consume quote
+
   LiteralScope literal(this);
-  while (c0_ != '"') {
-    // Check for control character (0x00-0x1f) or unterminated string (<0).
-    if (c0_ < 0x20) return Token::ILLEGAL;
-    if (c0_ != '\\') {
-      AddLiteralCharAdvance();
+  while (c0_ != quote && c0_ >= 0
+         && !unicode_cache_->IsLineTerminator(c0_)) {
+    uc32 c = c0_;
+    Advance();
+    if (c == '\\') {
+      if (c0_ < 0) return Token::ILLEGAL;
+      ScanEscape();
     } else {
-      Advance();
-      switch (c0_) {
-        case '"':
-        case '\\':
-        case '/':
-          AddLiteralChar(c0_);
-          break;
-        case 'b':
-          AddLiteralChar('\x08');
-          break;
-        case 'f':
-          AddLiteralChar('\x0c');
-          break;
-        case 'n':
-          AddLiteralChar('\x0a');
-          break;
-        case 'r':
-          AddLiteralChar('\x0d');
-          break;
-        case 't':
-          AddLiteralChar('\x09');
-          break;
-        case 'u': {
-          uc32 value = 0;
-          for (int i = 0; i < 4; i++) {
-            Advance();
-            int digit = HexValue(c0_);
-            if (digit < 0) {
-              return Token::ILLEGAL;
-            }
-            value = value * 16 + digit;
-          }
-          AddLiteralChar(value);
-          break;
-        }
-        default:
-          return Token::ILLEGAL;
-      }
-      Advance();
+      AddLiteralChar(c);
     }
   }
+  if (c0_ != quote) return Token::ILLEGAL;
   literal.Complete();
-  Advance();
+
+  Advance();  // consume quote
   return Token::STRING;
 }
 
 
-Token::Value JsonScanner::ScanJsonNumber() {
-  LiteralScope literal(this);
-  bool negative = false;
+void JavaScriptScanner::ScanDecimalDigits() {
+  while (IsDecimalDigit(c0_))
+    AddLiteralCharAdvance();
+}
 
-  if (c0_ == '-') {
-    AddLiteralCharAdvance();
-    negative = true;
-  }
-  if (c0_ == '0') {
-    AddLiteralCharAdvance();
-    // Prefix zero is only allowed if it's the only digit before
-    // a decimal point or exponent.
-    if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
+
+Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
+  ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
+
+  enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
+
+  LiteralScope literal(this);
+  if (seen_period) {
+    // we have already seen a decimal point of the float
+    AddLiteralChar('.');
+    ScanDecimalDigits();  // we know we have at least one digit
+
   } else {
-    int i = 0;
-    int digits = 0;
-    if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
-    do {
-      i = i * 10 + c0_ - '0';
-      digits++;
+    // if the first character is '0' we must check for octals and hex
+    if (c0_ == '0') {
+      int start_pos = source_pos();  // For reporting octal positions.
       AddLiteralCharAdvance();
-    } while (c0_ >= '0' && c0_ <= '9');
-    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
-      number_ = (negative ? -i : i);
-      return Token::NUMBER;
+
+      // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
+      if (c0_ == 'x' || c0_ == 'X') {
+        // hex number
+        kind = HEX;
+        AddLiteralCharAdvance();
+        if (!IsHexDigit(c0_)) {
+          // we must have at least one hex digit after 'x'/'X'
+          return Token::ILLEGAL;
+        }
+        while (IsHexDigit(c0_)) {
+          AddLiteralCharAdvance();
+        }
+      } else if ('0' <= c0_ && c0_ <= '7') {
+        // (possible) octal number
+        kind = OCTAL;
+        while (true) {
+          if (c0_ == '8' || c0_ == '9') {
+            kind = DECIMAL;
+            break;
+          }
+          if (c0_ < '0' || '7' < c0_) {
+            // Octal literal finished.
+            octal_pos_ = Location(start_pos, source_pos());
+            break;
+          }
+          AddLiteralCharAdvance();
+        }
+      }
+    }
+
+    // Parse decimal digits and allow trailing fractional part.
+    if (kind == DECIMAL) {
+      ScanDecimalDigits();  // optional
+      if (c0_ == '.') {
+        AddLiteralCharAdvance();
+        ScanDecimalDigits();  // optional
+      }
     }
   }
-  if (c0_ == '.') {
+
+  // scan exponent, if any
+  if (c0_ == 'e' || c0_ == 'E') {
+    ASSERT(kind != HEX);  // 'e'/'E' must be scanned as part of the hex number
+    if (kind == OCTAL) return Token::ILLEGAL;  // no exponent for octals allowed
+    // scan exponent
     AddLiteralCharAdvance();
-    if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
-    do {
+    if (c0_ == '+' || c0_ == '-')
       AddLiteralCharAdvance();
-    } while (c0_ >= '0' && c0_ <= '9');
+    if (!IsDecimalDigit(c0_)) {
+      // we must have at least one decimal digit after 'e'/'E'
+      return Token::ILLEGAL;
+    }
+    ScanDecimalDigits();
   }
-  if (AsciiAlphaToLower(c0_) == 'e') {
-    AddLiteralCharAdvance();
-    if (c0_ == '-' || c0_ == '+') AddLiteralCharAdvance();
-    if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
-    do {
-      AddLiteralCharAdvance();
-    } while (c0_ >= '0' && c0_ <= '9');
-  }
+
+  // The source character immediately following a numeric literal must
+  // not be an identifier start or a decimal digit; see ECMA-262
+  // section 7.8.3, page 17 (note that we read only one decimal digit
+  // if the value is 0).
+  if (IsDecimalDigit(c0_) || unicode_cache_->IsIdentifierStart(c0_))
+    return Token::ILLEGAL;
+
   literal.Complete();
-  ASSERT_NOT_NULL(next_.literal_chars);
-  number_ = StringToDouble(unicode_cache_,
-                           next_.literal_chars->ascii_literal(),
-                           NO_FLAGS,  // Hex, octal or trailing junk.
-                           OS::nan_value());
+
   return Token::NUMBER;
 }
 
 
-Token::Value JsonScanner::ScanJsonIdentifier(const char* text,
-                                             Token::Value token) {
-  LiteralScope literal(this);
-  while (*text != '\0') {
-    if (c0_ != *text) return Token::ILLEGAL;
-    Advance();
-    text++;
-  }
-  if (unicode_cache_->IsIdentifierPart(c0_)) return Token::ILLEGAL;
-  literal.Complete();
-  return token;
+uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
+  Advance();
+  if (c0_ != 'u') return -1;
+  Advance();
+  uc32 result = ScanHexNumber(4);
+  if (result < 0) PushBack('u');
+  return result;
 }
 
 
+// ----------------------------------------------------------------------------
+// Keyword Matcher
+
+#define KEYWORDS(KEYWORD_GROUP, KEYWORD)                            \
+  KEYWORD_GROUP('b')                                                \
+  KEYWORD("break", Token::BREAK)                                    \
+  KEYWORD_GROUP('c')                                                \
+  KEYWORD("case", Token::CASE)                                      \
+  KEYWORD("catch", Token::CATCH)                                    \
+  KEYWORD("class", Token::FUTURE_RESERVED_WORD)                     \
+  KEYWORD("const", Token::CONST)                                    \
+  KEYWORD("continue", Token::CONTINUE)                              \
+  KEYWORD_GROUP('d')                                                \
+  KEYWORD("debugger", Token::DEBUGGER)                              \
+  KEYWORD("default", Token::DEFAULT)                                \
+  KEYWORD("delete", Token::DELETE)                                  \
+  KEYWORD("do", Token::DO)                                          \
+  KEYWORD_GROUP('e')                                                \
+  KEYWORD("else", Token::ELSE)                                      \
+  KEYWORD("enum", Token::FUTURE_RESERVED_WORD)                      \
+  KEYWORD("export", Token::FUTURE_RESERVED_WORD)                    \
+  KEYWORD("extends", Token::FUTURE_RESERVED_WORD)                   \
+  KEYWORD_GROUP('f')                                                \
+  KEYWORD("false", Token::FALSE_LITERAL)                            \
+  KEYWORD("finally", Token::FINALLY)                                \
+  KEYWORD("for", Token::FOR)                                        \
+  KEYWORD("function", Token::FUNCTION)                              \
+  KEYWORD_GROUP('i')                                                \
+  KEYWORD("if", Token::IF)                                          \
+  KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD)         \
+  KEYWORD("import", Token::FUTURE_RESERVED_WORD)                    \
+  KEYWORD("in", Token::IN)                                          \
+  KEYWORD("instanceof", Token::INSTANCEOF)                          \
+  KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD)          \
+  KEYWORD_GROUP('l')                                                \
+  KEYWORD("let", harmony_block_scoping                              \
+                 ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
+  KEYWORD_GROUP('n')                                                \
+  KEYWORD("new", Token::NEW)                                        \
+  KEYWORD("null", Token::NULL_LITERAL)                              \
+  KEYWORD_GROUP('p')                                                \
+  KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD)            \
+  KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD)            \
+  KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD)          \
+  KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD)             \
+  KEYWORD_GROUP('r')                                                \
+  KEYWORD("return", Token::RETURN)                                  \
+  KEYWORD_GROUP('s')                                                \
+  KEYWORD("static", Token::FUTURE_STRICT_RESERVED_WORD)             \
+  KEYWORD("super", Token::FUTURE_RESERVED_WORD)                     \
+  KEYWORD("switch", Token::SWITCH)                                  \
+  KEYWORD_GROUP('t')                                                \
+  KEYWORD("this", Token::THIS)                                      \
+  KEYWORD("throw", Token::THROW)                                    \
+  KEYWORD("true", Token::TRUE_LITERAL)                              \
+  KEYWORD("try", Token::TRY)                                        \
+  KEYWORD("typeof", Token::TYPEOF)                                  \
+  KEYWORD_GROUP('v')                                                \
+  KEYWORD("var", Token::VAR)                                        \
+  KEYWORD("void", Token::VOID)                                      \
+  KEYWORD_GROUP('w')                                                \
+  KEYWORD("while", Token::WHILE)                                    \
+  KEYWORD("with", Token::WITH)                                      \
+  KEYWORD_GROUP('y')                                                \
+  KEYWORD("yield", Token::FUTURE_STRICT_RESERVED_WORD)
+
+
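+// Matches an ASCII identifier against the keyword table above with the
+// comparisons fully unrolled: the switch dispatches on the first character
+// and each KEYWORD expansion compares the remaining characters of the
+// 2..10 character keywords one by one.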
+static Token::Value KeywordOrIdentifierToken(const char* input,
+                                             int input_length,
+                                             bool harmony_block_scoping) {
+  ASSERT(input_length >= 1);
+  const int kMinLength = 2;
+  const int kMaxLength = 10;
+  if (input_length < kMinLength || input_length > kMaxLength) {
+    return Token::IDENTIFIER;
+  }
+  switch (input[0]) {
+    default:
+#define KEYWORD_GROUP_CASE(ch)                                \
+      break;                                                  \
+    case ch:
+#define KEYWORD(keyword, token)                               \
+    {                                                         \
+      /* 'keyword' is a char array, so sizeof(keyword) is */  \
+      /* strlen(keyword) plus 1 for the NUL char. */          \
+      const int keyword_length = sizeof(keyword) - 1;         \
+      STATIC_ASSERT(keyword_length >= kMinLength);            \
+      STATIC_ASSERT(keyword_length <= kMaxLength);            \
+      if (input_length == keyword_length &&                   \
+          input[1] == keyword[1] &&                           \
+          (keyword_length <= 2 || input[2] == keyword[2]) &&  \
+          (keyword_length <= 3 || input[3] == keyword[3]) &&  \
+          (keyword_length <= 4 || input[4] == keyword[4]) &&  \
+          (keyword_length <= 5 || input[5] == keyword[5]) &&  \
+          (keyword_length <= 6 || input[6] == keyword[6]) &&  \
+          (keyword_length <= 7 || input[7] == keyword[7]) &&  \
+          (keyword_length <= 8 || input[8] == keyword[8]) &&  \
+          (keyword_length <= 9 || input[9] == keyword[9])) {  \
+        return token;                                         \
+      }                                                       \
+    }
+    KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
+  }
+  return Token::IDENTIFIER;
+}
+
+
+Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
+  ASSERT(unicode_cache_->IsIdentifierStart(c0_));
+  LiteralScope literal(this);
+  // Scan identifier start character.
+  if (c0_ == '\\') {
+    uc32 c = ScanIdentifierUnicodeEscape();
+    // Only allow legal identifier start characters.
+    if (c < 0 ||
+        c == '\\' ||  // No recursive escapes.
+        !unicode_cache_->IsIdentifierStart(c)) {
+      return Token::ILLEGAL;
+    }
+    AddLiteralChar(c);
+    return ScanIdentifierSuffix(&literal);
+  }
+
+  uc32 first_char = c0_;
+  Advance();
+  AddLiteralChar(first_char);
+
+  // Scan the rest of the identifier characters.
+  while (unicode_cache_->IsIdentifierPart(c0_)) {
+    if (c0_ != '\\') {
+      uc32 next_char = c0_;
+      Advance();
+      AddLiteralChar(next_char);
+      continue;
+    }
+    // Fallthrough if no longer able to complete keyword.
+    return ScanIdentifierSuffix(&literal);
+  }
+
+  literal.Complete();
+
+  if (next_.literal_chars->is_ascii()) {
+    Vector<const char> chars = next_.literal_chars->ascii_literal();
+    return KeywordOrIdentifierToken(chars.start(),
+                                    chars.length(),
+                                    harmony_block_scoping_);
+  }
+
+  return Token::IDENTIFIER;
+}
+
+
+Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
+  // Scan the rest of the identifier characters.
+  while (unicode_cache_->IsIdentifierPart(c0_)) {
+    if (c0_ == '\\') {
+      uc32 c = ScanIdentifierUnicodeEscape();
+      // Only allow legal identifier part characters.
+      if (c < 0 ||
+          c == '\\' ||
+          !unicode_cache_->IsIdentifierPart(c)) {
+        return Token::ILLEGAL;
+      }
+      AddLiteralChar(c);
+    } else {
+      AddLiteralChar(c0_);
+      Advance();
+    }
+  }
+  literal->Complete();
+
+  return Token::IDENTIFIER;
+}
+
+
+bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
+  // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
+  bool in_character_class = false;
+
+  // The previous token is either '/' or '/='; in the second case, the
+  // pattern starts at the '='.
+  next_.location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
+  next_.location.end_pos = source_pos() - (seen_equal ? 1 : 0);
+
+  // Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
+  // the scanner should pass uninterpreted bodies to the RegExp
+  // constructor.
+  LiteralScope literal(this);
+  if (seen_equal) {
+    AddLiteralChar('=');
+  }
+
+  while (c0_ != '/' || in_character_class) {
+    if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
+    if (c0_ == '\\') {  // Escape sequence.
+      AddLiteralCharAdvance();
+      if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
+      AddLiteralCharAdvance();
+      // If the escape allows more characters, i.e., \x??, \u????, or \c?,
+      // only "safe" characters are allowed (letters, digits, underscore),
+      // otherwise the escape isn't valid and the invalid character has
+      // its normal meaning. I.e., we can just continue scanning without
+      // worrying whether the following characters are part of the escape
+      // or not, since any '/', '\\' or '[' is guaranteed to not be part
+      // of the escape sequence.
+
+      // TODO(896): At some point, parse RegExps more thoroughly to capture
+      // octal escapes in strict mode.
+    } else {  // Unescaped character.
+      if (c0_ == '[') in_character_class = true;
+      if (c0_ == ']') in_character_class = false;
+      AddLiteralCharAdvance();
+    }
+  }
+  Advance();  // consume '/'
+
+  literal.Complete();
+
+  return true;
+}
+
+
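+// Copies a complete "\uXXXX" escape (e.g. "\u0041") verbatim into the current
+// literal buffer. On an incomplete escape such as "\u12 ", the characters
+// read are pushed back and false is returned.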
+bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
+  ASSERT(c0_ == '\\');
+  uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
+  Advance();
+  int i = 1;
+  if (c0_ == 'u') {
+    i++;
+    while (i < 6) {
+      Advance();
+      if (!IsHexDigit(c0_)) break;
+      chars_read[i] = c0_;
+      i++;
+    }
+  }
+  if (i < 6) {
+    // Incomplete escape. Undo all advances and return false.
+    while (i > 0) {
+      i--;
+      PushBack(chars_read[i]);
+    }
+    return false;
+  }
+  // Complete escape. Add all chars to current literal buffer.
+  for (int i = 0; i < 6; i++) {
+    AddLiteralChar(chars_read[i]);
+  }
+  return true;
+}
+
+
+bool JavaScriptScanner::ScanRegExpFlags() {
+  // Scan regular expression flags.
+  LiteralScope literal(this);
+  while (unicode_cache_->IsIdentifierPart(c0_)) {
+    if (c0_ != '\\') {
+      AddLiteralCharAdvance();
+    } else {
+      if (!ScanLiteralUnicodeEscape()) {
+        break;
+      }
+    }
+  }
+  literal.Complete();
+
+  next_.location.end_pos = source_pos() - 1;
+  return true;
+}
+
 } }  // namespace v8::internal
diff --git a/src/scanner.h b/src/scanner.h
index 871c69b..16c3a42 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -25,170 +25,538 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Features shared by parsing and pre-parsing scanners.
+
 #ifndef V8_SCANNER_H_
 #define V8_SCANNER_H_
 
+#include "allocation.h"
+#include "char-predicates.h"
+#include "checks.h"
+#include "globals.h"
 #include "token.h"
-#include "char-predicates-inl.h"
-#include "scanner-base.h"
+#include "unicode-inl.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
-// A buffered character stream based on a random access character
-// source (ReadBlock can be called with pos_ pointing to any position,
-// even positions before the current).
-class BufferedUC16CharacterStream: public UC16CharacterStream {
- public:
-  BufferedUC16CharacterStream();
-  virtual ~BufferedUC16CharacterStream();
+// Returns the value (0 .. 15) of a hexadecimal character c.
+// If c is not a legal hexadecimal character, returns a value < 0.
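+// For example, HexValue('7') == 7, HexValue('b') == HexValue('B') == 11,
+// and HexValue('g') == -1.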
+inline int HexValue(uc32 c) {
+  c -= '0';
+  if (static_cast<unsigned>(c) <= 9) return c;
+  c = (c | 0x20) - ('a' - '0');  // detect 0x11..0x16 and 0x31..0x36.
+  if (static_cast<unsigned>(c) <= 5) return c + 10;
+  return -1;
+}
 
-  virtual void PushBack(uc32 character);
+
+// ---------------------------------------------------------------------
+// Buffered stream of characters, using an internal UC16 buffer.
+
+class UC16CharacterStream {
+ public:
+  UC16CharacterStream() : pos_(0) { }
+  virtual ~UC16CharacterStream() { }
+
+  // Returns and advances past the next UC16 character in the input
+  // stream. If there are no more characters, it returns a negative
+  // value.
+  inline uc32 Advance() {
+    if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
+      pos_++;
+      return static_cast<uc32>(*(buffer_cursor_++));
+    }
+    // Note: currently the following increment is necessary to avoid a
+    // parser problem! The scanner treats the final kEndOfInput as
+    // a character with a position, and does math relative to that
+    // position.
+    pos_++;
+
+    return kEndOfInput;
+  }
+
+  // Return the current position in the character stream.
+  // Starts at zero.
+  inline unsigned pos() const { return pos_; }
+
+  // Skips forward past the next character_count UC16 characters
+  // in the input, or until the end of input if that comes sooner.
+  // Returns the number of characters actually skipped. If less
+  // than character_count, the end of the input has been reached.
+  inline unsigned SeekForward(unsigned character_count) {
+    unsigned buffered_chars =
+        static_cast<unsigned>(buffer_end_ - buffer_cursor_);
+    if (character_count <= buffered_chars) {
+      buffer_cursor_ += character_count;
+      pos_ += character_count;
+      return character_count;
+    }
+    return SlowSeekForward(character_count);
+  }
+
+  // Pushes back the most recently read UC16 character (or negative
+  // value if at end of input), i.e., the value returned by the most recent
+  // call to Advance.
+  // Must not be used right after calling SeekForward.
+  virtual void PushBack(int32_t character) = 0;
 
  protected:
-  static const unsigned kBufferSize = 512;
-  static const unsigned kPushBackStepSize = 16;
+  static const uc32 kEndOfInput = -1;
 
-  virtual unsigned SlowSeekForward(unsigned delta);
-  virtual bool ReadBlock();
-  virtual void SlowPushBack(uc16 character);
+  // Ensures that the buffer_cursor_ points to the character at
+  // position pos_ of the input, if possible. If the position
+  // is at or after the end of the input, return false. If there
+  // are more characters available, return true.
+  virtual bool ReadBlock() = 0;
+  virtual unsigned SlowSeekForward(unsigned character_count) = 0;
 
-  virtual unsigned BufferSeekForward(unsigned delta) = 0;
-  virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
-
-  const uc16* pushback_limit_;
-  uc16 buffer_[kBufferSize];
+  const uc16* buffer_cursor_;
+  const uc16* buffer_end_;
+  unsigned pos_;
 };
 
 
-// Generic string stream.
-class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
+// ---------------------------------------------------------------------
+// Caching predicates used by scanners.
+
+class UnicodeCache {
  public:
-  GenericStringUC16CharacterStream(Handle<String> data,
-                                   unsigned start_position,
-                                   unsigned end_position);
-  virtual ~GenericStringUC16CharacterStream();
+  UnicodeCache() {}
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
 
- protected:
-  virtual unsigned BufferSeekForward(unsigned delta);
-  virtual unsigned FillBuffer(unsigned position, unsigned length);
-
-  Handle<String> string_;
-  unsigned start_position_;
-  unsigned length_;
-};
-
-
-// UC16 stream based on a literal UTF-8 string.
-class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
- public:
-  Utf8ToUC16CharacterStream(const byte* data, unsigned length);
-  virtual ~Utf8ToUC16CharacterStream();
-
- protected:
-  virtual unsigned BufferSeekForward(unsigned delta);
-  virtual unsigned FillBuffer(unsigned char_position, unsigned length);
-  void SetRawPosition(unsigned char_position);
-
-  const byte* raw_data_;
-  unsigned raw_data_length_;  // Measured in bytes, not characters.
-  unsigned raw_data_pos_;
-  // The character position of the character at raw_data[raw_data_pos_].
-  // Not necessarily the same as pos_.
-  unsigned raw_character_position_;
-};
-
-
-// UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
- public:
-  ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
-                                           int start_position,
-                                           int end_position);
-  virtual ~ExternalTwoByteStringUC16CharacterStream();
-
-  virtual void PushBack(uc32 character) {
-    ASSERT(buffer_cursor_ > raw_data_);
-    buffer_cursor_--;
-    pos_--;
+  StaticResource<Utf8Decoder>* utf8_decoder() {
+    return &utf8_decoder_;
   }
 
- protected:
-  virtual unsigned SlowSeekForward(unsigned delta) {
-    // Fast case always handles seeking.
-    return 0;
-  }
-  virtual bool ReadBlock() {
-    // Entire string is read at start.
-    return false;
-  }
-  Handle<ExternalTwoByteString> source_;
-  const uc16* raw_data_;  // Pointer to the actual array of characters.
+  bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
+  bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
+  bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
+  bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
+
+ private:
+  unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
+  unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
+  unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
+  unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+  StaticResource<Utf8Decoder> utf8_decoder_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
 };
 
 
 // ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-// JavaScript scanner getting its input from either a V8 String or a unicode
-// CharacterStream.
+// LiteralBuffer -  Collector of chars of literals.
 
-class V8JavaScriptScanner : public JavaScriptScanner {
+class LiteralBuffer {
  public:
-  explicit V8JavaScriptScanner(UnicodeCache* unicode_cache)
-      : JavaScriptScanner(unicode_cache) {}
+  LiteralBuffer() : is_ascii_(true), position_(0), backing_store_() { }
 
-  void Initialize(UC16CharacterStream* source);
+  ~LiteralBuffer() {
+    if (backing_store_.length() > 0) {
+      backing_store_.Dispose();
+    }
+  }
+
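+  // Characters are stored one byte each while the literal is pure ASCII;
+  // the first character >= kMaxAsciiCharCodeU switches the whole buffer to
+  // two-byte UC16 storage.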
+  inline void AddChar(uc16 character) {
+    if (position_ >= backing_store_.length()) ExpandBuffer();
+    if (is_ascii_) {
+      if (character < kMaxAsciiCharCodeU) {
+        backing_store_[position_] = static_cast<byte>(character);
+        position_ += kASCIISize;
+        return;
+      }
+      ConvertToUC16();
+    }
+    *reinterpret_cast<uc16*>(&backing_store_[position_]) = character;
+    position_ += kUC16Size;
+  }
+
+  bool is_ascii() { return is_ascii_; }
+
+  Vector<const uc16> uc16_literal() {
+    ASSERT(!is_ascii_);
+    ASSERT((position_ & 0x1) == 0);
+    return Vector<const uc16>(
+        reinterpret_cast<const uc16*>(backing_store_.start()),
+        position_ >> 1);
+  }
+
+  Vector<const char> ascii_literal() {
+    ASSERT(is_ascii_);
+    return Vector<const char>(
+        reinterpret_cast<const char*>(backing_store_.start()),
+        position_);
+  }
+
+  int length() {
+    return is_ascii_ ? position_ : (position_ >> 1);
+  }
+
+  void Reset() {
+    position_ = 0;
+    is_ascii_ = true;
+  }
+
+ private:
+  static const int kInitialCapacity = 16;
+  static const int kGrowthFactory = 4;
+  static const int kMinConversionSlack = 256;
+  static const int kMaxGrowth = 1 * MB;
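+  // Growth policy: quadruple the capacity while the buffer is small, but
+  // add at most kMaxGrowth (1 MB) per expansion once it is large.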
+  inline int NewCapacity(int min_capacity) {
+    int capacity = Max(min_capacity, backing_store_.length());
+    int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
+    return new_capacity;
+  }
+
+  void ExpandBuffer() {
+    Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
+    memcpy(new_store.start(), backing_store_.start(), position_);
+    backing_store_.Dispose();
+    backing_store_ = new_store;
+  }
+
+  void ConvertToUC16() {
+    ASSERT(is_ascii_);
+    Vector<byte> new_store;
+    int new_content_size = position_ * kUC16Size;
+    if (new_content_size >= backing_store_.length()) {
+      // Ensure room for all currently read characters as UC16 as well
+      // as the character about to be stored.
+      new_store = Vector<byte>::New(NewCapacity(new_content_size));
+    } else {
+      new_store = backing_store_;
+    }
+    char* src = reinterpret_cast<char*>(backing_store_.start());
+    uc16* dst = reinterpret_cast<uc16*>(new_store.start());
+    for (int i = position_ - 1; i >= 0; i--) {
+      dst[i] = src[i];
+    }
+    if (new_store.start() != backing_store_.start()) {
+      backing_store_.Dispose();
+      backing_store_ = new_store;
+    }
+    position_ = new_content_size;
+    is_ascii_ = false;
+  }
+
+  bool is_ascii_;
+  int position_;
+  Vector<byte> backing_store_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
 };
 
 
-class JsonScanner : public Scanner {
+// ----------------------------------------------------------------------------
+// Scanner base-class.
+
+// Generic functionality used by both JSON and JavaScript scanners.
+class Scanner {
  public:
-  explicit JsonScanner(UnicodeCache* unicode_cache);
+  // -1 is outside of the range of any real source code.
+  static const int kNoOctalLocation = -1;
+
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+  class LiteralScope {
+   public:
+    explicit LiteralScope(Scanner* self);
+    ~LiteralScope();
+    void Complete();
+
+   private:
+    Scanner* scanner_;
+    bool complete_;
+  };
+
+  explicit Scanner(UnicodeCache* scanner_constants);
+
+  // Returns the current token again.
+  Token::Value current_token() { return current_.token; }
+
+  // One token look-ahead (past the token returned by Next()).
+  Token::Value peek() const { return next_.token; }
+
+  struct Location {
+    Location(int b, int e) : beg_pos(b), end_pos(e) { }
+    Location() : beg_pos(0), end_pos(0) { }
+
+    bool IsValid() const {
+      return beg_pos >= 0 && end_pos >= beg_pos;
+    }
+
+    static Location invalid() { return Location(-1, -1); }
+
+    int beg_pos;
+    int end_pos;
+  };
+
+  // Returns the location information for the current token
+  // (the token returned by Next()).
+  Location location() const { return current_.location; }
+  Location peek_location() const { return next_.location; }
+
+  // Returns the literal string, if any, for the current token (the
+  // token returned by Next()). The string is 0-terminated and in
+  // UTF-8 format; it may contain 0-characters. Literal strings are
+  // collected for identifiers, strings, and numbers.
+  // These functions only give the correct result if the literal
+  // was scanned between calls to StartLiteral() and TerminateLiteral().
+  bool is_literal_ascii() {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->is_ascii();
+  }
+  Vector<const char> literal_ascii_string() {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->ascii_literal();
+  }
+  Vector<const uc16> literal_uc16_string() {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->uc16_literal();
+  }
+  int literal_length() const {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->length();
+  }
+
+  bool literal_contains_escapes() const {
+    Location location = current_.location;
+    int source_length = (location.end_pos - location.beg_pos);
+    if (current_.token == Token::STRING) {
+      // Subtract delimiters.
+      source_length -= 2;
+    }
+    return current_.literal_chars->length() != source_length;
+  }
+
+  // Returns the literal string for the next token (the token that
+  // would be returned if Next() were called).
+  bool is_next_literal_ascii() {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->is_ascii();
+  }
+  Vector<const char> next_literal_ascii_string() {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->ascii_literal();
+  }
+  Vector<const uc16> next_literal_uc16_string() {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->uc16_literal();
+  }
+  int next_literal_length() const {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->length();
+  }
+
+  UnicodeCache* unicode_cache() { return unicode_cache_; }
+
+  static const int kCharacterLookaheadBufferSize = 1;
+
+ protected:
+  // The current and look-ahead token.
+  struct TokenDesc {
+    Token::Value token;
+    Location location;
+    LiteralBuffer* literal_chars;
+  };
+
+  // Call this after setting source_ to the input.
+  void Init() {
+    // Set c0_ (one character ahead)
+    STATIC_ASSERT(kCharacterLookaheadBufferSize == 1);
+    Advance();
+    // Initialize current_ to not refer to a literal.
+    current_.literal_chars = NULL;
+  }
+
+  // Literal buffer support
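+  // Two buffers alternate between current_ and next_ so that the literal of
+  // the current token stays valid while the look-ahead token's literal is
+  // being collected.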
+  inline void StartLiteral() {
+    LiteralBuffer* free_buffer = (current_.literal_chars == &literal_buffer1_) ?
+            &literal_buffer2_ : &literal_buffer1_;
+    free_buffer->Reset();
+    next_.literal_chars = free_buffer;
+  }
+
+  inline void AddLiteralChar(uc32 c) {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    next_.literal_chars->AddChar(c);
+  }
+
+  // Complete scanning of a literal.
+  inline void TerminateLiteral() {
+    // Does nothing in the current implementation.
+  }
+
+  // Stops scanning of a literal and drops the collected characters,
+  // e.g., due to an encountered error.
+  inline void DropLiteral() {
+    next_.literal_chars = NULL;
+  }
+
+  inline void AddLiteralCharAdvance() {
+    AddLiteralChar(c0_);
+    Advance();
+  }
+
+  // Low-level scanning support.
+  void Advance() { c0_ = source_->Advance(); }
+  void PushBack(uc32 ch) {
+    source_->PushBack(c0_);
+    c0_ = ch;
+  }
+
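+  // Select(tok) consumes c0_ and returns tok; Select(next, then, else_) also
+  // consumes a following |next|, e.g. Select('=', Token::ASSIGN_MUL,
+  // Token::MUL) distinguishes "*=" from "*".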
+  inline Token::Value Select(Token::Value tok) {
+    Advance();
+    return tok;
+  }
+
+  inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_) {
+    Advance();
+    if (c0_ == next) {
+      Advance();
+      return then;
+    } else {
+      return else_;
+    }
+  }
+
+  uc32 ScanHexNumber(int expected_length);
+
+  // Return the current source position.
+  int source_pos() {
+    return source_->pos() - kCharacterLookaheadBufferSize;
+  }
+
+  UnicodeCache* unicode_cache_;
+
+  // Buffers collecting literal strings, numbers, etc.
+  LiteralBuffer literal_buffer1_;
+  LiteralBuffer literal_buffer2_;
+
+  TokenDesc current_;  // desc for current token (as returned by Next())
+  TokenDesc next_;     // desc for next token (one token look-ahead)
+
+  // Input stream. Must be initialized to an UC16CharacterStream.
+  UC16CharacterStream* source_;
+
+  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+  uc32 c0_;
+};
+
+// ----------------------------------------------------------------------------
+// JavaScriptScanner - base logic for JavaScript scanning.
+
+class JavaScriptScanner : public Scanner {
+ public:
+  // A LiteralScope that disables recording of some types of JavaScript
+  // literals. If the scanner is configured to not record the specific
+  // type of literal, the scope will not call StartLiteral.
+  class LiteralScope {
+   public:
+    explicit LiteralScope(JavaScriptScanner* self)
+        : scanner_(self), complete_(false) {
+      scanner_->StartLiteral();
+    }
+    ~LiteralScope() {
+      if (!complete_) scanner_->DropLiteral();
+    }
+    void Complete() {
+      scanner_->TerminateLiteral();
+      complete_ = true;
+    }
+
+   private:
+    JavaScriptScanner* scanner_;
+    bool complete_;
+  };
+
+  explicit JavaScriptScanner(UnicodeCache* scanner_constants);
 
   void Initialize(UC16CharacterStream* source);
 
   // Returns the next token.
   Token::Value Next();
 
-  // Returns the value of a number token.
-  double number() {
-    return number_;
+  // Returns true if there was a line terminator before the peeked token,
+  // possibly inside a multi-line comment.
+  bool HasAnyLineTerminatorBeforeNext() const {
+    return has_line_terminator_before_next_ ||
+           has_multiline_comment_before_next_;
+  }
+
+  // Scans the input as a regular expression pattern. The previous
+  // character(s) must be /(=). Returns true if a pattern is scanned.
+  bool ScanRegExpPattern(bool seen_equal);
+  // Returns true if regexp flags are scanned (always since flags can
+  // be empty).
+  bool ScanRegExpFlags();
+
+  // Tells whether the buffer contains an identifier (no escapes).
+  // Used for checking if a property name is an identifier.
+  static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+  // Scans an octal escape sequence. Also accepts the "\0" decimal
+  // escape sequence.
+  uc32 ScanOctalEscape(uc32 c, int length);
+
+  // Returns the location of the last seen octal literal
+  Location octal_position() const { return octal_pos_; }
+  void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
+  // Seek forward to the given position.  This operation does not
+  // work in general, for instance when there are pushed back
+  // characters, but works for seeking forward until simple delimiter
+  // tokens, which is what it is used for.
+  void SeekForward(int pos);
+
+  bool HarmonyBlockScoping() const {
+    return harmony_block_scoping_;
+  }
+  void SetHarmonyBlockScoping(bool block_scoping) {
+    harmony_block_scoping_ = block_scoping;
   }
 
 
  protected:
-  // Skip past JSON whitespace (only space, tab, newline and carrige-return).
-  bool SkipJsonWhiteSpace();
+  bool SkipWhiteSpace();
+  Token::Value SkipSingleLineComment();
+  Token::Value SkipMultiLineComment();
 
-  // Scan a single JSON token. The JSON lexical grammar is specified in the
-  // ECMAScript 5 standard, section 15.12.1.1.
-  // Recognizes all of the single-character tokens directly, or calls a function
-  // to scan a number, string or identifier literal.
-  // The only allowed whitespace characters between tokens are tab,
-  // carriage-return, newline and space.
-  void ScanJson();
+  // Scans a single JavaScript token.
+  void Scan();
 
-  // A JSON number (production JSONNumber) is a subset of the valid JavaScript
-  // decimal number literals.
-  // It includes an optional minus sign, must have at least one
-  // digit before and after a decimal point, may not have prefixed zeros (unless
-  // the integer part is zero), and may include an exponent part (e.g., "e-10").
-  // Hexadecimal and octal numbers are not allowed.
-  Token::Value ScanJsonNumber();
+  void ScanDecimalDigits();
+  Token::Value ScanNumber(bool seen_period);
+  Token::Value ScanIdentifierOrKeyword();
+  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
 
-  // A JSON string (production JSONString) is subset of valid JavaScript string
-  // literals. The string must only be double-quoted (not single-quoted), and
-  // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
-  // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
-  Token::Value ScanJsonString();
+  void ScanEscape();
+  Token::Value ScanString();
 
-  // Used to recognizes one of the literals "true", "false", or "null". These
-  // are the only valid JSON identifiers (productions JSONBooleanLiteral,
-  // JSONNullLiteral).
-  Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+  // Scans a possible HTML comment -- begins with '<!'.
+  Token::Value ScanHtmlComment();
 
-  // Holds the value of a scanned number token.
-  double number_;
+  // Decodes a unicode escape-sequence which is part of an identifier.
+  // If the escape sequence cannot be decoded the result is kBadChar.
+  uc32 ScanIdentifierUnicodeEscape();
+  // Recognizes a unicode escape-sequence and adds its characters,
+  // uninterpreted, to the current literal. Used for parsing RegExp
+  // flags.
+  bool ScanLiteralUnicodeEscape();
+
+  // Start position of the octal literal last scanned.
+  Location octal_pos_;
+
+  // Whether there is a line terminator whitespace character after
+  // the current token, and before the next. Does not count newlines
+  // inside multiline comments.
+  bool has_line_terminator_before_next_;
+  // Whether there is a multi-line comment that contains a
+  // line-terminator after the current token, and before the next.
+  bool has_multiline_comment_before_next_;
+  // Whether we scan 'let' as a keyword for harmony block scoped
+  // let bindings.
+  bool harmony_block_scoping_;
 };
 
 } }  // namespace v8::internal
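The Scanner above keeps exactly one token of lookahead (current_ and next_) and alternates between two LiteralBuffers: StartLiteral always hands next_ whichever buffer current_ is not using, so the literal text of the current token stays valid while the next token is being collected. A minimal standalone sketch of that double-buffer hand-off, using hypothetical SimpleBuffer/SimpleScanner stand-ins rather than the real V8 classes:

    // Minimal sketch of the two-buffer literal scheme; SimpleBuffer and
    // SimpleScanner are illustrative stand-ins, not V8 types.
    #include <cassert>
    #include <string>

    struct SimpleBuffer {
      std::string chars;
      void Reset() { chars.clear(); }
      void AddChar(char c) { chars.push_back(c); }
    };

    class SimpleScanner {
     public:
      SimpleScanner() { current_.literal = nullptr; next_.literal = nullptr; }

      // Pick whichever buffer the *current* token is not using, so its
      // literal text remains valid while the next token is collected.
      void StartLiteral() {
        SimpleBuffer* free_buffer =
            (current_.literal == &buffer1_) ? &buffer2_ : &buffer1_;
        free_buffer->Reset();
        next_.literal = free_buffer;
      }

      void AddLiteralChar(char c) {
        assert(next_.literal != nullptr);
        next_.literal->AddChar(c);
      }

      // Promote the one-token lookahead: next_ becomes current_.
      void RotateToken() {
        current_ = next_;
        next_.literal = nullptr;
      }

     private:
      struct TokenDesc { SimpleBuffer* literal; };
      SimpleBuffer buffer1_;
      SimpleBuffer buffer2_;
      TokenDesc current_;
      TokenDesc next_;
    };

Because at most two token descriptors are live at any time, two buffers are sufficient; DropLiteral in the real scanner simply clears next_.literal_chars instead of freeing anything.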
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 58e2ad2..ad31ca4 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,17 +32,15 @@
 #include "scopeinfo.h"
 #include "scopes.h"
 
+#include "allocation-inl.h"
+
 namespace v8 {
 namespace internal {
 
 
 static int CompareLocal(Variable* const* v, Variable* const* w) {
-  Slot* s = (*v)->AsSlot();
-  Slot* t = (*w)->AsSlot();
-  // We may have rewritten parameters (that are in the arguments object)
-  // and which may have a NULL slot... - find a better solution...
-  int x = (s != NULL ? s->index() : 0);
-  int y = (t != NULL ? t->index() : 0);
+  int x = (*v)->index();
+  int y = (*w)->index();
   // Consider sorting them according to type as well?
   return x - y;
 }
@@ -52,6 +50,7 @@
 ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
     : function_name_(FACTORY->empty_symbol()),
       calls_eval_(scope->calls_eval()),
+      is_strict_mode_(scope->is_strict_mode()),
       parameters_(scope->num_parameters()),
       stack_slots_(scope->num_stack_slots()),
       context_slots_(scope->num_heap_slots()),
@@ -83,27 +82,24 @@
   for (int i = 0; i < locals.length(); i++) {
     Variable* var = locals[i];
     if (var->is_used()) {
-      Slot* slot = var->AsSlot();
-      if (slot != NULL) {
-        switch (slot->type()) {
-          case Slot::PARAMETER:
-            // explicitly added to parameters_ above - ignore
-            break;
+      switch (var->location()) {
+        case Variable::UNALLOCATED:
+        case Variable::PARAMETER:
+          break;
 
-          case Slot::LOCAL:
-            ASSERT(stack_slots_.length() == slot->index());
-            stack_slots_.Add(var->name());
-            break;
+        case Variable::LOCAL:
+          ASSERT(stack_slots_.length() == var->index());
+          stack_slots_.Add(var->name());
+          break;
 
-          case Slot::CONTEXT:
-            heap_locals.Add(var);
-            break;
+        case Variable::CONTEXT:
+          heap_locals.Add(var);
+          break;
 
-          case Slot::LOOKUP:
-            // This is currently not used.
-            UNREACHABLE();
-            break;
-        }
+        case Variable::LOOKUP:
+          // We don't expect lookup variables in the locals list.
+          UNREACHABLE();
+          break;
       }
     }
   }
@@ -112,9 +108,9 @@
   if (scope->num_heap_slots() > 0) {
     // Add user-defined slots.
     for (int i = 0; i < heap_locals.length(); i++) {
-      ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
              context_slots_.length());
-      ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
              context_modes_.length());
       context_slots_.Add(heap_locals[i]->name());
       context_modes_.Add(heap_locals[i]->mode());
@@ -128,18 +124,18 @@
   // For now, this must happen at the very end because of the
   // ordering of the scope info slots and the respective slot indices.
   if (scope->is_function_scope()) {
-    Variable* var = scope->function();
-    if (var != NULL &&
-        var->is_used() &&
-        var->AsSlot()->type() == Slot::CONTEXT) {
-      function_name_ = var->name();
+    VariableProxy* proxy = scope->function();
+    if (proxy != NULL &&
+        proxy->var()->is_used() &&
+        proxy->var()->IsContextSlot()) {
+      function_name_ = proxy->name();
       // Note that we must not find the function name in the context slot
       // list - instead it must be handled separately in the
       // Contexts::Lookup() function. Thus record an empty symbol here so we
       // get the correct number of context slots.
-      ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_slots_.length());
-      ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_modes_.length());
       context_slots_.Add(FACTORY->empty_symbol());
       context_modes_.Add(Variable::INTERNAL);
@@ -248,6 +244,7 @@
     Object** p = p0;
     p = ReadSymbol(p, &function_name_);
     p = ReadBool(p, &calls_eval_);
+    p = ReadBool(p, &is_strict_mode_);
     p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
     p = ReadList<Allocator>(p, &parameters_);
     p = ReadList<Allocator>(p, &stack_slots_);
@@ -301,21 +298,22 @@
 
 template<class Allocator>
 Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
-  // function name, calls eval, length for 3 tables:
-  const int extra_slots = 1 + 1 + 3;
+  // function name, calls eval, is_strict_mode, length for 3 tables:
+  const int extra_slots = 1 + 1 + 1 + 3;
   int length = extra_slots +
                context_slots_.length() * 2 +
                parameters_.length() +
                stack_slots_.length();
 
   Handle<SerializedScopeInfo> data(
-      SerializedScopeInfo::cast(*FACTORY->NewFixedArray(length, TENURED)));
+      SerializedScopeInfo::cast(*FACTORY->NewSerializedScopeInfo(length)));
   AssertNoAllocation nogc;
 
   Object** p0 = data->data_start();
   Object** p = p0;
   p = WriteSymbol(p, function_name_);
   p = WriteBool(p, calls_eval_);
+  p = WriteBool(p, is_strict_mode_);
   p = WriteList(p, &context_slots_, &context_modes_);
   p = WriteList(p, &parameters_);
   p = WriteList(p, &stack_slots_);
@@ -363,7 +361,8 @@
 
 Object** SerializedScopeInfo::ContextEntriesAddr() {
   ASSERT(length() > 0);
-  return data_start() + 2;  // +2 for function name and calls eval.
+  // +3 for function name, calls eval, strict mode.
+  return data_start() + 3;
 }
 
 
@@ -392,7 +391,18 @@
     p = ReadBool(p, &calls_eval);
     return calls_eval;
   }
-  return true;
+  return false;
+}
+
+
+bool SerializedScopeInfo::IsStrictMode() {
+  if (length() > 0) {
+    Object** p = data_start() + 2;  // +2 for function name, calls eval.
+    bool strict_mode;
+    p = ReadBool(p, &strict_mode);
+    return strict_mode;
+  }
+  return false;
 }
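The offsets used above follow directly from the write order in ScopeInfo::Serialize(): function name, calls_eval, is_strict_mode, then the three length-prefixed lists. That is why CallsEval() reads slot 1, IsStrictMode() reads slot 2, and ContextEntriesAddr() starts at data_start() + 3. A rough sketch of that layout using plain integers (the real code stores Object pointers written by WriteSymbol/WriteBool/WriteList, not ints):

    // Illustrative layout only, not the real Object** representation.
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> data;
      data.push_back(/* function name  */ 0);  // slot 0
      data.push_back(/* calls_eval     */ 1);  // slot 1
      data.push_back(/* is_strict_mode */ 1);  // slot 2
      // Slot 3 onwards: context slots, then parameters, then stack slots,
      // each stored as a length followed by its entries.
      data.push_back(/* context slot count */ 0);

      bool calls_eval = data[1] != 0;    // mirrors CallsEval()
      bool strict     = data[2] != 0;    // mirrors IsStrictMode()
      int* context_entries = &data[3];   // mirrors ContextEntriesAddr()
      std::printf("calls_eval=%d strict=%d first context entry=%d\n",
                  calls_eval, strict, *context_entries);
      return 0;
    }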
 
 
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 2552af2..40c5c8a 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_SCOPEINFO_H_
 #define V8_SCOPEINFO_H_
 
+#include "allocation.h"
 #include "variables.h"
 #include "zone-inl.h"
 
@@ -92,6 +93,7 @@
  private:
   Handle<String> function_name_;
   bool calls_eval_;
+  bool is_strict_mode_;
   List<Handle<String>, Allocator > parameters_;
   List<Handle<String>, Allocator > stack_slots_;
   List<Handle<String>, Allocator > context_slots_;
@@ -105,17 +107,15 @@
  public :
 
   static SerializedScopeInfo* cast(Object* object) {
-    ASSERT(object->IsFixedArray());
+    ASSERT(object->IsSerializedScopeInfo());
     return reinterpret_cast<SerializedScopeInfo*>(object);
   }
 
   // Does this scope call eval?
   bool CallsEval();
 
-  // Does this scope have an arguments shadow?
-  bool HasArgumentsShadow() {
-    return StackSlotIndex(GetHeap()->arguments_shadow_symbol()) >= 0;
-  }
+  // Is this scope a strict mode scope?
+  bool IsStrictMode();
 
   // Return the number of stack slots for code.
   int NumberOfStackSlots();
@@ -156,7 +156,6 @@
   static SerializedScopeInfo* Empty();
 
  private:
-
   inline Object** ContextEntriesAddr();
 
   inline Object** ParameterEntriesAddr();
@@ -187,6 +186,7 @@
   void Clear();
 
   static const int kNotFound = -2;
+
  private:
   ContextSlotCache() {
     for (int i = 0; i < kLength; ++i) {
diff --git a/src/scopes.cc b/src/scopes.cc
index 8df93c5..d5a7a9f 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,9 +31,10 @@
 
 #include "bootstrapper.h"
 #include "compiler.h"
-#include "prettyprinter.h"
 #include "scopeinfo.h"
 
+#include "allocation-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -114,70 +115,74 @@
 
 // Dummy constructor
 Scope::Scope(Type type)
-  : inner_scopes_(0),
-    variables_(false),
-    temps_(0),
-    params_(0),
-    unresolved_(0),
-    decls_(0) {
+    : isolate_(Isolate::Current()),
+      inner_scopes_(0),
+      variables_(false),
+      temps_(0),
+      params_(0),
+      unresolved_(0),
+      decls_(0),
+      already_resolved_(false) {
   SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
-  ASSERT(!resolved());
 }
 
 
 Scope::Scope(Scope* outer_scope, Type type)
-  : inner_scopes_(4),
-    variables_(),
-    temps_(4),
-    params_(4),
-    unresolved_(16),
-    decls_(4) {
+    : isolate_(Isolate::Current()),
+      inner_scopes_(4),
+      variables_(),
+      temps_(4),
+      params_(4),
+      unresolved_(16),
+      decls_(4),
+      already_resolved_(false) {
   SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
   // At some point we might want to provide outer scopes to
   // eval scopes (by walking the stack and reading the scope info).
   // In that case, the ASSERT below needs to be adjusted.
   ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
   ASSERT(!HasIllegalRedeclaration());
-  ASSERT(!resolved());
 }
 
 
-Scope::Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info)
-  : inner_scopes_(4),
-    variables_(),
-    temps_(4),
-    params_(4),
-    unresolved_(16),
-    decls_(4) {
+Scope::Scope(Scope* inner_scope,
+             Type type,
+             Handle<SerializedScopeInfo> scope_info)
+    : isolate_(Isolate::Current()),
+      inner_scopes_(4),
+      variables_(),
+      temps_(4),
+      params_(4),
+      unresolved_(16),
+      decls_(4),
+      already_resolved_(true) {
   ASSERT(!scope_info.is_null());
-  SetDefaults(FUNCTION_SCOPE, NULL, scope_info);
-  ASSERT(resolved());
+  SetDefaults(type, NULL, scope_info);
   if (scope_info->HasHeapAllocatedLocals()) {
     num_heap_slots_ = scope_info_->NumberOfContextSlots();
   }
-
   AddInnerScope(inner_scope);
+}
 
-  // This scope's arguments shadow (if present) is context-allocated if an inner
-  // scope accesses this one's parameters.  Allocate the arguments_shadow_
-  // variable if necessary.
-  Isolate* isolate = Isolate::Current();
-  Variable::Mode mode;
-  int arguments_shadow_index =
-      scope_info_->ContextSlotIndex(
-          isolate->heap()->arguments_shadow_symbol(), &mode);
-  if (arguments_shadow_index >= 0) {
-    ASSERT(mode == Variable::INTERNAL);
-    arguments_shadow_ = new Variable(
-        this,
-        isolate->factory()->arguments_shadow_symbol(),
-        Variable::INTERNAL,
-        true,
-        Variable::ARGUMENTS);
-    arguments_shadow_->set_rewrite(
-        new Slot(arguments_shadow_, Slot::CONTEXT, arguments_shadow_index));
-    arguments_shadow_->set_is_used(true);
-  }
+
+Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
+    : isolate_(Isolate::Current()),
+      inner_scopes_(1),
+      variables_(),
+      temps_(0),
+      params_(0),
+      unresolved_(0),
+      decls_(0),
+      already_resolved_(true) {
+  SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
+  AddInnerScope(inner_scope);
+  ++num_var_or_const_;
+  Variable* variable = variables_.Declare(this,
+                                          catch_variable_name,
+                                          Variable::VAR,
+                                          true,  // Valid left-hand side.
+                                          Variable::NORMAL);
+  AllocateHeapSlot(variable);
 }
 
 
@@ -186,12 +191,11 @@
                         Handle<SerializedScopeInfo> scope_info) {
   outer_scope_ = outer_scope;
   type_ = type;
-  scope_name_ = FACTORY->empty_symbol();
+  scope_name_ = isolate_->factory()->empty_symbol();
   dynamics_ = NULL;
   receiver_ = NULL;
   function_ = NULL;
   arguments_ = NULL;
-  arguments_shadow_ = NULL;
   illegal_redecl_ = NULL;
   scope_inside_with_ = false;
   scope_contains_with_ = false;
@@ -199,6 +203,7 @@
   // Inherit the strict mode from the parent scope.
   strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
   outer_scope_calls_eval_ = false;
+  outer_scope_calls_non_strict_eval_ = false;
   inner_scope_calls_eval_ = false;
   outer_scope_is_eval_scope_ = false;
   force_eager_compilation_ = false;
@@ -211,30 +216,48 @@
 
 Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
                                     Scope* global_scope) {
+  // Reconstruct the outer scope chain from a closure's context chain.
   ASSERT(!info->closure().is_null());
-  // If we have a serialized scope info, reuse it.
+  Context* context = info->closure()->context();
+  Scope* current_scope = NULL;
   Scope* innermost_scope = NULL;
-  Scope* scope = NULL;
-
-  SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
-  if (scope_info != SerializedScopeInfo::Empty()) {
-    JSFunction* current = *info->closure();
-    do {
-      current = current->context()->closure();
-      Handle<SerializedScopeInfo> scope_info(current->shared()->scope_info());
-      if (*scope_info != SerializedScopeInfo::Empty()) {
-        scope = new Scope(scope, scope_info);
-        if (innermost_scope == NULL) innermost_scope = scope;
-      } else {
-        ASSERT(current->context()->IsGlobalContext());
+  bool contains_with = false;
+  while (!context->IsGlobalContext()) {
+    if (context->IsWithContext()) {
+      // All the inner scopes are inside a with.
+      contains_with = true;
+      for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
+        s->scope_inside_with_ = true;
       }
-    } while (!current->context()->IsGlobalContext());
+    } else {
+      if (context->IsFunctionContext()) {
+        SerializedScopeInfo* scope_info =
+            context->closure()->shared()->scope_info();
+        current_scope = new Scope(current_scope, FUNCTION_SCOPE,
+            Handle<SerializedScopeInfo>(scope_info));
+      } else if (context->IsBlockContext()) {
+        SerializedScopeInfo* scope_info =
+            SerializedScopeInfo::cast(context->extension());
+        current_scope = new Scope(current_scope, BLOCK_SCOPE,
+            Handle<SerializedScopeInfo>(scope_info));
+      } else {
+        ASSERT(context->IsCatchContext());
+        String* name = String::cast(context->extension());
+        current_scope = new Scope(current_scope, Handle<String>(name));
+      }
+      if (contains_with) current_scope->RecordWithStatement();
+      if (innermost_scope == NULL) innermost_scope = current_scope;
+    }
+
+    // Forget about a with when we move to a context for a different function.
+    if (context->previous()->closure() != context->closure()) {
+      contains_with = false;
+    }
+    context = context->previous();
   }
 
-  global_scope->AddInnerScope(scope);
-  if (innermost_scope == NULL) innermost_scope = global_scope;
-
-  return innermost_scope;
+  global_scope->AddInnerScope(current_scope);
+  return (innermost_scope == NULL) ? global_scope : innermost_scope;
 }
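DeserializeScopeChain above rebuilds the scope tree purely from the closure's context chain: function, block and catch contexts each produce a scope, while a with context only marks every scope created so far as being inside a 'with', and the flag is reset when the walk leaves the current function. A stripped-down sketch of the same walk over a toy Ctx type (an assumption for illustration; the real Context API differs):

    // Toy stand-ins for Context and Scope; innermost scope comes first in
    // the returned vector, matching innermost_scope in the code above.
    #include <string>
    #include <vector>

    enum class CtxKind { kGlobal, kFunction, kBlock, kCatch, kWith };

    struct Ctx {
      CtxKind kind;
      int closure_id;  // identifies the function owning this context
      Ctx* previous;
    };

    struct ToyScope {
      std::string label;
      bool inside_with = false;    // like scope_inside_with_
      bool contains_with = false;  // like RecordWithStatement()
    };

    std::vector<ToyScope*> WalkContextChain(Ctx* context) {
      std::vector<ToyScope*> scopes;
      bool contains_with = false;
      while (context->kind != CtxKind::kGlobal) {
        if (context->kind == CtxKind::kWith) {
          // All scopes created so far are inside this with.
          contains_with = true;
          for (ToyScope* s : scopes) s->inside_with = true;
        } else {
          ToyScope* s = new ToyScope;
          s->label = (context->kind == CtxKind::kFunction) ? "function"
                   : (context->kind == CtxKind::kBlock)    ? "block"
                                                           : "catch";
          if (contains_with) s->contains_with = true;
          scopes.push_back(s);
        }
        // Forget the with when moving to a context of a different function.
        if (context->previous->closure_id != context->closure_id) {
          contains_with = false;
        }
        context = context->previous;
      }
      return scopes;
    }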
 
 
@@ -259,7 +282,7 @@
 
 
 void Scope::Initialize(bool inside_with) {
-  ASSERT(!resolved());
+  ASSERT(!already_resolved());
 
   // Add this scope as a new inner scope of the outer scope.
   if (outer_scope_ != NULL) {
@@ -277,73 +300,96 @@
   // instead load them directly from the stack. Currently, the only
   // such parameter is 'this' which is passed on the stack when
   // invoking scripts
-  Variable* var =
-      variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
-                         false, Variable::THIS);
-  var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
-  receiver_ = var;
+  if (is_catch_scope() || is_block_scope()) {
+    ASSERT(outer_scope() != NULL);
+    receiver_ = outer_scope()->receiver();
+  } else {
+    ASSERT(is_function_scope() ||
+           is_global_scope() ||
+           is_eval_scope());
+    Variable* var =
+        variables_.Declare(this,
+                           isolate_->factory()->this_symbol(),
+                           Variable::VAR,
+                           false,
+                           Variable::THIS);
+    var->AllocateTo(Variable::PARAMETER, -1);
+    receiver_ = var;
+  }
 
   if (is_function_scope()) {
     // Declare 'arguments' variable which exists in all functions.
     // Note that it might never be accessed, in which case it won't be
     // allocated during variable allocation.
-    variables_.Declare(this, FACTORY->arguments_symbol(), Variable::VAR,
-                       true, Variable::ARGUMENTS);
+    variables_.Declare(this,
+                       isolate_->factory()->arguments_symbol(),
+                       Variable::VAR,
+                       true,
+                       Variable::ARGUMENTS);
   }
 }
 
 
+Scope* Scope::FinalizeBlockScope() {
+  ASSERT(is_block_scope());
+  ASSERT(temps_.is_empty());
+  ASSERT(params_.is_empty());
+
+  if (num_var_or_const() > 0) return this;
+
+  // Remove this scope from outer scope.
+  for (int i = 0; i < outer_scope_->inner_scopes_.length(); i++) {
+    if (outer_scope_->inner_scopes_[i] == this) {
+      outer_scope_->inner_scopes_.Remove(i);
+      break;
+    }
+  }
+
+  // Reparent inner scopes.
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    outer_scope()->AddInnerScope(inner_scopes_[i]);
+  }
+
+  // Move unresolved variables to the outer scope.
+  for (int i = 0; i < unresolved_.length(); i++) {
+    outer_scope()->unresolved_.Add(unresolved_[i]);
+  }
+
+  return NULL;
+}
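FinalizeBlockScope drops a block scope that ended up with no block-scoped declarations: the scope unlinks itself from its parent, reparents its inner scopes, hands its unresolved variables to the outer scope and returns NULL so the caller knows the scope is gone. The same 'collapse an empty tree node' idea in isolation, on a hypothetical Node type rather than the real Scope:

    // Collapsing a redundant node in a parent/child tree (sketch only).
    #include <algorithm>
    #include <vector>

    struct Node {
      Node* parent = nullptr;
      std::vector<Node*> children;
      int declarations = 0;
    };

    // Returns the node if it must stay, or nullptr after collapsing it.
    Node* Finalize(Node* node) {
      if (node->declarations > 0) return node;
      Node* outer = node->parent;
      // Unlink the node from its parent.
      auto& siblings = outer->children;
      siblings.erase(std::remove(siblings.begin(), siblings.end(), node),
                     siblings.end());
      // Reparent the children.
      for (Node* child : node->children) {
        child->parent = outer;
        outer->children.push_back(child);
      }
      return nullptr;
    }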
+
+
 Variable* Scope::LocalLookup(Handle<String> name) {
   Variable* result = variables_.Lookup(name);
-  if (result != NULL || !resolved()) {
+  if (result != NULL || scope_info_.is_null()) {
     return result;
   }
-  // If the scope is resolved, we can find a variable in serialized scope info.
-
-  // We should never lookup 'arguments' in this scope
-  // as it is implicitly present in any scope.
-  ASSERT(*name != *FACTORY->arguments_symbol());
-
-  // Assert that there is no local slot with the given name.
+  // If we have a serialized scope info, we might find the variable there.
+  //
+  // We should never lookup 'arguments' in this scope as it is implicitly
+  // present in every scope.
+  ASSERT(*name != *isolate_->factory()->arguments_symbol());
+  // There should be no local slot with the given name.
   ASSERT(scope_info_->StackSlotIndex(*name) < 0);
 
   // Check context slot lookup.
   Variable::Mode mode;
   int index = scope_info_->ContextSlotIndex(*name, &mode);
-  if (index >= 0) {
-    Variable* var =
-        variables_.Declare(this, name, mode, true, Variable::NORMAL);
-    var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
-    return var;
+  if (index < 0) {
+    // Check parameters.
+    mode = Variable::VAR;
+    index = scope_info_->ParameterIndex(*name);
+    if (index < 0) {
+      // Check the function name.
+      index = scope_info_->FunctionContextSlotIndex(*name);
+      if (index < 0) return NULL;
+    }
   }
 
-  index = scope_info_->ParameterIndex(*name);
-  if (index >= 0) {
-    // ".arguments" must be present in context slots.
-    ASSERT(arguments_shadow_ != NULL);
-    Variable* var =
-        variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
-    Property* rewrite =
-        new Property(new VariableProxy(arguments_shadow_),
-                     new Literal(Handle<Object>(Smi::FromInt(index))),
-                     RelocInfo::kNoPosition,
-                     Property::SYNTHETIC);
-    rewrite->set_is_arguments_access(true);
-    var->set_rewrite(rewrite);
-    return var;
-  }
-
-  index = scope_info_->FunctionContextSlotIndex(*name);
-  if (index >= 0) {
-    // Check that there is no local slot with the given name.
-    ASSERT(scope_info_->StackSlotIndex(*name) < 0);
-    Variable* var =
-        variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
-    var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
-    return var;
-  }
-
-  return NULL;
+  Variable* var =
+      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  var->AllocateTo(Variable::CONTEXT, index);
+  return var;
 }
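When a scope was built from serialized scope info, LocalLookup above re-materializes variables on demand, trying the context slots first, then the parameter list, then the function-name slot, and only returning NULL after all three fail; whatever index is found is then bound as a CONTEXT slot. The fall-through structure on its own, with the Find* helpers being assumptions that stand in for ContextSlotIndex/ParameterIndex/FunctionContextSlotIndex:

    #include <string>

    // Stub lookups standing in for the SerializedScopeInfo queries.
    int FindContextSlot(const std::string& name)      { return name == "x" ? 4 : -1; }
    int FindParameter(const std::string& name)        { return name == "p" ? 0 : -1; }
    int FindFunctionNameSlot(const std::string& name) { return name == "f" ? 2 : -1; }

    // Mirrors the fall-through in LocalLookup: context slot, then parameter,
    // then function name; -1 means the name is not in this scope at all.
    int LookupSerialized(const std::string& name) {
      int index = FindContextSlot(name);
      if (index < 0) {
        index = FindParameter(name);
        if (index < 0) {
          index = FindFunctionNameSlot(name);
          if (index < 0) return -1;
        }
      }
      return index;  // the caller would Declare() and AllocateTo(CONTEXT, index)
    }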
 
 
@@ -360,48 +406,52 @@
 
 Variable* Scope::DeclareFunctionVar(Handle<String> name) {
   ASSERT(is_function_scope() && function_ == NULL);
-  function_ = new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
-  return function_;
+  Variable* function_var =
+      new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
+  function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var);
+  return function_var;
 }
 
 
-Variable* Scope::DeclareLocal(Handle<String> name,
-                              Variable::Mode mode,
-                              LocalType type) {
-  // DYNAMIC variables are introduces during variable allocation,
-  // INTERNAL variables are allocated explicitly, and TEMPORARY
-  // variables are allocated via NewTemporary().
-  ASSERT(!resolved());
-  ASSERT(mode == Variable::VAR || mode == Variable::CONST);
-  if (type == VAR_OR_CONST) {
-    num_var_or_const_++;
-  }
+void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) {
+  ASSERT(!already_resolved());
+  ASSERT(is_function_scope());
+  Variable* var =
+      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  params_.Add(var);
+}
+
+
+Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
+  ASSERT(!already_resolved());
+  // This function handles VAR, CONST and LET modes.  DYNAMIC variables are
+  // introduced during variable allocation, INTERNAL variables are allocated
+  // explicitly, and TEMPORARY variables are allocated via NewTemporary().
+  ASSERT(mode == Variable::VAR ||
+         mode == Variable::CONST ||
+         mode == Variable::LET);
+  ++num_var_or_const_;
   return variables_.Declare(this, name, mode, true, Variable::NORMAL);
 }
 
 
 Variable* Scope::DeclareGlobal(Handle<String> name) {
   ASSERT(is_global_scope());
-  return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL, true,
+  return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL,
+                            true,
                             Variable::NORMAL);
 }
 
 
-void Scope::AddParameter(Variable* var) {
-  ASSERT(is_function_scope());
-  ASSERT(LocalLookup(var->name()) == var);
-  params_.Add(var);
-}
-
-
 VariableProxy* Scope::NewUnresolved(Handle<String> name,
                                     bool inside_with,
                                     int position) {
   // Note that we must not share the unresolved variables with
   // the same name because they may be removed selectively via
   // RemoveUnresolved().
-  ASSERT(!resolved());
-  VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
+  ASSERT(!already_resolved());
+  VariableProxy* proxy = new(isolate_->zone()) VariableProxy(
+      isolate_, name, false, inside_with, position);
   unresolved_.Add(proxy);
   return proxy;
 }
@@ -420,9 +470,12 @@
 
 
 Variable* Scope::NewTemporary(Handle<String> name) {
-  ASSERT(!resolved());
-  Variable* var =
-      new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
+  ASSERT(!already_resolved());
+  Variable* var = new Variable(this,
+                               name,
+                               Variable::TEMPORARY,
+                               true,
+                               Variable::NORMAL);
   temps_.Add(var);
   return var;
 }
@@ -448,6 +501,28 @@
 }
 
 
+Declaration* Scope::CheckConflictingVarDeclarations() {
+  int length = decls_.length();
+  for (int i = 0; i < length; i++) {
+    Declaration* decl = decls_[i];
+    if (decl->mode() != Variable::VAR) continue;
+    Handle<String> name = decl->proxy()->name();
+    bool cond = true;
+    for (Scope* scope = decl->scope(); cond; scope = scope->outer_scope_) {
+      // There is a conflict if there exists a non-VAR binding.
+      Variable* other_var = scope->variables_.Lookup(name);
+      if (other_var != NULL && other_var->mode() != Variable::VAR) {
+        return decl;
+      }
+
+      // Include the declaration scope in the iteration but stop after it.
+      if (!scope->is_block_scope() && !scope->is_catch_scope()) cond = false;
+    }
+  }
+  return NULL;
+}
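CheckConflictingVarDeclarations walks each hoisted VAR declaration outwards through the enclosing block and catch scopes, including the declaration scope itself, and reports a conflict as soon as any of those scopes already binds the same name with a non-VAR mode (such as a harmony 'let'). A toy version of the same walk, not the real Scope/Declaration classes:

    #include <map>
    #include <string>

    enum class Mode { kVar, kLet };

    struct ToyScope {
      ToyScope* outer = nullptr;
      bool is_block_or_catch = false;
      std::map<std::string, Mode> bindings;
    };

    // True if declaring 'name' as VAR starting at 'from' conflicts with a
    // non-VAR binding anywhere up to (and including) the declaration scope.
    bool ConflictsWithLexical(ToyScope* from, const std::string& name) {
      bool keep_going = true;
      for (ToyScope* s = from; keep_going; s = s->outer) {
        auto it = s->bindings.find(name);
        if (it != s->bindings.end() && it->second != Mode::kVar) return true;
        // Include the declaration scope in the walk, then stop.
        if (!s->is_block_or_catch) keep_going = false;
      }
      return false;
    }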
+
+
 template<class Allocator>
 void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
   // Collect variables in this scope.
@@ -489,8 +564,17 @@
   // and assume they may invoke eval themselves. Eventually we could capture
   // this information in the ScopeInfo and then use it here (by traversing
   // the call chain stack, at compile time).
+
   bool eval_scope = is_eval_scope();
-  PropagateScopeInfo(eval_scope, eval_scope);
+  bool outer_scope_calls_eval = false;
+  bool outer_scope_calls_non_strict_eval = false;
+  if (!is_global_scope()) {
+    context->ComputeEvalScopeInfo(&outer_scope_calls_eval,
+                                  &outer_scope_calls_non_strict_eval);
+  }
+  PropagateScopeInfo(outer_scope_calls_eval,
+                     outer_scope_calls_non_strict_eval,
+                     eval_scope);
 
   // 2) Resolve variables.
   Scope* global_scope = NULL;
@@ -541,12 +625,32 @@
 }
 
 
+Scope* Scope::DeclarationScope() {
+  Scope* scope = this;
+  while (scope->is_catch_scope() ||
+         scope->is_block_scope()) {
+    scope = scope->outer_scope();
+  }
+  return scope;
+}
+
+
+Handle<SerializedScopeInfo> Scope::GetSerializedScopeInfo() {
+  if (scope_info_.is_null()) {
+    scope_info_ = SerializedScopeInfo::Create(this);
+  }
+  return scope_info_;
+}
+
+
 #ifdef DEBUG
 static const char* Header(Scope::Type type) {
   switch (type) {
     case Scope::EVAL_SCOPE: return "eval";
     case Scope::FUNCTION_SCOPE: return "function";
     case Scope::GLOBAL_SCOPE: return "global";
+    case Scope::CATCH_SCOPE: return "catch";
+    case Scope::BLOCK_SCOPE: return "block";
   }
   UNREACHABLE();
   return NULL;
@@ -559,31 +663,51 @@
 
 
 static void PrintName(Handle<String> name) {
-  SmartPointer<char> s = name->ToCString(DISALLOW_NULLS);
+  SmartArrayPointer<char> s = name->ToCString(DISALLOW_NULLS);
   PrintF("%s", *s);
 }
 
 
-static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
-  if (var->is_used() || var->rewrite() != NULL) {
+static void PrintLocation(Variable* var) {
+  switch (var->location()) {
+    case Variable::UNALLOCATED:
+      break;
+    case Variable::PARAMETER:
+      PrintF("parameter[%d]", var->index());
+      break;
+    case Variable::LOCAL:
+      PrintF("local[%d]", var->index());
+      break;
+    case Variable::CONTEXT:
+      PrintF("context[%d]", var->index());
+      break;
+    case Variable::LOOKUP:
+      PrintF("lookup");
+      break;
+  }
+}
+
+
+static void PrintVar(int indent, Variable* var) {
+  if (var->is_used() || !var->IsUnallocated()) {
     Indent(indent, Variable::Mode2String(var->mode()));
     PrintF(" ");
     PrintName(var->name());
     PrintF(";  // ");
-    if (var->rewrite() != NULL) {
-      PrintF("%s, ", printer->Print(var->rewrite()));
-      if (var->is_accessed_from_inner_scope()) PrintF(", ");
+    PrintLocation(var);
+    if (var->is_accessed_from_inner_scope()) {
+      if (!var->IsUnallocated()) PrintF(", ");
+      PrintF("inner scope access");
     }
-    if (var->is_accessed_from_inner_scope()) PrintF("inner scope access");
     PrintF("\n");
   }
 }
 
 
-static void PrintMap(PrettyPrinter* printer, int indent, VariableMap* map) {
+static void PrintMap(int indent, VariableMap* map) {
   for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
-    PrintVar(printer, indent, var);
+    PrintVar(indent, var);
   }
 }
 
@@ -622,10 +746,14 @@
   if (HasTrivialOuterContext()) {
     Indent(n1, "// scope has trivial outer context\n");
   }
+  if (is_strict_mode()) Indent(n1, "// strict mode scope\n");
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
   if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
   if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
+  if (outer_scope_calls_non_strict_eval_) {
+    Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
+  }
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
   if (outer_scope_is_eval_scope_) {
     Indent(n1, "// outer scope is 'eval' scope\n");
@@ -636,25 +764,24 @@
   PrintF("%d heap slots\n", num_heap_slots_); }
 
   // Print locals.
-  PrettyPrinter printer;
   Indent(n1, "// function var\n");
   if (function_ != NULL) {
-    PrintVar(&printer, n1, function_);
+    PrintVar(n1, function_->var());
   }
 
   Indent(n1, "// temporary vars\n");
   for (int i = 0; i < temps_.length(); i++) {
-    PrintVar(&printer, n1, temps_[i]);
+    PrintVar(n1, temps_[i]);
   }
 
   Indent(n1, "// local vars\n");
-  PrintMap(&printer, n1, &variables_);
+  PrintMap(n1, &variables_);
 
   Indent(n1, "// dynamic vars\n");
   if (dynamics_ != NULL) {
-    PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC));
-    PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
-    PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
+    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC));
+    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
+    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
   }
 
   // Print inner scopes (disable by providing negative n).
@@ -678,7 +805,7 @@
     // Declare a new non-local.
     var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
     // Allocate it by giving it a dynamic lookup.
-    var->set_rewrite(new Slot(var, Slot::LOOKUP, -1));
+    var->AllocateTo(Variable::LOOKUP, -1);
   }
   return var;
 }
@@ -691,7 +818,7 @@
 // another variable that is introduced dynamically via an 'eval' call
 // or a 'with' statement).
 Variable* Scope::LookupRecursive(Handle<String> name,
-                                 bool inner_lookup,
+                                 bool from_inner_scope,
                                  Variable** invalidated_local) {
   // If we find a variable, but the current scope calls 'eval', the found
   // variable may not be the correct one (the 'eval' may introduce a
@@ -707,7 +834,7 @@
     // (Even if there is an 'eval' in this scope which introduces the
     // same variable again, the resulting variable remains the same.
     // Note that enclosing 'with' statements are handled at the call site.)
-    if (!inner_lookup)
+    if (!from_inner_scope)
       return var;
 
   } else {
@@ -720,7 +847,7 @@
     // the name of named function literal is kept in an intermediate scope
     // in between this scope and the next outer scope.)
     if (function_ != NULL && function_->name().is_identical_to(name)) {
-      var = function_;
+      var = function_->var();
 
     } else if (outer_scope_ != NULL) {
       var = outer_scope_->LookupRecursive(name, true, invalidated_local);
@@ -740,7 +867,7 @@
   ASSERT(var != NULL);
 
   // If this is a lookup from an inner scope, mark the variable.
-  if (inner_lookup) {
+  if (from_inner_scope) {
     var->MarkAsAccessedFromInnerScope();
   }
 
@@ -852,20 +979,30 @@
 
 
 bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
+                               bool outer_scope_calls_non_strict_eval,
                                bool outer_scope_is_eval_scope) {
   if (outer_scope_calls_eval) {
     outer_scope_calls_eval_ = true;
   }
 
+  if (outer_scope_calls_non_strict_eval) {
+    outer_scope_calls_non_strict_eval_ = true;
+  }
+
   if (outer_scope_is_eval_scope) {
     outer_scope_is_eval_scope_ = true;
   }
 
   bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
   bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
+  bool calls_non_strict_eval =
+      (scope_calls_eval_ && !is_strict_mode()) ||
+      outer_scope_calls_non_strict_eval_;
   for (int i = 0; i < inner_scopes_.length(); i++) {
     Scope* inner_scope = inner_scopes_[i];
-    if (inner_scope->PropagateScopeInfo(calls_eval, is_eval)) {
+    if (inner_scope->PropagateScopeInfo(calls_eval,
+                                        calls_non_strict_eval,
+                                        is_eval)) {
       inner_scope_calls_eval_ = true;
     }
     if (inner_scope->force_eager_compilation_) {
@@ -883,8 +1020,11 @@
   // visible name.
   if ((var->is_this() || var->name()->length() > 0) &&
       (var->is_accessed_from_inner_scope() ||
-       scope_calls_eval_ || inner_scope_calls_eval_ ||
-       scope_contains_with_)) {
+       scope_calls_eval_ ||
+       inner_scope_calls_eval_ ||
+       scope_contains_with_ ||
+       is_catch_scope() ||
+       is_block_scope())) {
     var->set_is_used(true);
   }
   // Global variables do not need to be allocated.
@@ -893,73 +1033,60 @@
 
 
 bool Scope::MustAllocateInContext(Variable* var) {
-  // If var is accessed from an inner scope, or if there is a
-  // possibility that it might be accessed from the current or an inner
-  // scope (through an eval() call), it must be allocated in the
-  // context.  Exception: temporary variables are not allocated in the
+  // If var is accessed from an inner scope, or if there is a possibility
+  // that it might be accessed from the current or an inner scope (through
+  // an eval() call or a runtime with lookup), it must be allocated in the
   // context.
-  return
-    var->mode() != Variable::TEMPORARY &&
-    (var->is_accessed_from_inner_scope() ||
-     scope_calls_eval_ || inner_scope_calls_eval_ ||
-     scope_contains_with_ || var->is_global());
+  //
+  // Exceptions: temporary variables are never allocated in a context;
+  // catch-bound variables are always allocated in a context.
+  if (var->mode() == Variable::TEMPORARY) return false;
+  if (is_catch_scope() || is_block_scope()) return true;
+  return var->is_accessed_from_inner_scope() ||
+      scope_calls_eval_ ||
+      inner_scope_calls_eval_ ||
+      scope_contains_with_ ||
+      var->is_global();
 }
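The rewritten MustAllocateInContext spells out the two special cases before the general rule: temporaries are never context-allocated, catch- and block-scoped variables always are, and every other variable goes into the context only if it can be reached from an inner scope, through eval, through a 'with', or is a global. The same decision restated over hypothetical VarInfo/ScopeFlags structs (assumptions, not V8 types):

    // Hedged restatement of the allocation rule shown above.
    struct VarInfo {
      bool is_temporary;
      bool accessed_from_inner_scope;
      bool is_global;
    };

    struct ScopeFlags {
      bool is_catch_or_block;
      bool calls_eval;
      bool inner_scope_calls_eval;
      bool contains_with;
    };

    bool MustAllocateInContext(const VarInfo& var, const ScopeFlags& scope) {
      if (var.is_temporary) return false;        // temporaries never go in a context
      if (scope.is_catch_or_block) return true;  // catch/block bindings always do
      return var.accessed_from_inner_scope ||
             scope.calls_eval ||
             scope.inner_scope_calls_eval ||
             scope.contains_with ||
             var.is_global;
    }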
 
 
 bool Scope::HasArgumentsParameter() {
   for (int i = 0; i < params_.length(); i++) {
-    if (params_[i]->name().is_identical_to(FACTORY->arguments_symbol()))
+    if (params_[i]->name().is_identical_to(
+            isolate_->factory()->arguments_symbol())) {
       return true;
+    }
   }
   return false;
 }
 
 
 void Scope::AllocateStackSlot(Variable* var) {
-  var->set_rewrite(new Slot(var, Slot::LOCAL, num_stack_slots_++));
+  var->AllocateTo(Variable::LOCAL, num_stack_slots_++);
 }
 
 
 void Scope::AllocateHeapSlot(Variable* var) {
-  var->set_rewrite(new Slot(var, Slot::CONTEXT, num_heap_slots_++));
+  var->AllocateTo(Variable::CONTEXT, num_heap_slots_++);
 }
 
 
 void Scope::AllocateParameterLocals() {
   ASSERT(is_function_scope());
-  Variable* arguments = LocalLookup(FACTORY->arguments_symbol());
+  Variable* arguments = LocalLookup(isolate_->factory()->arguments_symbol());
   ASSERT(arguments != NULL);  // functions have 'arguments' declared implicitly
 
-  // Parameters are rewritten to arguments[i] if 'arguments' is used in
-  // a non-strict mode function. Strict mode code doesn't alias arguments.
-  bool rewrite_parameters = false;
+  bool uses_nonstrict_arguments = false;
 
   if (MustAllocate(arguments) && !HasArgumentsParameter()) {
     // 'arguments' is used. Unless there is also a parameter called
-    // 'arguments', we must be conservative and access all parameters via
-    // the arguments object: The i'th parameter is rewritten into
-    // '.arguments[i]' (*). If we have a parameter named 'arguments', a
-    // (new) value is always assigned to it via the function
-    // invocation. Then 'arguments' denotes that specific parameter value
-    // and cannot be used to access the parameters, which is why we don't
-    // need to rewrite in that case.
-    //
-    // (*) Instead of having a parameter called 'arguments', we may have an
-    // assignment to 'arguments' in the function body, at some arbitrary
-    // point in time (possibly through an 'eval()' call!). After that
-    // assignment any re-write of parameters would be invalid (was bug
-    // 881452). Thus, we introduce a shadow '.arguments'
-    // variable which also points to the arguments object. For rewrites we
-    // use '.arguments' which remains valid even if we assign to
-    // 'arguments'. To summarize: If we need to rewrite, we allocate an
-    // 'arguments' object dynamically upon function invocation. The compiler
-    // introduces 2 local variables 'arguments' and '.arguments', both of
-    // which originally point to the arguments object that was
-    // allocated. All parameters are rewritten into property accesses via
-    // the '.arguments' variable. Thus, any changes to properties of
-    // 'arguments' are reflected in the variables and vice versa. If the
-    // 'arguments' variable is changed, '.arguments' still points to the
-    // correct arguments object and the rewrites still work.
+    // 'arguments', we must be conservative and allocate all parameters to
+    // the context assuming they will be captured by the arguments object.
+    // If we have a parameter named 'arguments', a (new) value is always
+    // assigned to it via the function invocation. Then 'arguments' denotes
+    // that specific parameter value and cannot be used to access the
+    // parameters, which is why we don't need to allocate an arguments
+    // object in that case.
 
     // We are using 'arguments'. Tell the code generator that is needs to
     // allocate the arguments object by setting 'arguments_'.
@@ -968,76 +1095,32 @@
     // In strict mode 'arguments' does not alias formal parameters.
     // Therefore in strict mode we allocate parameters as if 'arguments'
     // were not used.
-    rewrite_parameters = !is_strict_mode();
+    uses_nonstrict_arguments = !is_strict_mode();
   }
 
-  if (rewrite_parameters) {
-    // We also need the '.arguments' shadow variable. Declare it and create
-    // and bind the corresponding proxy. It's ok to declare it only now
-    // because it's a local variable that is allocated after the parameters
-    // have been allocated.
-    //
-    // Note: This is "almost" at temporary variable but we cannot use
-    // NewTemporary() because the mode needs to be INTERNAL since this
-    // variable may be allocated in the heap-allocated context (temporaries
-    // are never allocated in the context).
-    arguments_shadow_ = new Variable(this,
-                                     FACTORY->arguments_shadow_symbol(),
-                                     Variable::INTERNAL,
-                                     true,
-                                     Variable::ARGUMENTS);
-    arguments_shadow_->set_is_used(true);
-    temps_.Add(arguments_shadow_);
-
-    // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
-    for (int i = 0; i < params_.length(); i++) {
-      Variable* var = params_[i];
-      ASSERT(var->scope() == this);
-      if (MustAllocate(var)) {
-        if (MustAllocateInContext(var)) {
-          // It is ok to set this only now, because arguments is a local
-          // variable that is allocated after the parameters have been
-          // allocated.
-          arguments_shadow_->MarkAsAccessedFromInnerScope();
-        }
-        Property* rewrite =
-            new Property(new VariableProxy(arguments_shadow_),
-                         new Literal(Handle<Object>(Smi::FromInt(i))),
-                         RelocInfo::kNoPosition,
-                         Property::SYNTHETIC);
-        rewrite->set_is_arguments_access(true);
-        var->set_rewrite(rewrite);
-      }
+  // The same parameter may occur multiple times in the parameters_ list.
+  // If it does, and if it is not copied into the context object, it must
+  // receive the highest parameter index for that parameter; thus iteration
+  // order is relevant!
+  for (int i = params_.length() - 1; i >= 0; --i) {
+    Variable* var = params_[i];
+    ASSERT(var->scope() == this);
+    if (uses_nonstrict_arguments) {
+      // Give the parameter a use from an inner scope, to force allocation
+      // to the context.
+      var->MarkAsAccessedFromInnerScope();
     }
 
-  } else {
-    // The arguments object is not used, so we can access parameters directly.
-    // The same parameter may occur multiple times in the parameters_ list.
-    // If it does, and if it is not copied into the context object, it must
-    // receive the highest parameter index for that parameter; thus iteration
-    // order is relevant!
-    for (int i = 0; i < params_.length(); i++) {
-      Variable* var = params_[i];
-      ASSERT(var->scope() == this);
-      if (MustAllocate(var)) {
-        if (MustAllocateInContext(var)) {
-          ASSERT(var->rewrite() == NULL ||
-                 (var->AsSlot() != NULL &&
-                  var->AsSlot()->type() == Slot::CONTEXT));
-          if (var->rewrite() == NULL) {
-            // Only set the heap allocation if the parameter has not
-            // been allocated yet.
-            AllocateHeapSlot(var);
-          }
-        } else {
-          ASSERT(var->rewrite() == NULL ||
-                 (var->AsSlot() != NULL &&
-                  var->AsSlot()->type() == Slot::PARAMETER));
-          // Set the parameter index always, even if the parameter
-          // was seen before! (We need to access the actual parameter
-          // supplied for the last occurrence of a multiply declared
-          // parameter.)
-          var->set_rewrite(new Slot(var, Slot::PARAMETER, i));
+    if (MustAllocate(var)) {
+      if (MustAllocateInContext(var)) {
+        ASSERT(var->IsUnallocated() || var->IsContextSlot());
+        if (var->IsUnallocated()) {
+          AllocateHeapSlot(var);
+        }
+      } else {
+        ASSERT(var->IsUnallocated() || var->IsParameter());
+        if (var->IsUnallocated()) {
+          var->AllocateTo(Variable::PARAMETER, i);
         }
       }
     }
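The parameter loop above runs right to left so that a name which occurs more than once in the parameter list ends up with the index of its rightmost occurrence, which is the value the function actually receives; the IsUnallocated() checks then make later (leftward) duplicates no-ops. A tiny demonstration of that rule with plain strings and indices:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // function f(a, b, a) { ... } -- 'a' is declared twice.
      std::vector<std::string> params = {"a", "b", "a"};
      std::map<std::string, int> index;  // name -> allocated parameter index

      for (int i = static_cast<int>(params.size()) - 1; i >= 0; --i) {
        // Only allocate if unseen, so the rightmost occurrence wins.
        if (index.find(params[i]) == index.end()) index[params[i]] = i;
      }

      std::printf("a -> %d, b -> %d\n", index["a"], index["b"]);  // a -> 2, b -> 1
      return 0;
    }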
@@ -1047,10 +1130,9 @@
 
 void Scope::AllocateNonParameterLocal(Variable* var) {
   ASSERT(var->scope() == this);
-  ASSERT(var->rewrite() == NULL ||
-         (!var->IsVariable(FACTORY->result_symbol())) ||
-         (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
-  if (var->rewrite() == NULL && MustAllocate(var)) {
+  ASSERT(!var->IsVariable(isolate_->factory()->result_symbol()) ||
+         !var->IsStackLocal());
+  if (var->IsUnallocated() && MustAllocate(var)) {
     if (MustAllocateInContext(var)) {
       AllocateHeapSlot(var);
     } else {
@@ -1078,7 +1160,7 @@
   // because of the current ScopeInfo implementation (see
   // ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
   if (function_ != NULL) {
-    AllocateNonParameterLocal(function_);
+    AllocateNonParameterLocal(function_->var());
   }
 }
 
@@ -1091,7 +1173,7 @@
 
   // If scope is already resolved, we still need to allocate
   // variables in inner scopes which might not had been resolved yet.
-  if (resolved()) return;
+  if (already_resolved()) return;
   // The number of slots required for variables.
   num_stack_slots_ = 0;
   num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
diff --git a/src/scopes.h b/src/scopes.h
index a0e56a4..2917a63 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -90,20 +90,15 @@
   // Construction
 
   enum Type {
-    EVAL_SCOPE,     // the top-level scope for an 'eval' source
-    FUNCTION_SCOPE,  // the top-level scope for a function
-    GLOBAL_SCOPE    // the top-level scope for a program or a top-level eval
-  };
-
-  enum LocalType {
-    PARAMETER,
-    VAR_OR_CONST
+    EVAL_SCOPE,      // The top-level scope for an eval source.
+    FUNCTION_SCOPE,  // The top-level scope for a function.
+    GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
+    CATCH_SCOPE,     // The scope introduced by catch.
+    BLOCK_SCOPE      // The scope introduced by a new block.
   };
 
   Scope(Scope* outer_scope, Type type);
 
-  virtual ~Scope() { }
-
   // Compute top scope and allocate variables. For lazy compilation the top
   // scope only contains the single lazily compiled function, so this
   // doesn't re-allocate variables repeatedly.
@@ -115,33 +110,36 @@
   // The scope name is only used for printing/debugging.
   void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
 
-  virtual void Initialize(bool inside_with);
+  void Initialize(bool inside_with);
 
-  // Called just before leaving a scope.
-  virtual void Leave() {
-    // No cleanup or fixup necessary.
-  }
+  // Checks if the block scope is redundant, i.e. it does not contain any
+  // block scoped declarations. In that case it is removed from the scope
+  // tree and its children are reparented.
+  Scope* FinalizeBlockScope();
 
   // ---------------------------------------------------------------------------
   // Declarations
 
   // Lookup a variable in this scope. Returns the variable or NULL if not found.
-  virtual Variable* LocalLookup(Handle<String> name);
+  Variable* LocalLookup(Handle<String> name);
 
   // Lookup a variable in this scope or outer scopes.
   // Returns the variable or NULL if not found.
-  virtual Variable* Lookup(Handle<String> name);
+  Variable* Lookup(Handle<String> name);
 
   // Declare the function variable for a function literal. This variable
   // is in an intermediate scope between this function scope and the
   // outer scope. Only possible for function scopes; at most one variable.
   Variable* DeclareFunctionVar(Handle<String> name);
 
+  // Declare a parameter in this scope.  When there are duplicated
+  // parameters the rightmost one 'wins'.  However, the implementation
+  // expects all parameters to be declared and added from left to right.
+  void DeclareParameter(Handle<String> name, Variable::Mode mode);
+
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
-  virtual Variable* DeclareLocal(Handle<String> name,
-                                 Variable::Mode mode,
-                                 LocalType type);
+  Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
 
   // Declare an implicit global variable in this scope which must be a
   // global scope.  The variable was introduced (possibly from an inner
@@ -149,16 +147,10 @@
   // with statements or eval calls.
   Variable* DeclareGlobal(Handle<String> name);
 
-  // Add a parameter to the parameter list. The parameter must have been
-  // declared via Declare. The same parameter may occur more than once in
-  // the parameter list; they must be added in source order, from left to
-  // right.
-  void AddParameter(Variable* var);
-
   // Create a new unresolved variable.
-  virtual VariableProxy* NewUnresolved(Handle<String> name,
-                                       bool inside_with,
-                                       int position = RelocInfo::kNoPosition);
+  VariableProxy* NewUnresolved(Handle<String> name,
+                               bool inside_with,
+                               int position = RelocInfo::kNoPosition);
 
   // Remove an unresolved variable. During parsing, an unresolved variable
   // may have been added optimistically, but then only the variable name
@@ -172,7 +164,7 @@
   // for printing and cannot be used to find the variable.  In particular,
   // the only way to get hold of the temporary is by keeping the Variable*
   // around.
-  virtual Variable* NewTemporary(Handle<String> name);
+  Variable* NewTemporary(Handle<String> name);
 
   // Adds the specific declaration node to the list of declarations in
   // this scope. The declarations are processed as part of entering
@@ -195,6 +187,10 @@
   // Check if the scope has (at least) one illegal redeclaration.
   bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
 
+  // For harmony block scoping mode: Check if the scope has conflicting var
+  // declarations, i.e. a var declaration that has been hoisted from a nested
+  // scope over a let binding of the same name.
+  Declaration* CheckConflictingVarDeclarations();
 
   // ---------------------------------------------------------------------------
   // Scope-specific info.
@@ -217,11 +213,19 @@
   bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
   bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
   bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
+  bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
+  bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
   bool is_strict_mode() const { return strict_mode_; }
+  bool is_strict_mode_eval_scope() const {
+    return is_eval_scope() && is_strict_mode();
+  }
 
   // Information about which scopes calls eval.
   bool calls_eval() const { return scope_calls_eval_; }
   bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
+  bool outer_scope_calls_non_strict_eval() const {
+    return outer_scope_calls_non_strict_eval_;
+  }
 
   // Is this scope inside a with statement.
   bool inside_with() const { return scope_inside_with_; }
@@ -234,18 +238,13 @@
   // ---------------------------------------------------------------------------
   // Accessors.
 
-  // A new variable proxy corresponding to the (function) receiver.
-  VariableProxy* receiver() const {
-    VariableProxy* proxy =
-        new VariableProxy(FACTORY->this_symbol(), true, false);
-    proxy->BindTo(receiver_);
-    return proxy;
-  }
+  // The variable corresponding the 'this' value.
+  Variable* receiver() { return receiver_; }
 
   // The variable holding the function literal for named function
   // literals, or NULL.
   // Only valid for function scopes.
-  Variable* function() const {
+  VariableProxy* function() const {
     ASSERT(is_function_scope());
     return function_;
   }
@@ -260,18 +259,12 @@
   int num_parameters() const { return params_.length(); }
 
   // The local variable 'arguments' if we need to allocate it; NULL otherwise.
-  // If arguments() exist, arguments_shadow() exists, too.
   Variable* arguments() const { return arguments_; }
 
-  // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
-  // If arguments_shadow() exist, arguments() exists, too.
-  Variable* arguments_shadow() const { return arguments_shadow_; }
-
   // Declarations list.
   ZoneList<Declaration*>* declarations() { return &decls_; }
 
 
-
   // ---------------------------------------------------------------------------
   // Variable allocation.
 
@@ -303,11 +296,17 @@
   bool AllowsLazyCompilation() const;
 
   // True if the outer context of this scope is always the global context.
-  virtual bool HasTrivialOuterContext() const;
+  bool HasTrivialOuterContext() const;
 
   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope);
 
+  // Find the first function, global, or eval scope.  This is the scope
+  // where var declarations will be hoisted to in the implementation.
+  Scope* DeclarationScope();
+
+  Handle<SerializedScopeInfo> GetSerializedScopeInfo();
+
   // ---------------------------------------------------------------------------
   // Strict mode support.
   bool IsDeclared(Handle<String> name) {
@@ -333,6 +332,8 @@
 
   explicit Scope(Type type);
 
+  Isolate* const isolate_;
+
   // Scope tree.
   Scope* outer_scope_;  // the immediately enclosing outer scope, or NULL
   ZoneList<Scope*> inner_scopes_;  // the immediately enclosed inner scopes
@@ -362,27 +363,36 @@
   // Convenience variable.
   Variable* receiver_;
   // Function variable, if any; function scopes only.
-  Variable* function_;
+  VariableProxy* function_;
   // Convenience variable; function scopes only.
   Variable* arguments_;
-  // Convenience variable; function scopes only.
-  Variable* arguments_shadow_;
 
   // Illegal redeclaration.
   Expression* illegal_redecl_;
 
-  // Scope-specific information.
-  bool scope_inside_with_;  // this scope is inside a 'with' of some outer scope
-  bool scope_contains_with_;  // this scope contains a 'with' statement
-  bool scope_calls_eval_;  // this scope contains an 'eval' call
-  bool strict_mode_;  // this scope is a strict mode scope
+  // Scope-specific information computed during parsing.
+  //
+  // This scope is inside a 'with' of some outer scope.
+  bool scope_inside_with_;
+  // This scope contains a 'with' statement.
+  bool scope_contains_with_;
+  // This scope or a nested catch scope or with scope contains an 'eval'
+  // call. At the 'eval' call site this scope is the declaration scope.
+  bool scope_calls_eval_;
+  // This scope is a strict mode scope.
+  bool strict_mode_;
 
   // Computed via PropagateScopeInfo.
   bool outer_scope_calls_eval_;
+  bool outer_scope_calls_non_strict_eval_;
   bool inner_scope_calls_eval_;
   bool outer_scope_is_eval_scope_;
   bool force_eager_compilation_;
 
+  // True if it doesn't need scope resolution (e.g., if the scope was
+  // constructed based on a serialized scope info or a catch context).
+  bool already_resolved_;
+
   // Computed as variables are declared.
   int num_var_or_const_;
 
@@ -392,7 +402,7 @@
 
   // Serialized scopes support.
   Handle<SerializedScopeInfo> scope_info_;
-  bool resolved() { return !scope_info_.is_null(); }
+  bool already_resolved() { return already_resolved_; }
 
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
@@ -400,7 +410,7 @@
 
   // Variable resolution.
   Variable* LookupRecursive(Handle<String> name,
-                            bool inner_lookup,
+                            bool from_inner_function,
                             Variable** invalidated_local);
   void ResolveVariable(Scope* global_scope,
                        Handle<Context> context,
@@ -410,6 +420,7 @@
 
   // Scope analysis.
   bool PropagateScopeInfo(bool outer_scope_calls_eval,
+                          bool outer_scope_calls_non_strict_eval,
                           bool outer_scope_is_eval_scope);
   bool HasTrivialContext() const;
 
@@ -427,7 +438,11 @@
   void AllocateVariablesRecursively();
 
  private:
-  Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info);
+  // Construct a function or block scope based on the scope info.
+  Scope(Scope* inner_scope, Type type, Handle<SerializedScopeInfo> scope_info);
+
+  // Construct a catch scope with a binding for the name.
+  Scope(Scope* inner_scope, Handle<String> catch_variable_name);
 
   void AddInnerScope(Scope* inner_scope) {
     if (inner_scope != NULL) {
@@ -441,59 +456,6 @@
                    Handle<SerializedScopeInfo> scope_info);
 };
 
-
-// Scope used during pre-parsing.
-class DummyScope : public Scope {
- public:
-  DummyScope()
-      : Scope(GLOBAL_SCOPE),
-        nesting_level_(1),  // Allows us to Leave the initial scope.
-        inside_with_level_(kNotInsideWith) {
-    outer_scope_ = this;
-    scope_inside_with_ = false;
-  }
-
-  virtual void Initialize(bool inside_with) {
-    nesting_level_++;
-    if (inside_with && inside_with_level_ == kNotInsideWith) {
-      inside_with_level_ = nesting_level_;
-    }
-    ASSERT(inside_with_level_ <= nesting_level_);
-  }
-
-  virtual void Leave() {
-    nesting_level_--;
-    ASSERT(nesting_level_ >= 0);
-    if (nesting_level_ < inside_with_level_) {
-      inside_with_level_ = kNotInsideWith;
-    }
-    ASSERT(inside_with_level_ <= nesting_level_);
-  }
-
-  virtual Variable* Lookup(Handle<String> name)  { return NULL; }
-
-  virtual VariableProxy* NewUnresolved(Handle<String> name,
-                                       bool inside_with,
-                                       int position = RelocInfo::kNoPosition) {
-    return NULL;
-  }
-
-  virtual Variable* NewTemporary(Handle<String> name)  { return NULL; }
-
-  virtual bool HasTrivialOuterContext() const {
-    return (nesting_level_ == 0 || inside_with_level_ <= 0);
-  }
-
- private:
-  static const int kNotInsideWith = -1;
-  // Number of surrounding scopes of the current scope.
-  int nesting_level_;
-  // Nesting level of outermost scope that is contained in a with statement,
-  // or kNotInsideWith if there are no with's around the current scope.
-  int inside_with_level_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_SCOPES_H_
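
For readers following the scope changes above: the new DeclarationScope() is documented as finding the first function, global, or eval scope, i.e. the scope where 'var' declarations end up after hoisting. Below is a minimal standalone sketch of that walk under those stated semantics; the DemoScope type and field names are illustrative only, not V8's implementation.

enum DemoScopeType { GLOBAL_SCOPE, EVAL_SCOPE, FUNCTION_SCOPE, CATCH_SCOPE, BLOCK_SCOPE };

struct DemoScope {
  DemoScopeType type;
  DemoScope* outer;  // NULL for the global scope
};

// Walk outwards past block and catch scopes until a scope that can host
// hoisted 'var' declarations (function, global, or eval) is found.
DemoScope* DeclarationScope(DemoScope* scope) {
  while (scope->type != FUNCTION_SCOPE &&
         scope->type != GLOBAL_SCOPE &&
         scope->type != EVAL_SCOPE) {
    scope = scope->outer;
  }
  return scope;
}
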
diff --git a/src/serialize.cc b/src/serialize.cc
index 12e9613..ecb480a 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 
 #include "accessors.h"
 #include "api.h"
+#include "bootstrapper.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "ic-inl.h"
@@ -38,7 +39,6 @@
 #include "serialize.h"
 #include "stub-cache.h"
 #include "v8threads.h"
-#include "bootstrapper.h"
 
 namespace v8 {
 namespace internal {
@@ -62,57 +62,15 @@
 }
 
 
-// ExternalReferenceTable is a helper class that defines the relationship
-// between external references and their encodings. It is used to build
-// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
-class ExternalReferenceTable {
- public:
-  static ExternalReferenceTable* instance(Isolate* isolate) {
-    ExternalReferenceTable* external_reference_table =
-        isolate->external_reference_table();
-    if (external_reference_table == NULL) {
-      external_reference_table = new ExternalReferenceTable(isolate);
-      isolate->set_external_reference_table(external_reference_table);
-    }
-    return external_reference_table;
+ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
+  ExternalReferenceTable* external_reference_table =
+      isolate->external_reference_table();
+  if (external_reference_table == NULL) {
+    external_reference_table = new ExternalReferenceTable(isolate);
+    isolate->set_external_reference_table(external_reference_table);
   }
-
-  int size() const { return refs_.length(); }
-
-  Address address(int i) { return refs_[i].address; }
-
-  uint32_t code(int i) { return refs_[i].code; }
-
-  const char* name(int i) { return refs_[i].name; }
-
-  int max_id(int code) { return max_id_[code]; }
-
- private:
-  explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
-      PopulateTable(isolate);
-  }
-  ~ExternalReferenceTable() { }
-
-  struct ExternalReferenceEntry {
-    Address address;
-    uint32_t code;
-    const char* name;
-  };
-
-  void PopulateTable(Isolate* isolate);
-
-  // For a few types of references, we can get their address from their id.
-  void AddFromId(TypeCode type,
-                 uint16_t id,
-                 const char* name,
-                 Isolate* isolate);
-
-  // For other types of references, the caller will figure out the address.
-  void Add(Address address, TypeCode type, uint16_t id, const char* name);
-
-  List<ExternalReferenceEntry> refs_;
-  int max_id_[kTypeCodeCount];
-};
+  return external_reference_table;
+}
 
 
 void ExternalReferenceTable::AddFromId(TypeCode type,
@@ -282,14 +240,14 @@
   // Top addresses
 
   const char* AddressNames[] = {
-#define C(name) "Isolate::" #name,
-    ISOLATE_ADDRESS_LIST(C)
-    ISOLATE_ADDRESS_LIST_PROF(C)
+#define BUILD_NAME_LITERAL(CamelName, hacker_name)      \
+    "Isolate::" #hacker_name "_address",
+    FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
     NULL
-#undef C
+#undef BUILD_NAME_LITERAL
   };
 
-  for (uint16_t i = 0; i < Isolate::k_isolate_address_count; ++i) {
+  for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
     Add(isolate->get_address_from_id((Isolate::AddressId)i),
         TOP_ADDRESS, i, AddressNames[i]);
   }
@@ -1017,10 +975,11 @@
 
       case kNativesStringResource: {
         int index = source_->Get();
-        Vector<const char> source_vector = Natives::GetScriptSource(index);
+        Vector<const char> source_vector = Natives::GetRawScriptSource(index);
         NativesExternalStringResource* resource =
-            new NativesExternalStringResource(
-                isolate->bootstrapper(), source_vector.start());
+            new NativesExternalStringResource(isolate->bootstrapper(),
+                                              source_vector.start(),
+                                              source_vector.length());
         *current++ = reinterpret_cast<Object*>(resource);
         break;
       }
diff --git a/src/serialize.h b/src/serialize.h
index 07c0a25..66d6fb5 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -60,6 +60,52 @@
 const int kDebugIdShift = kDebugRegisterBits;
 
 
+// ExternalReferenceTable is a helper class that defines the relationship
+// between external references and their encodings. It is used to build
+// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
+class ExternalReferenceTable {
+ public:
+  static ExternalReferenceTable* instance(Isolate* isolate);
+
+  ~ExternalReferenceTable() { }
+
+  int size() const { return refs_.length(); }
+
+  Address address(int i) { return refs_[i].address; }
+
+  uint32_t code(int i) { return refs_[i].code; }
+
+  const char* name(int i) { return refs_[i].name; }
+
+  int max_id(int code) { return max_id_[code]; }
+
+ private:
+  explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
+      PopulateTable(isolate);
+  }
+
+  struct ExternalReferenceEntry {
+    Address address;
+    uint32_t code;
+    const char* name;
+  };
+
+  void PopulateTable(Isolate* isolate);
+
+  // For a few types of references, we can get their address from their id.
+  void AddFromId(TypeCode type,
+                 uint16_t id,
+                 const char* name,
+                 Isolate* isolate);
+
+  // For other types of references, the caller will figure out the address.
+  void Add(Address address, TypeCode type, uint16_t id, const char* name);
+
+  List<ExternalReferenceEntry> refs_;
+  int max_id_[kTypeCodeCount];
+};
+
+
 class ExternalReferenceEncoder {
  public:
   ExternalReferenceEncoder();
@@ -148,7 +194,7 @@
 // This only works for objects in the first page of a space.  Don't use this for
 // things in newspace since it bypasses the write barrier.
 
-RLYSTC const int k64 = (sizeof(uintptr_t) - 4) / 4;
+static const int k64 = (sizeof(uintptr_t) - 4) / 4;
 
 #define COMMON_REFERENCE_PATTERNS(f)                               \
   f(kNumberOfSpaces, 2, (11 - k64))                                \
@@ -181,8 +227,8 @@
 // both.
 class SerializerDeserializer: public ObjectVisitor {
  public:
-  RLYSTC void Iterate(ObjectVisitor* visitor);
-  RLYSTC void SetSnapshotCacheSize(int size);
+  static void Iterate(ObjectVisitor* visitor);
+  static void SetSnapshotCacheSize(int size);
 
  protected:
   // Where the pointed-to object can be found:
@@ -220,34 +266,34 @@
 
   // Misc.
   // Raw data to be copied from the snapshot.
-  RLYSTC const int kRawData = 0x30;
+  static const int kRawData = 0x30;
   // Some common raw lengths: 0x31-0x3f
   // A tag emitted at strategic points in the snapshot to delineate sections.
   // If the deserializer does not find these at the expected moments then it
   // is an indication that the snapshot and the VM do not fit together.
   // Examine the build process for architecture, version or configuration
   // mismatches.
-  RLYSTC const int kSynchronize = 0x70;
+  static const int kSynchronize = 0x70;
   // Used for the source code of the natives, which is in the executable, but
   // is referred to from external strings in the snapshot.
-  RLYSTC const int kNativesStringResource = 0x71;
-  RLYSTC const int kNewPage = 0x72;
+  static const int kNativesStringResource = 0x71;
+  static const int kNewPage = 0x72;
   // 0x73-0x7f                            Free.
   // 0xb0-0xbf                            Free.
   // 0xf0-0xff                            Free.
 
 
-  RLYSTC const int kLargeData = LAST_SPACE;
-  RLYSTC const int kLargeCode = kLargeData + 1;
-  RLYSTC const int kLargeFixedArray = kLargeCode + 1;
-  RLYSTC const int kNumberOfSpaces = kLargeFixedArray + 1;
-  RLYSTC const int kAnyOldSpace = -1;
+  static const int kLargeData = LAST_SPACE;
+  static const int kLargeCode = kLargeData + 1;
+  static const int kLargeFixedArray = kLargeCode + 1;
+  static const int kNumberOfSpaces = kLargeFixedArray + 1;
+  static const int kAnyOldSpace = -1;
 
   // A bitmask for getting the space out of an instruction.
-  RLYSTC const int kSpaceMask = 15;
+  static const int kSpaceMask = 15;
 
-  RLYSTC inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
-  RLYSTC inline bool SpaceIsPaged(int space) {
+  static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+  static inline bool SpaceIsPaged(int space) {
     return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
   }
 };
@@ -380,19 +426,19 @@
   }
 
  private:
-  RLYSTC bool SerializationMatchFun(void* key1, void* key2) {
+  static bool SerializationMatchFun(void* key1, void* key2) {
     return key1 == key2;
   }
 
-  RLYSTC uint32_t Hash(HeapObject* obj) {
+  static uint32_t Hash(HeapObject* obj) {
     return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
   }
 
-  RLYSTC void* Key(HeapObject* obj) {
+  static void* Key(HeapObject* obj) {
     return reinterpret_cast<void*>(obj->address());
   }
 
-  RLYSTC void* Value(int v) {
+  static void* Value(int v) {
     return reinterpret_cast<void*>(v);
   }
 
@@ -403,7 +449,7 @@
 
 
 // There can be only one serializer per V8 process.
-STATIC_CLASS Serializer : public SerializerDeserializer {
+class Serializer : public SerializerDeserializer {
  public:
   explicit Serializer(SnapshotByteSink* sink);
   ~Serializer();
@@ -415,25 +461,25 @@
     return fullness_[space];
   }
 
-  RLYSTC void Enable() {
+  static void Enable() {
     if (!serialization_enabled_) {
       ASSERT(!too_late_to_enable_now_);
     }
     serialization_enabled_ = true;
   }
 
-  RLYSTC void Disable() { serialization_enabled_ = false; }
+  static void Disable() { serialization_enabled_ = false; }
   // Call this when you have made use of the fact that there is no serialization
   // going on.
-  RLYSTC void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
-  RLYSTC bool enabled() { return serialization_enabled_; }
+  static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
+  static bool enabled() { return serialization_enabled_; }
   SerializationAddressMapper* address_mapper() { return &address_mapper_; }
 #ifdef DEBUG
   virtual void Synchronize(const char* tag);
 #endif
 
  protected:
-  RLYSTC const int kInvalidRootIndex = -1;
+  static const int kInvalidRootIndex = -1;
   virtual int RootIndex(HeapObject* heap_object) = 0;
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
 
@@ -488,11 +534,11 @@
   // object space it may return kLargeCode or kLargeFixedArray in order
   // to indicate to the deserializer what kind of large object allocation
   // to make.
-  RLYSTC int SpaceOfObject(HeapObject* object);
+  static int SpaceOfObject(HeapObject* object);
   // This just returns the space of the object.  It will return LO_SPACE
   // for all large objects since you can't check the type of the object
   // once the map has been used for the serialization address.
-  RLYSTC int SpaceOfAlreadySerializedObject(HeapObject* object);
+  static int SpaceOfAlreadySerializedObject(HeapObject* object);
   int Allocate(int space, int size, bool* new_page_started);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_->Encode(addr);
@@ -506,9 +552,9 @@
   SnapshotByteSink* sink_;
   int current_root_index_;
   ExternalReferenceEncoder* external_reference_encoder_;
-  RLYSTC bool serialization_enabled_;
+  static bool serialization_enabled_;
   // Did we already make use of the fact that serialization was not enabled?
-  RLYSTC bool too_late_to_enable_now_;
+  static bool too_late_to_enable_now_;
   int large_object_total_;
   SerializationAddressMapper address_mapper_;
 
@@ -544,6 +590,7 @@
     ASSERT(!o->IsScript());
     return o->IsString() || o->IsSharedFunctionInfo() ||
            o->IsHeapNumber() || o->IsCode() ||
+           o->IsSerializedScopeInfo() ||
            o->map() == HEAP->fixed_cow_array_map();
   }
 
diff --git a/src/shell.h b/src/shell.h
deleted file mode 100644
index ca51040..0000000
--- a/src/shell.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// A simple interactive shell.  Enable with --shell.
-
-#ifndef V8_SHELL_H_
-#define V8_SHELL_H_
-
-#include "../public/debug.h"
-
-namespace v8 {
-namespace internal {
-
-// Debug event handler for interactive debugging.
-void handle_debug_event(v8::DebugEvent event,
-                        v8::Handle<v8::Object> exec_state,
-                        v8::Handle<v8::Object> event_data,
-                        v8::Handle<Value> data);
-
-
-class Shell {
- public:
-  static void PrintObject(v8::Handle<v8::Value> obj);
-  // Run the read-eval loop, executing code in the specified
-  // environment.
-  static void Run(v8::Handle<v8::Context> context);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_SHELL_H_
diff --git a/src/small-pointer-list.h b/src/small-pointer-list.h
index 6291d9e..6c5ce89 100644
--- a/src/small-pointer-list.h
+++ b/src/small-pointer-list.h
@@ -44,6 +44,31 @@
  public:
   SmallPointerList() : data_(kEmptyTag) {}
 
+  explicit SmallPointerList(int capacity) : data_(kEmptyTag) {
+    Reserve(capacity);
+  }
+
+  void Reserve(int capacity) {
+    if (capacity < 2) return;
+    if ((data_ & kTagMask) == kListTag) {
+      if (list()->capacity() >= capacity) return;
+      int old_length = list()->length();
+      list()->AddBlock(NULL, capacity - list()->capacity());
+      list()->Rewind(old_length);
+      return;
+    }
+    PointerList* list = new PointerList(capacity);
+    if ((data_ & kTagMask) == kSingletonTag) {
+      list->Add(single_value());
+    }
+    ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
+    data_ = reinterpret_cast<intptr_t>(list) | kListTag;
+  }
+
+  void Clear() {
+    data_ = kEmptyTag;
+  }
+
   bool is_empty() const { return length() == 0; }
 
   int length() const {
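
The Reserve() logic added above relies on SmallPointerList's pointer-tagging trick: because a heap-allocated list is at least 4-byte aligned, the two low bits of data_ are free to distinguish the empty, singleton, and list states. The following standalone sketch shows that encoding; the concrete tag values and the std::vector stand-in are assumptions for illustration, not V8's exact constants.

#include <cassert>
#include <cstdint>
#include <vector>

static const intptr_t kTagMask      = 3;  // low two bits carry the state
static const intptr_t kEmptyTag     = 1;  // illustrative values only
static const intptr_t kSingletonTag = 0;  // untagged aligned pointer
static const intptr_t kListTag      = 2;

struct TaggedPointerField {
  intptr_t data_;

  TaggedPointerField() : data_(kEmptyTag) {}

  bool IsList() const { return (data_ & kTagMask) == kListTag; }

  void SetList(std::vector<void*>* list) {
    intptr_t bits = reinterpret_cast<intptr_t>(list);
    assert((bits & kTagMask) == 0);  // requires pointer alignment >= 4
    data_ = bits | kListTag;
  }

  std::vector<void*>* list() const {
    assert(IsList());
    return reinterpret_cast<std::vector<void*>*>(data_ & ~kTagMask);
  }
};
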
diff --git a/src/smart-pointer.h b/src/smart-array-pointer.h
similarity index 70%
rename from src/smart-pointer.h
rename to src/smart-array-pointer.h
index 0fa8224..00721c1 100644
--- a/src/smart-pointer.h
+++ b/src/smart-array-pointer.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,8 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_SMART_POINTER_H_
-#define V8_SMART_POINTER_H_
+#ifndef V8_SMART_ARRAY_POINTER_H_
+#define V8_SMART_ARRAY_POINTER_H_
 
 namespace v8 {
 namespace internal {
@@ -35,75 +35,66 @@
 // A 'scoped array pointer' that calls DeleteArray on its pointer when the
 // destructor is called.
 template<typename T>
-class SmartPointer {
+class SmartArrayPointer {
  public:
+  // Default constructor. Constructs an empty scoped pointer.
+  inline SmartArrayPointer() : p_(NULL) {}
 
-  // Default constructor. Construct an empty scoped pointer.
-  inline SmartPointer() : p(NULL) {}
-
-
-  // Construct a scoped pointer from a plain one.
-  explicit inline SmartPointer(T* pointer) : p(pointer) {}
-
+  // Constructs a scoped pointer from a plain one.
+  explicit inline SmartArrayPointer(T* ptr) : p_(ptr) {}
 
   // Copy constructor removes the pointer from the original to avoid double
   // freeing.
-  inline SmartPointer(const SmartPointer<T>& rhs) : p(rhs.p) {
-    const_cast<SmartPointer<T>&>(rhs).p = NULL;
+  inline SmartArrayPointer(const SmartArrayPointer<T>& rhs) : p_(rhs.p_) {
+    const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
   }
 
-
   // When the destructor of the scoped pointer is executed the plain pointer
   // is deleted using DeleteArray.  This implies that you must allocate with
   // NewArray.
-  inline ~SmartPointer() { if (p) DeleteArray(p); }
+  inline ~SmartArrayPointer() { if (p_) DeleteArray(p_); }
 
+  inline T* operator->() const { return p_; }
 
   // You can get the underlying pointer out with the * operator.
-  inline T* operator*() { return p; }
-
+  inline T* operator*() { return p_; }
 
   // You can use [n] to index as if it was a plain pointer
   inline T& operator[](size_t i) {
-    return p[i];
+    return p_[i];
   }
 
   // We don't have implicit conversion to a T* since that hinders migration:
   // You would not be able to change a method from returning a T* to
-  // returning an SmartPointer<T> and then get errors wherever it is used.
+  // returning a SmartArrayPointer<T> and then get errors wherever it is used.
 
 
   // If you want to take out the plain pointer and don't want it automatically
   // deleted then call Detach().  Afterwards, the smart pointer is empty
   // (NULL).
   inline T* Detach() {
-    T* temp = p;
-    p = NULL;
+    T* temp = p_;
+    p_ = NULL;
     return temp;
   }
 
-
-  // Assignment requires an empty (NULL) SmartPointer as the receiver.  Like
+  // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
   // the copy constructor it removes the pointer in the original to avoid
   // double freeing.
-  inline SmartPointer& operator=(const SmartPointer<T>& rhs) {
+  inline SmartArrayPointer& operator=(const SmartArrayPointer<T>& rhs) {
     ASSERT(is_empty());
-    T* tmp = rhs.p;  // swap to handle self-assignment
-    const_cast<SmartPointer<T>&>(rhs).p = NULL;
-    p = tmp;
+    T* tmp = rhs.p_;  // swap to handle self-assignment
+    const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
+    p_ = tmp;
     return *this;
   }
 
-
-  inline bool is_empty() {
-    return p == NULL;
-  }
-
+  inline bool is_empty() { return p_ == NULL; }
 
  private:
-  T* p;
+  T* p_;
 };
 
 } }  // namespace v8::internal
 
-#endif  // V8_SMART_POINTER_H_
+#endif  // V8_SMART_ARRAY_POINTER_H_
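
A short usage sketch for the renamed SmartArrayPointer above, based only on the members shown in this header. It assumes V8's NewArray/DeleteArray helpers mentioned in the class comments; the function name is illustrative.

static void SmartArrayPointerUsageSketch() {
  // Takes ownership of an array allocated with NewArray.
  SmartArrayPointer<char> buffer(NewArray<char>(16));
  buffer[0] = 'x';      // operator[] indexes the underlying array
  (*buffer)[1] = 'y';   // operator* exposes the raw pointer

  // The copy constructor transfers ownership and empties the source.
  SmartArrayPointer<char> other = buffer;
  ASSERT(buffer.is_empty());

  // Detach() hands the array back to the caller, who must free it.
  char* raw = other.Detach();
  DeleteArray(raw);
}
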
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 7f82895..ef89a5e 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -53,7 +53,7 @@
     DeleteArray(str);
     return true;
   } else if (size_ > 0) {
-    Deserialize(data_, size_);
+    Deserialize(raw_data_, raw_size_);
     return true;
   }
   return false;
@@ -71,7 +71,8 @@
                      map_space_used_,
                      cell_space_used_,
                      large_space_used_);
-  SnapshotByteSource source(context_data_, context_size_);
+  SnapshotByteSource source(context_raw_data_,
+                            context_raw_size_);
   Deserializer deserializer(&source);
   Object* root;
   deserializer.DeserializePartial(&root);
diff --git a/src/snapshot-empty.cc b/src/snapshot-empty.cc
index cb26eb8..0b35720 100644
--- a/src/snapshot-empty.cc
+++ b/src/snapshot-empty.cc
@@ -35,9 +35,13 @@
 namespace internal {
 
 const byte Snapshot::data_[] = { 0 };
+const byte* Snapshot::raw_data_ = NULL;
 const int Snapshot::size_ = 0;
+const int Snapshot::raw_size_ = 0;
 const byte Snapshot::context_data_[] = { 0 };
+const byte* Snapshot::context_raw_data_ = NULL;
 const int Snapshot::context_size_ = 0;
+const int Snapshot::context_raw_size_ = 0;
 
 const int Snapshot::new_space_used_ = 0;
 const int Snapshot::pointer_space_used_ = 0;
diff --git a/src/snapshot.h b/src/snapshot.h
index bedd186..4f01a2d 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -33,7 +33,7 @@
 namespace v8 {
 namespace internal {
 
-STATIC_CLASS Snapshot {
+class Snapshot {
  public:
   // Initialize the VM from the given snapshot file. If snapshot_file is
   // NULL, use the internal snapshot instead. Returns false if no snapshot
@@ -50,9 +50,25 @@
   // successfully.
   static bool WriteToFile(const char* snapshot_file);
 
+  static const byte* data() { return data_; }
+  static int size() { return size_; }
+  static int raw_size() { return raw_size_; }
+  static void set_raw_data(const byte* raw_data) {
+    raw_data_ = raw_data;
+  }
+  static const byte* context_data() { return context_data_; }
+  static int context_size() { return context_size_; }
+  static int context_raw_size() { return context_raw_size_; }
+  static void set_context_raw_data(
+      const byte* context_raw_data) {
+    context_raw_data_ = context_raw_data;
+  }
+
  private:
   static const byte data_[];
+  static const byte* raw_data_;
   static const byte context_data_[];
+  static const byte* context_raw_data_;
   static const int new_space_used_;
   static const int pointer_space_used_;
   static const int data_space_used_;
@@ -61,7 +77,9 @@
   static const int cell_space_used_;
   static const int large_space_used_;
   static const int size_;
+  static const int raw_size_;
   static const int context_size_;
+  static const int context_raw_size_;
 
   static bool Deserialize(const byte* content, int len);
 
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 070f970..35d7224 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -155,7 +155,8 @@
 
 uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
   uint32_t result = 0;
-  if (length_in_bytes >= kPageSize) {
+  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
+  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
     result = kAllRegionsDirtyMarks;
   } else if (length_in_bytes > 0) {
     int start_region = GetRegionNumberForAddress(start);
@@ -294,13 +295,13 @@
   SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
 }
 
-bool Page::IsPageExecutable() {
-  return GetPageFlag(IS_EXECUTABLE);
+Executability Page::PageExecutability() {
+  return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
 }
 
 
-void Page::SetIsPageExecutable(bool is_page_executable) {
-  SetPageFlag(IS_EXECUTABLE, is_page_executable);
+void Page::SetPageExecutability(Executability executable) {
+  SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
 }
 
 
@@ -378,35 +379,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
-  OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start,
-                                size_t size,
-                                Executability executable) {
-  OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
-                chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-
 // --------------------------------------------------------------------------
 // PagedSpace
 
@@ -463,23 +435,6 @@
 
 
 // -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-Address LargeObjectChunk::GetStartAddress() {
-  // Round the chunk address up to the nearest page-aligned address
-  // and return the heap object in that page.
-  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
-  return page->ObjectAreaStart();
-}
-
-
-void LargeObjectChunk::Free(Executability executable) {
-  Isolate* isolate =
-      Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();
-  isolate->memory_allocator()->FreeRawMemory(address(), size(), executable);
-}
-
-// -----------------------------------------------------------------------------
 // NewSpace
 
 MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
diff --git a/src/spaces.cc b/src/spaces.cc
index 3db9306..97c6d2a 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -402,7 +402,9 @@
                                     size_t length,
                                     Executability executable) {
 #ifdef DEBUG
-  ZapBlock(reinterpret_cast<Address>(mem), length);
+  // Do not try to zap the guard page.
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+  ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
 #endif
   if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
     isolate_->code_range()->FreeRawMemory(mem, length);
@@ -504,14 +506,28 @@
   LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
 
   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+
   // We may 'lose' a page due to alignment.
   ASSERT(*allocated_pages >= kPagesPerChunk - 1);
-  if (*allocated_pages == 0) {
-    FreeRawMemory(chunk, chunk_size, owner->executable());
+
+  size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
+
+  // Check that we got at least one page that we can use.
+  if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
+    FreeRawMemory(chunk,
+                  chunk_size,
+                  owner->executable());
     LOG(isolate_, DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }
 
+  if (guard_size != 0) {
+    OS::Guard(chunk, guard_size);
+    chunk_size -= guard_size;
+    chunk = static_cast<Address>(chunk) + guard_size;
+    --*allocated_pages;
+  }
+
   int chunk_id = Pop();
   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
 
@@ -681,7 +697,8 @@
     LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
     ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
     size_t size = c.size();
-    FreeRawMemory(c.address(), size, c.executable());
+    size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
+    FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
     PerformAllocationCallback(space, kAllocationActionFree, size);
   }
   c.init(NULL, 0, NULL);
@@ -868,30 +885,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void PagedSpace::Protect() {
-  Page* page = first_page_;
-  while (page->is_valid()) {
-    Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
-    page = Isolate::Current()->memory_allocator()->
-        FindLastPageInSameChunk(page)->next_page();
-  }
-}
-
-
-void PagedSpace::Unprotect() {
-  Page* page = first_page_;
-  while (page->is_valid()) {
-    Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
-    page = Isolate::Current()->memory_allocator()->
-        FindLastPageInSameChunk(page)->next_page();
-  }
-}
-
-#endif
-
-
 void PagedSpace::MarkAllPagesClean() {
   PageIterator it(this, PageIterator::ALL_PAGES);
   while (it.has_next()) {
@@ -1196,7 +1189,6 @@
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
 
   // Allocate and setup the histogram arrays if necessary.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
 
@@ -1204,7 +1196,6 @@
                        promoted_histogram_[name].set_name(#name);
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
-#endif
 
   ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
   ASSERT(IsAddressAligned(start, size, 0));
@@ -1236,7 +1227,6 @@
 
 
 void NewSpace::TearDown() {
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   if (allocated_histogram_) {
     DeleteArray(allocated_histogram_);
     allocated_histogram_ = NULL;
@@ -1245,7 +1235,6 @@
     DeleteArray(promoted_histogram_);
     promoted_histogram_ = NULL;
   }
-#endif
 
   start_ = NULL;
   allocation_info_.top = NULL;
@@ -1258,24 +1247,6 @@
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void NewSpace::Protect() {
-  heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
-  heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
-}
-
-
-void NewSpace::Unprotect() {
-  heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
-                                                   to_space_.executable());
-  heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
-                                                   from_space_.executable());
-}
-
-#endif
-
-
 void NewSpace::Flip() {
   SemiSpace tmp = from_space_;
   from_space_ = to_space_;
@@ -1564,14 +1535,14 @@
       CASE(BUILTIN);
       CASE(LOAD_IC);
       CASE(KEYED_LOAD_IC);
-      CASE(KEYED_EXTERNAL_ARRAY_LOAD_IC);
       CASE(STORE_IC);
       CASE(KEYED_STORE_IC);
-      CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
       CASE(CALL_IC);
       CASE(KEYED_CALL_IC);
-      CASE(TYPE_RECORDING_BINARY_OP_IC);
+      CASE(UNARY_OP_IC);
+      CASE(BINARY_OP_IC);
       CASE(COMPARE_IC);
+      CASE(TO_BOOLEAN_IC);
     }
   }
 
@@ -1639,7 +1610,6 @@
 
 
 // Support for statistics gathering for --heap-stats and --log-gc.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 void NewSpace::ClearHistograms() {
   for (int i = 0; i <= LAST_TYPE; i++) {
     allocated_histogram_[i].clear();
@@ -1649,9 +1619,7 @@
 
 // Because the copying collector does not touch garbage objects, we iterate
 // the new space before a collection to get a histogram of allocated objects.
-// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
-// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
-// flag is set.
+// This only happens when the --log-gc flag is set.
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
@@ -1660,7 +1628,6 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 static void DoReportStatistics(Isolate* isolate,
                                HistogramInfo* info, const char* description) {
   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
@@ -1687,7 +1654,6 @@
   }
   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
 }
-#endif  // ENABLE_LOGGING_AND_PROFILING
 
 
 void NewSpace::ReportStatistics() {
@@ -1710,13 +1676,11 @@
   }
 #endif  // DEBUG
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_gc) {
     Isolate* isolate = ISOLATE;
     DoReportStatistics(isolate, allocated_histogram_, "allocated");
     DoReportStatistics(isolate, promoted_histogram_, "promoted");
   }
-#endif  // ENABLE_LOGGING_AND_PROFILING
 }
 
 
@@ -1734,7 +1698,6 @@
   promoted_histogram_[type].increment_number(1);
   promoted_histogram_[type].increment_bytes(obj->Size());
 }
-#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 
 
 // -----------------------------------------------------------------------------
@@ -2727,9 +2690,10 @@
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
   size_t size;
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
   Isolate* isolate = Isolate::Current();
   void* mem = isolate->memory_allocator()->AllocateRawMemory(
-      requested, &size, executable);
+      requested + guard_size, &size, executable);
   if (mem == NULL) return NULL;
 
   // The start of the chunk may be overlayed with a page so we have to
   // make sure the low bits used for page flags are clear in the size.
@@ -2737,13 +2701,19 @@
   ASSERT((size & Page::kPageFlagMask) == 0);
 
   LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
-  if (size < requested) {
+  if (size < requested + guard_size) {
     isolate->memory_allocator()->FreeRawMemory(
         mem, size, executable);
     LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
 
+  if (guard_size != 0) {
+    OS::Guard(mem, guard_size);
+    size -= guard_size;
+    mem = static_cast<Address>(mem) + guard_size;
+  }
+
   ObjectSpace space = (executable == EXECUTABLE)
       ? kObjectSpaceCodeSpace
       : kObjectSpaceLoSpace;
@@ -2752,12 +2722,26 @@
 
   LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
   chunk->size_ = size;
-  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
-  page->heap_ = isolate->heap();
+  chunk->GetPage()->heap_ = isolate->heap();
   return chunk;
 }
 
 
+void LargeObjectChunk::Free(Executability executable) {
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+  ObjectSpace space =
+      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
+  // Do not access instance fields after FreeRawMemory!
+  Address my_address = address();
+  size_t my_size = size();
+  Isolate* isolate = GetPage()->heap_->isolate();
+  MemoryAllocator* a = isolate->memory_allocator();
+  a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
+  a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
+  LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
+}
+
+
 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
   int os_alignment = static_cast<int>(OS::AllocateAlignment());
   if (os_alignment < Page::kPageSize) {
@@ -2790,51 +2774,12 @@
   while (first_chunk_ != NULL) {
     LargeObjectChunk* chunk = first_chunk_;
     first_chunk_ = first_chunk_->next();
-    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
-    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
-    Executability executable =
-        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
-    ObjectSpace space = kObjectSpaceLoSpace;
-    if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
-    size_t size = chunk->size();
-    heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
-                                                         size,
-                                                         executable);
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, size);
+    chunk->Free(chunk->GetPage()->PageExecutability());
   }
-
-  size_ = 0;
-  page_count_ = 0;
-  objects_size_ = 0;
+  Setup();
 }
 
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void LargeObjectSpace::Protect() {
-  LargeObjectChunk* chunk = first_chunk_;
-  while (chunk != NULL) {
-    heap()->isolate()->memory_allocator()->Protect(chunk->address(),
-                                                   chunk->size());
-    chunk = chunk->next();
-  }
-}
-
-
-void LargeObjectSpace::Unprotect() {
-  LargeObjectChunk* chunk = first_chunk_;
-  while (chunk != NULL) {
-    bool is_code = chunk->GetObject()->IsCode();
-    heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
-        chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
-    chunk = chunk->next();
-  }
-}
-
-#endif
-
-
 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                                    int object_size,
                                                    Executability executable) {
@@ -2859,14 +2804,14 @@
   first_chunk_ = chunk;
 
   // Initialize page header.
-  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+  Page* page = chunk->GetPage();
   Address object_address = page->ObjectAreaStart();
 
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
   page->SetIsLargeObjectPage(true);
-  page->SetIsPageExecutable(executable);
+  page->SetPageExecutability(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
@@ -2997,14 +2942,8 @@
       previous = current;
       current = current->next();
     } else {
-      Page* page = Page::FromAddress(RoundUp(current->address(),
-                                     Page::kPageSize));
-      Executability executable =
-          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
-      Address chunk_address = current->address();
-      size_t chunk_size = current->size();
-
       // Cut the chunk out from the chunk list.
+      LargeObjectChunk* current_chunk = current;
       current = current->next();
       if (previous == NULL) {
         first_chunk_ = current;
@@ -3017,17 +2956,10 @@
           object, heap()->isolate());
       LiveObjectList::ProcessNonLive(object);
 
-      size_ -= static_cast<int>(chunk_size);
+      size_ -= static_cast<int>(current_chunk->size());
       objects_size_ -= object->Size();
       page_count_--;
-      ObjectSpace space = kObjectSpaceLoSpace;
-      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
-      heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
-                                                           chunk_size,
-                                                           executable);
-      heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-          space, kAllocationActionFree, size_);
-      LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
+      current_chunk->Free(current_chunk->GetPage()->PageExecutability());
     }
   }
 }
@@ -3071,7 +3003,7 @@
     // strings), fixed arrays, and byte arrays in large object space.
     ASSERT(object->IsCode() || object->IsSeqString() ||
            object->IsExternalString() || object->IsFixedArray() ||
-           object->IsByteArray());
+           object->IsFixedDoubleArray() || object->IsByteArray());
 
     // The object itself should look OK.
     object->Verify();
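
The guard-page handling introduced in spaces.cc above (allocate requested + guard_size, OS::Guard the leading page for executable chunks, and later FreeRawMemory at address() - guard_size with size() + guard_size) follows a common pattern. Here is a self-contained POSIX sketch of the same bookkeeping, independent of V8's MemoryAllocator; all names are illustrative.

#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

struct GuardedBlock {
  void* base;         // start of the mapping, including the guard page
  void* usable;       // first byte the caller may actually use
  size_t total_size;  // usable size plus the guard page
};

bool AllocateGuarded(size_t usable_size, GuardedBlock* out) {
  size_t guard = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t total = usable_size + guard;
  void* base = mmap(NULL, total, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return false;
  // Make the leading page inaccessible; any access to it faults.
  if (mprotect(base, guard, PROT_NONE) != 0) {
    munmap(base, total);
    return false;
  }
  out->base = base;
  out->usable = static_cast<char*>(base) + guard;
  out->total_size = total;
  return true;
}

void FreeGuarded(const GuardedBlock& block) {
  // As in LargeObjectChunk::Free() above: release from 'base', not 'usable'.
  munmap(block.base, block.total_size);
}
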
diff --git a/src/spaces.h b/src/spaces.h
index f323f85..f156496 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,8 @@
 #ifndef V8_SPACES_H_
 #define V8_SPACES_H_
 
-#include "list-inl.h"
+#include "allocation.h"
+#include "list.h"
 #include "log.h"
 
 namespace v8 {
@@ -199,9 +200,9 @@
 
   inline void SetIsLargeObjectPage(bool is_large_object_page);
 
-  inline bool IsPageExecutable();
+  inline Executability PageExecutability();
 
-  inline void SetIsPageExecutable(bool is_page_executable);
+  inline void SetPageExecutability(Executability executable);
 
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
@@ -379,12 +380,6 @@
   // (e.g. see LargeObjectSpace).
   virtual intptr_t SizeOfObjects() { return Size(); }
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect the space by marking it read-only/writable.
-  virtual void Protect() = 0;
-  virtual void Unprotect() = 0;
-#endif
-
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -414,6 +409,7 @@
 class CodeRange {
  public:
   explicit CodeRange(Isolate* isolate);
+  ~CodeRange() { TearDown(); }
 
   // Reserves a range of virtual memory, but does not commit any of it.
   // Can only be called once, at heap initialization time.
@@ -640,17 +636,6 @@
                                   Page** last_page,
                                   Page** last_page_in_use);
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect a block of memory by marking it read-only/writable.
-  inline void Protect(Address start, size_t size);
-  inline void Unprotect(Address start, size_t size,
-                        Executability executable);
-
-  // Protect/unprotect a chunk given a page in the chunk.
-  inline void ProtectChunkFromPage(Page* page);
-  inline void UnprotectChunkFromPage(Page* page);
-#endif
-
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
@@ -663,20 +648,17 @@
 #ifdef V8_TARGET_ARCH_X64
   static const int kPagesPerChunk = 32;
   // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
-  static const int kPagesPerChunkLog2 = 5;
   static const int kChunkTableLevels = 4;
   static const int kChunkTableBitsPerLevel = 12;
 #else
   static const int kPagesPerChunk = 16;
   // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
-  static const int kPagesPerChunkLog2 = 4;
   static const int kChunkTableLevels = 2;
   static const int kChunkTableBitsPerLevel = 8;
 #endif
 
  private:
   static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
-  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
 
   Isolate* isolate_;
 
@@ -1152,12 +1134,6 @@
   // Ensures that the capacity is at least 'capacity'. Returns false on failure.
   bool EnsureCapacity(int capacity);
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect the space by marking it read-only/writable.
-  void Protect();
-  void Unprotect();
-#endif
-
 #ifdef DEBUG
   // Print meta info and objects in this space.
   virtual void Print();
@@ -1256,8 +1232,8 @@
   // Returns the number of total pages in this space.
   int CountTotalPages();
 #endif
- private:
 
+ private:
   // Returns a pointer to the page of the relocation pointer.
   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
 
@@ -1265,7 +1241,6 @@
 };
 
 
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 class NumberAndSizeInfo BASE_EMBEDDED {
  public:
   NumberAndSizeInfo() : number_(0), bytes_(0) {}
@@ -1288,9 +1263,7 @@
 
 
 // HistogramInfo class for recording a single "bar" of a histogram.  This
-// class is used for collecting statistics to print to stdout (when compiled
-// with DEBUG) or to the log file (when compiled with
-// ENABLE_LOGGING_AND_PROFILING).
+// class is used for collecting statistics to print to the log file.
 class HistogramInfo: public NumberAndSizeInfo {
  public:
   HistogramInfo() : NumberAndSizeInfo() {}
@@ -1301,7 +1274,6 @@
  private:
   const char* name_;
 };
-#endif
 
 
 // -----------------------------------------------------------------------------
@@ -1387,12 +1359,6 @@
   bool Commit();
   bool Uncommit();
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect the space by marking it read-only/writable.
-  virtual void Protect() {}
-  virtual void Unprotect() {}
-#endif
-
 #ifdef DEBUG
   virtual void Print();
   virtual void Verify();
@@ -1623,12 +1589,6 @@
   template <typename StringType>
   inline void ShrinkStringAtAllocationBoundary(String* string, int len);
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect the space by marking it read-only/writable.
-  virtual void Protect();
-  virtual void Unprotect();
-#endif
-
 #ifdef DEBUG
   // Verify the active semispace.
   virtual void Verify();
@@ -1636,7 +1596,6 @@
   virtual void Print() { to_space_.Print(); }
 #endif
 
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   // Iterates the active semispace to collect statistics.
   void CollectStatistics();
   // Reports previously collected statistics of the active semispace.
@@ -1649,7 +1608,6 @@
   // to space during a scavenge GC.
   void RecordAllocation(HeapObject* obj);
   void RecordPromotion(HeapObject* obj);
-#endif
 
   // Return whether the operation succeeded.
   bool CommitFromSpaceIfNeeded() {
@@ -1678,10 +1636,8 @@
   AllocationInfo allocation_info_;
   AllocationInfo mc_forwarding_info_;
 
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
-#endif
 
   // Implementation of AllocateRaw and MCAllocateRaw.
   MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
@@ -1860,7 +1816,6 @@
   void MarkNodes();
 
  private:
-
   Heap* heap_;
 
   // Available bytes on the free list.
@@ -2188,7 +2143,7 @@
   static LargeObjectChunk* New(int size_in_bytes, Executability executable);
 
   // Free the memory associated with the chunk.
-  inline void Free(Executability executable);
+  void Free(Executability executable);
 
   // Interpret a raw address as a large object chunk.
   static LargeObjectChunk* FromAddress(Address address) {
@@ -2198,13 +2153,17 @@
   // Returns the address of this chunk.
   Address address() { return reinterpret_cast<Address>(this); }
 
+  Page* GetPage() {
+    return Page::FromAddress(RoundUp(address(), Page::kPageSize));
+  }
+
   // Accessors for the fields of the chunk.
   LargeObjectChunk* next() { return next_; }
   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
   size_t size() { return size_ & ~Page::kPageFlagMask; }
 
   // Compute the start address in the chunk.
-  inline Address GetStartAddress();
+  Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
 
   // Returns the object in this chunk.
   HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
@@ -2291,12 +2250,6 @@
   // may use some memory, leaving less for large objects.
   virtual bool ReserveSpace(int bytes);
 
-#ifdef ENABLE_HEAP_PROTECTION
-  // Protect/unprotect the space by marking it read-only/writable.
-  void Protect();
-  void Unprotect();
-#endif
-
 #ifdef DEBUG
   virtual void Verify();
   virtual void Print();
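
The new LargeObjectChunk::GetPage() above rounds the chunk's address up to the next page boundary before mapping it to a Page. A tiny standalone illustration of that rounding; the 8 KB page size here is only an example, not V8's constant.

#include <cstdint>

const uintptr_t kExamplePageSize = 8 * 1024;

uintptr_t RoundUpToPage(uintptr_t addr) {
  return (addr + kExamplePageSize - 1) & ~(kExamplePageSize - 1);
}

// For instance, RoundUpToPage(0x12345) == 0x14000 with an 8 KB (0x2000) page.
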
diff --git a/src/splay-tree.h b/src/splay-tree.h
index c265276..72231e4 100644
--- a/src/splay-tree.h
+++ b/src/splay-tree.h
@@ -28,6 +28,8 @@
 #ifndef V8_SPLAY_TREE_H_
 #define V8_SPLAY_TREE_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -121,8 +123,8 @@
     Value value() { return value_; }
     Node* left() { return left_; }
     Node* right() { return right_; }
-   private:
 
+   private:
     friend class SplayTree;
     friend class Locator;
     Key key_;
@@ -141,6 +143,7 @@
     Value& value() { return node_->value_; }
     void set_value(const Value& value) { node_->value_ = value; }
     inline void bind(Node* node) { node_ = node; }
+
    private:
     Node* node_;
   };
@@ -149,7 +152,6 @@
   void ForEach(Callback* callback);
 
  protected:
-
   // Resets tree root. Existing nodes become unreachable.
   void ResetRoot() { root_ = NULL; }
 
@@ -185,7 +187,6 @@
     void Call(Node* node) { delete node; }
 
    private:
-
     DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
   };
 
diff --git a/src/string-stream.cc b/src/string-stream.cc
index aea1420..8086cf9 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,8 @@
 #include "factory.h"
 #include "string-stream.h"
 
+#include "allocation-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -250,11 +252,11 @@
 }
 
 
-SmartPointer<const char> StringStream::ToCString() const {
+SmartArrayPointer<const char> StringStream::ToCString() const {
   char* str = NewArray<char>(length_ + 1);
   memcpy(str, buffer_, length_);
   str[length_] = '\0';
-  return SmartPointer<const char>(str);
+  return SmartArrayPointer<const char>(str);
 }
 
 
diff --git a/src/string-stream.h b/src/string-stream.h
index b3f2e0d..0ba8f52 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -93,6 +93,7 @@
   FmtElm(void* value) : type_(POINTER) {  // NOLINT
     data_.u_pointer_ = value;
   }
+
  private:
   friend class StringStream;
   enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
@@ -142,7 +143,7 @@
   void OutputToStdOut() { OutputToFile(stdout); }
   void Log();
   Handle<String> ToString();
-  SmartPointer<const char> ToCString() const;
+  SmartArrayPointer<const char> ToCString() const;
   int length() const { return length_; }
 
   // Object printing support.
diff --git a/src/string.js b/src/string.js
index d8d402c..297105d 100644
--- a/src/string.js
+++ b/src/string.js
@@ -62,6 +62,10 @@
 
 // ECMA-262, section 15.5.4.4
 function StringCharAt(pos) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.charAt"]);
+  }
   var result = %_StringCharAt(this, pos);
   if (%_IsSmi(result)) {
     result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -72,6 +76,10 @@
 
 // ECMA-262 section 15.5.4.5
 function StringCharCodeAt(pos) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.charCodeAt"]);
+  }
   var result = %_StringCharCodeAt(this, pos);
   if (!%_IsSmi(result)) {
     result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -82,6 +90,9 @@
 
 // ECMA-262, section 15.5.4.6
 function StringConcat() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined", ["String.prototype.concat"]);
+  }
   var len = %_ArgumentsLength();
   var this_as_string = TO_STRING_INLINE(this);
   if (len === 1) {
@@ -102,6 +113,10 @@
 
 // ECMA-262 section 15.5.4.7
 function StringIndexOf(pattern /* position */) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.indexOf"]);
+  }
   var subject = TO_STRING_INLINE(this);
   pattern = TO_STRING_INLINE(pattern);
   var index = 0;
@@ -117,6 +132,10 @@
 
 // ECMA-262 section 15.5.4.8
 function StringLastIndexOf(pat /* position */) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.lastIndexOf"]);
+  }
   var sub = TO_STRING_INLINE(this);
   var subLength = sub.length;
   var pat = TO_STRING_INLINE(pat);
@@ -130,7 +149,7 @@
         position = 0;
       }
       if (position + patLength < subLength) {
-        index = position
+        index = position;
       }
     }
   }
@@ -146,14 +165,22 @@
 // This function is implementation specific.  For now, we do not
 // do anything locale specific.
 function StringLocaleCompare(other) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.localeCompare"]);
+  }
   if (%_ArgumentsLength() === 0) return 0;
-  return %StringLocaleCompare(TO_STRING_INLINE(this), 
+  return %StringLocaleCompare(TO_STRING_INLINE(this),
                               TO_STRING_INLINE(other));
 }
 
 
 // ECMA-262 section 15.5.4.10
 function StringMatch(regexp) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.match"]);
+  }
   var subject = TO_STRING_INLINE(this);
   if (IS_REGEXP(regexp)) {
     if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
@@ -187,12 +214,16 @@
 
 // ECMA-262, section 15.5.4.11
 function StringReplace(search, replace) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.replace"]);
+  }
   var subject = TO_STRING_INLINE(this);
 
   // Delegate to one of the regular expression variants if necessary.
   if (IS_REGEXP(search)) {
     %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
-    if (IS_FUNCTION(replace)) {
+    if (IS_SPEC_FUNCTION(replace)) {
       if (search.global) {
         return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
       } else {
@@ -219,8 +250,9 @@
   builder.addSpecialSlice(0, start);
 
   // Compute the string to replace with.
-  if (IS_FUNCTION(replace)) {
-    builder.add(%_CallFunction(%GetGlobalReceiver(),
+  if (IS_SPEC_FUNCTION(replace)) {
+    var receiver = %GetDefaultReceiver(replace);
+    builder.add(%_CallFunction(receiver,
                                search,
                                start,
                                subject,
@@ -243,7 +275,7 @@
 // the result.
 function ExpandReplacement(string, subject, matchInfo, builder) {
   var length = string.length;
-  var builder_elements = builder.elements; 
+  var builder_elements = builder.elements;
   var next = %StringIndexOf(string, '$', 0);
   if (next < 0) {
     if (length > 0) builder_elements.push(string);
@@ -387,7 +419,7 @@
   if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
     var match_start = 0;
     var override = new InternalArray(null, 0, subject);
-    var receiver = %GetGlobalReceiver();
+    var receiver = %GetDefaultReceiver(replace);
     while (i < len) {
       var elem = res[i];
       if (%_IsSmi(elem)) {
@@ -408,13 +440,14 @@
       i++;
     }
   } else {
+    var receiver = %GetDefaultReceiver(replace);
     while (i < len) {
       var elem = res[i];
       if (!%_IsSmi(elem)) {
         // elem must be an Array.
         // Use the apply argument as backing for global RegExp properties.
         lastMatchInfoOverride = elem;
-        var func_result = replace.apply(null, elem);
+        var func_result = %Apply(replace, receiver, elem, 0, elem.length);
         res[i] = TO_STRING_INLINE(func_result);
       }
       i++;
@@ -440,12 +473,13 @@
   // The number of captures plus one for the match.
   var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
   var replacement;
+  var receiver = %GetDefaultReceiver(replace);
   if (m == 1) {
     // No captures, only the match, which is always valid.
     var s = SubString(subject, index, endOfMatch);
     // Don't call directly to avoid exposing the built-in global object.
     replacement =
-        %_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
+        %_CallFunction(receiver, s, index, subject, replace);
   } else {
     var parameters = new InternalArray(m + 2);
     for (var j = 0; j < m; j++) {
@@ -454,7 +488,7 @@
     parameters[j] = index;
     parameters[j + 1] = subject;
 
-    replacement = replace.apply(null, parameters);
+    replacement = %Apply(replace, receiver, parameters, 0, j + 2);
   }
 
   result.add(replacement);  // The add method converts to string if necessary.
@@ -467,6 +501,10 @@
 
 // ECMA-262 section 15.5.4.12
 function StringSearch(re) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.search"]);
+  }
   var regexp;
   if (IS_STRING(re)) {
     regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
@@ -485,6 +523,10 @@
 
 // ECMA-262 section 15.5.4.13
 function StringSlice(start, end) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.slice"]);
+  }
   var s = TO_STRING_INLINE(this);
   var s_len = s.length;
   var start_i = TO_INTEGER(start);
@@ -520,6 +562,10 @@
 
 // ECMA-262 section 15.5.4.14
 function StringSplit(separator, limit) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.split"]);
+  }
   var subject = TO_STRING_INLINE(this);
   limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
   if (limit === 0) return [];
@@ -613,6 +659,10 @@
 
 // ECMA-262 section 15.5.4.15
 function StringSubstring(start, end) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.substring"]);
+  }
   var s = TO_STRING_INLINE(this);
   var s_len = s.length;
 
@@ -646,6 +696,10 @@
 
 // This is not a part of ECMA-262.
 function StringSubstr(start, n) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.substr"]);
+  }
   var s = TO_STRING_INLINE(this);
   var len;
 
@@ -686,37 +740,65 @@
 
 // ECMA-262, 15.5.4.16
 function StringToLowerCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toLowerCase"]);
+  }
   return %StringToLowerCase(TO_STRING_INLINE(this));
 }
 
 
 // ECMA-262, 15.5.4.17
 function StringToLocaleLowerCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toLocaleLowerCase"]);
+  }
   return %StringToLowerCase(TO_STRING_INLINE(this));
 }
 
 
 // ECMA-262, 15.5.4.18
 function StringToUpperCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toUpperCase"]);
+  }
   return %StringToUpperCase(TO_STRING_INLINE(this));
 }
 
 
 // ECMA-262, 15.5.4.19
 function StringToLocaleUpperCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toLocaleUpperCase"]);
+  }
   return %StringToUpperCase(TO_STRING_INLINE(this));
 }
 
 // ES5, 15.5.4.20
 function StringTrim() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.trim"]);
+  }
   return %StringTrim(TO_STRING_INLINE(this), true, true);
 }
 
 function StringTrimLeft() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.trimLeft"]);
+  }
   return %StringTrim(TO_STRING_INLINE(this), true, false);
 }
 
 function StringTrimRight() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.trimRight"]);
+  }
   return %StringTrim(TO_STRING_INLINE(this), false, true);
 }
 
@@ -830,48 +912,47 @@
   this.special_string = str;
 }
 
-
-ReplaceResultBuilder.prototype.add = function(str) {
-  str = TO_STRING_INLINE(str);
-  if (str.length > 0) this.elements.push(str);
-}
-
-
-ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
-  var len = end - start;
-  if (start < 0 || len <= 0) return;
-  if (start < 0x80000 && len < 0x800) {
-    this.elements.push((start << 11) | len);
-  } else {
-    // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
-    // so -len is a smi.
+SetUpLockedPrototype(ReplaceResultBuilder,
+  $Array("elements", "special_string"), $Array(
+  "add", function(str) {
+    str = TO_STRING_INLINE(str);
+    if (str.length > 0) this.elements.push(str);
+  },
+  "addSpecialSlice", function(start, end) {
+    var len = end - start;
+    if (start < 0 || len <= 0) return;
+    if (start < 0x80000 && len < 0x800) {
+      this.elements.push((start << 11) | len);
+    } else {
+      // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
+      // so -len is a smi.
+      var elements = this.elements;
+      elements.push(-len);
+      elements.push(start);
+    }
+  },
+  "generate", function() {
     var elements = this.elements;
-    elements.push(-len);
-    elements.push(start);
+    return %StringBuilderConcat(elements, elements.length, this.special_string);
   }
-}
-
-
-ReplaceResultBuilder.prototype.generate = function() {
-  var elements = this.elements;
-  return %StringBuilderConcat(elements, elements.length, this.special_string);
-}
+));
 
 
 // -------------------------------------------------------------------
 
-function SetupString() {
-  // Setup the constructor property on the String prototype object.
+function SetUpString() {
+  %CheckIsBootstrapping();
+  // Set up the constructor property on the String prototype object.
   %SetProperty($String.prototype, "constructor", $String, DONT_ENUM);
 
 
-  // Setup the non-enumerable functions on the String object.
+  // Set up the non-enumerable functions on the String object.
   InstallFunctions($String, DONT_ENUM, $Array(
     "fromCharCode", StringFromCharCode
   ));
 
 
-  // Setup the non-enumerable functions on the String prototype object.
+  // Set up the non-enumerable functions on the String prototype object.
   InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
     "valueOf", StringValueOf,
     "toString", StringToString,
@@ -911,5 +992,4 @@
   ));
 }
 
-
-SetupString();
+SetUpString();
diff --git a/src/strtod.cc b/src/strtod.cc
index cedbff9..c89c8f3 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,10 +26,11 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdarg.h>
-#include <limits.h>
+#include <math.h>
+#include <limits>
 
-#include "v8.h"
-
+#include "globals.h"
+#include "utils.h"
 #include "strtod.h"
 #include "bignum.h"
 #include "cached-powers.h"
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 0c6a7f7..cdb4874 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,8 @@
 
 #include "api.h"
 #include "arguments.h"
+#include "ast.h"
+#include "code-stubs.h"
 #include "gdb-jit.h"
 #include "ic-inl.h"
 #include "stub-cache.h"
@@ -72,7 +74,7 @@
   // the bits are the least significant so they will be the ones
   // masked out.
   ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC);
-  ASSERT(Code::kFlagsICStateShift == 0);
+  STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
 
   // Make sure that the code type is not included in the hash.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -457,34 +459,6 @@
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
-  // Using NORMAL as the PropertyType for array element loads is a misuse. The
-  // generated stub always accesses fast elements, not slow-mode fields, but
-  // some property type is required for the stub lookup. Note that overloading
-  // the NORMAL PropertyType is only safe as long as no stubs are generated for
-  // other keyed field loads. This is guaranteed to be the case since all field
-  // keyed loads that are not array elements go through a generic builtin stub.
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
-  String* name = heap()->KeyedLoadSpecialized_symbol();
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadSpecialized(receiver);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
-  return code;
-}
-
-
 MaybeObject* StubCache::ComputeStoreField(String* name,
                                           JSObject* receiver,
                                           int field_index,
@@ -513,146 +487,47 @@
 }
 
 
-MaybeObject* StubCache::ComputeKeyedStoreSpecialized(
-    JSObject* receiver,
-    StrictModeFlag strict_mode) {
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
-  String* name = heap()->KeyedStoreSpecialized_symbol();
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
-  return code;
-}
-
-
-namespace {
-
-ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
-  switch (kind) {
-    case JSObject::EXTERNAL_BYTE_ELEMENTS:
-      return kExternalByteArray;
-    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      return kExternalUnsignedByteArray;
-    case JSObject::EXTERNAL_SHORT_ELEMENTS:
-      return kExternalShortArray;
-    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      return kExternalUnsignedShortArray;
-    case JSObject::EXTERNAL_INT_ELEMENTS:
-      return kExternalIntArray;
-    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      return kExternalUnsignedIntArray;
-    case JSObject::EXTERNAL_FLOAT_ELEMENTS:
-      return kExternalFloatArray;
-    case JSObject::EXTERNAL_PIXEL_ELEMENTS:
-      return kExternalPixelArray;
-    default:
-      UNREACHABLE();
-      return static_cast<ExternalArrayType>(0);
-  }
-}
-
-String* ExternalArrayTypeToStubName(Heap* heap,
-                                    ExternalArrayType array_type,
-                                    bool is_store) {
-  if (is_store) {
-    switch (array_type) {
-      case kExternalByteArray:
-        return heap->KeyedStoreExternalByteArray_symbol();
-      case kExternalUnsignedByteArray:
-        return heap->KeyedStoreExternalUnsignedByteArray_symbol();
-      case kExternalShortArray:
-        return heap->KeyedStoreExternalShortArray_symbol();
-      case kExternalUnsignedShortArray:
-        return heap->KeyedStoreExternalUnsignedShortArray_symbol();
-      case kExternalIntArray:
-        return heap->KeyedStoreExternalIntArray_symbol();
-      case kExternalUnsignedIntArray:
-        return heap->KeyedStoreExternalUnsignedIntArray_symbol();
-      case kExternalFloatArray:
-        return heap->KeyedStoreExternalFloatArray_symbol();
-      case kExternalPixelArray:
-        return heap->KeyedStoreExternalPixelArray_symbol();
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  } else {
-    switch (array_type) {
-      case kExternalByteArray:
-        return heap->KeyedLoadExternalByteArray_symbol();
-      case kExternalUnsignedByteArray:
-        return heap->KeyedLoadExternalUnsignedByteArray_symbol();
-      case kExternalShortArray:
-        return heap->KeyedLoadExternalShortArray_symbol();
-      case kExternalUnsignedShortArray:
-        return heap->KeyedLoadExternalUnsignedShortArray_symbol();
-      case kExternalIntArray:
-        return heap->KeyedLoadExternalIntArray_symbol();
-      case kExternalUnsignedIntArray:
-        return heap->KeyedLoadExternalUnsignedIntArray_symbol();
-      case kExternalFloatArray:
-        return heap->KeyedLoadExternalFloatArray_symbol();
-      case kExternalPixelArray:
-        return heap->KeyedLoadExternalPixelArray_symbol();
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-}
-
-}  // anonymous namespace
-
-
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
+MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
     JSObject* receiver,
     bool is_store,
     StrictModeFlag strict_mode) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(
-          is_store ? Code::KEYED_EXTERNAL_ARRAY_STORE_IC :
-                     Code::KEYED_EXTERNAL_ARRAY_LOAD_IC,
+          is_store ? Code::KEYED_STORE_IC :
+                     Code::KEYED_LOAD_IC,
           NORMAL,
           strict_mode);
-  ExternalArrayType array_type =
-      ElementsKindToExternalArrayType(receiver->GetElementsKind());
-  String* name = ExternalArrayTypeToStubName(heap(), array_type, is_store);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    ExternalArrayStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          is_store ?
-              compiler.CompileKeyedStoreStub(receiver, array_type, flags) :
-              compiler.CompileKeyedLoadStub(receiver, array_type, flags);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    Code::cast(code)->set_external_array_type(array_type);
-    if (is_store) {
-      PROFILE(isolate_,
-          CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG,
-                          Code::cast(code), 0));
-    } else {
-      PROFILE(isolate_,
-          CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG,
-                          Code::cast(code), 0));
-    }
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
+  String* name = is_store
+      ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
+      : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
+  Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
+  if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
+
+  MaybeObject* maybe_new_code = NULL;
+  Map* receiver_map = receiver->map();
+  if (is_store) {
+    KeyedStoreStubCompiler compiler(strict_mode);
+    maybe_new_code = compiler.CompileStoreElement(receiver_map);
+  } else {
+    KeyedLoadStubCompiler compiler;
+    maybe_new_code = compiler.CompileLoadElement(receiver_map);
+  }
+  Code* code;
+  if (!maybe_new_code->To(&code)) return maybe_new_code;
+  if (is_store) {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+                            Code::cast(code), 0));
+  } else {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+                            Code::cast(code), 0));
+  }
+  ASSERT(code->IsCode());
+  Object* result;
+  { MaybeObject* maybe_result =
+        receiver->UpdateMapCodeCache(name, Code::cast(code));
+    if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return code;
 }
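
The rewritten ComputeKeyedLoadOrStoreElement keeps the stub cache's usual probe-compile-remember shape: look the stub up in the receiver map's code cache, compile a monomorphic element stub on a miss, then store it back under the monomorphic symbol. A minimal sketch of that pattern under simplified assumptions (std::string keys and a compile callback stand in for V8's symbols, Code::Flags, and stub compilers):

#include <functional>
#include <string>
#include <unordered_map>

// Sketch only: V8 keys its per-map code cache on a symbol plus Code::Flags
// and stores compiled Code objects; std::string stands in for both here.
class ElementStubCacheSketch {
 public:
  using Stub = std::string;  // stand-in for a compiled Code* stub

  Stub ComputeElementStub(const std::string& key,
                          bool is_store,
                          const std::function<Stub(bool)>& compile) {
    auto it = cache_.find(key);
    if (it != cache_.end()) return it->second;  // cache hit: reuse the stub
    Stub code = compile(is_store);              // miss: compile once
    cache_.emplace(key, code);                  // remember for this map/kind
    return code;
  }

 private:
  std::unordered_map<std::string, Stub> cache_;
};
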
@@ -777,7 +652,6 @@
     (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
 
 MaybeObject* StubCache::ComputeCallConstant(int argc,
-                                            InLoopFlag in_loop,
                                             Code::Kind kind,
                                             Code::ExtraICState extra_ic_state,
                                             String* name,
@@ -803,7 +677,6 @@
                                                     CONSTANT_FUNCTION,
                                                     extra_ic_state,
                                                     cache_holder,
-                                                    in_loop,
                                                     argc);
   Object* code = map_holder->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
@@ -813,8 +686,7 @@
     // caches.
     if (!function->is_compiled()) return Failure::InternalError();
     // Compile the stub - only create stubs for fully compiled functions.
-    CallStubCompiler compiler(
-        argc, in_loop, kind, extra_ic_state, cache_holder);
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
     { MaybeObject* maybe_code =
           compiler.CompileCallConstant(object, holder, function, name, check);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -836,8 +708,8 @@
 
 
 MaybeObject* StubCache::ComputeCallField(int argc,
-                                         InLoopFlag in_loop,
                                          Code::Kind kind,
+                                         Code::ExtraICState extra_ic_state,
                                          String* name,
                                          Object* object,
                                          JSObject* holder,
@@ -856,14 +728,12 @@
 
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
                                                     FIELD,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     cache_holder,
-                                                    in_loop,
                                                     argc);
   Object* code = map_holder->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    CallStubCompiler compiler(
-        argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
     { MaybeObject* maybe_code =
           compiler.CompileCallField(JSObject::cast(object),
                                     holder,
@@ -886,11 +756,13 @@
 }
 
 
-MaybeObject* StubCache::ComputeCallInterceptor(int argc,
-                                               Code::Kind kind,
-                                               String* name,
-                                               Object* object,
-                                               JSObject* holder) {
+MaybeObject* StubCache::ComputeCallInterceptor(
+    int argc,
+    Code::Kind kind,
+    Code::ExtraICState extra_ic_state,
+    String* name,
+    Object* object,
+    JSObject* holder) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
       IC::GetCodeCacheForObject(object, holder);
@@ -905,14 +777,12 @@
 
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
                                                     INTERCEPTOR,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     cache_holder,
-                                                    NOT_IN_LOOP,
                                                     argc);
   Object* code = map_holder->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    CallStubCompiler compiler(
-        argc, NOT_IN_LOOP, kind, Code::kNoExtraICState, cache_holder);
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
     { MaybeObject* maybe_code =
           compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -933,12 +803,12 @@
 
 
 MaybeObject* StubCache::ComputeCallNormal(int argc,
-                                          InLoopFlag in_loop,
                                           Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state,
                                           String* name,
                                           JSObject* receiver) {
   Object* code;
-  { MaybeObject* maybe_code = ComputeCallNormal(argc, in_loop, kind);
+  { MaybeObject* maybe_code = ComputeCallNormal(argc, kind, extra_ic_state);
     if (!maybe_code->ToObject(&code)) return maybe_code;
   }
   return code;
@@ -946,8 +816,8 @@
 
 
 MaybeObject* StubCache::ComputeCallGlobal(int argc,
-                                          InLoopFlag in_loop,
                                           Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state,
                                           String* name,
                                           JSObject* receiver,
                                           GlobalObject* holder,
@@ -958,9 +828,8 @@
   JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
                                                     NORMAL,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     cache_holder,
-                                                    in_loop,
                                                     argc);
   Object* code = map_holder->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
@@ -969,8 +838,7 @@
     // internal error which will make sure we do not update any
     // caches.
     if (!function->is_compiled()) return Failure::InternalError();
-    CallStubCompiler compiler(
-        argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
     { MaybeObject* maybe_code =
           compiler.CompileCallGlobal(receiver, holder, cell, function, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -992,7 +860,7 @@
 
 static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
   // Use raw_unchecked... so we don't get assert failures during GC.
-  NumberDictionary* dictionary =
+  UnseededNumberDictionary* dictionary =
       isolate->heap()->raw_unchecked_non_monomorphic_cache();
   int entry = dictionary->FindEntry(isolate, flags);
   if (entry != -1) return dictionary->ValueAt(entry);
@@ -1014,7 +882,8 @@
                                                    heap->undefined_value());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  heap->public_set_non_monomorphic_cache(NumberDictionary::cast(result));
+  heap->public_set_non_monomorphic_cache(
+      UnseededNumberDictionary::cast(result));
   return probe;
 }
 
@@ -1039,12 +908,14 @@
 
 
 Code* StubCache::FindCallInitialize(int argc,
-                                    InLoopFlag in_loop,
+                                    RelocInfo::Mode mode,
                                     Code::Kind kind) {
+  Code::ExtraICState extra_state =
+      CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
+      CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         in_loop,
                                          UNINITIALIZED,
-                                         Code::kNoExtraICState,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
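
FindCallInitialize now derives the code flags from an extra IC state that packs the string-stub mode and a contextual bit, replacing the removed in-loop flag. A minimal sketch of how such encode() helpers pack independent fields into one integer (a simplified BitField, not V8's CallICBase definitions):

#include <cstdint>

// Each property gets a fixed bit range; encode() shifts a value into place
// and the fields are OR-ed together, mirroring the StringStubState |
// Contextual composition above.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

enum StringStubState { DEFAULT_STRING_STUB = 0 };
typedef BitField<StringStubState, 0, 1> StringStubStateField;  // bit 0
typedef BitField<bool, 1, 1> ContextualField;                  // bit 1

int main() {
  uint32_t extra_state =
      StringStubStateField::encode(DEFAULT_STRING_STUB) |
      ContextualField::encode(true);  // e.g. mode == CODE_TARGET_CONTEXT
  return ContextualField::decode(extra_state) ? 0 : 1;
}
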
@@ -1056,12 +927,14 @@
 
 
 MaybeObject* StubCache::ComputeCallInitialize(int argc,
-                                              InLoopFlag in_loop,
+                                              RelocInfo::Mode mode,
                                               Code::Kind kind) {
+  Code::ExtraICState extra_state =
+      CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
+      CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         in_loop,
                                          UNINITIALIZED,
-                                         Code::kNoExtraICState,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Object* probe;
@@ -1074,43 +947,29 @@
 }
 
 
-Handle<Code> StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
-  if (in_loop == IN_LOOP) {
-    // Force the creation of the corresponding stub outside loops,
-    // because it may be used when clearing the ICs later - it is
-    // possible for a series of IC transitions to lose the in-loop
-    // information, and the IC clearing code can't generate a stub
-    // that it needs so we need to ensure it is generated already.
-    ComputeCallInitialize(argc, NOT_IN_LOOP);
-  }
+Handle<Code> StubCache::ComputeCallInitialize(int argc,
+                                              RelocInfo::Mode mode) {
   CALL_HEAP_FUNCTION(isolate_,
-                     ComputeCallInitialize(argc, in_loop, Code::CALL_IC), Code);
+                     ComputeCallInitialize(argc, mode, Code::CALL_IC),
+                     Code);
 }
 
 
-Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc,
-                                                   InLoopFlag in_loop) {
-  if (in_loop == IN_LOOP) {
-    // Force the creation of the corresponding stub outside loops,
-    // because it may be used when clearing the ICs later - it is
-    // possible for a series of IC transitions to lose the in-loop
-    // information, and the IC clearing code can't generate a stub
-    // that it needs so we need to ensure it is generated already.
-    ComputeKeyedCallInitialize(argc, NOT_IN_LOOP);
-  }
+Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
   CALL_HEAP_FUNCTION(
       isolate_,
-      ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC), Code);
+      ComputeCallInitialize(argc, RelocInfo::CODE_TARGET, Code::KEYED_CALL_IC),
+      Code);
 }
 
 
-MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
-                                                  InLoopFlag in_loop,
-                                                  Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallPreMonomorphic(
+    int argc,
+    Code::Kind kind,
+    Code::ExtraICState extra_ic_state) {
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         in_loop,
                                          PREMONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Object* probe;
@@ -1124,12 +983,11 @@
 
 
 MaybeObject* StubCache::ComputeCallNormal(int argc,
-                                          InLoopFlag in_loop,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         in_loop,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Object* probe;
@@ -1142,11 +1000,9 @@
 }
 
 
-MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
-                                               InLoopFlag in_loop,
-                                               Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
+  ASSERT(kind == Code::KEYED_CALL_IC);
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         in_loop,
                                          MEGAMORPHIC,
                                          Code::kNoExtraICState,
                                          NORMAL,
@@ -1157,17 +1013,37 @@
   }
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallArguments(flags));
+}
+
+
+MaybeObject* StubCache::ComputeCallMegamorphic(
+    int argc,
+    Code::Kind kind,
+    Code::ExtraICState extra_ic_state) {
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         MEGAMORPHIC,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
   return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
 }
 
 
-MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallMiss(int argc,
+                                        Code::Kind kind,
+                                        Code::ExtraICState extra_ic_state) {
   // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
   // and monomorphic stubs are not mixed up together in the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         NOT_IN_LOOP,
                                          MONOMORPHIC_PROTOTYPE_FAILURE,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc,
                                          OWN_MAP);
@@ -1182,9 +1058,12 @@
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugBreak(
+    int argc,
+    Code::Kind kind) {
+  // Extra IC state is irrelevant for debug break ICs. They jump to
+  // the actual call IC to carry out the work.
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         NOT_IN_LOOP,
                                          DEBUG_BREAK,
                                          Code::kNoExtraICState,
                                          NORMAL,
@@ -1199,10 +1078,12 @@
 }
 
 
-MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(int argc,
-                                                      Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
+    int argc,
+    Code::Kind kind) {
+  // Extra IC state is irrelevant for debug break ICs. They jump to
+  // the actual call IC to carry out the work.
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         NOT_IN_LOOP,
                                          DEBUG_PREPARE_STEP_IN,
                                          Code::kNoExtraICState,
                                          NORMAL,
@@ -1232,7 +1113,7 @@
 }
 
 
-void StubCache::CollectMatchingMaps(ZoneMapList* types,
+void StubCache::CollectMatchingMaps(SmallMapList* types,
                                     String* name,
                                     Code::Flags flags) {
   for (int i = 0; i < kPrimaryTableSize; i++) {
@@ -1461,8 +1342,7 @@
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
   Object* value = args[2];
-  StrictModeFlag strict_mode =
-      static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
+  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
   ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
   ASSERT(recv->HasNamedInterceptor());
   PropertyAttributes attr = NONE;
@@ -1474,8 +1354,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
   JSObject* receiver = JSObject::cast(args[0]);
-  ASSERT(Smi::cast(args[1])->value() >= 0);
-  uint32_t index = Smi::cast(args[1])->value();
+  ASSERT(args.smi_at(1) >= 0);
+  uint32_t index = args.smi_at(1);
   return receiver->GetElementWithInterceptor(receiver, index);
 }
 
@@ -1484,8 +1364,9 @@
   HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc);
+    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
@@ -1511,8 +1392,9 @@
   // The code of the PreMonomorphic stub is the same as the code
   // of the Initialized stub.  They just differ on the code object flags.
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc);
+    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
@@ -1537,6 +1419,9 @@
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
+    // Call normal always has an explicit receiver.
+    ASSERT(!CallIC::Contextual::decode(
+        Code::ExtractExtraICStateFromFlags(flags)));
     CallIC::GenerateNormal(masm(), argc);
   } else {
     KeyedCallIC::GenerateNormal(masm(), argc);
@@ -1560,8 +1445,9 @@
   HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMegamorphic(masm(), argc);
+    CallIC::GenerateMegamorphic(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateMegamorphic(masm(), argc);
   }
@@ -1581,12 +1467,33 @@
 }
 
 
+MaybeObject* StubCompiler::CompileCallArguments(Code::Flags flags) {
+  HandleScope scope(isolate());
+  int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
+  Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Object* result;
+  { MaybeObject* maybe_result =
+        GetCodeWithFlags(flags, "CompileCallArguments");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Code* code = Code::cast(result);
+  USE(code);
+  PROFILE(isolate(),
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
+                          code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
+  return result;
+}
+
+
 MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
   HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc);
+    CallIC::GenerateMiss(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
@@ -1632,7 +1539,8 @@
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc);
+    // For the debugger, extra IC state is irrelevant.
+    CallIC::GenerateMiss(masm(), argc, Code::kNoExtraICState);
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
@@ -1711,8 +1619,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
+MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type,
+                                            String* name,
+                                            InlineCacheState state) {
+  Code::Flags flags = Code::ComputeFlags(
+      Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
   MaybeObject* result = GetCodeWithFlags(flags, name);
   if (!result->IsFailure()) {
     PROFILE(isolate(),
@@ -1728,8 +1639,8 @@
 
 
 MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, type, strict_mode_);
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
   MaybeObject* result = GetCodeWithFlags(flags, name);
   if (!result->IsFailure()) {
     PROFILE(isolate(),
@@ -1744,9 +1655,11 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::KEYED_STORE_IC, type, strict_mode_);
+MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type,
+                                             String* name,
+                                             InlineCacheState state) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::KEYED_STORE_IC, state, strict_mode_, type);
   MaybeObject* result = GetCodeWithFlags(flags, name);
   if (!result->IsFailure()) {
     PROFILE(isolate(),
@@ -1761,13 +1674,17 @@
 }
 
 
+void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateSlow(masm);
+}
+
+
 CallStubCompiler::CallStubCompiler(int argc,
-                                   InLoopFlag in_loop,
                                    Code::Kind kind,
                                    Code::ExtraICState extra_ic_state,
                                    InlineCacheHolderFlag cache_holder)
     : arguments_(argc),
-      in_loop_(in_loop),
       kind_(kind),
       extra_ic_state_(extra_ic_state),
       cache_holder_(cache_holder) {
@@ -1828,7 +1745,6 @@
                                                     type,
                                                     extra_ic_state_,
                                                     cache_holder_,
-                                                    in_loop_,
                                                     argc);
   return GetCodeWithFlags(flags, name);
 }
@@ -1924,17 +1840,4 @@
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
-  Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
-  return result;
-}
-
-
 } }  // namespace v8::internal
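
The patch also promotes invariants that are known at compile time from runtime ASSERTs to STATIC_ASSERTs (the ICStateField mask check above and the kHeapObjectTagSize check in the header below). A minimal illustration of the difference using standard C++ static_assert, with illustrative constants rather than V8's:

#include <cassert>

const int kHeapObjectTagSize = 2;  // illustrative values, not V8's
const int kHashShift = 2;

// Compile-time check: a violation breaks the build for every configuration.
static_assert(kHeapObjectTagSize == kHashShift,
              "tag size must equal hash shift");

// Runtime check: only fires when this code path executes, and only in
// builds where assert() is enabled.
void CheckAtRuntime(int actual_shift) {
  assert(actual_shift == kHeapObjectTagSize);
}

int main() {
  CheckAtRuntime(kHashShift);
  return 0;
}
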
diff --git a/src/stub-cache.h b/src/stub-cache.h
index c5dcf36..18c157b 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,10 @@
 #ifndef V8_STUB_CACHE_H_
 #define V8_STUB_CACHE_H_
 
+#include "allocation.h"
 #include "arguments.h"
 #include "macro-assembler.h"
+#include "objects.h"
 #include "zone-inl.h"
 
 namespace v8 {
@@ -44,8 +46,10 @@
 // invalidate the cache whenever a prototype map is changed.  The stub
 // validates the map chain as in the mono-morphic case.
 
+class SmallMapList;
 class StubCache;
 
+
 class SCTableReference {
  public:
   Address address() const { return address_; }
@@ -143,9 +147,6 @@
       String* name,
       JSFunction* receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadSpecialized(
-      JSObject* receiver);
-
   // ---
 
   MUST_USE_RESULT MaybeObject* ComputeStoreField(
@@ -184,29 +185,24 @@
       Map* transition,
       StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedStoreSpecialized(
-      JSObject* receiver,
-      StrictModeFlag strict_mode);
-
-
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
       JSObject* receiver,
       bool is_store,
       StrictModeFlag strict_mode);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeCallField(int argc,
-                                                InLoopFlag in_loop,
-                                                Code::Kind,
-                                                String* name,
-                                                Object* object,
-                                                JSObject* holder,
-                                                int index);
+  MUST_USE_RESULT MaybeObject* ComputeCallField(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      Object* object,
+      JSObject* holder,
+      int index);
 
   MUST_USE_RESULT MaybeObject* ComputeCallConstant(
       int argc,
-      InLoopFlag in_loop,
       Code::Kind,
       Code::ExtraICState extra_ic_state,
       String* name,
@@ -214,22 +210,25 @@
       JSObject* holder,
       JSFunction* function);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
-                                                 InLoopFlag in_loop,
-                                                 Code::Kind,
-                                                 String* name,
-                                                 JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* ComputeCallNormal(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      JSObject* receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(int argc,
-                                                      Code::Kind,
-                                                      String* name,
-                                                      Object* object,
-                                                      JSObject* holder);
+  MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      Object* object,
+      JSObject* holder);
 
   MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
       int argc,
-      InLoopFlag in_loop,
       Code::Kind,
+      Code::ExtraICState extra_ic_state,
       String* name,
       JSObject* receiver,
       GlobalObject* holder,
@@ -239,31 +238,37 @@
   // ---
 
   MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
-                                                     InLoopFlag in_loop,
+                                                     RelocInfo::Mode mode,
                                                      Code::Kind kind);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  Handle<Code> ComputeCallInitialize(int argc,
+                                     RelocInfo::Mode mode);
 
-  Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
+  Handle<Code> ComputeKeyedCallInitialize(int argc);
 
   MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
       int argc,
-      InLoopFlag in_loop,
-      Code::Kind kind);
+      Code::Kind kind,
+      Code::ExtraICState extra_ic_state);
 
   MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
-                                                 InLoopFlag in_loop,
-                                                 Code::Kind kind);
+                                                 Code::Kind kind,
+                                                 Code::ExtraICState state);
+
+  MUST_USE_RESULT MaybeObject* ComputeCallArguments(int argc,
+                                                    Code::Kind kind);
 
   MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
-                                                      InLoopFlag in_loop,
-                                                      Code::Kind kind);
+                                                      Code::Kind kind,
+                                                      Code::ExtraICState state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc, Code::Kind kind);
+  MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState state);
 
   // Finds the Code object stored in the Heap::non_monomorphic_cache().
   MUST_USE_RESULT Code* FindCallInitialize(int argc,
-                                           InLoopFlag in_loop,
+                                           RelocInfo::Mode mode,
                                            Code::Kind kind);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -280,7 +285,7 @@
   void Clear();
 
   // Collect all maps that match the name and flags.
-  void CollectMatchingMaps(ZoneMapList* types,
+  void CollectMatchingMaps(SmallMapList* types,
                            String* name,
                            Code::Flags flags);
 
@@ -336,12 +341,12 @@
   Entry secondary_[kSecondaryTableSize];
 
   // Computes the hashed offsets for primary and secondary caches.
-  RLYSTC int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+  static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
     // This works well because the heap object tag size and the hash
     // shift are equal.  Shifting down the length field to get the
     // hash code would effectively throw away two bits of the hash
     // code.
-    ASSERT(kHeapObjectTagSize == String::kHashShift);
+    STATIC_ASSERT(kHeapObjectTagSize == String::kHashShift);
     // Compute the hash of the name (use entire hash field).
     ASSERT(name->HasHashCode());
     uint32_t field = name->hash_field();
@@ -359,15 +364,11 @@
     return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
   }
 
-  RLYSTC int SecondaryOffset(String* name, Code::Flags flags, int seed) {
+  static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t string_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
-    // We always set the in_loop bit to zero when generating the lookup code
-    // so do it here too so the hash codes match.
-    uint32_t iflags =
-        (static_cast<uint32_t>(flags) & ~Code::kFlagsICInLoopMask);
-    uint32_t key = seed - string_low32bits + iflags;
+    uint32_t key = seed - string_low32bits + flags;
     return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
   }
 
@@ -376,7 +377,7 @@
   // ends in String::kHashShift 0s.  Then we shift it so it is a multiple
   // of sizeof(Entry).  This makes it easier to avoid making mistakes
   // in the hashed offset computations.
-  RLYSTC Entry* entry(Entry* table, int offset) {
+  static Entry* entry(Entry* table, int offset) {
     const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
     return reinterpret_cast<Entry*>(
         reinterpret_cast<Address>(table) + (offset << shift_amount));
@@ -415,6 +416,7 @@
   MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
   MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
   MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallArguments(Code::Flags flags);
   MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
@@ -468,7 +470,10 @@
                                  Register scratch,
                                  Label* miss_label);
 
-  static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
+  static void GenerateLoadMiss(MacroAssembler* masm,
+                               Code::Kind kind);
+
+  static void GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm);
 
   // Generates code that verifies that the property holder has not changed
   // (checking maps of objects in the prototype chain for fast and global
@@ -633,10 +638,25 @@
   MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
   MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
+
+  MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
+      MapList* receiver_maps,
+      CodeList* handler_ics);
+
+  static void GenerateLoadExternalArray(MacroAssembler* masm,
+                                        ElementsKind elements_kind);
+
+  static void GenerateLoadFastElement(MacroAssembler* masm);
+
+  static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
+
+  static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  MaybeObject* GetCode(PropertyType type,
+                       String* name,
+                       InlineCacheState state = MONOMORPHIC);
 };
 
 
@@ -677,10 +697,27 @@
                                                  Map* transition,
                                                  String* name);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
+
+  MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
+      MapList* receiver_maps,
+      CodeList* handler_ics);
+
+  static void GenerateStoreFastElement(MacroAssembler* masm,
+                                       bool is_js_array);
+
+  static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
+                                             bool is_js_array);
+
+  static void GenerateStoreExternalArray(MacroAssembler* masm,
+                                         ElementsKind elements_kind);
+
+  static void GenerateStoreDictionaryElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  MaybeObject* GetCode(PropertyType type,
+                       String* name,
+                       InlineCacheState state = MONOMORPHIC);
 
   StrictModeFlag strict_mode_;
 };
@@ -703,28 +740,34 @@
 class CallStubCompiler: public StubCompiler {
  public:
   CallStubCompiler(int argc,
-                   InLoopFlag in_loop,
                    Code::Kind kind,
                    Code::ExtraICState extra_ic_state,
                    InlineCacheHolderFlag cache_holder);
 
-  MUST_USE_RESULT MaybeObject* CompileCallField(JSObject* object,
-                                                JSObject* holder,
-                                                int index,
-                                                String* name);
-  MUST_USE_RESULT MaybeObject* CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
-                                                   CheckType check);
-  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
-  MUST_USE_RESULT MaybeObject* CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name);
+  MUST_USE_RESULT MaybeObject* CompileCallField(
+      JSObject* object,
+      JSObject* holder,
+      int index,
+      String* name);
+
+  MUST_USE_RESULT MaybeObject* CompileCallConstant(
+      Object* object,
+      JSObject* holder,
+      JSFunction* function,
+      String* name,
+      CheckType check);
+
+  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(
+      JSObject* object,
+      JSObject* holder,
+      String* name);
+
+  MUST_USE_RESULT MaybeObject* CompileCallGlobal(
+      JSObject* object,
+      GlobalObject* holder,
+      JSGlobalPropertyCell* cell,
+      JSFunction* function,
+      String* name);
 
   static bool HasCustomCallGenerator(JSFunction* function);
 
@@ -756,7 +799,6 @@
       String* name);
 
   const ParameterCount arguments_;
-  const InLoopFlag in_loop_;
   const Code::Kind kind_;
   const Code::ExtraICState extra_ic_state_;
   const InlineCacheHolderFlag cache_holder_;
@@ -847,19 +889,6 @@
   CallHandlerInfo* api_call_info_;
 };
 
-class ExternalArrayStubCompiler: public StubCompiler {
- public:
-  explicit ExternalArrayStubCompiler() {}
-
-  MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
-      JSObject* receiver, ExternalArrayType array_type, Code::Flags flags);
-
-  MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
-      JSObject* receiver, ExternalArrayType array_type, Code::Flags flags);
-
- private:
-  MaybeObject* GetCode(Code::Flags flags);
-};
 
 } }  // namespace v8::internal
 
diff --git a/src/third_party/valgrind/valgrind.h b/src/third_party/valgrind/valgrind.h
index a94dc58..7a3ee2f 100644
--- a/src/third_party/valgrind/valgrind.h
+++ b/src/third_party/valgrind/valgrind.h
@@ -12,7 +12,7 @@
    This file is part of Valgrind, a dynamic binary instrumentation
    framework.
 
-   Copyright (C) 2000-2007 Julian Seward.  All rights reserved.
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
 
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
@@ -73,6 +73,25 @@
 #ifndef __VALGRIND_H
 #define __VALGRIND_H
 
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND                                         */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+   conditionally compile based on our version number.  Note that these
+   were introduced at version 3.6 and so do not exist in version 3.5
+   or earlier.  The recommended way to use them to check for "version
+   X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__)   \
+    && (__VALGRIND_MAJOR__ > 3                                   \
+        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__    3
+#define __VALGRIND_MINOR__    6
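For context, a minimal sketch (not part of the patch) of how client code can consume these new macros, following exactly the usage pattern described in the comment above; the HAVE_VALGRIND_3_6_OR_LATER name is illustrative only:

    /* Sketch only: guard 3.6-specific client requests in user code. */
    #include "valgrind.h"

    #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__)   \
        && (__VALGRIND_MAJOR__ > 3                                   \
            || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
    #  define HAVE_VALGRIND_3_6_OR_LATER 1   /* hypothetical name */
    #else
    #  define HAVE_VALGRIND_3_6_OR_LATER 0
    #endif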
+
+
 #include <stdarg.h>
 #include <stdint.h>
 
@@ -85,34 +104,44 @@
    identifying architectures, which are different to the ones we use
    within the rest of Valgrind.  Note, __powerpc__ is active for both
    32 and 64-bit PPC, whereas __powerpc64__ is only active for the
-   latter (on Linux, that is). */
+   latter (on Linux, that is).
+
+   Misc note: how to find out what's predefined in gcc by default:
+   gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
 #undef PLAT_x86_linux
 #undef PLAT_amd64_linux
 #undef PLAT_ppc32_linux
 #undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
+#undef PLAT_arm_linux
+#undef PLAT_s390x_linux
 
-#if !defined(_AIX) && defined(__i386__)
+
+#if defined(__APPLE__) && defined(__i386__)
+#  define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+#  define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) \
+      || (defined(_WIN32) && defined(_M_IX86))
+#  define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
 #  define PLAT_x86_linux 1
-#elif !defined(_AIX) && defined(__x86_64__)
+#elif defined(__linux__) && defined(__x86_64__)
 #  define PLAT_amd64_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
 #  define PLAT_ppc32_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
 #  define PLAT_ppc64_linux 1
-#elif defined(_AIX) && defined(__64BIT__)
-#  define PLAT_ppc64_aix5 1
-#elif defined(_AIX) && !defined(__64BIT__)
-#  define PLAT_ppc32_aix5 1
-#endif
-
-
+#elif defined(__linux__) && defined(__arm__)
+#  define PLAT_arm_linux 1
+#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
+#  define PLAT_s390x_linux 1
+#else
 /* If we're not compiling for our target platform, don't generate
    any inline asms.  */
-#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
-    && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
-    && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
 #  if !defined(NVALGRIND)
 #    define NVALGRIND 1
 #  endif
@@ -124,17 +153,31 @@
 /* in here of use to end-users -- skip to the next section.           */
 /* ------------------------------------------------------------------ */
 
+/*
+ * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
+ * request. Accepts both pointers and integers as arguments.
+ *
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result. Accepts
+ * both pointers and integers as arguments.
+ */
+
+#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default,            \
+                                   _zzq_request, _zzq_arg1, _zzq_arg2,  \
+                                   _zzq_arg3, _zzq_arg4, _zzq_arg5)     \
+  { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default),      \
+                        (_zzq_request), (_zzq_arg1), (_zzq_arg2),       \
+                        (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); }
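For illustration, a sketch (not part of the patch) of the two request flavours as seen from client code; both collapse to the default value when NVALGRIND is defined. VG_USERREQ__RUNNING_ON_VALGRIND is a request code defined elsewhere in this header; the function names are illustrative:

    #include "valgrind.h"   /* this header */

    /* Expression form: evaluates to the request's result directly. */
    static int running_on_valgrind_expr(void) {
        return (int) VALGRIND_DO_CLIENT_REQUEST_EXPR(
            0 /* default when not running under Valgrind */,
            VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0);
    }

    /* Statement form: assigns the result to a caller-supplied lvalue. */
    static int running_on_valgrind_stmt(void) {
        unsigned long res;
        VALGRIND_DO_CLIENT_REQUEST(res, 0,
            VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0);
        return (int) res;
    }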
+
 #if defined(NVALGRIND)
 
 /* Define NVALGRIND to completely remove the Valgrind magic sequence
    from the compiled code (analogous to NDEBUG's effects on
    assert()) */
-#define VALGRIND_DO_CLIENT_REQUEST(                               \
-        _zzq_rlval, _zzq_default, _zzq_request,                   \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                          \
+        _zzq_default, _zzq_request,                               \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
-   {                                                              \
-      (_zzq_rlval) = (_zzq_default);                              \
-   }
+      (_zzq_default)
 
 #else  /* ! NVALGRIND */
 
@@ -173,9 +216,10 @@
    inline asm stuff to be useful.
 */
 
-/* ------------------------- x86-linux ------------------------- */
+/* ------------------------- x86-{linux,darwin} ---------------- */
 
-#if defined(PLAT_x86_linux)
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)  \
+    ||  (defined(PLAT_x86_win32) && defined(__GNUC__))
 
 typedef
    struct { 
@@ -187,10 +231,11 @@
                      "roll $3,  %%edi ; roll $13, %%edi\n\t"      \
                      "roll $29, %%edi ; roll $19, %%edi\n\t"
 
-#define VALGRIND_DO_CLIENT_REQUEST(                               \
-        _zzq_rlval, _zzq_default, _zzq_request,                   \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                          \
+        _zzq_default, _zzq_request,                               \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
-  { volatile unsigned int _zzq_args[6];                           \
+  __extension__                                                   \
+  ({volatile unsigned int _zzq_args[6];                           \
     volatile unsigned int _zzq_result;                            \
     _zzq_args[0] = (unsigned int)(_zzq_request);                  \
     _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
@@ -205,8 +250,8 @@
                      : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                      : "cc", "memory"                             \
                     );                                            \
-    _zzq_rlval = _zzq_result;                                     \
-  }
+    _zzq_result;                                                  \
+  })
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
@@ -225,11 +270,77 @@
                      __SPECIAL_INSTRUCTION_PREAMBLE               \
                      /* call-noredir *%EAX */                     \
                      "xchgl %%edx,%%edx\n\t"
-#endif /* PLAT_x86_linux */
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
 
-/* ------------------------ amd64-linux ------------------------ */
+/* ------------------------- x86-Win32 ------------------------- */
 
-#if defined(PLAT_amd64_linux)
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     __asm rol edi, 3  __asm rol edi, 13          \
+                     __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                          \
+        _zzq_default, _zzq_request,                               \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+    valgrind_do_client_request_expr((uintptr_t)(_zzq_default),    \
+        (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1),        \
+        (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3),           \
+        (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
+
+static __inline uintptr_t
+valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
+                                uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
+                                uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
+                                uintptr_t _zzq_arg5)
+{
+    volatile uintptr_t _zzq_args[6];
+    volatile unsigned int _zzq_result;
+    _zzq_args[0] = (uintptr_t)(_zzq_request);
+    _zzq_args[1] = (uintptr_t)(_zzq_arg1);
+    _zzq_args[2] = (uintptr_t)(_zzq_arg2);
+    _zzq_args[3] = (uintptr_t)(_zzq_arg3);
+    _zzq_args[4] = (uintptr_t)(_zzq_arg4);
+    _zzq_args[5] = (uintptr_t)(_zzq_arg5);
+    __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
+            __SPECIAL_INSTRUCTION_PREAMBLE
+            /* %EDX = client_request ( %EAX ) */
+            __asm xchg ebx,ebx
+            __asm mov _zzq_result, edx
+    }
+    return _zzq_result;
+}
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm { __SPECIAL_INSTRUCTION_PREAMBLE                        \
+            /* %EAX = guest_NRADDR */                             \
+            __asm xchg ecx,ecx                                    \
+            __asm mov __addr, eax                                 \
+    }                                                             \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
 
 typedef
    struct { 
@@ -241,10 +352,11 @@
                      "rolq $3,  %%rdi ; rolq $13, %%rdi\n\t"      \
                      "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
 
-#define VALGRIND_DO_CLIENT_REQUEST(                               \
-        _zzq_rlval, _zzq_default, _zzq_request,                   \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                          \
+        _zzq_default, _zzq_request,                               \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
-  { volatile uint64_t _zzq_args[6];                 \
+    __extension__                                                 \
+    ({ volatile uint64_t _zzq_args[6];              \
     volatile uint64_t _zzq_result;                  \
     _zzq_args[0] = (uint64_t)(_zzq_request);        \
     _zzq_args[1] = (uint64_t)(_zzq_arg1);           \
@@ -259,8 +371,8 @@
                      : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                      : "cc", "memory"                             \
                     );                                            \
-    _zzq_rlval = _zzq_result;                                     \
-  }
+    _zzq_result;                                                  \
+    })
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
@@ -279,7 +391,7 @@
                      __SPECIAL_INSTRUCTION_PREAMBLE               \
                      /* call-noredir *%RAX */                     \
                      "xchgq %%rdx,%%rdx\n\t"
-#endif /* PLAT_amd64_linux */
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
 
 /* ------------------------ ppc32-linux ------------------------ */
 
@@ -295,11 +407,12 @@
                      "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
                      "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
 
-#define VALGRIND_DO_CLIENT_REQUEST(                               \
-        _zzq_rlval, _zzq_default, _zzq_request,                   \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                          \
+        _zzq_default, _zzq_request,                               \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
                                                                   \
-  {          unsigned int  _zzq_args[6];                          \
+    __extension__                                                 \
+  ({         unsigned int  _zzq_args[6];                          \
              unsigned int  _zzq_result;                           \
              unsigned int* _zzq_ptr;                              \
     _zzq_args[0] = (unsigned int)(_zzq_request);                  \
@@ -318,8 +431,8 @@
                      : "=b" (_zzq_result)                         \
                      : "b" (_zzq_default), "b" (_zzq_ptr)         \
                      : "cc", "memory", "r3", "r4");               \
-    _zzq_rlval = _zzq_result;                                     \
-  }
+    _zzq_result;                                                  \
+    })
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
@@ -356,11 +469,12 @@
                      "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
                      "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
 
-#define VALGRIND_DO_CLIENT_REQUEST(                               \
-        _zzq_rlval, _zzq_default, _zzq_request,                   \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                          \
+        _zzq_default, _zzq_request,                               \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
                                                                   \
-  {          uint64_t  _zzq_args[6];                \
+  __extension__                                                   \
+  ({         uint64_t  _zzq_args[6];                \
     register uint64_t  _zzq_result __asm__("r3");   \
     register uint64_t* _zzq_ptr __asm__("r4");      \
     _zzq_args[0] = (uint64_t)(_zzq_request);        \
@@ -376,8 +490,8 @@
                      : "=r" (_zzq_result)                         \
                      : "0" (_zzq_default), "r" (_zzq_ptr)         \
                      : "cc", "memory");                           \
-    _zzq_rlval = _zzq_result;                                     \
-  }
+    _zzq_result;                                                  \
+  })
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
@@ -407,149 +521,135 @@
 
 #endif /* PLAT_ppc64_linux */
 
-/* ------------------------ ppc32-aix5 ------------------------- */
+/* ------------------------- arm-linux ------------------------- */
 
-#if defined(PLAT_ppc32_aix5)
+#if defined(PLAT_arm_linux)
 
 typedef
    struct { 
       unsigned int nraddr; /* where's the code? */
-      unsigned int r2;  /* what tocptr do we need? */
    }
    OrigFn;
 
 #define __SPECIAL_INSTRUCTION_PREAMBLE                            \
-                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
-                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+            "mov r12, r12, ror #3  ; mov r12, r12, ror #13 \n\t"  \
+            "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
 
-#define VALGRIND_DO_CLIENT_REQUEST(                               \
-        _zzq_rlval, _zzq_default, _zzq_request,                   \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                          \
+        _zzq_default, _zzq_request,                               \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
                                                                   \
-  {          unsigned int  _zzq_args[7];                          \
-    register unsigned int  _zzq_result;                           \
-    register unsigned int* _zzq_ptr;                              \
+  __extension__                                                   \
+  ({volatile unsigned int  _zzq_args[6];                          \
+    volatile unsigned int  _zzq_result;                           \
     _zzq_args[0] = (unsigned int)(_zzq_request);                  \
     _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
     _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
     _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
     _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
     _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
-    _zzq_args[6] = (unsigned int)(_zzq_default);                  \
-    _zzq_ptr = _zzq_args;                                         \
-    __asm__ volatile("mr 4,%1\n\t"                                \
-                     "lwz 3, 24(4)\n\t"                           \
+    __asm__ volatile("mov r3, %1\n\t" /*default*/                 \
+                     "mov r4, %2\n\t" /*ptr*/                     \
                      __SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* %R3 = client_request ( %R4 ) */           \
-                     "or 1,1,1\n\t"                               \
-                     "mr %0,3"                                    \
-                     : "=b" (_zzq_result)                         \
-                     : "b" (_zzq_ptr)                             \
-                     : "r3", "r4", "cc", "memory");               \
-    _zzq_rlval = _zzq_result;                                     \
-  }
+                     /* R3 = client_request ( R4 ) */             \
+                     "orr r10, r10, r10\n\t"                      \
+                     "mov %0, r3"     /*result*/                  \
+                     : "=r" (_zzq_result)                         \
+                     : "r" (_zzq_default), "r" (&_zzq_args[0])    \
+                     : "cc","memory", "r3", "r4");                \
+    _zzq_result;                                                  \
+  })
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
-    register unsigned int __addr;                                 \
+    unsigned int __addr;                                          \
     __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* %R3 = guest_NRADDR */                     \
-                     "or 2,2,2\n\t"                               \
-                     "mr %0,3"                                    \
-                     : "=b" (__addr)                              \
+                     /* R3 = guest_NRADDR */                      \
+                     "orr r11, r11, r11\n\t"                      \
+                     "mov %0, r3"                                 \
+                     : "=r" (__addr)                              \
                      :                                            \
-                     : "r3", "cc", "memory"                       \
+                     : "cc", "memory", "r3"                       \
                     );                                            \
     _zzq_orig->nraddr = __addr;                                   \
-    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* %R3 = guest_NRADDR_GPR2 */                \
-                     "or 4,4,4\n\t"                               \
-                     "mr %0,3"                                    \
-                     : "=b" (__addr)                              \
-                     :                                            \
-                     : "r3", "cc", "memory"                       \
-                    );                                            \
-    _zzq_orig->r2 = __addr;                                       \
   }
 
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                    \
                      __SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* branch-and-link-to-noredir *%R11 */       \
-                     "or 3,3,3\n\t"
+                     /* branch-and-link-to-noredir *%R4 */        \
+                     "orr r12, r12, r12\n\t"
 
-#endif /* PLAT_ppc32_aix5 */
+#endif /* PLAT_arm_linux */
 
-/* ------------------------ ppc64-aix5 ------------------------- */
+/* ------------------------ s390x-linux ------------------------ */
 
-#if defined(PLAT_ppc64_aix5)
+#if defined(PLAT_s390x_linux)
 
 typedef
-   struct { 
-      uint64_t nraddr; /* where's the code? */
-      uint64_t r2;  /* what tocptr do we need? */
-   }
-   OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
-                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
-                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST(                               \
-        _zzq_rlval, _zzq_default, _zzq_request,                   \
-        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
-                                                                  \
-  {          uint64_t  _zzq_args[7];                \
-    register uint64_t  _zzq_result;                 \
-    register uint64_t* _zzq_ptr;                    \
-    _zzq_args[0] = (unsigned int long long)(_zzq_request);        \
-    _zzq_args[1] = (unsigned int long long)(_zzq_arg1);           \
-    _zzq_args[2] = (unsigned int long long)(_zzq_arg2);           \
-    _zzq_args[3] = (unsigned int long long)(_zzq_arg3);           \
-    _zzq_args[4] = (unsigned int long long)(_zzq_arg4);           \
-    _zzq_args[5] = (unsigned int long long)(_zzq_arg5);           \
-    _zzq_args[6] = (unsigned int long long)(_zzq_default);        \
-    _zzq_ptr = _zzq_args;                                         \
-    __asm__ volatile("mr 4,%1\n\t"                                \
-                     "ld 3, 48(4)\n\t"                            \
-                     __SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* %R3 = client_request ( %R4 ) */           \
-                     "or 1,1,1\n\t"                               \
-                     "mr %0,3"                                    \
-                     : "=b" (_zzq_result)                         \
-                     : "b" (_zzq_ptr)                             \
-                     : "r3", "r4", "cc", "memory");               \
-    _zzq_rlval = _zzq_result;                                     \
+  struct {
+     uint64_t nraddr; /* where's the code? */
   }
+  OrigFn;
 
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
-  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
-    register uint64_t __addr;                       \
-    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* %R3 = guest_NRADDR */                     \
-                     "or 2,2,2\n\t"                               \
-                     "mr %0,3"                                    \
-                     : "=b" (__addr)                              \
-                     :                                            \
-                     : "r3", "cc", "memory"                       \
-                    );                                            \
-    _zzq_orig->nraddr = __addr;                                   \
-    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* %R3 = guest_NRADDR_GPR2 */                \
-                     "or 4,4,4\n\t"                               \
-                     "mr %0,3"                                    \
-                     : "=b" (__addr)                              \
-                     :                                            \
-                     : "r3", "cc", "memory"                       \
-                    );                                            \
-    _zzq_orig->r2 = __addr;                                       \
-  }
+/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
+ * code. This detection is implemented in platform specific toIR.c
+ * (e.g. VEX/priv/guest_s390_decoder.c).
+ */
+#define __SPECIAL_INSTRUCTION_PREAMBLE                           \
+                     "lr 15,15\n\t"                              \
+                     "lr 1,1\n\t"                                \
+                     "lr 2,2\n\t"                                \
+                     "lr 3,3\n\t"
 
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
-                     __SPECIAL_INSTRUCTION_PREAMBLE               \
-                     /* branch-and-link-to-noredir *%R11 */       \
-                     "or 3,3,3\n\t"
+#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
+#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
+#define __CALL_NO_REDIR_CODE  "lr 4,4\n\t"
 
-#endif /* PLAT_ppc64_aix5 */
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                         \
+       _zzq_default, _zzq_request,                               \
+       _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  __extension__                                                  \
+ ({volatile uint64_t _zzq_args[6];                 \
+   volatile uint64_t _zzq_result;                  \
+   _zzq_args[0] = (uint64_t)(_zzq_request);        \
+   _zzq_args[1] = (uint64_t)(_zzq_arg1);           \
+   _zzq_args[2] = (uint64_t)(_zzq_arg2);           \
+   _zzq_args[3] = (uint64_t)(_zzq_arg3);           \
+   _zzq_args[4] = (uint64_t)(_zzq_arg4);           \
+   _zzq_args[5] = (uint64_t)(_zzq_arg5);           \
+   __asm__ volatile(/* r2 = args */                              \
+                    "lgr 2,%1\n\t"                               \
+                    /* r3 = default */                           \
+                    "lgr 3,%2\n\t"                               \
+                    __SPECIAL_INSTRUCTION_PREAMBLE               \
+                    __CLIENT_REQUEST_CODE                        \
+                    /* results = r3 */                           \
+                    "lgr %0, 3\n\t"                              \
+                    : "=d" (_zzq_result)                         \
+                    : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                    : "cc", "2", "3", "memory"                   \
+                   );                                            \
+   _zzq_result;                                                  \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                      \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+   volatile uint64_t __addr;                       \
+   __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                    __GET_NR_CONTEXT_CODE                        \
+                    "lgr %0, 3\n\t"                              \
+                    : "=a" (__addr)                              \
+                    :                                            \
+                    : "cc", "3", "memory"                        \
+                   );                                            \
+   _zzq_orig->nraddr = __addr;                                   \
+ }
+
+#define VALGRIND_CALL_NOREDIR_R1                                 \
+                    __SPECIAL_INSTRUCTION_PREAMBLE               \
+                    __CALL_NO_REDIR_CODE
+
+#endif /* PLAT_s390x_linux */
 
 /* Insert assembly code for other platforms here... */
 
@@ -582,11 +682,15 @@
 /* Use these to write the name of your wrapper.  NOTE: duplicates
    VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
 
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+   args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
 #define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
-   _vgwZU_##soname##_##fnname
+   VG_CONCAT4(_vgwZU_,soname,_,fnname)
 
 #define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
-   _vgwZZ_##soname##_##fnname
+   VG_CONCAT4(_vgwZZ_,soname,_,fnname)
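A sketch (not part of the patch), loosely following the Valgrind manual's function-wrapping example, of how these naming macros combine with VALGRIND_GET_ORIG_FN and the CALL_FN_W_* helpers defined below; `foo` and its signature are hypothetical, and the soname "NONE" matches objects without a soname (typically the main executable):

    #include <stdio.h>
    #include "valgrind.h"   /* this header */

    /* Wrapper for a hypothetical int foo(int, int) in the main executable. */
    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
    {
        int    result;
        OrigFn fn;
        VALGRIND_GET_ORIG_FN(fn);        /* capture the original foo        */
        printf("foo wrapper: args %d %d\n", x, y);
        CALL_FN_W_WW(result, fn, x, y);  /* call foo without re-wrapping it */
        printf("foo wrapper: result %d\n", result);
        return result;
    }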
 
 /* Use this macro from within a wrapper function to collect the
    context (address and possibly other info) of the original function.
@@ -613,9 +717,25 @@
    do { volatile unsigned long _junk;                             \
         CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
 
-/* ------------------------- x86-linux ------------------------- */
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4)                \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
 
-#if defined(PLAT_x86_linux)
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5)             \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6)        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7)   \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)
 
 /* These regs are trashed by the hidden call.  No need to mention eax
    as gcc can already see that, plus causes gcc to bomb. */
@@ -648,10 +768,11 @@
       _argvec[0] = (unsigned long)_orig.nraddr;                   \
       _argvec[1] = (unsigned long)(arg1);                         \
       __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $4, %%esp\n"                                       \
+         "addl $16, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
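The new subl/addl pairs in these x86 hunks appear to keep %esp 16-byte aligned at the point of the hidden call (plausibly needed now that the header also targets x86-darwin); a worked sketch of the arithmetic, assuming the incoming %esp is 16-byte aligned:

    /* Sketch of the alignment bookkeeping (not part of the patch):
       CALL_FN_W_W:   subl $12   ->  esp -= 12
                      pushl arg  ->  esp -=  4   (16 in total, still aligned)
                      call       ->  ...
                      addl $16   ->  esp += 16   (back to the original esp)
       In general the pad is chosen so that (pad + 4*nargs) % 16 == 0,
       e.g. 5 args: 12 + 20 = 32, matching the "addl $32" further below. */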
@@ -668,11 +789,12 @@
       _argvec[1] = (unsigned long)(arg1);                         \
       _argvec[2] = (unsigned long)(arg2);                         \
       __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
          "pushl 8(%%eax)\n\t"                                     \
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $8, %%esp\n"                                       \
+         "addl $16, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -690,12 +812,13 @@
       _argvec[2] = (unsigned long)(arg2);                         \
       _argvec[3] = (unsigned long)(arg3);                         \
       __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
          "pushl 12(%%eax)\n\t"                                    \
          "pushl 8(%%eax)\n\t"                                     \
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $12, %%esp\n"                                      \
+         "addl $16, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -740,6 +863,7 @@
       _argvec[4] = (unsigned long)(arg4);                         \
       _argvec[5] = (unsigned long)(arg5);                         \
       __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
          "pushl 20(%%eax)\n\t"                                    \
          "pushl 16(%%eax)\n\t"                                    \
          "pushl 12(%%eax)\n\t"                                    \
@@ -747,7 +871,7 @@
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $20, %%esp\n"                                      \
+         "addl $32, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -768,6 +892,7 @@
       _argvec[5] = (unsigned long)(arg5);                         \
       _argvec[6] = (unsigned long)(arg6);                         \
       __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
          "pushl 24(%%eax)\n\t"                                    \
          "pushl 20(%%eax)\n\t"                                    \
          "pushl 16(%%eax)\n\t"                                    \
@@ -776,7 +901,7 @@
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $24, %%esp\n"                                      \
+         "addl $32, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -799,6 +924,7 @@
       _argvec[6] = (unsigned long)(arg6);                         \
       _argvec[7] = (unsigned long)(arg7);                         \
       __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
          "pushl 28(%%eax)\n\t"                                    \
          "pushl 24(%%eax)\n\t"                                    \
          "pushl 20(%%eax)\n\t"                                    \
@@ -808,7 +934,7 @@
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $28, %%esp\n"                                      \
+         "addl $32, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -867,6 +993,7 @@
       _argvec[8] = (unsigned long)(arg8);                         \
       _argvec[9] = (unsigned long)(arg9);                         \
       __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
          "pushl 36(%%eax)\n\t"                                    \
          "pushl 32(%%eax)\n\t"                                    \
          "pushl 28(%%eax)\n\t"                                    \
@@ -878,7 +1005,7 @@
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $36, %%esp\n"                                      \
+         "addl $48, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -904,6 +1031,7 @@
       _argvec[9] = (unsigned long)(arg9);                         \
       _argvec[10] = (unsigned long)(arg10);                       \
       __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
          "pushl 40(%%eax)\n\t"                                    \
          "pushl 36(%%eax)\n\t"                                    \
          "pushl 32(%%eax)\n\t"                                    \
@@ -916,7 +1044,7 @@
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $40, %%esp\n"                                      \
+         "addl $48, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -944,6 +1072,7 @@
       _argvec[10] = (unsigned long)(arg10);                       \
       _argvec[11] = (unsigned long)(arg11);                       \
       __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
          "pushl 44(%%eax)\n\t"                                    \
          "pushl 40(%%eax)\n\t"                                    \
          "pushl 36(%%eax)\n\t"                                    \
@@ -957,7 +1086,7 @@
          "pushl 4(%%eax)\n\t"                                     \
          "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
          VALGRIND_CALL_NOREDIR_EAX                                \
-         "addl $44, %%esp\n"                                      \
+         "addl $48, %%esp\n"                                      \
          : /*out*/   "=a" (_res)                                  \
          : /*in*/    "a" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
@@ -1008,11 +1137,11 @@
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
 
-#endif /* PLAT_x86_linux */
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
 
-/* ------------------------ amd64-linux ------------------------ */
+/* ------------------------ amd64-{linux,darwin} --------------- */
 
-#if defined(PLAT_amd64_linux)
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
 
 /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
 
@@ -1020,6 +1149,78 @@
 #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi",       \
                             "rdi", "r8", "r9", "r10", "r11"
 
+/* This is all pretty complex.  It's so as to make stack unwinding
+   work reliably.  See bug 243270.  The basic problem is the sub and
+   add of 128 of %rsp in all of the following macros.  If gcc believes
+   the CFA is in %rsp, then unwinding may fail, because what's at the
+   CFA is not what gcc "expected" when it constructs the CFIs for the
+   places where the macros are instantiated.
+
+   But we can't just add a CFI annotation to increase the CFA offset
+   by 128, to match the sub of 128 from %rsp, because we don't know
+   whether gcc has chosen %rsp as the CFA at that point, or whether it
+   has chosen some other register (eg, %rbp).  In the latter case,
+   adding a CFI annotation to change the CFA offset is simply wrong.
+
+   So the solution is to get hold of the CFA using
+   __builtin_dwarf_cfa(), put it in a known register, and add a
+   CFI annotation to say what the register is.  We choose %rbp for
+   this (perhaps perversely), because:
+
+   (1) %rbp is already subject to unwinding.  If a new register was
+       chosen then the unwinder would have to unwind it in all stack
+       traces, which is expensive, and
+
+   (2) %rbp is already subject to precise exception updates in the
+       JIT.  If a new register was chosen, we'd have to have precise
+       exceptions for it too, which reduces performance of the
+       generated code.
+
+   However .. one extra complication.  We can't just whack the result
+   of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+   list of trashed registers at the end of the inline assembly
+   fragments; gcc won't allow %rbp to appear in that list.  Hence
+   instead we need to stash %rbp in %r15 for the duration of the asm,
+   and say that %r15 is trashed instead.  gcc seems happy to go with
+   that.
+
+   Oh .. and this all needs to be conditionalised so that it is
+   unchanged from before this commit, when compiled with older gccs
+   that don't support __builtin_dwarf_cfa.  Furthermore, since
+   this header file is freestanding, it has to be independent of
+   config.h, and so the following conditionalisation cannot depend on
+   configure time checks.
+
+   Although it's not clear from
+   'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+   this expression excludes Darwin.
+   .cfi directives in Darwin assembly appear to be completely
+   different and I haven't investigated how they work.
+
+   For even more entertainment value, note we have to use the
+   completely undocumented __builtin_dwarf_cfa(), which appears to
+   really compute the CFA, whereas __builtin_frame_address(0) claims
+   to but actually doesn't.  See
+   https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+#  define __FRAME_POINTER                                         \
+      ,"r"(__builtin_dwarf_cfa())
+#  define VALGRIND_CFI_PROLOGUE                                   \
+      "movq %%rbp, %%r15\n\t"                                     \
+      "movq %2, %%rbp\n\t"                                        \
+      ".cfi_remember_state\n\t"                                   \
+      ".cfi_def_cfa rbp, 0\n\t"
+#  define VALGRIND_CFI_EPILOGUE                                   \
+      "movq %%r15, %%rbp\n\t"                                     \
+      ".cfi_restore_state\n\t"
+#else
+#  define __FRAME_POINTER
+#  define VALGRIND_CFI_PROLOGUE
+#  define VALGRIND_CFI_EPILOGUE
+#endif
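Putting the comment above together with these defines, a sketch (not part of the patch) of the instruction/CFI shape each amd64 CALL_FN_ macro now emits when __GCC_HAVE_DWARF2_CFI_ASM is available:

    /* Sketch only:
         movq %rbp, %r15        ; VALGRIND_CFI_PROLOGUE: park rbp in r15
         movq <dwarf CFA>, %rbp ;   point rbp at the frame's real CFA
         .cfi_remember_state
         .cfi_def_cfa rbp, 0    ;   tell the unwinder the CFA is now in rbp
         subq $128, %rsp        ; step over the amd64 red zone
         ... load args, VALGRIND_CALL_NOREDIR_RAX ...
         addq $128, %rsp
         movq %r15, %rbp        ; VALGRIND_CFI_EPILOGUE: restore rbp
         .cfi_restore_state
       r15 (not rbp) is what appears in the clobber list, for the reason
       given in the comment above. */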
+
+
 /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
    long) == 8. */
 
@@ -1039,7 +1240,7 @@
    redzone, for the duration of the hidden call, to make it safe.
 
    Probably the same problem afflicts the other redzone-style ABIs too
-   (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+   (ppc64-linux); but for those, the stack is
    self describing (none of this CFI nonsense) so at least messing
    with the stack pointer doesn't give a danger of non-unwindable
    stack. */
@@ -1051,13 +1252,15 @@
       volatile unsigned long _res;                                \
       _argvec[0] = (unsigned long)_orig.nraddr;                   \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1070,14 +1273,16 @@
       _argvec[0] = (unsigned long)_orig.nraddr;                   \
       _argvec[1] = (unsigned long)(arg1);                         \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "movq 8(%%rax), %%rdi\n\t"                               \
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1091,15 +1296,17 @@
       _argvec[1] = (unsigned long)(arg1);                         \
       _argvec[2] = (unsigned long)(arg2);                         \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "movq 16(%%rax), %%rsi\n\t"                              \
          "movq 8(%%rax), %%rdi\n\t"                               \
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1114,6 +1321,7 @@
       _argvec[2] = (unsigned long)(arg2);                         \
       _argvec[3] = (unsigned long)(arg3);                         \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "movq 24(%%rax), %%rdx\n\t"                              \
          "movq 16(%%rax), %%rsi\n\t"                              \
@@ -1121,9 +1329,10 @@
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1139,6 +1348,7 @@
       _argvec[3] = (unsigned long)(arg3);                         \
       _argvec[4] = (unsigned long)(arg4);                         \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "movq 32(%%rax), %%rcx\n\t"                              \
          "movq 24(%%rax), %%rdx\n\t"                              \
@@ -1147,9 +1357,10 @@
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1166,6 +1377,7 @@
       _argvec[4] = (unsigned long)(arg4);                         \
       _argvec[5] = (unsigned long)(arg5);                         \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "movq 40(%%rax), %%r8\n\t"                               \
          "movq 32(%%rax), %%rcx\n\t"                              \
@@ -1175,9 +1387,10 @@
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1195,6 +1408,7 @@
       _argvec[5] = (unsigned long)(arg5);                         \
       _argvec[6] = (unsigned long)(arg6);                         \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "movq 48(%%rax), %%r9\n\t"                               \
          "movq 40(%%rax), %%r8\n\t"                               \
@@ -1203,11 +1417,12 @@
          "movq 16(%%rax), %%rsi\n\t"                              \
          "movq 8(%%rax), %%rdi\n\t"                               \
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
-         "addq $128,%%rsp\n\t"                                    \
          VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1227,7 +1442,8 @@
       _argvec[6] = (unsigned long)(arg6);                         \
       _argvec[7] = (unsigned long)(arg7);                         \
       __asm__ volatile(                                           \
-         "subq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
          "pushq 56(%%rax)\n\t"                                    \
          "movq 48(%%rax), %%r9\n\t"                               \
          "movq 40(%%rax), %%r8\n\t"                               \
@@ -1238,10 +1454,11 @@
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $8, %%rsp\n"                                       \
-         "addq $128,%%rsp\n\t"                                    \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1262,6 +1479,7 @@
       _argvec[7] = (unsigned long)(arg7);                         \
       _argvec[8] = (unsigned long)(arg8);                         \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "pushq 64(%%rax)\n\t"                                    \
          "pushq 56(%%rax)\n\t"                                    \
@@ -1275,9 +1493,10 @@
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $16, %%rsp\n"                                      \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1299,7 +1518,8 @@
       _argvec[8] = (unsigned long)(arg8);                         \
       _argvec[9] = (unsigned long)(arg9);                         \
       __asm__ volatile(                                           \
-         "subq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
          "pushq 72(%%rax)\n\t"                                    \
          "pushq 64(%%rax)\n\t"                                    \
          "pushq 56(%%rax)\n\t"                                    \
@@ -1312,10 +1532,11 @@
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $24, %%rsp\n"                                      \
-         "addq $128,%%rsp\n\t"                                    \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1338,6 +1559,7 @@
       _argvec[9] = (unsigned long)(arg9);                         \
       _argvec[10] = (unsigned long)(arg10);                       \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "pushq 80(%%rax)\n\t"                                    \
          "pushq 72(%%rax)\n\t"                                    \
@@ -1353,9 +1575,10 @@
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $32, %%rsp\n"                                      \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1379,7 +1602,8 @@
       _argvec[10] = (unsigned long)(arg10);                       \
       _argvec[11] = (unsigned long)(arg11);                       \
       __asm__ volatile(                                           \
-         "subq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
          "pushq 88(%%rax)\n\t"                                    \
          "pushq 80(%%rax)\n\t"                                    \
          "pushq 72(%%rax)\n\t"                                    \
@@ -1394,10 +1618,11 @@
          "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $40, %%rsp\n"                                      \
-         "addq $128,%%rsp\n\t"                                    \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -1422,6 +1647,7 @@
       _argvec[11] = (unsigned long)(arg11);                       \
       _argvec[12] = (unsigned long)(arg12);                       \
       __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
          "subq $128,%%rsp\n\t"                                    \
          "pushq 96(%%rax)\n\t"                                    \
          "pushq 88(%%rax)\n\t"                                    \
@@ -1439,14 +1665,15 @@
          VALGRIND_CALL_NOREDIR_RAX                                \
          "addq $48, %%rsp\n"                                      \
          "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
          : /*out*/   "=a" (_res)                                  \
-         : /*in*/    "a" (&_argvec[0])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
 
-#endif /* PLAT_amd64_linux */
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
 
 /* ------------------------ ppc32-linux ------------------------ */
 
@@ -2439,54 +2666,28 @@
 
 #endif /* PLAT_ppc64_linux */
 
-/* ------------------------ ppc32-aix5 ------------------------- */
+/* ------------------------- arm-linux ------------------------- */
 
-#if defined(PLAT_ppc32_aix5)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+#if defined(PLAT_arm_linux)
 
 /* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS                                       \
-   "lr", "ctr", "xer",                                            \
-   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
-   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
-   "r11", "r12", "r13"
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3", "r4", "r14"
 
-/* Expand the stack frame, copying enough info that unwinding
-   still works.  Trashes r3. */
-
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
-         "addi 1,1,-" #_n_fr "\n\t"                               \
-         "lwz  3," #_n_fr "(1)\n\t"                               \
-         "stw  3,0(1)\n\t"
-
-#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
-         "addi 1,1," #_n_fr "\n\t"
-
-/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
    long) == 4. */
 
 #define CALL_FN_W_v(lval, orig)                                   \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _argvec[1];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1] = (unsigned long)_orig.r2;                       \
-      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2495,27 +2696,18 @@
 #define CALL_FN_W_W(lval, orig, arg1)                             \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _argvec[2];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory",  __CALLER_SAVED_REGS         \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
@@ -2523,28 +2715,19 @@
 #define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _argvec[3];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2553,30 +2736,21 @@
 #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _argvec[4];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2585,32 +2759,23 @@
 #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _argvec[5];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2619,34 +2784,27 @@
 #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _argvec[6];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t" /* arg2->r4 */                       \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #4 \n\t"                                    \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2655,36 +2813,29 @@
 #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _argvec[7];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #8 \n\t"                                    \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2694,38 +2845,31 @@
                                  arg7)                            \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _argvec[8];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
-         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #12 \n\t"                                   \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2735,40 +2879,33 @@
                                  arg7,arg8)                       \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _argvec[9];                          \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
-         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
-         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "push {r0, r1, r2, r3} \n\t"                             \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #16 \n\t"                                   \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2778,47 +2915,35 @@
                                  arg7,arg8,arg9)                  \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _argvec[10];                         \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
-         /* arg9 */                                               \
-         "lwz 3,36(11)\n\t"                                       \
-         "stw 3,56(1)\n\t"                                        \
-         /* args1-8 */                                            \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
-         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
-         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(64)                                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #20 \n\t"                                   \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
@@ -2828,738 +2953,612 @@
                                   arg7,arg8,arg9,arg10)           \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _argvec[11];                         \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
-      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
-         /* arg10 */                                              \
-         "lwz 3,40(11)\n\t"                                       \
-         "stw 3,60(1)\n\t"                                        \
-         /* arg9 */                                               \
-         "lwz 3,36(11)\n\t"                                       \
-         "stw 3,56(1)\n\t"                                        \
-         /* args1-8 */                                            \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
-         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
-         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(64)                                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #24 \n\t"                                   \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
 
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
-                                  arg7,arg8,arg9,arg10,arg11)     \
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _argvec[12];                         \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
-      _argvec[2+10] = (unsigned long)arg10;                       \
-      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
-         /* arg11 */                                              \
-         "lwz 3,44(11)\n\t"                                       \
-         "stw 3,64(1)\n\t"                                        \
-         /* arg10 */                                              \
-         "lwz 3,40(11)\n\t"                                       \
-         "stw 3,60(1)\n\t"                                        \
-         /* arg9 */                                               \
-         "lwz 3,36(11)\n\t"                                       \
-         "stw 3,56(1)\n\t"                                        \
-         /* args1-8 */                                            \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
-         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
-         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(72)                                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #28 \n\t"                                   \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS           \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
 
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
-                                arg7,arg8,arg9,arg10,arg11,arg12) \
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
    do {                                                           \
       volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _argvec[13];                         \
       volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
-      _argvec[2+10] = (unsigned long)arg10;                       \
-      _argvec[2+11] = (unsigned long)arg11;                       \
-      _argvec[2+12] = (unsigned long)arg12;                       \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
       __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
-         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
-         /* arg12 */                                              \
-         "lwz 3,48(11)\n\t"                                       \
-         "stw 3,68(1)\n\t"                                        \
-         /* arg11 */                                              \
-         "lwz 3,44(11)\n\t"                                       \
-         "stw 3,64(1)\n\t"                                        \
-         /* arg10 */                                              \
-         "lwz 3,40(11)\n\t"                                       \
-         "stw 3,60(1)\n\t"                                        \
-         /* arg9 */                                               \
-         "lwz 3,36(11)\n\t"                                       \
-         "stw 3,56(1)\n\t"                                        \
-         /* args1-8 */                                            \
-         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
-         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
-         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
-         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
-         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
-         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
-         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
-         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
-         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(72)                                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "ldr r2, [%1, #48] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #32 \n\t"                                   \
+         "mov %0, r0"                                             \
          : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
+         : /*in*/    "0" (&_argvec[0])                            \
          : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
       );                                                          \
       lval = (__typeof__(lval)) _res;                             \
    } while (0)
 
-#endif /* PLAT_ppc32_aix5 */
+#endif /* PLAT_arm_linux */
 
-/* ------------------------ ppc64-aix5 ------------------------- */
+/* ------------------------- s390x-linux ------------------------- */
 
-#if defined(PLAT_ppc64_aix5)
+#if defined(PLAT_s390x_linux)
 
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+/* Similar workaround to the amd64 one (see above), but here we use
+   r11 as the frame pointer and save the old r11 in r7. r11 might be
+   used for argvec, so we copy argvec into r1, since r1 is clobbered
+   after the call anyway.  */
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+#  define __FRAME_POINTER                                         \
+      ,"d"(__builtin_dwarf_cfa())
+#  define VALGRIND_CFI_PROLOGUE                                   \
+      ".cfi_remember_state\n\t"                                   \
+      "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */          \
+      "lgr 7,11\n\t"                                              \
+      "lgr 11,%2\n\t"                                             \
+      ".cfi_def_cfa r11, 0\n\t"
+#  define VALGRIND_CFI_EPILOGUE                                   \
+      "lgr 11, 7\n\t"                                             \
+      ".cfi_restore_state\n\t"
+#else
+#  define __FRAME_POINTER
+#  define VALGRIND_CFI_PROLOGUE                                   \
+      "lgr 1,%1\n\t"
+#  define VALGRIND_CFI_EPILOGUE
+#endif
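
To make the conditional above concrete, here is a minimal stand-alone probe, shown only as a hedged illustration and not part of valgrind.h. It exercises the two ingredients the workaround relies on: __builtin_dwarf_cfa(), which supplies the value that __FRAME_POINTER binds as the %2 input operand, and the __GCC_HAVE_DWARF2_CFI_ASM guard that decides whether the .cfi_* variant of the prologue/epilogue is selected.

#include <stdio.h>

int main(void)
{
#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
   /* The canonical frame address of this frame; the CALL_FN_* macros
      below pass this value into the asm so ".cfi_def_cfa r11, 0" stays
      accurate while r11 is temporarily borrowed as a frame pointer. */
   printf("CFA of main: %p\n", __builtin_dwarf_cfa());
#else
   /* Without CFI-capable inline asm, the macros fall back to the
      variant that only copies the argvec pointer into r1. */
   puts("no DWARF2 CFI from inline asm; non-CFI prologue/epilogue used");
#endif
   return 0;
}
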
 
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS                                       \
-   "lr", "ctr", "xer",                                            \
-   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
-   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
-   "r11", "r12", "r13"
 
-/* Expand the stack frame, copying enough info that unwinding
-   still works.  Trashes r3. */
 
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
-         "addi 1,1,-" #_n_fr "\n\t"                               \
-         "ld   3," #_n_fr "(1)\n\t"                               \
-         "std  3,0(1)\n\t"
 
-#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
-         "addi 1,1," #_n_fr "\n\t"
+/* These regs are trashed by the hidden call. Note that we overwrite
+   r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
+   function a proper return address. All others are ABI-defined call
+   clobbers. */
+#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
+                           "f0","f1","f2","f3","f4","f5","f6","f7"
 
-/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
-   long) == 8. */
 
-#define CALL_FN_W_v(lval, orig)                                   \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+0];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1] = (unsigned long)_orig.r2;                       \
-      _argvec[2] = (unsigned long)_orig.nraddr;                   \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_v(lval, orig)                                  \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long  _argvec[1];                        \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-160\n\t"                                      \
+         "lg 1, 0(1)\n\t"  /* target->r1 */                      \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,160\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "d" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7"     \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_W(lval, orig, arg1)                             \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+1];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+/* The call ABI passes the arguments in r2-r6, with the rest on the stack. */
+#define CALL_FN_W_W(lval, orig, arg1)                            \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[2];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-160\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,160\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7"     \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
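
The CALL_FN_W_* macros above are normally reached through Valgrind's function-wrapping layer rather than invoked by hand. A minimal usage sketch follows, assuming the standard wrapping macros from valgrind.h (I_WRAP_SONAME_FNNAME_ZU, VALGRIND_GET_ORIG_FN) and using a strlen wrapper purely as a hypothetical example; on s390x the hidden call it performs is the inline assembly defined here.

#include <stddef.h>
#include "valgrind.h"

/* Hypothetical wrapper for strlen from "libc.so*" (soname encoded as
   libcZdsoZa). CALL_FN_W_W forwards one word-sized argument to the
   original, non-redirected function and captures its word-sized result. */
size_t I_WRAP_SONAME_FNNAME_ZU(libcZdsoZa, strlen)(const char* s)
{
   OrigFn fn;
   unsigned long result;
   VALGRIND_GET_ORIG_FN(fn);                  /* look up the real strlen */
   CALL_FN_W_W(result, fn, (unsigned long)s); /* hidden, non-redirected call */
   return (size_t)result;
}
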
 
-#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+2];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_WW(lval, orig, arg1, arg2)                     \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[3];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-160\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,160\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7"     \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+3];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3)              \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[4];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-160\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,160\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7"     \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+4];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4)       \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[5];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-160\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,160\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7"     \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+5];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5)   \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[6];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-160\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,160\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+6];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5,   \
+                     arg6)                                       \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[7];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      _argvec[6] = (unsigned long)arg6;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-168\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "mvc 160(8,15), 48(1)\n\t"                              \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,168\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
-                                 arg7)                            \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+7];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
-         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5,   \
+                     arg6, arg7)                                 \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[8];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      _argvec[6] = (unsigned long)arg6;                          \
+      _argvec[7] = (unsigned long)arg7;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-176\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "mvc 160(8,15), 48(1)\n\t"                              \
+         "mvc 168(8,15), 56(1)\n\t"                              \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,176\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
-                                 arg7,arg8)                       \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+8];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
-         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
-         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5,   \
+                     arg6, arg7, arg8)                           \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[9];                         \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      _argvec[6] = (unsigned long)arg6;                          \
+      _argvec[7] = (unsigned long)arg7;                          \
+      _argvec[8] = (unsigned long)arg8;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-184\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "mvc 160(8,15), 48(1)\n\t"                              \
+         "mvc 168(8,15), 56(1)\n\t"                              \
+         "mvc 176(8,15), 64(1)\n\t"                              \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,184\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
-                                 arg7,arg8,arg9)                  \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+9];                        \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
-         /* arg9 */                                               \
-         "ld  3,72(11)\n\t"                                       \
-         "std 3,112(1)\n\t"                                       \
-         /* args1-8 */                                            \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
-         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
-         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(128)                                \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5,   \
+                     arg6, arg7, arg8, arg9)                     \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[10];                        \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      _argvec[6] = (unsigned long)arg6;                          \
+      _argvec[7] = (unsigned long)arg7;                          \
+      _argvec[8] = (unsigned long)arg8;                          \
+      _argvec[9] = (unsigned long)arg9;                          \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-192\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "mvc 160(8,15), 48(1)\n\t"                              \
+         "mvc 168(8,15), 56(1)\n\t"                              \
+         "mvc 176(8,15), 64(1)\n\t"                              \
+         "mvc 184(8,15), 72(1)\n\t"                              \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,192\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
-                                  arg7,arg8,arg9,arg10)           \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+10];                       \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
-      _argvec[2+10] = (unsigned long)arg10;                       \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
-         /* arg10 */                                              \
-         "ld  3,80(11)\n\t"                                       \
-         "std 3,120(1)\n\t"                                       \
-         /* arg9 */                                               \
-         "ld  3,72(11)\n\t"                                       \
-         "std 3,112(1)\n\t"                                       \
-         /* args1-8 */                                            \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
-         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
-         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(128)                                \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5,  \
+                     arg6, arg7, arg8, arg9, arg10)              \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[11];                        \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      _argvec[6] = (unsigned long)arg6;                          \
+      _argvec[7] = (unsigned long)arg7;                          \
+      _argvec[8] = (unsigned long)arg8;                          \
+      _argvec[9] = (unsigned long)arg9;                          \
+      _argvec[10] = (unsigned long)arg10;                        \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-200\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "mvc 160(8,15), 48(1)\n\t"                              \
+         "mvc 168(8,15), 56(1)\n\t"                              \
+         "mvc 176(8,15), 64(1)\n\t"                              \
+         "mvc 184(8,15), 72(1)\n\t"                              \
+         "mvc 192(8,15), 80(1)\n\t"                              \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,200\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
-                                  arg7,arg8,arg9,arg10,arg11)     \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+11];                       \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
-      _argvec[2+10] = (unsigned long)arg10;                       \
-      _argvec[2+11] = (unsigned long)arg11;                       \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
-         /* arg11 */                                              \
-         "ld  3,88(11)\n\t"                                       \
-         "std 3,128(1)\n\t"                                       \
-         /* arg10 */                                              \
-         "ld  3,80(11)\n\t"                                       \
-         "std 3,120(1)\n\t"                                       \
-         /* arg9 */                                               \
-         "ld  3,72(11)\n\t"                                       \
-         "std 3,112(1)\n\t"                                       \
-         /* args1-8 */                                            \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
-         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
-         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(144)                                \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5,  \
+                     arg6, arg7, arg8, arg9, arg10, arg11)       \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[12];                        \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      _argvec[6] = (unsigned long)arg6;                          \
+      _argvec[7] = (unsigned long)arg7;                          \
+      _argvec[8] = (unsigned long)arg8;                          \
+      _argvec[9] = (unsigned long)arg9;                          \
+      _argvec[10] = (unsigned long)arg10;                        \
+      _argvec[11] = (unsigned long)arg11;                        \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-208\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "mvc 160(8,15), 48(1)\n\t"                              \
+         "mvc 168(8,15), 56(1)\n\t"                              \
+         "mvc 176(8,15), 64(1)\n\t"                              \
+         "mvc 184(8,15), 72(1)\n\t"                              \
+         "mvc 192(8,15), 80(1)\n\t"                              \
+         "mvc 200(8,15), 88(1)\n\t"                              \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,208\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
-                                arg7,arg8,arg9,arg10,arg11,arg12) \
-   do {                                                           \
-      volatile OrigFn        _orig = (orig);                      \
-      volatile unsigned long _argvec[3+12];                       \
-      volatile unsigned long _res;                                \
-      /* _argvec[0] holds current r2 across the call */           \
-      _argvec[1]   = (unsigned long)_orig.r2;                     \
-      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
-      _argvec[2+1] = (unsigned long)arg1;                         \
-      _argvec[2+2] = (unsigned long)arg2;                         \
-      _argvec[2+3] = (unsigned long)arg3;                         \
-      _argvec[2+4] = (unsigned long)arg4;                         \
-      _argvec[2+5] = (unsigned long)arg5;                         \
-      _argvec[2+6] = (unsigned long)arg6;                         \
-      _argvec[2+7] = (unsigned long)arg7;                         \
-      _argvec[2+8] = (unsigned long)arg8;                         \
-      _argvec[2+9] = (unsigned long)arg9;                         \
-      _argvec[2+10] = (unsigned long)arg10;                       \
-      _argvec[2+11] = (unsigned long)arg11;                       \
-      _argvec[2+12] = (unsigned long)arg12;                       \
-      __asm__ volatile(                                           \
-         "mr 11,%1\n\t"                                           \
-         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
-         "std  2,-16(11)\n\t" /* save tocptr */                   \
-         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
-         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
-         /* arg12 */                                              \
-         "ld  3,96(11)\n\t"                                       \
-         "std 3,136(1)\n\t"                                       \
-         /* arg11 */                                              \
-         "ld  3,88(11)\n\t"                                       \
-         "std 3,128(1)\n\t"                                       \
-         /* arg10 */                                              \
-         "ld  3,80(11)\n\t"                                       \
-         "std 3,120(1)\n\t"                                       \
-         /* arg9 */                                               \
-         "ld  3,72(11)\n\t"                                       \
-         "std 3,112(1)\n\t"                                       \
-         /* args1-8 */                                            \
-         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
-         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
-         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
-         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
-         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
-         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
-         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
-         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
-         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
-         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
-         "mr 11,%1\n\t"                                           \
-         "mr %0,3\n\t"                                            \
-         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
-         VG_CONTRACT_FRAME_BY(144)                                \
-         VG_CONTRACT_FRAME_BY(512)                                \
-         : /*out*/   "=r" (_res)                                  \
-         : /*in*/    "r" (&_argvec[2])                            \
-         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
-      );                                                          \
-      lval = (__typeof__(lval)) _res;                             \
+#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5,  \
+                     arg6, arg7, arg8, arg9, arg10, arg11, arg12)\
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[13];                        \
+      volatile unsigned long _res;                               \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)arg1;                          \
+      _argvec[2] = (unsigned long)arg2;                          \
+      _argvec[3] = (unsigned long)arg3;                          \
+      _argvec[4] = (unsigned long)arg4;                          \
+      _argvec[5] = (unsigned long)arg5;                          \
+      _argvec[6] = (unsigned long)arg6;                          \
+      _argvec[7] = (unsigned long)arg7;                          \
+      _argvec[8] = (unsigned long)arg8;                          \
+      _argvec[9] = (unsigned long)arg9;                          \
+      _argvec[10] = (unsigned long)arg10;                        \
+      _argvec[11] = (unsigned long)arg11;                        \
+      _argvec[12] = (unsigned long)arg12;                        \
+      __asm__ volatile(                                          \
+         VALGRIND_CFI_PROLOGUE                                   \
+         "aghi 15,-216\n\t"                                      \
+         "lg 2, 8(1)\n\t"                                        \
+         "lg 3,16(1)\n\t"                                        \
+         "lg 4,24(1)\n\t"                                        \
+         "lg 5,32(1)\n\t"                                        \
+         "lg 6,40(1)\n\t"                                        \
+         "mvc 160(8,15), 48(1)\n\t"                              \
+         "mvc 168(8,15), 56(1)\n\t"                              \
+         "mvc 176(8,15), 64(1)\n\t"                              \
+         "mvc 184(8,15), 72(1)\n\t"                              \
+         "mvc 192(8,15), 80(1)\n\t"                              \
+         "mvc 200(8,15), 88(1)\n\t"                              \
+         "mvc 208(8,15), 96(1)\n\t"                              \
+         "lg 1, 0(1)\n\t"                                        \
+         VALGRIND_CALL_NOREDIR_R1                                \
+         "lgr %0, 2\n\t"                                         \
+         "aghi 15,216\n\t"                                       \
+         VALGRIND_CFI_EPILOGUE                                   \
+         : /*out*/   "=d" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
    } while (0)
 
-#endif /* PLAT_ppc64_aix5 */
+
+#endif /* PLAT_s390x_linux */
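
These CALL_FN_W_* variants are normally reached through valgrind.h's function-wrapping layer rather than called directly. A hedged sketch, assuming the I_WRAP_SONAME_FNNAME_ZU / VALGRIND_GET_ORIG_FN machinery defined elsewhere in this header and a hypothetical libfoo.so exporting foo():

#include <stddef.h>
#include "valgrind.h"

/* Hypothetical wrapper for foo() in "libfoo.so*"; the soname is Z-encoded
   (Zd = '.', Za = '*').  CALL_FN_W_W is the one-word sibling of the
   CALL_FN_W_10W/11W/12W macros above. */
void* I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZa, foo)(size_t n)
{
   void*  result;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);      /* fetch the address of the real foo() */
   CALL_FN_W_W(result, fn, n);    /* call it, bypassing redirection */
   return result;
}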
 
 
 /* ------------------------------------------------------------------ */
@@ -3605,9 +3604,14 @@
              errors. */
           VG_USERREQ__COUNT_ERRORS = 0x1201,
 
+          /* Allows a string (gdb monitor command) to be passed to the tool.
+             Used for interaction with vgdb/gdb */
+          VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
+
           /* These are useful and can be interpreted by any tool that
              tracks malloc() et al, by using vg_replace_malloc.c. */
           VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+          VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
           VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
           /* Memory pool support. */
           VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
@@ -3620,30 +3624,43 @@
           VG_USERREQ__MEMPOOL_EXISTS   = 0x130a,
 
           /* Allow printfs to valgrind log. */
+          /* The first two pass the va_list argument by value, which
+             assumes it is the same size as or smaller than a UWord,
+             which generally isn't the case.  Hence they are deprecated.
+             The second two pass the vargs by reference and so are
+             immune to this problem. */
+          /* both :: char* fmt, va_list vargs (DEPRECATED) */
           VG_USERREQ__PRINTF           = 0x1401,
           VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+          /* both :: char* fmt, va_list* vargs */
+          VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+          VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
 
           /* Stack support. */
           VG_USERREQ__STACK_REGISTER   = 0x1501,
           VG_USERREQ__STACK_DEREGISTER = 0x1502,
-          VG_USERREQ__STACK_CHANGE     = 0x1503
+          VG_USERREQ__STACK_CHANGE     = 0x1503,
+
+          /* Wine support */
+          VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+          /* Querying of debug info. */
+          VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
    } Vg_ClientRequest;
 
 #if !defined(__GNUC__)
 #  define __extension__ /* */
 #endif
 
+
 /* Returns the number of Valgrinds this code is running under.  That
    is, 0 if running natively, 1 if running under Valgrind, 2 if
    running under Valgrind which is running under another Valgrind,
    etc. */
-#define RUNNING_ON_VALGRIND  __extension__                        \
-   ({unsigned int _qzz_res;                                       \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */,          \
-                               VG_USERREQ__RUNNING_ON_VALGRIND,   \
-                               0, 0, 0, 0, 0);                    \
-    _qzz_res;                                                     \
-   })
+#define RUNNING_ON_VALGRIND                                           \
+    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */,         \
+                                    VG_USERREQ__RUNNING_ON_VALGRIND,  \
+                                    0, 0, 0, 0, 0)                    \
 
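
As a minimal usage sketch (nothing beyond this header is assumed), client code typically gates Valgrind-specific behaviour on this request:

#include <stdio.h>
#include "valgrind.h"

int main(void)
{
   unsigned depth = RUNNING_ON_VALGRIND;   /* 0 natively, >0 under Valgrind */
   if (depth > 0)
      fprintf(stderr, "running under Valgrind (nesting depth %u)\n", depth);
   return 0;
}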
 
 /* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
@@ -3651,56 +3668,93 @@
    since it provides a way to make sure valgrind will retranslate the
    invalidated area.  Returns no value. */
 #define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)         \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                  \
                                VG_USERREQ__DISCARD_TRANSLATIONS,  \
-                               _qzz_addr, _qzz_len, 0, 0, 0);     \
-   }
+                               _qzz_addr, _qzz_len, 0, 0, 0)
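
A hedged sketch of the intended use (jit_buf and jit_patch() are hypothetical): a JIT that rewrites already-executed code pairs each rewrite with this request so stale translations are dropped:

#include <stddef.h>
#include <string.h>
#include "valgrind.h"

extern unsigned char jit_buf[];      /* assumed executable code buffer */

void jit_patch(size_t offset, const void* code, size_t len)
{
   memcpy(jit_buf + offset, code, len);                  /* rewrite the code */
   VALGRIND_DISCARD_TRANSLATIONS(jit_buf + offset, len); /* drop old translations */
}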
 
 
 /* These requests are for getting Valgrind itself to print something.
-   Possibly with a backtrace.  This is a really ugly hack. */
+   Possibly with a backtrace.  This is a really ugly hack.  The return value
+   is the number of characters printed, excluding the "**<pid>** " part at the
+   start and the backtrace (if present). */
 
-#if defined(NVALGRIND)
-
-#  define VALGRIND_PRINTF(...)
-#  define VALGRIND_PRINTF_BACKTRACE(...)
-
-#else /* NVALGRIND */
-
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
 /* Modern GCC will optimize the static routine out if unused,
    and unused attribute will shut down warnings about it.  */
 static int VALGRIND_PRINTF(const char *format, ...)
    __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
 static int
+#if defined(_MSC_VER)
+__inline
+#endif
 VALGRIND_PRINTF(const char *format, ...)
 {
+#if defined(NVALGRIND)
+   return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER)
+   uintptr_t _qzz_res;
+#else
    unsigned long _qzz_res;
+#endif
    va_list vargs;
    va_start(vargs, format);
-   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
-                              (unsigned long)format, (unsigned long)vargs, 
+#if defined(_MSC_VER)
+   _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
                               0, 0, 0);
+#else
+   _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
    va_end(vargs);
    return (int)_qzz_res;
+#endif /* NVALGRIND */
 }
 
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
 static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
    __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
 static int
+#if defined(_MSC_VER)
+__inline
+#endif
 VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
 {
+#if defined(NVALGRIND)
+   return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER)
+   uintptr_t _qzz_res;
+#else
    unsigned long _qzz_res;
+#endif
    va_list vargs;
    va_start(vargs, format);
-   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
-                              (unsigned long)format, (unsigned long)vargs, 
+#if defined(_MSC_VER)
+   _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
                               0, 0, 0);
+#else
+   _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
    va_end(vargs);
    return (int)_qzz_res;
-}
-
 #endif /* NVALGRIND */
+}
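
A short usage sketch (the phase numbers are invented); both calls behave like printf and do nothing when not running under Valgrind:

#include "valgrind.h"

static void log_phase(int phase)
{
   /* Goes to Valgrind's log when running under Valgrind; no-op natively. */
   VALGRIND_PRINTF("entering phase %d\n", phase);
   if (phase < 0)
      VALGRIND_PRINTF_BACKTRACE("bad phase %d, backtrace follows:\n", phase);
}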
 
 
 /* These requests allow control to move from the simulated CPU to the
@@ -3727,199 +3781,253 @@
    with a lot in the past.
 */
 #define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
-   __extension__                                                  \
-   ({unsigned long _qyy_res;                                      \
-    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
-                               VG_USERREQ__CLIENT_CALL0,          \
-                               _qyy_fn,                           \
-                               0, 0, 0, 0);                       \
-    _qyy_res;                                                     \
-   })
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,       \
+                                    VG_USERREQ__CLIENT_CALL0,     \
+                                    _qyy_fn,                      \
+                                    0, 0, 0, 0)
 
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
-   __extension__                                                  \
-   ({unsigned long _qyy_res;                                      \
-    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
-                               VG_USERREQ__CLIENT_CALL1,          \
-                               _qyy_fn,                           \
-                               _qyy_arg1, 0, 0, 0);               \
-    _qyy_res;                                                     \
-   })
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)                    \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,            \
+                                    VG_USERREQ__CLIENT_CALL1,          \
+                                    _qyy_fn,                           \
+                                    _qyy_arg1, 0, 0, 0)
 
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
-   __extension__                                                  \
-   ({unsigned long _qyy_res;                                      \
-    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
-                               VG_USERREQ__CLIENT_CALL2,          \
-                               _qyy_fn,                           \
-                               _qyy_arg1, _qyy_arg2, 0, 0);       \
-    _qyy_res;                                                     \
-   })
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)         \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,            \
+                                    VG_USERREQ__CLIENT_CALL2,          \
+                                    _qyy_fn,                           \
+                                    _qyy_arg1, _qyy_arg2, 0, 0)
 
 #define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
-   __extension__                                                  \
-   ({unsigned long _qyy_res;                                      \
-    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
-                               VG_USERREQ__CLIENT_CALL3,          \
-                               _qyy_fn,                           \
-                               _qyy_arg1, _qyy_arg2,              \
-                               _qyy_arg3, 0);                     \
-    _qyy_res;                                                     \
-   })
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,             \
+                                    VG_USERREQ__CLIENT_CALL3,           \
+                                    _qyy_fn,                            \
+                                    _qyy_arg1, _qyy_arg2,               \
+                                    _qyy_arg3, 0)
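
A hedged usage sketch: per the header's fuller description of these calls (not shown in this hunk), the callee receives the current ThreadId as an extra first argument; native_work() here is hypothetical:

#include "valgrind.h"

/* Runs on the real CPU when invoked via VALGRIND_NON_SIMD_CALL1. */
static long native_work(long tid, long payload)
{
   (void)tid;
   /* ... work that must not run on the simulated CPU ... */
   return payload * 2;
}

long run_natively(long payload)
{
   return (long)VALGRIND_NON_SIMD_CALL1(native_work, payload);
}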
 
 
 /* Counts the number of errors that have been recorded by a tool.  Nb:
    the tool must record the errors with VG_(maybe_record_error)() or
    VG_(unique_error)() for them to be counted. */
 #define VALGRIND_COUNT_ERRORS                                     \
-   __extension__                                                  \
-   ({unsigned int _qyy_res;                                       \
-    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(                    \
+                               0 /* default return */,            \
                                VG_USERREQ__COUNT_ERRORS,          \
-                               0, 0, 0, 0, 0);                    \
-    _qyy_res;                                                     \
-   })
+                               0, 0, 0, 0, 0)
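
For instance (a sketch; run_step() is a hypothetical test body), a harness can check whether one step introduced new tool errors:

#include "valgrind.h"

extern void run_step(void);   /* hypothetical code under test */

unsigned errors_added_by_step(void)
{
   unsigned before = VALGRIND_COUNT_ERRORS;
   run_step();
   return VALGRIND_COUNT_ERRORS - before;
}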
 
-/* Mark a block of memory as having been allocated by a malloc()-like
-   function.  `addr' is the start of the usable block (ie. after any
-   redzone) `rzB' is redzone size if the allocator can apply redzones;
-   use '0' if not.  Adding redzones makes it more likely Valgrind will spot
-   block overruns.  `is_zeroed' indicates if the memory is zeroed, as it is
-   for calloc().  Put it immediately after the point where a block is
-   allocated. 
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+   when heap blocks are allocated in order to give accurate results.  This
+   happens automatically for the standard allocator functions such as
+   malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+   delete[], etc.
+
+   But if your program uses a custom allocator, this doesn't automatically
+   happen, and Valgrind will not do as well.  For example, if you allocate
+   superblocks with mmap() and then allocate chunks of the superblocks, all
+   Valgrind's observations will be at the mmap() level and it won't know that
+   the chunks should be considered separate entities.  In Memcheck's case,
+   that means you probably won't get heap block overrun detection (because
+   there won't be redzones marked as unaddressable) and you definitely won't
+   get any leak detection.
+
+   The following client requests allow a custom allocator to be annotated so
+   that it can be handled accurately by Valgrind.
+
+   VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+   by a malloc()-like function.  For Memcheck (an illustrative case), this
+   does two things:
+
+   - It records that the block has been allocated.  This means any addresses
+     within the block mentioned in error messages will be
+     identified as belonging to the block.  It also means that if the block
+     isn't freed it will be detected by the leak checker.
+
+   - It marks the block as being addressable and undefined (if 'is_zeroed' is
+     not set), or addressable and defined (if 'is_zeroed' is set).  This
+     controls how accesses to the block by the program are handled.
    
-   If you're using Memcheck: If you're allocating memory via superblocks,
-   and then handing out small chunks of each superblock, if you don't have
-   redzones on your small blocks, it's worth marking the superblock with
-   VALGRIND_MAKE_MEM_NOACCESS when it's created, so that block overruns are
-   detected.  But if you can put redzones on, it's probably better to not do
-   this, so that messages for small overruns are described in terms of the
-   small block rather than the superblock (but if you have a big overrun
-   that skips over a redzone, you could miss an error this way).  See
-   memcheck/tests/custom_alloc.c for an example.
+   'addr' is the start of the usable block (ie. after any
+   redzone), 'sizeB' is its size.  'rzB' is the redzone size if the allocator
+   can apply redzones -- these are blocks of padding at the start and end of
+   each block.  Adding redzones is recommended as it makes it much more likely
+   Valgrind will spot block overruns.  `is_zeroed' indicates if the memory is
+   zeroed (or filled with another predictable value), as is the case for
+   calloc().
+   
+   VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+   heap block -- that will be used by the client program -- is allocated.
+   It's best to put it at the outermost level of the allocator if possible;
+   for example, if you have a function my_alloc() which calls
+   internal_alloc(), and the client request is put inside internal_alloc(),
+   stack traces relating to the heap block will contain entries for both
+   my_alloc() and internal_alloc(), which is probably not what you want.
 
-   WARNING: if your allocator uses malloc() or 'new' to allocate
-   superblocks, rather than mmap() or brk(), this will not work properly --
-   you'll likely get assertion failures during leak detection.  This is
-   because Valgrind doesn't like seeing overlapping heap blocks.  Sorry.
+   For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+   custom blocks from within a heap block, B, that has been allocated with
+   malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+   -- the custom blocks will take precedence.
 
-   Nb: block must be freed via a free()-like function specified
-   with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
+   VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK.  For
+   Memcheck, it does two things:
+
+   - It records that the block has been deallocated.  This assumes that the
+     block was annotated as having been allocated via
+     VALGRIND_MALLOCLIKE_BLOCK.  Otherwise, an error will be issued.
+
+   - It marks the block as being unaddressable.
+
+   VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+   heap block is deallocated.
+
+   VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
+   Memcheck, it does four things:
+
+   - It records that the size of a block has been changed.  This assumes that
+     the block was annotated as having been allocated via
+     VALGRIND_MALLOCLIKE_BLOCK.  Otherwise, an error will be issued.
+
+   - If the block shrunk, it marks the freed memory as being unaddressable.
+
+   - If the block grew, it marks the new area as undefined and defines a red
+     zone past the end of the new block.
+
+   - The V-bits of the overlap between the old and the new block are preserved.
+
+   VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
+   and before deallocation of the old block.
+
+   In many cases, these three client requests will not be enough to get your
+   allocator working well with Memcheck.  More specifically, if your allocator
+   writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+   will be necessary to mark the memory as addressable just before the zeroing
+   occurs, otherwise you'll get a lot of invalid write errors.  For example,
+   you'll need to do this if your allocator recycles freed blocks, but it
+   zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+   Alternatively, if your allocator reuses freed blocks for allocator-internal
+   data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+   Really, what's happening is a blurring of the lines between the client
+   program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+   memory should be considered unaddressable to the client program, but the
+   allocator knows more than the rest of the client program and so may be able
+   to safely access it.  Extra client requests are necessary for Valgrind to
+   understand the distinction between the allocator and the rest of the
+   program.
+
+   Ignored if addr == 0.
+*/
 #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)    \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__MALLOCLIKE_BLOCK,      \
-                               addr, sizeB, rzB, is_zeroed, 0);   \
-   }
+                               addr, sizeB, rzB, is_zeroed, 0)
 
-/* Mark a block of memory as having been freed by a free()-like function.
-   `rzB' is redzone size;  it must match that given to
-   VALGRIND_MALLOCLIKE_BLOCK.  Memory not freed will be detected by the leak
-   checker.  Put it immediately after the point where the block is freed. */
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+   Ignored if addr == 0.
+*/
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
+                               VG_USERREQ__RESIZEINPLACE_BLOCK,   \
+                               addr, oldSizeB, newSizeB, rzB, 0)
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+   Ignored if addr == 0.
+*/
 #define VALGRIND_FREELIKE_BLOCK(addr, rzB)                        \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__FREELIKE_BLOCK,        \
-                               addr, rzB, 0, 0, 0);               \
-   }
+                               addr, rzB, 0, 0, 0)
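
A compact sketch of the annotation pattern described above, assuming a hypothetical arena allocator (arena_alloc/arena_free) and a fixed redzone size:

#include <stdlib.h>
#include "valgrind.h"

#define RZ 16   /* redzone size this allocator promises on each side */

/* arena_alloc()/arena_free() are hypothetical stand-ins for the raw
   superblock allocator (e.g. something mmap()-based). */
extern void* arena_alloc(size_t n);
extern void  arena_free(void* p, size_t n);

void* my_alloc(size_t n)
{
   char* raw = arena_alloc(n + 2 * RZ);
   if (raw == NULL) return NULL;
   VALGRIND_MALLOCLIKE_BLOCK(raw + RZ, n, RZ, /*is_zeroed*/0);
   return raw + RZ;
}

void my_free(void* p, size_t n)
{
   if (p == NULL) return;
   VALGRIND_FREELIKE_BLOCK(p, RZ);
   arena_free((char*)p - RZ, n + 2 * RZ);
}

/* Only correct if the underlying block really was resized in place. */
void my_resize_inplace(void* p, size_t old_n, size_t new_n)
{
   VALGRIND_RESIZEINPLACE_BLOCK(p, old_n, new_n, RZ);
}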
 
 /* Create a memory pool. */
 #define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)             \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__CREATE_MEMPOOL,        \
-                               pool, rzB, is_zeroed, 0, 0);       \
-   }
+                               pool, rzB, is_zeroed, 0, 0)
 
 /* Destroy a memory pool. */
 #define VALGRIND_DESTROY_MEMPOOL(pool)                            \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__DESTROY_MEMPOOL,       \
-                               pool, 0, 0, 0, 0);                 \
-   }
+                               pool, 0, 0, 0, 0)
 
 /* Associate a piece of memory with a memory pool. */
 #define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                  \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__MEMPOOL_ALLOC,         \
-                               pool, addr, size, 0, 0);           \
-   }
+                               pool, addr, size, 0, 0)
 
 /* Disassociate a piece of memory from a memory pool. */
 #define VALGRIND_MEMPOOL_FREE(pool, addr)                         \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__MEMPOOL_FREE,          \
-                               pool, addr, 0, 0, 0);              \
-   }
+                               pool, addr, 0, 0, 0)
 
 /* Disassociate any pieces outside a particular range. */
 #define VALGRIND_MEMPOOL_TRIM(pool, addr, size)                   \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__MEMPOOL_TRIM,          \
-                               pool, addr, size, 0, 0);           \
-   }
+                               pool, addr, size, 0, 0)
 
 /* Resize and/or move a piece associated with a memory pool. */
 #define VALGRIND_MOVE_MEMPOOL(poolA, poolB)                       \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__MOVE_MEMPOOL,          \
-                               poolA, poolB, 0, 0, 0);            \
-   }
+                               poolA, poolB, 0, 0, 0)
 
 /* Resize and/or move a piece associated with a memory pool. */
 #define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size)         \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__MEMPOOL_CHANGE,        \
-                               pool, addrA, addrB, size, 0);      \
-   }
+                               pool, addrA, addrB, size, 0)
 
 /* Return 1 if a mempool exists, else 0. */
 #define VALGRIND_MEMPOOL_EXISTS(pool)                             \
-   ({unsigned int _qzz_res;                                       \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                  \
                                VG_USERREQ__MEMPOOL_EXISTS,        \
-                               pool, 0, 0, 0, 0);                 \
-    _qzz_res;                                                     \
-   })
+                               pool, 0, 0, 0, 0)
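
A minimal sketch of the mempool requests, assuming a toy bump allocator (all names hypothetical):

#include <stdlib.h>
#include "valgrind.h"

/* pool_base doubles as the anchor address that identifies the pool. */
static char*  pool_base;
static size_t pool_used;

void pool_init(size_t total)
{
   pool_base = malloc(total);
   pool_used = 0;
   VALGRIND_CREATE_MEMPOOL(pool_base, /*rzB*/0, /*is_zeroed*/0);
}

void* pool_alloc(size_t n)
{
   void* p = pool_base + pool_used;
   pool_used += n;
   VALGRIND_MEMPOOL_ALLOC(pool_base, p, n);   /* associate piece with pool */
   return p;
}

void pool_destroy(void)
{
   VALGRIND_DESTROY_MEMPOOL(pool_base);
   free(pool_base);
}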
 
 /* Mark a piece of memory as being a stack. Returns a stack id. */
 #define VALGRIND_STACK_REGISTER(start, end)                       \
-   ({unsigned int _qzz_res;                                       \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                  \
                                VG_USERREQ__STACK_REGISTER,        \
-                               start, end, 0, 0, 0);              \
-    _qzz_res;                                                     \
-   })
+                               start, end, 0, 0, 0)
 
 /* Unmark the piece of memory associated with a stack id as being a
    stack. */
 #define VALGRIND_STACK_DEREGISTER(id)                             \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                  \
                                VG_USERREQ__STACK_DEREGISTER,      \
-                               id, 0, 0, 0, 0);                   \
-   }
+                               id, 0, 0, 0, 0)
 
 /* Change the start and end address of the stack id. */
 #define VALGRIND_STACK_CHANGE(id, start, end)                     \
-   {unsigned int _qzz_res;                                        \
-    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                                VG_USERREQ__STACK_CHANGE,          \
-                               id, start, end, 0, 0);             \
-   }
+                               id, start, end, 0, 0)
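
A brief sketch of the stack requests as used by coroutine/fiber libraries (the co_stack type and surrounding machinery are hypothetical):

#include <stdlib.h>
#include "valgrind.h"

typedef struct {
   char*    mem;
   unsigned vg_id;
} co_stack;

co_stack co_stack_new(size_t size)
{
   co_stack s;
   s.mem   = malloc(size);
   s.vg_id = VALGRIND_STACK_REGISTER(s.mem, s.mem + size);  /* start, end */
   return s;
}

void co_stack_delete(co_stack* s)
{
   VALGRIND_STACK_DEREGISTER(s->vg_id);
   free(s->mem);
}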
+
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta)   \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
+                               VG_USERREQ__LOAD_PDB_DEBUGINFO,    \
+                               fd, ptr, total_size, delta, 0)
+
+/* Map a code address to a source file name and line number.  buf64
+   must point to a 64-byte buffer in the caller's address space.  The
+   result will be dumped in there and is guaranteed to be zero
+   terminated.  If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64)                    \
+    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                  \
+                               VG_USERREQ__MAP_IP_TO_SRCLOC,      \
+                               addr, buf64, 0, 0, 0)
 
 
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
 #undef PLAT_x86_linux
 #undef PLAT_amd64_linux
 #undef PLAT_ppc32_linux
 #undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
+#undef PLAT_arm_linux
+#undef PLAT_s390x_linux
 
 #endif   /* __VALGRIND_H */
diff --git a/src/token.cc b/src/token.cc
index feca7be..7ba7ed3 100644
--- a/src/token.cc
+++ b/src/token.cc
@@ -33,21 +33,21 @@
 
 #define T(name, string, precedence) #name,
 const char* const Token::name_[NUM_TOKENS] = {
-  TOKEN_LIST(T, T, IGNORE_TOKEN)
+  TOKEN_LIST(T, T)
 };
 #undef T
 
 
 #define T(name, string, precedence) string,
 const char* const Token::string_[NUM_TOKENS] = {
-  TOKEN_LIST(T, T, IGNORE_TOKEN)
+  TOKEN_LIST(T, T)
 };
 #undef T
 
 
 #define T(name, string, precedence) precedence,
 const int8_t Token::precedence_[NUM_TOKENS] = {
-  TOKEN_LIST(T, T, IGNORE_TOKEN)
+  TOKEN_LIST(T, T)
 };
 #undef T
 
@@ -55,7 +55,7 @@
 #define KT(a, b, c) 'T',
 #define KK(a, b, c) 'K',
 const char Token::token_type[] = {
-  TOKEN_LIST(KT, KK, IGNORE_TOKEN)
+  TOKEN_LIST(KT, KK)
 };
 #undef KT
 #undef KK
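
The TOKEN_LIST(T, K) construct above is a standard X-macro: the token list is written once and each expansion site supplies macros that extract the column it needs (name, string, or precedence). A self-contained sketch of the idiom, using a hypothetical COLOR_LIST rather than V8's list:

#include <stdio.h>

/* Hypothetical list; each expansion site defines T(name, string) to pick
   out the column it needs, exactly as token.cc does with TOKEN_LIST. */
#define COLOR_LIST(T) \
   T(RED,   "red")    \
   T(GREEN, "green")  \
   T(BLUE,  "blue")

#define T(name, string) name,
enum Color { COLOR_LIST(T) NUM_COLORS };
#undef T

#define T(name, string) string,
static const char* const color_names[NUM_COLORS] = { COLOR_LIST(T) };
#undef T

int main(void)
{
   printf("%d colors, first is %s\n", (int)NUM_COLORS, color_names[RED]);
   return 0;
}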
diff --git a/src/token.h b/src/token.h
index a0afbc1..eb825c1 100644
--- a/src/token.h
+++ b/src/token.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,7 +41,6 @@
 //
 //   T: Non-keyword tokens
 //   K: Keyword tokens
-//   F: Future (reserved) keyword tokens
 
 // IGNORE_TOKEN is a convenience macro that can be supplied as
 // an argument (at any position) for a TOKEN_LIST call. It does
@@ -49,7 +48,7 @@
 
 #define IGNORE_TOKEN(name, string, precedence)
 
-#define TOKEN_LIST(T, K, F)                                             \
+#define TOKEN_LIST(T, K)                                                \
   /* End of source indicator. */                                        \
   T(EOS, "EOS", 0)                                                      \
                                                                         \
@@ -72,6 +71,7 @@
   /* this block of enum values being contiguous and sorted in the */    \
   /* same order! */                                                     \
   T(INIT_VAR, "=init_var", 2)  /* AST-use only. */                      \
+  T(INIT_LET, "=init_let", 2)  /* AST-use only. */                      \
   T(INIT_CONST, "=init_const", 2)  /* AST-use only. */                  \
   T(ASSIGN, "=", 2)                                                     \
   T(ASSIGN_BIT_OR, "|=", 2)                                             \
@@ -167,8 +167,9 @@
                                                                         \
   /* Future reserved words (ECMA-262, section 7.6.1.2). */              \
   T(FUTURE_RESERVED_WORD, NULL, 0)                                      \
+  T(FUTURE_STRICT_RESERVED_WORD, NULL, 0)                               \
   K(CONST, "const", 0)                                                  \
-  K(NATIVE, "native", 0)                                                \
+  K(LET, "let", 0)                                                      \
                                                                         \
   /* Illegal token - not able to scan. */                               \
   T(ILLEGAL, "ILLEGAL", 0)                                              \
@@ -182,7 +183,7 @@
   // All token values.
 #define T(name, string, precedence) name,
   enum Value {
-    TOKEN_LIST(T, T, IGNORE_TOKEN)
+    TOKEN_LIST(T, T)
     NUM_TOKENS
   };
 #undef T
diff --git a/src/top.cc b/src/top.cc
deleted file mode 100644
index b9207c8..0000000
--- a/src/top.cc
+++ /dev/null
@@ -1,983 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "messages.h"
-#include "platform.h"
-#include "simulator.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
-
-
-// TODO(isolates): move to isolate.cc. This stuff is kept here to
-// simplify merging.
-
-namespace v8 {
-namespace internal {
-
-ThreadLocalTop::ThreadLocalTop() {
-  InitializeInternal();
-  // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
-  // before an isolate is initialized. The initialize methods below do
-  // not touch it to preserve its value.
-  ignore_out_of_memory_ = false;
-}
-
-
-void ThreadLocalTop::InitializeInternal() {
-  c_entry_fp_ = 0;
-  handler_ = 0;
-#ifdef USE_SIMULATOR
-  simulator_ = NULL;
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  js_entry_sp_ = NULL;
-  external_callback_ = NULL;
-#endif
-#ifdef ENABLE_VMSTATE_TRACKING
-  current_vm_state_ = EXTERNAL;
-#endif
-  try_catch_handler_address_ = NULL;
-  context_ = NULL;
-  thread_id_ = ThreadId::Invalid();
-  external_caught_exception_ = false;
-  failed_access_check_callback_ = NULL;
-  save_context_ = NULL;
-  catcher_ = NULL;
-}
-
-
-void ThreadLocalTop::Initialize() {
-  InitializeInternal();
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
-  simulator_ = Simulator::current(Isolate::Current());
-#elif V8_TARGET_ARCH_MIPS
-  simulator_ = Simulator::current(Isolate::Current());
-#endif
-#endif
-  thread_id_ = ThreadId::Current();
-}
-
-
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
-  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
-}
-
-
-Address Isolate::get_address_from_id(Isolate::AddressId id) {
-  return isolate_addresses_[id];
-}
-
-
-char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
-  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
-  Iterate(v, thread);
-  return thread_storage + sizeof(ThreadLocalTop);
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v) {
-  v->VisitThread(this, thread_local_top());
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v, char* t) {
-  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
-  v->VisitThread(this, thread);
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
-  // Visit the roots from the top for a given thread.
-  Object* pending;
-  // The pending exception can sometimes be a failure.  We can't show
-  // that to the GC, which only understands objects.
-  if (thread->pending_exception_->ToObject(&pending)) {
-    v->VisitPointer(&pending);
-    thread->pending_exception_ = pending;  // In case GC updated it.
-  }
-  v->VisitPointer(&(thread->pending_message_obj_));
-  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
-  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
-  Object* scheduled;
-  if (thread->scheduled_exception_->ToObject(&scheduled)) {
-    v->VisitPointer(&scheduled);
-    thread->scheduled_exception_ = scheduled;
-  }
-
-  for (v8::TryCatch* block = thread->TryCatchHandler();
-       block != NULL;
-       block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
-    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
-    v->VisitPointer(BitCast<Object**>(&(block->message_)));
-  }
-
-  // Iterate over pointers on native execution stack.
-  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
-    it.frame()->Iterate(v);
-  }
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v) {
-  ThreadLocalTop* current_t = thread_local_top();
-  Iterate(v, current_t);
-}
-
-
-void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
-  // The ARM simulator has a separate JS stack.  We therefore register
-  // the C++ try catch handler with the simulator and get back an
-  // address that can be used for comparisons with addresses into the
-  // JS stack.  When running without the simulator, the address
-  // returned will be the address of the C++ try catch handler itself.
-  Address address = reinterpret_cast<Address>(
-      SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
-  thread_local_top()->set_try_catch_handler_address(address);
-}
-
-
-void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
-  ASSERT(thread_local_top()->TryCatchHandler() == that);
-  thread_local_top()->set_try_catch_handler_address(
-      reinterpret_cast<Address>(that->next_));
-  thread_local_top()->catcher_ = NULL;
-  SimulatorStack::UnregisterCTryCatch();
-}
-
-
-Handle<String> Isolate::StackTraceString() {
-  if (stack_trace_nesting_level_ == 0) {
-    stack_trace_nesting_level_++;
-    HeapStringAllocator allocator;
-    StringStream::ClearMentionedObjectCache();
-    StringStream accumulator(&allocator);
-    incomplete_message_ = &accumulator;
-    PrintStack(&accumulator);
-    Handle<String> stack_trace = accumulator.ToString();
-    incomplete_message_ = NULL;
-    stack_trace_nesting_level_ = 0;
-    return stack_trace;
-  } else if (stack_trace_nesting_level_ == 1) {
-    stack_trace_nesting_level_++;
-    OS::PrintError(
-      "\n\nAttempt to print stack while printing stack (double fault)\n");
-    OS::PrintError(
-      "If you are lucky you may find a partial stack dump on stdout.\n\n");
-    incomplete_message_->OutputToStdOut();
-    return factory()->empty_symbol();
-  } else {
-    OS::Abort();
-    // Unreachable
-    return factory()->empty_symbol();
-  }
-}
-
-
-Handle<JSArray> Isolate::CaptureCurrentStackTrace(
-    int frame_limit, StackTrace::StackTraceOptions options) {
-  // Ensure no negative values.
-  int limit = Max(frame_limit, 0);
-  Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
-
-  Handle<String> column_key = factory()->LookupAsciiSymbol("column");
-  Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
-  Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
-  Handle<String> name_or_source_url_key =
-      factory()->LookupAsciiSymbol("nameOrSourceURL");
-  Handle<String> script_name_or_source_url_key =
-      factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
-  Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
-  Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
-  Handle<String> constructor_key =
-      factory()->LookupAsciiSymbol("isConstructor");
-
-  StackTraceFrameIterator it(this);
-  int frames_seen = 0;
-  while (!it.done() && (frames_seen < limit)) {
-    JavaScriptFrame* frame = it.frame();
-    // Set initial size to the maximum inlining level + 1 for the outermost
-    // function.
-    List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
-    frame->Summarize(&frames);
-    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
-      // Create a JSObject to hold the information for the StackFrame.
-      Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
-
-      Handle<JSFunction> fun = frames[i].function();
-      Handle<Script> script(Script::cast(fun->shared()->script()));
-
-      if (options & StackTrace::kLineNumber) {
-        int script_line_offset = script->line_offset()->value();
-        int position = frames[i].code()->SourcePosition(frames[i].pc());
-        int line_number = GetScriptLineNumber(script, position);
-        // line_number is already shifted by the script_line_offset.
-        int relative_line_number = line_number - script_line_offset;
-        if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
-          Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
-          int start = (relative_line_number == 0) ? 0 :
-              Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
-          int column_offset = position - start;
-          if (relative_line_number == 0) {
-            // For the case where the code is on the same line as the script
-            // tag.
-            column_offset += script->column_offset()->value();
-          }
-          SetLocalPropertyNoThrow(stackFrame, column_key,
-                                  Handle<Smi>(Smi::FromInt(column_offset + 1)));
-        }
-        SetLocalPropertyNoThrow(stackFrame, line_key,
-                                Handle<Smi>(Smi::FromInt(line_number + 1)));
-      }
-
-      if (options & StackTrace::kScriptName) {
-        Handle<Object> script_name(script->name(), this);
-        SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
-      }
-
-      if (options & StackTrace::kScriptNameOrSourceURL) {
-        Handle<Object> script_name(script->name(), this);
-        Handle<JSValue> script_wrapper = GetScriptWrapper(script);
-        Handle<Object> property = GetProperty(script_wrapper,
-                                              name_or_source_url_key);
-        ASSERT(property->IsJSFunction());
-        Handle<JSFunction> method = Handle<JSFunction>::cast(property);
-        bool caught_exception;
-        Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
-                                                   NULL, &caught_exception);
-        if (caught_exception) {
-          result = factory()->undefined_value();
-        }
-        SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
-                                result);
-      }
-
-      if (options & StackTrace::kFunctionName) {
-        Handle<Object> fun_name(fun->shared()->name(), this);
-        if (fun_name->ToBoolean()->IsFalse()) {
-          fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
-        }
-        SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
-      }
-
-      if (options & StackTrace::kIsEval) {
-        int type = Smi::cast(script->compilation_type())->value();
-        Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
-            factory()->true_value() : factory()->false_value();
-        SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
-      }
-
-      if (options & StackTrace::kIsConstructor) {
-        Handle<Object> is_constructor = (frames[i].is_constructor()) ?
-            factory()->true_value() : factory()->false_value();
-        SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
-      }
-
-      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
-      frames_seen++;
-    }
-    it.Advance();
-  }
-
-  stack_trace->set_length(Smi::FromInt(frames_seen));
-  return stack_trace;
-}
-
-
-void Isolate::PrintStack() {
-  if (stack_trace_nesting_level_ == 0) {
-    stack_trace_nesting_level_++;
-
-    StringAllocator* allocator;
-    if (preallocated_message_space_ == NULL) {
-      allocator = new HeapStringAllocator();
-    } else {
-      allocator = preallocated_message_space_;
-    }
-
-    StringStream::ClearMentionedObjectCache();
-    StringStream accumulator(allocator);
-    incomplete_message_ = &accumulator;
-    PrintStack(&accumulator);
-    accumulator.OutputToStdOut();
-    InitializeLoggingAndCounters();
-    accumulator.Log();
-    incomplete_message_ = NULL;
-    stack_trace_nesting_level_ = 0;
-    if (preallocated_message_space_ == NULL) {
-      // Remove the HeapStringAllocator created above.
-      delete allocator;
-    }
-  } else if (stack_trace_nesting_level_ == 1) {
-    stack_trace_nesting_level_++;
-    OS::PrintError(
-      "\n\nAttempt to print stack while printing stack (double fault)\n");
-    OS::PrintError(
-      "If you are lucky you may find a partial stack dump on stdout.\n\n");
-    incomplete_message_->OutputToStdOut();
-  }
-}
-
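As an aside on the pattern used by Isolate::PrintStack() above: the nesting counter lets a fault that occurs while the stack is being printed be reported as a double fault instead of recursing forever. A minimal standalone sketch of that reentrancy guard (names and output are illustrative, not V8's):

#include <cstdio>

// Counter playing the role of stack_trace_nesting_level_ above.
static int g_report_nesting = 0;

void PrintCrashReport() {
  if (g_report_nesting == 0) {
    ++g_report_nesting;
    std::printf("==== crash report ====\n");
    // ... report generation that could itself fail and re-enter here ...
    g_report_nesting = 0;
  } else if (g_report_nesting == 1) {
    ++g_report_nesting;
    std::fprintf(stderr,
                 "Attempt to print report while printing report "
                 "(double fault)\n");
  }
}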
-
-static void PrintFrames(StringStream* accumulator,
-                        StackFrame::PrintMode mode) {
-  StackFrameIterator it;
-  for (int i = 0; !it.done(); it.Advance()) {
-    it.frame()->Print(accumulator, mode, i++);
-  }
-}
-
-
-void Isolate::PrintStack(StringStream* accumulator) {
-  if (!IsInitialized()) {
-    accumulator->Add(
-        "\n==== Stack trace is not available ==========================\n\n");
-    accumulator->Add(
-        "\n==== Isolate for the thread is not initialized =============\n\n");
-    return;
-  }
-  // The MentionedObjectCache is not GC-proof at the moment.
-  AssertNoAllocation nogc;
-  ASSERT(StringStream::IsMentionedObjectCacheClear());
-
-  // Avoid printing anything if there are no frames.
-  if (c_entry_fp(thread_local_top()) == 0) return;
-
-  accumulator->Add(
-      "\n==== Stack trace ============================================\n\n");
-  PrintFrames(accumulator, StackFrame::OVERVIEW);
-
-  accumulator->Add(
-      "\n==== Details ================================================\n\n");
-  PrintFrames(accumulator, StackFrame::DETAILS);
-
-  accumulator->PrintMentionedObjectCache();
-  accumulator->Add("=====================\n\n");
-}
-
-
-void Isolate::SetFailedAccessCheckCallback(
-    v8::FailedAccessCheckCallback callback) {
-  thread_local_top()->failed_access_check_callback_ = callback;
-}
-
-
-void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
-  if (!thread_local_top()->failed_access_check_callback_) return;
-
-  ASSERT(receiver->IsAccessCheckNeeded());
-  ASSERT(context());
-
-  // Get the data object from access check info.
-  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  if (!constructor->shared()->IsApiFunction()) return;
-  Object* data_obj =
-      constructor->shared()->get_api_func_data()->access_check_info();
-  if (data_obj == heap_.undefined_value()) return;
-
-  HandleScope scope;
-  Handle<JSObject> receiver_handle(receiver);
-  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
-  thread_local_top()->failed_access_check_callback_(
-    v8::Utils::ToLocal(receiver_handle),
-    type,
-    v8::Utils::ToLocal(data));
-}
-
-
-enum MayAccessDecision {
-  YES, NO, UNKNOWN
-};
-
-
-static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
-                                           JSObject* receiver,
-                                           v8::AccessType type) {
-  // During bootstrapping, callback functions are not enabled yet.
-  if (isolate->bootstrapper()->IsActive()) return YES;
-
-  if (receiver->IsJSGlobalProxy()) {
-    Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
-    if (!receiver_context->IsContext()) return NO;
-
-    // Get the global context of current top context.
-    // Avoid using Isolate::global_context() because it uses a Handle.
-    Context* global_context = isolate->context()->global()->global_context();
-    if (receiver_context == global_context) return YES;
-
-    if (Context::cast(receiver_context)->security_token() ==
-        global_context->security_token())
-      return YES;
-  }
-
-  return UNKNOWN;
-}
-
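MayAccessPreCheck above resolves the easy cases without calling out to an embedder callback: access is granted when the receiver lives in the current global context or shares its security token, and everything else is left undecided. A simplified standalone model of that decision (the types here are hypothetical stand-ins, not V8's):

#include <string>

enum class Decision { kYes, kNo, kUnknown };

struct SecurityContext {
  const void* identity;        // stands in for the global context pointer
  std::string security_token;  // contexts sharing a token may access each other
};

Decision MayAccessPreCheck(const SecurityContext& current,
                           const SecurityContext& receiver) {
  if (receiver.identity == current.identity) return Decision::kYes;
  if (receiver.security_token == current.security_token) return Decision::kYes;
  return Decision::kUnknown;  // defer to the named/indexed security callback
}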
-
-bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
-                             v8::AccessType type) {
-  ASSERT(receiver->IsAccessCheckNeeded());
-
-  // The callers of this method are not expecting a GC.
-  AssertNoAllocation no_gc;
-
-  // Skip checks for hidden property access.  Note, we do not
-  // require the existence of a context in this case.
-  if (key == heap_.hidden_symbol()) return true;
-
-  // Check for compatibility between the security tokens in the
-  // current lexical context and the accessed object.
-  ASSERT(context());
-
-  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
-  if (decision != UNKNOWN) return decision == YES;
-
-  // Get named access check callback
-  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  if (!constructor->shared()->IsApiFunction()) return false;
-
-  Object* data_obj =
-     constructor->shared()->get_api_func_data()->access_check_info();
-  if (data_obj == heap_.undefined_value()) return false;
-
-  Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
-  v8::NamedSecurityCallback callback =
-      v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-
-  if (!callback) return false;
-
-  HandleScope scope(this);
-  Handle<JSObject> receiver_handle(receiver, this);
-  Handle<Object> key_handle(key, this);
-  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
-  LOG(this, ApiNamedSecurityCheck(key));
-  bool result = false;
-  {
-    // Leaving JavaScript.
-    VMState state(this, EXTERNAL);
-    result = callback(v8::Utils::ToLocal(receiver_handle),
-                      v8::Utils::ToLocal(key_handle),
-                      type,
-                      v8::Utils::ToLocal(data));
-  }
-  return result;
-}
-
-
-bool Isolate::MayIndexedAccess(JSObject* receiver,
-                               uint32_t index,
-                               v8::AccessType type) {
-  ASSERT(receiver->IsAccessCheckNeeded());
-  // Check for compatibility between the security tokens in the
-  // current lexical context and the accessed object.
-  ASSERT(context());
-
-  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
-  if (decision != UNKNOWN) return decision == YES;
-
-  // Get indexed access check callback
-  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  if (!constructor->shared()->IsApiFunction()) return false;
-
-  Object* data_obj =
-      constructor->shared()->get_api_func_data()->access_check_info();
-  if (data_obj == heap_.undefined_value()) return false;
-
-  Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
-  v8::IndexedSecurityCallback callback =
-      v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
-
-  if (!callback) return false;
-
-  HandleScope scope(this);
-  Handle<JSObject> receiver_handle(receiver, this);
-  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
-  LOG(this, ApiIndexedSecurityCheck(index));
-  bool result = false;
-  {
-    // Leaving JavaScript.
-    VMState state(this, EXTERNAL);
-    result = callback(v8::Utils::ToLocal(receiver_handle),
-                      index,
-                      type,
-                      v8::Utils::ToLocal(data));
-  }
-  return result;
-}
-
-
-const char* const Isolate::kStackOverflowMessage =
-  "Uncaught RangeError: Maximum call stack size exceeded";
-
-
-Failure* Isolate::StackOverflow() {
-  HandleScope scope;
-  Handle<String> key = factory()->stack_overflow_symbol();
-  Handle<JSObject> boilerplate =
-      Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
-  Handle<Object> exception = Copy(boilerplate);
-  // TODO(1240995): To avoid having to call JavaScript code to compute
-  // the message for stack overflow exceptions which is very likely to
-  // double fault with another stack overflow exception, we use a
-  // precomputed message.
-  DoThrow(*exception, NULL);
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::TerminateExecution() {
-  DoThrow(heap_.termination_exception(), NULL);
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
-  DoThrow(exception, location);
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
-  bool can_be_caught_externally = false;
-  ShouldReportException(&can_be_caught_externally,
-                        is_catchable_by_javascript(exception));
-  thread_local_top()->catcher_ = can_be_caught_externally ?
-      try_catch_handler() : NULL;
-
-  // Set the exception being re-thrown.
-  set_pending_exception(exception);
-  if (exception->IsFailure()) return exception->ToFailureUnchecked();
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::ThrowIllegalOperation() {
-  return Throw(heap_.illegal_access_symbol());
-}
-
-
-void Isolate::ScheduleThrow(Object* exception) {
-  // When scheduling a throw we first throw the exception to get the
-  // error reporting if it is uncaught before rescheduling it.
-  Throw(exception);
-  thread_local_top()->scheduled_exception_ = pending_exception();
-  thread_local_top()->external_caught_exception_ = false;
-  clear_pending_exception();
-}
-
-
-Failure* Isolate::PromoteScheduledException() {
-  MaybeObject* thrown = scheduled_exception();
-  clear_scheduled_exception();
-  // Re-throw the exception to avoid getting repeated error reporting.
-  return ReThrow(thrown);
-}
-
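The ScheduleThrow()/PromoteScheduledException() pair above moves an exception between two slots: it is thrown once so uncaught-error reporting sees it, parked as the scheduled exception while control returns through external code, and re-thrown later. A minimal two-slot sketch of that hand-off (not V8's types):

struct ExceptionSlots {
  const void* pending = nullptr;    // exception currently propagating
  const void* scheduled = nullptr;  // exception parked for later re-throw
};

void ScheduleThrow(ExceptionSlots* slots, const void* exception) {
  slots->pending = exception;         // throw first so reporting happens
  slots->scheduled = slots->pending;  // then park it ...
  slots->pending = nullptr;           // ... and clear the pending slot
}

const void* PromoteScheduledException(ExceptionSlots* slots) {
  const void* thrown = slots->scheduled;
  slots->scheduled = nullptr;
  slots->pending = thrown;            // re-throw without reporting again
  return thrown;
}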
-
-void Isolate::PrintCurrentStackTrace(FILE* out) {
-  StackTraceFrameIterator it(this);
-  while (!it.done()) {
-    HandleScope scope;
-    // Find code position if recorded in relocation info.
-    JavaScriptFrame* frame = it.frame();
-    int pos = frame->LookupCode()->SourcePosition(frame->pc());
-    Handle<Object> pos_obj(Smi::FromInt(pos));
-    // Fetch function and receiver.
-    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
-    Handle<Object> recv(frame->receiver());
-    // Advance to the next JavaScript frame and determine if the
-    // current frame is the top-level frame.
-    it.Advance();
-    Handle<Object> is_top_level = it.done()
-        ? factory()->true_value()
-        : factory()->false_value();
-    // Generate and print stack trace line.
-    Handle<String> line =
-        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
-    if (line->length() > 0) {
-      line->PrintOn(out);
-      fprintf(out, "\n");
-    }
-  }
-}
-
-
-void Isolate::ComputeLocation(MessageLocation* target) {
-  *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
-  StackTraceFrameIterator it(this);
-  if (!it.done()) {
-    JavaScriptFrame* frame = it.frame();
-    JSFunction* fun = JSFunction::cast(frame->function());
-    Object* script = fun->shared()->script();
-    if (script->IsScript() &&
-        !(Script::cast(script)->source()->IsUndefined())) {
-      int pos = frame->LookupCode()->SourcePosition(frame->pc());
-      // Compute the location from the function and the reloc info.
-      Handle<Script> casted_script(Script::cast(script));
-      *target = MessageLocation(casted_script, pos, pos + 1);
-    }
-  }
-}
-
-
-bool Isolate::ShouldReportException(bool* can_be_caught_externally,
-                                    bool catchable_by_javascript) {
-  // Find the top-most try-catch handler.
-  StackHandler* handler =
-      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
-  while (handler != NULL && !handler->is_try_catch()) {
-    handler = handler->next();
-  }
-
-  // Get the address of the external handler so we can compare the address to
-  // determine which one is closer to the top of the stack.
-  Address external_handler_address =
-      thread_local_top()->try_catch_handler_address();
-
-  // The exception has been externally caught if and only if there is
-  // an external handler which is on top of the top-most try-catch
-  // handler.
-  *can_be_caught_externally = external_handler_address != NULL &&
-      (handler == NULL || handler->address() > external_handler_address ||
-       !catchable_by_javascript);
-
-  if (*can_be_caught_externally) {
-    // Only report the exception if the external handler is verbose.
-    return try_catch_handler()->is_verbose_;
-  } else {
-    // Report the exception if it isn't caught by JavaScript code.
-    return handler == NULL;
-  }
-}
-
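The core of ShouldReportException() above is an address comparison: on a downward-growing stack a smaller address is closer to the top, so the external v8::TryCatch wins when it sits at a lower address than the innermost JavaScript try-catch handler, or when no JavaScript handler exists at all. A standalone sketch of just that comparison (parameter names are illustrative):

#include <cstdint>

bool CanBeCaughtExternally(std::uintptr_t js_handler_address,        // 0 if none
                           std::uintptr_t external_handler_address,  // 0 if none
                           bool catchable_by_javascript) {
  if (external_handler_address == 0) return false;  // no external handler
  if (!catchable_by_javascript) return true;        // JS cannot catch it anyway
  if (js_handler_address == 0) return true;         // no JS handler at all
  // A larger address is deeper in the (downward-growing) stack, i.e. older.
  return js_handler_address > external_handler_address;
}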
-
-void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
-  ASSERT(!has_pending_exception());
-
-  HandleScope scope;
-  Object* exception_object = Smi::FromInt(0);
-  bool is_object = exception->ToObject(&exception_object);
-  Handle<Object> exception_handle(exception_object);
-
-  // Determine reporting and whether the exception is caught externally.
-  bool catchable_by_javascript = is_catchable_by_javascript(exception);
-  // Only real objects can be caught by JS.
-  ASSERT(!catchable_by_javascript || is_object);
-  bool can_be_caught_externally = false;
-  bool should_report_exception =
-      ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
-  bool report_exception = catchable_by_javascript && should_report_exception;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Notify debugger of exception.
-  if (catchable_by_javascript) {
-    debugger_->OnException(exception_handle, report_exception);
-  }
-#endif
-
-  // Generate the message.
-  Handle<Object> message_obj;
-  MessageLocation potential_computed_location;
-  bool try_catch_needs_message =
-      can_be_caught_externally &&
-      try_catch_handler()->capture_message_;
-  if (report_exception || try_catch_needs_message) {
-    if (location == NULL) {
-      // If no location was specified, we use a computed one instead.
-      ComputeLocation(&potential_computed_location);
-      location = &potential_computed_location;
-    }
-    if (!bootstrapper()->IsActive()) {
-      // It's not safe to try to make message objects or collect stack
-      // traces while the bootstrapper is active since the infrastructure
-      // may not have been properly initialized.
-      Handle<String> stack_trace;
-      if (FLAG_trace_exception) stack_trace = StackTraceString();
-      Handle<JSArray> stack_trace_object;
-      if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
-          stack_trace_object = CaptureCurrentStackTrace(
-              stack_trace_for_uncaught_exceptions_frame_limit_,
-              stack_trace_for_uncaught_exceptions_options_);
-      }
-      ASSERT(is_object);  // Can't use the handle unless there's a real object.
-      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
-          location, HandleVector<Object>(&exception_handle, 1), stack_trace,
-          stack_trace_object);
-    }
-  }
-
-  // Save the message for reporting if the exception remains uncaught.
-  thread_local_top()->has_pending_message_ = report_exception;
-  if (!message_obj.is_null()) {
-    thread_local_top()->pending_message_obj_ = *message_obj;
-    if (location != NULL) {
-      thread_local_top()->pending_message_script_ = *location->script();
-      thread_local_top()->pending_message_start_pos_ = location->start_pos();
-      thread_local_top()->pending_message_end_pos_ = location->end_pos();
-    }
-  }
-
-  // Do not forget to clean catcher_ if the currently thrown exception
-  // cannot be caught.  If necessary, ReThrow will update the catcher.
-  thread_local_top()->catcher_ = can_be_caught_externally ?
-      try_catch_handler() : NULL;
-
-  // NOTE: Notifying the debugger or generating the message
-  // may have caused new exceptions. For now, we just ignore
-  // that and set the pending exception to the original one.
-  if (is_object) {
-    set_pending_exception(*exception_handle);
-  } else {
-    // Failures are not on the heap so they neither need nor work with handles.
-    ASSERT(exception_handle->IsFailure());
-    set_pending_exception(exception);
-  }
-}
-
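DoThrow() above only constructs a message object when someone will actually consume it, either because the exception will be reported or because the external catcher asked to capture messages. A small sketch of that guard, using std::optional in place of a possibly-null handle (names are hypothetical):

#include <optional>
#include <string>

std::optional<std::string> MaybeBuildMessage(bool report_exception,
                                             bool catcher_wants_message,
                                             const std::string& description) {
  if (!report_exception && !catcher_wants_message) return std::nullopt;
  // Building the message may be expensive (stack capture, formatting), so it
  // only happens on this path.
  return "Uncaught exception: " + description;
}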
-
-bool Isolate::IsExternallyCaught() {
-  ASSERT(has_pending_exception());
-
-  if ((thread_local_top()->catcher_ == NULL) ||
-      (try_catch_handler() != thread_local_top()->catcher_)) {
-    // When throwing the exception, we found no v8::TryCatch
-    // which should care about this exception.
-    return false;
-  }
-
-  if (!is_catchable_by_javascript(pending_exception())) {
-    return true;
-  }
-
-  // Get the address of the external handler so we can compare the address to
-  // determine which one is closer to the top of the stack.
-  Address external_handler_address =
-      thread_local_top()->try_catch_handler_address();
-  ASSERT(external_handler_address != NULL);
-
-  // The exception has been externally caught if and only if there is
-  // an external handler which is on top of the top-most try-finally
-  // handler.
-  // There should be no try-catch blocks as they would prohibit us from
-  // finding the external catcher in the first place (see the catcher_
-  // check above).
-  //
-  // Note that a finally clause would rethrow an exception unless it's
-  // aborted by jumps in control flow like return, break, etc., and we'll
-  // have another chance to set a proper v8::TryCatch.
-  StackHandler* handler =
-      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
-  while (handler != NULL && handler->address() < external_handler_address) {
-    ASSERT(!handler->is_try_catch());
-    if (handler->is_try_finally()) return false;
-
-    handler = handler->next();
-  }
-
-  return true;
-}
-
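IsExternallyCaught() above walks the chain of stack handlers that sit above the external handler (i.e. at lower addresses); encountering a try-finally on the way means the exception will first be delivered to that finally block, so it does not count as externally caught yet. A simplified version of that walk over a plain linked list (types are illustrative):

#include <cstdint>

struct StackHandlerNode {
  std::uintptr_t address;   // position of the handler on the stack
  bool is_try_finally;
  StackHandlerNode* next;   // next (older) handler, or nullptr
};

bool ExternallyCaught(StackHandlerNode* top,
                      std::uintptr_t external_handler_address) {
  for (StackHandlerNode* h = top;
       h != nullptr && h->address < external_handler_address;
       h = h->next) {
    if (h->is_try_finally) return false;  // finally runs before the catcher
  }
  return true;
}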
-
-void Isolate::ReportPendingMessages() {
-  ASSERT(has_pending_exception());
-  PropagatePendingExceptionToExternalTryCatch();
-
-  // If the pending exception is OutOfMemoryException set out_of_memory in
-  // the global context.  Note: We have to mark the global context here
-  // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
-  // set it.
-  HandleScope scope;
-  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
-    context()->mark_out_of_memory();
-  } else if (thread_local_top_.pending_exception_ ==
-             heap()->termination_exception()) {
-    // Do nothing: if needed, the exception has been already propagated to
-    // v8::TryCatch.
-  } else {
-    if (thread_local_top_.has_pending_message_) {
-      thread_local_top_.has_pending_message_ = false;
-      if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
-        HandleScope scope;
-        Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
-        if (thread_local_top_.pending_message_script_ != NULL) {
-          Handle<Script> script(thread_local_top_.pending_message_script_);
-          int start_pos = thread_local_top_.pending_message_start_pos_;
-          int end_pos = thread_local_top_.pending_message_end_pos_;
-          MessageLocation location(script, start_pos, end_pos);
-          MessageHandler::ReportMessage(this, &location, message_obj);
-        } else {
-          MessageHandler::ReportMessage(this, NULL, message_obj);
-        }
-      }
-    }
-  }
-  clear_pending_message();
-}
-
-
-void Isolate::TraceException(bool flag) {
-  FLAG_trace_exception = flag;  // TODO(isolates): This is an unfortunate use.
-}
-
-
-bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
-  ASSERT(has_pending_exception());
-  PropagatePendingExceptionToExternalTryCatch();
-
-  // Always reschedule out of memory exceptions.
-  if (!is_out_of_memory()) {
-    bool is_termination_exception =
-        pending_exception() == heap_.termination_exception();
-
-    // Do not reschedule the exception if this is the bottom call.
-    bool clear_exception = is_bottom_call;
-
-    if (is_termination_exception) {
-      if (is_bottom_call) {
-        thread_local_top()->external_caught_exception_ = false;
-        clear_pending_exception();
-        return false;
-      }
-    } else if (thread_local_top()->external_caught_exception_) {
-      // If the exception is externally caught, clear it if there are no
-      // JavaScript frames on the way to the C++ frame that has the
-      // external handler.
-      ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
-      Address external_handler_address =
-          thread_local_top()->try_catch_handler_address();
-      JavaScriptFrameIterator it;
-      if (it.done() || (it.frame()->sp() > external_handler_address)) {
-        clear_exception = true;
-      }
-    }
-
-    // Clear the exception if needed.
-    if (clear_exception) {
-      thread_local_top()->external_caught_exception_ = false;
-      clear_pending_exception();
-      return false;
-    }
-  }
-
-  // Reschedule the exception.
-  thread_local_top()->scheduled_exception_ = pending_exception();
-  clear_pending_exception();
-  return true;
-}
-
-
-void Isolate::SetCaptureStackTraceForUncaughtExceptions(
-      bool capture,
-      int frame_limit,
-      StackTrace::StackTraceOptions options) {
-  capture_stack_trace_for_uncaught_exceptions_ = capture;
-  stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
-  stack_trace_for_uncaught_exceptions_options_ = options;
-}
-
-
-bool Isolate::is_out_of_memory() {
-  if (has_pending_exception()) {
-    MaybeObject* e = pending_exception();
-    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
-      return true;
-    }
-  }
-  if (has_scheduled_exception()) {
-    MaybeObject* e = scheduled_exception();
-    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-Handle<Context> Isolate::global_context() {
-  GlobalObject* global = thread_local_top()->context_->global();
-  return Handle<Context>(global->global_context());
-}
-
-
-Handle<Context> Isolate::GetCallingGlobalContext() {
-  JavaScriptFrameIterator it;
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  if (debug_->InDebugger()) {
-    while (!it.done()) {
-      JavaScriptFrame* frame = it.frame();
-      Context* context = Context::cast(frame->context());
-      if (context->global_context() == *debug_->debug_context()) {
-        it.Advance();
-      } else {
-        break;
-      }
-    }
-  }
-#endif  // ENABLE_DEBUGGER_SUPPORT
-  if (it.done()) return Handle<Context>::null();
-  JavaScriptFrame* frame = it.frame();
-  Context* context = Context::cast(frame->context());
-  return Handle<Context>(context->global_context());
-}
-
-
-char* Isolate::ArchiveThread(char* to) {
-  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
-    RuntimeProfiler::IsolateExitedJS(this);
-  }
-  memcpy(to, reinterpret_cast<char*>(thread_local_top()),
-         sizeof(ThreadLocalTop));
-  InitializeThreadLocal();
-  return to + sizeof(ThreadLocalTop);
-}
-
-
-char* Isolate::RestoreThread(char* from) {
-  memcpy(reinterpret_cast<char*>(thread_local_top()), from,
-         sizeof(ThreadLocalTop));
-  // This might be just paranoia, but it seems to be needed in case a
-  // thread_local_top_ is restored on a separate OS thread.
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
-  thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
-  thread_local_top()->simulator_ = Simulator::current(this);
-#endif
-#endif
-  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
-    RuntimeProfiler::IsolateEnteredJS(this);
-  }
-  return from + sizeof(ThreadLocalTop);
-}
-
-} }  // namespace v8::internal
diff --git a/src/type-info.cc b/src/type-info.cc
index 4069c83..4df7ece 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "code-stubs.h"
 #include "compiler.h"
 #include "ic.h"
 #include "macro-assembler.h"
@@ -58,132 +59,150 @@
 }
 
 
-STATIC_ASSERT(DEFAULT_STRING_STUB == Code::kNoExtraICState);
-
-
 TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
                                        Handle<Context> global_context) {
   global_context_ = global_context;
-  PopulateMap(code);
+  BuildDictionary(code);
   ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
 }
 
 
-Handle<Object> TypeFeedbackOracle::GetInfo(int pos) {
-  int entry = dictionary_->FindEntry(pos);
-  return entry != NumberDictionary::kNotFound
+Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
+  int entry = dictionary_->FindEntry(ast_id);
+  return entry != UnseededNumberDictionary::kNotFound
       ? Handle<Object>(dictionary_->ValueAt(entry))
       : Isolate::Current()->factory()->undefined_value();
 }
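After this change GetInfo() keys the feedback dictionary by AST id rather than by source position, and a miss yields undefined. A rough standard-library analogue of that lookup, with an empty optional standing in for the undefined sentinel (Feedback is a placeholder type):

#include <optional>
#include <unordered_map>

struct Feedback { /* receiver map, code stub, or check type */ };

class FeedbackTable {
 public:
  void Set(unsigned ast_id, Feedback info) { table_[ast_id] = info; }

  std::optional<Feedback> Get(unsigned ast_id) const {
    auto it = table_.find(ast_id);
    if (it == table_.end()) return std::nullopt;  // "undefined"
    return it->second;
  }

 private:
  std::unordered_map<unsigned, Feedback> table_;
};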
 
 
-bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->position()));
+bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
-    return code->kind() == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC &&
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    return code->is_keyed_load_stub() &&
+        code->ic_state() == MONOMORPHIC &&
+        Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
         code->FindFirstMap() != NULL;
   }
   return false;
 }
 
 
-bool TypeFeedbackOracle::StoreIsMonomorphic(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->position()));
+bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
+  Handle<Object> map_or_code(GetInfo(expr->id()));
+  if (map_or_code->IsCode()) {
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    Builtins* builtins = Isolate::Current()->builtins();
+    return code->is_keyed_load_stub() &&
+        *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
+        code->ic_state() == MEGAMORPHIC;
+  }
+  return false;
+}
+
+
+bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
-    return code->kind() == Code::KEYED_EXTERNAL_ARRAY_STORE_IC &&
-        code->FindFirstMap() != NULL;
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    return code->is_keyed_store_stub() &&
+        code->ic_state() == MONOMORPHIC &&
+        Code::ExtractTypeFromFlags(code->flags()) == NORMAL;
+  }
+  return false;
+}
+
+
+bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
+  Handle<Object> map_or_code(GetInfo(expr->id()));
+  if (map_or_code->IsCode()) {
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    Builtins* builtins = Isolate::Current()->builtins();
+    return code->is_keyed_store_stub() &&
+        *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
+        *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
+        code->ic_state() == MEGAMORPHIC;
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
-  Handle<Object> value = GetInfo(expr->position());
+  Handle<Object> value = GetInfo(expr->id());
   return value->IsMap() || value->IsSmi();
 }
 
 
 Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
-  ASSERT(LoadIsMonomorphic(expr));
-  Handle<Object> map_or_code(
-      Handle<HeapObject>::cast(GetInfo(expr->position())));
+  ASSERT(LoadIsMonomorphicNormal(expr));
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
-    return Handle<Map>(code->FindFirstMap());
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    Map* first_map = code->FindFirstMap();
+    ASSERT(first_map != NULL);
+    return Handle<Map>(first_map);
   }
-  return Handle<Map>(Map::cast(*map_or_code));
+  return Handle<Map>::cast(map_or_code);
 }
 
 
 Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
-  ASSERT(StoreIsMonomorphic(expr));
-  Handle<HeapObject> map_or_code(
-      Handle<HeapObject>::cast(GetInfo(expr->position())));
+  ASSERT(StoreIsMonomorphicNormal(expr));
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
     return Handle<Map>(code->FindFirstMap());
   }
-  return Handle<Map>(Map::cast(*map_or_code));
+  return Handle<Map>::cast(map_or_code);
 }
 
 
-ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
-                                                   Handle<String> name) {
+void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
+                                           Handle<String> name,
+                                           SmallMapList* types) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
-  return CollectReceiverTypes(expr->position(), name, flags);
+  CollectReceiverTypes(expr->id(), name, flags, types);
 }
 
 
-ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
-                                                    Handle<String> name) {
+void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
+                                            Handle<String> name,
+                                            SmallMapList* types) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
-  return CollectReceiverTypes(expr->position(), name, flags);
+  CollectReceiverTypes(expr->id(), name, flags, types);
 }
 
 
-ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
-                                                   Handle<String> name) {
+void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
+                                           Handle<String> name,
+                                           CallKind call_kind,
+                                           SmallMapList* types) {
   int arity = expr->arguments()->length();
-  // Note: these flags won't let us get maps from stubs with
-  // non-default extra ic state in the megamorphic case. In the more
-  // important monomorphic case the map is obtained directly, so it's
-  // not a problem until we decide to emit more polymorphic code.
+
+  // Note: Currently we do not take string extra ic data into account
+  // here.
+  Code::ExtraICState extra_ic_state =
+      CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
+
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
                                                     NORMAL,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     OWN_MAP,
-                                                    NOT_IN_LOOP,
                                                     arity);
-  return CollectReceiverTypes(expr->position(), name, flags);
+  CollectReceiverTypes(expr->id(), name, flags, types);
 }
 
 
 CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
-  Handle<Object> value = GetInfo(expr->position());
+  Handle<Object> value = GetInfo(expr->id());
   if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
   CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
   ASSERT(check != RECEIVER_MAP_CHECK);
   return check;
 }
 
-ExternalArrayType TypeFeedbackOracle::GetKeyedLoadExternalArrayType(
-    Property* expr) {
-  Handle<Object> stub = GetInfo(expr->position());
-  ASSERT(stub->IsCode());
-  return Code::cast(*stub)->external_array_type();
-}
-
-ExternalArrayType TypeFeedbackOracle::GetKeyedStoreExternalArrayType(
-    Expression* expr) {
-  Handle<Object> stub = GetInfo(expr->position());
-  ASSERT(stub->IsCode());
-  return Code::cast(*stub)->external_array_type();
-}
-
 Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
     CheckType check) {
   JSFunction* function = NULL;
@@ -207,13 +226,13 @@
 
 
 bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
-  return *GetInfo(expr->position()) ==
+  return *GetInfo(expr->id()) ==
       Isolate::Current()->builtins()->builtin(id);
 }
 
 
 TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
-  Handle<Object> object = GetInfo(expr->position());
+  Handle<Object> object = GetInfo(expr->id());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
@@ -223,12 +242,14 @@
   switch (state) {
     case CompareIC::UNINITIALIZED:
       // Uninitialized means never executed.
-      // TODO(fschneider): Introduce a separate value for never-executed ICs.
-      return unknown;
+      return TypeInfo::Uninitialized();
     case CompareIC::SMIS:
       return TypeInfo::Smi();
     case CompareIC::HEAP_NUMBERS:
       return TypeInfo::Number();
+    case CompareIC::SYMBOLS:
+    case CompareIC::STRINGS:
+      return TypeInfo::String();
     case CompareIC::OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
@@ -239,44 +260,74 @@
 }
 
 
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
-  Handle<Object> object = GetInfo(expr->position());
+bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
+  if (!object->IsCode()) return false;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return false;
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  return state == CompareIC::SYMBOLS;
+}
+
+
+TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
-  if (code->is_type_recording_binary_op_stub()) {
-    TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
-        code->type_recording_binary_op_type());
-    TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
-        code->type_recording_binary_op_result_type());
+  ASSERT(code->is_unary_op_stub());
+  UnaryOpIC::TypeInfo type = static_cast<UnaryOpIC::TypeInfo>(
+      code->unary_op_type());
+  switch (type) {
+    case UnaryOpIC::SMI:
+      return TypeInfo::Smi();
+    case UnaryOpIC::HEAP_NUMBER:
+      return TypeInfo::Double();
+    default:
+      return unknown;
+  }
+}
+
+
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (code->is_binary_op_stub()) {
+    BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+        code->binary_op_type());
+    BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
+        code->binary_op_result_type());
 
     switch (type) {
-      case TRBinaryOpIC::UNINITIALIZED:
+      case BinaryOpIC::UNINITIALIZED:
         // Uninitialized means never executed.
-        // TODO(fschneider): Introduce a separate value for never-executed ICs
-        return unknown;
-      case TRBinaryOpIC::SMI:
+        return TypeInfo::Uninitialized();
+      case BinaryOpIC::SMI:
         switch (result_type) {
-          case TRBinaryOpIC::UNINITIALIZED:
-          case TRBinaryOpIC::SMI:
+          case BinaryOpIC::UNINITIALIZED:
+          case BinaryOpIC::SMI:
             return TypeInfo::Smi();
-          case TRBinaryOpIC::INT32:
+          case BinaryOpIC::INT32:
             return TypeInfo::Integer32();
-          case TRBinaryOpIC::HEAP_NUMBER:
+          case BinaryOpIC::HEAP_NUMBER:
             return TypeInfo::Double();
           default:
             return unknown;
         }
-      case TRBinaryOpIC::INT32:
+      case BinaryOpIC::INT32:
         if (expr->op() == Token::DIV ||
-            result_type == TRBinaryOpIC::HEAP_NUMBER) {
+            result_type == BinaryOpIC::HEAP_NUMBER) {
           return TypeInfo::Double();
         }
         return TypeInfo::Integer32();
-      case TRBinaryOpIC::HEAP_NUMBER:
+      case BinaryOpIC::HEAP_NUMBER:
         return TypeInfo::Double();
-      case TRBinaryOpIC::STRING:
-      case TRBinaryOpIC::GENERIC:
+      case BinaryOpIC::BOTH_STRING:
+        return TypeInfo::String();
+      case BinaryOpIC::STRING:
+      case BinaryOpIC::GENERIC:
         return unknown;
      default:
         return unknown;
@@ -287,7 +338,7 @@
 
 
 TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
-  Handle<Object> object = GetInfo(clause->position());
+  Handle<Object> object = GetInfo(clause->CompareId());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
@@ -313,142 +364,187 @@
 }
 
 
-ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
-                                                      Handle<String> name,
-                                                      Code::Flags flags) {
+TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
+  Handle<Object> object = GetInfo(expr->CountId());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_binary_op_stub()) return unknown;
+
+  BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+      code->binary_op_type());
+  switch (type) {
+    case BinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::SMI:
+      return TypeInfo::Smi();
+    case BinaryOpIC::INT32:
+      return TypeInfo::Integer32();
+    case BinaryOpIC::HEAP_NUMBER:
+      return TypeInfo::Double();
+    case BinaryOpIC::BOTH_STRING:
+    case BinaryOpIC::STRING:
+    case BinaryOpIC::GENERIC:
+      return unknown;
+    default:
+      return unknown;
+  }
+  UNREACHABLE();
+  return unknown;
+}
+
+
+void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
+                                              Handle<String> name,
+                                              Code::Flags flags,
+                                              SmallMapList* types) {
   Isolate* isolate = Isolate::Current();
-  Handle<Object> object = GetInfo(position);
-  if (object->IsUndefined() || object->IsSmi()) return NULL;
+  Handle<Object> object = GetInfo(ast_id);
+  if (object->IsUndefined() || object->IsSmi()) return;
 
   if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
     // TODO(fschneider): We could collect the maps and signal that
     // we need a generic store (or load) here.
     ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
-    return NULL;
   } else if (object->IsMap()) {
-    ZoneMapList* types = new ZoneMapList(1);
     types->Add(Handle<Map>::cast(object));
-    return types;
   } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
-    ZoneMapList* types = new ZoneMapList(4);
+    types->Reserve(4);
     ASSERT(object->IsCode());
     isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
-    return types->length() > 0 ? types : NULL;
-  } else {
-    return NULL;
   }
 }
 
 
-void TypeFeedbackOracle::SetInfo(int position, Object* target) {
-  MaybeObject* maybe_result = dictionary_->AtNumberPut(position, target);
-  USE(maybe_result);
-#ifdef DEBUG
-  Object* result;
-  // Dictionary has been allocated with sufficient size for all elements.
-  ASSERT(maybe_result->ToObject(&result));
-  ASSERT(*dictionary_ == result);
-#endif
-}
-
-
-void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
-  Isolate* isolate = Isolate::Current();
-  HandleScope scope(isolate);
-
-  const int kInitialCapacity = 16;
-  List<int> code_positions(kInitialCapacity);
-  List<int> source_positions(kInitialCapacity);
-  CollectPositions(*code, &code_positions, &source_positions);
-
-  ASSERT(dictionary_.is_null());  // Only initialize once.
-  dictionary_ = isolate->factory()->NewNumberDictionary(
-      code_positions.length());
-
-  int length = code_positions.length();
-  ASSERT(source_positions.length() == length);
-  for (int i = 0; i < length; i++) {
+void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
+                                                   SmallMapList* types) {
+  Handle<Object> object = GetInfo(ast_id);
+  if (!object->IsCode()) return;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (code->kind() == Code::KEYED_LOAD_IC ||
+      code->kind() == Code::KEYED_STORE_IC) {
     AssertNoAllocation no_allocation;
-    RelocInfo info(code->instruction_start() + code_positions[i],
-                   RelocInfo::CODE_TARGET, 0);
-    Code* target = Code::GetCodeFromTargetAddress(info.target_address());
-    int position = source_positions[i];
-    InlineCacheState state = target->ic_state();
-    Code::Kind kind = target->kind();
-
-    if (kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
-        kind == Code::COMPARE_IC) {
-      // TODO(kasperl): Avoid having multiple ICs with the same
-      // position by making sure that we have position information
-      // recorded for all binary ICs.
-      int entry = dictionary_->FindEntry(position);
-      if (entry == NumberDictionary::kNotFound) {
-        SetInfo(position, target);
+    int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+    for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+      RelocInfo* info = it.rinfo();
+      Object* object = info->target_object();
+      if (object->IsMap()) {
+        types->Add(Handle<Map>(Map::cast(object)));
       }
-    } else if (state == MONOMORPHIC) {
-      if (kind == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC ||
-          kind == Code::KEYED_EXTERNAL_ARRAY_STORE_IC) {
-        SetInfo(position, target);
-      } else if (target->kind() != Code::CALL_IC ||
-          target->check_type() == RECEIVER_MAP_CHECK) {
-        Map* map = target->FindFirstMap();
-        if (map == NULL) {
-          SetInfo(position, target);
-        } else {
-          SetInfo(position, map);
-        }
-      } else {
-        ASSERT(target->kind() == Code::CALL_IC);
-        CheckType check = target->check_type();
-        ASSERT(check != RECEIVER_MAP_CHECK);
-        SetInfo(position, Smi::FromInt(check));
-      }
-    } else if (state == MEGAMORPHIC) {
-      SetInfo(position, target);
     }
   }
+}
+
+
+byte TypeFeedbackOracle::ToBooleanTypes(unsigned ast_id) {
+  Handle<Object> object = GetInfo(ast_id);
+  return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
+}
+
+
+// Things are a bit tricky here: The iterator for the RelocInfos and the infos
+// themselves are not GC-safe, so we first get all infos, then we create the
+// dictionary (possibly triggering GC), and finally we relocate the collected
+// infos before we process them.
+void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
+  AssertNoAllocation no_allocation;
+  ZoneList<RelocInfo> infos(16);
+  HandleScope scope;
+  GetRelocInfos(code, &infos);
+  CreateDictionary(code, &infos);
+  ProcessRelocInfos(&infos);
   // Allocate handle in the parent scope.
   dictionary_ = scope.CloseAndEscape(dictionary_);
 }
 
 
-void TypeFeedbackOracle::CollectPositions(Code* code,
-                                          List<int>* code_positions,
-                                          List<int>* source_positions) {
-  AssertNoAllocation no_allocation;
-  int position = 0;
-  // Because the ICs we use for global variables access in the full
-  // code generator do not have any meaningful positions, we avoid
-  // collecting those by filtering out contextual code targets.
-  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
-      RelocInfo::kPositionMask;
-  for (RelocIterator it(code, mask); !it.done(); it.next()) {
-    RelocInfo* info = it.rinfo();
-    RelocInfo::Mode mode = info->rmode();
-    if (RelocInfo::IsCodeTarget(mode)) {
-      Code* target = Code::GetCodeFromTargetAddress(info->target_address());
-      if (target->is_inline_cache_stub()) {
-        InlineCacheState state = target->ic_state();
-        Code::Kind kind = target->kind();
-        if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
-          if (target->type_recording_binary_op_type() ==
-              TRBinaryOpIC::GENERIC) {
-            continue;
-          }
-        } else if (kind == Code::COMPARE_IC) {
-          if (target->compare_state() == CompareIC::GENERIC) continue;
-        } else {
-          if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
-        }
-        code_positions->Add(
-            static_cast<int>(info->pc() - code->instruction_start()));
-        source_positions->Add(position);
-      }
-    } else {
-      ASSERT(RelocInfo::IsPosition(mode));
-      position = static_cast<int>(info->data());
-    }
+void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
+                                       ZoneList<RelocInfo>* infos) {
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
+  for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+    infos->Add(*it.rinfo());
   }
 }
 
+
+void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
+                                          ZoneList<RelocInfo>* infos) {
+  DisableAssertNoAllocation allocation_allowed;
+  byte* old_start = code->instruction_start();
+  dictionary_ = FACTORY->NewUnseededNumberDictionary(infos->length());
+  byte* new_start = code->instruction_start();
+  RelocateRelocInfos(infos, old_start, new_start);
+}
+
+
+void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
+                                            byte* old_start,
+                                            byte* new_start) {
+  for (int i = 0; i < infos->length(); i++) {
+    RelocInfo* info = &(*infos)[i];
+    info->set_pc(new_start + (info->pc() - old_start));
+  }
+}
+
+
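CreateDictionary()/RelocateRelocInfos() above deal with the fact that allocating the dictionary may trigger a GC that moves the code object: the instruction start is recorded before the allocation, and every collected pc is rebased onto the new start afterwards. A minimal sketch of that offset-preserving rebase (containers and field names are illustrative):

#include <cstdint>
#include <vector>

struct RecordedInfo {
  std::uint8_t* pc;  // absolute address into an instruction buffer
};

void Rebase(std::vector<RecordedInfo>* infos,
            std::uint8_t* old_start,
            std::uint8_t* new_start) {
  for (RecordedInfo& info : *infos) {
    info.pc = new_start + (info.pc - old_start);  // same offset, new base
  }
}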
+void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
+  for (int i = 0; i < infos->length(); i++) {
+    unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
+    Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
+    ProcessTarget(ast_id, target);
+  }
+}
+
+
+void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
+  switch (target->kind()) {
+    case Code::LOAD_IC:
+    case Code::STORE_IC:
+    case Code::CALL_IC:
+    case Code::KEYED_CALL_IC:
+      if (target->ic_state() == MONOMORPHIC) {
+        if (target->kind() == Code::CALL_IC &&
+            target->check_type() != RECEIVER_MAP_CHECK) {
+          SetInfo(ast_id,  Smi::FromInt(target->check_type()));
+        } else {
+          Object* map = target->FindFirstMap();
+          SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
+        }
+      } else if (target->ic_state() == MEGAMORPHIC) {
+        SetInfo(ast_id, target);
+      }
+      break;
+
+    case Code::KEYED_LOAD_IC:
+    case Code::KEYED_STORE_IC:
+      if (target->ic_state() == MONOMORPHIC ||
+          target->ic_state() == MEGAMORPHIC) {
+        SetInfo(ast_id, target);
+      }
+      break;
+
+    case Code::UNARY_OP_IC:
+    case Code::BINARY_OP_IC:
+    case Code::COMPARE_IC:
+    case Code::TO_BOOLEAN_IC:
+      SetInfo(ast_id, target);
+      break;
+
+    default:
+      break;
+  }
+}
+
+
+void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
+  ASSERT(dictionary_->FindEntry(ast_id) == UnseededNumberDictionary::kNotFound);
+  MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
+  USE(maybe_result);
+#ifdef DEBUG
+  Object* result = NULL;
+  // Dictionary has been allocated with sufficient size for all elements.
+  ASSERT(maybe_result->ToObject(&result));
+  ASSERT(*dictionary_ == result);
+#endif
+}
+
 } }  // namespace v8::internal
diff --git a/src/type-info.h b/src/type-info.h
index f6e6729..a031740 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -28,26 +28,28 @@
 #ifndef V8_TYPE_INFO_H_
 #define V8_TYPE_INFO_H_
 
+#include "allocation.h"
 #include "globals.h"
-#include "zone.h"
 #include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
+const int kMaxKeyedPolymorphism = 4;
+
 //         Unknown
-//           |   |
-//           |   \--------------|
-//      Primitive             Non-primitive
-//           |   \--------|     |
-//         Number      String   |
-//         /    |         |     |
-//    Double  Integer32   |    /
-//        |      |       /    /
-//        |     Smi     /    /
-//        |      |     /    /
-//        |      |    /    /
-//        Uninitialized.--/
+//           |   \____________
+//           |                |
+//      Primitive       Non-primitive
+//           |   \_______     |
+//           |           |    |
+//        Number       String |
+//         /   \         |    |
+//    Double  Integer32  |   /
+//        |      |      /   /
+//        |     Smi    /   /
+//        |      |    / __/
+//        Uninitialized.
 
 class TypeInfo {
  public:
@@ -71,32 +73,6 @@
   // We haven't started collecting info yet.
   static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
 
-  // Return compact representation.  Very sensitive to enum values below!
-  // Compacting drops information about primitive types and strings types.
-  // We use the compact representation when we only care about number types.
-  int ThreeBitRepresentation() {
-    ASSERT(type_ != kUninitialized);
-    int answer = type_ & 0xf;
-    answer = answer > 6 ? answer - 2 : answer;
-    ASSERT(answer >= 0);
-    ASSERT(answer <= 7);
-    return answer;
-  }
-
-  // Decode compact representation.  Very sensitive to enum values below!
-  static TypeInfo ExpandedRepresentation(int three_bit_representation) {
-    Type t = static_cast<Type>(three_bit_representation > 4 ?
-                               three_bit_representation + 2 :
-                               three_bit_representation);
-    t = (t == kUnknown) ? t : static_cast<Type>(t | kPrimitive);
-    ASSERT(t == kUnknown ||
-           t == kNumber ||
-           t == kInteger32 ||
-           t == kSmi ||
-           t == kDouble);
-    return TypeInfo(t);
-  }
-
   int ToInt() {
     return type_;
   }
@@ -229,58 +205,82 @@
 class Assignment;
 class BinaryOperation;
 class Call;
+class CaseClause;
 class CompareOperation;
 class CompilationInfo;
+class CountOperation;
 class Property;
-class CaseClause;
+class SmallMapList;
+class UnaryOperation;
+
 
 class TypeFeedbackOracle BASE_EMBEDDED {
  public:
   TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
 
-  bool LoadIsMonomorphic(Property* expr);
-  bool StoreIsMonomorphic(Expression* expr);
+  bool LoadIsMonomorphicNormal(Property* expr);
+  bool LoadIsMegamorphicWithTypeInfo(Property* expr);
+  bool StoreIsMonomorphicNormal(Expression* expr);
+  bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
   bool CallIsMonomorphic(Call* expr);
 
   Handle<Map> LoadMonomorphicReceiverType(Property* expr);
   Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
 
-  ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
-  ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
-  ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
-
-  ExternalArrayType GetKeyedLoadExternalArrayType(Property* expr);
-  ExternalArrayType GetKeyedStoreExternalArrayType(Expression* expr);
+  void LoadReceiverTypes(Property* expr,
+                         Handle<String> name,
+                         SmallMapList* types);
+  void StoreReceiverTypes(Assignment* expr,
+                          Handle<String> name,
+                          SmallMapList* types);
+  void CallReceiverTypes(Call* expr,
+                         Handle<String> name,
+                         CallKind call_kind,
+                         SmallMapList* types);
+  void CollectKeyedReceiverTypes(unsigned ast_id,
+                                 SmallMapList* types);
 
   CheckType GetCallCheckType(Call* expr);
   Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
 
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
+  // TODO(1571) We can't use ToBooleanStub::Types as the return value because
+  // of various cycles in our headers. Death to tons of implementations in
+  // headers!! :-P
+  byte ToBooleanTypes(unsigned ast_id);
+
   // Get type information for arithmetic operations and compares.
+  TypeInfo UnaryType(UnaryOperation* expr);
   TypeInfo BinaryType(BinaryOperation* expr);
   TypeInfo CompareType(CompareOperation* expr);
+  bool IsSymbolCompare(CompareOperation* expr);
   TypeInfo SwitchType(CaseClause* clause);
+  TypeInfo IncrementType(CountOperation* expr);
 
  private:
-  ZoneMapList* CollectReceiverTypes(int position,
-                                    Handle<String> name,
-                                    Code::Flags flags);
+  void CollectReceiverTypes(unsigned ast_id,
+                            Handle<String> name,
+                            Code::Flags flags,
+                            SmallMapList* types);
 
-  void SetInfo(int position, Object* target);
+  void SetInfo(unsigned ast_id, Object* target);
 
-  void PopulateMap(Handle<Code> code);
-
-  void CollectPositions(Code* code,
-                        List<int>* code_positions,
-                        List<int>* source_positions);
+  void BuildDictionary(Handle<Code> code);
+  void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
+  void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
+  void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
+                          byte* old_start,
+                          byte* new_start);
+  void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
+  void ProcessTarget(unsigned ast_id, Code* target);
 
   // Returns an element from the backing store. Returns undefined if
   // there is no information.
-  Handle<Object> GetInfo(int pos);
+  Handle<Object> GetInfo(unsigned ast_id);
 
   Handle<Context> global_context_;
-  Handle<NumberDictionary> dictionary_;
+  Handle<UnseededNumberDictionary> dictionary_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
 };
diff --git a/src/unbound-queue.h b/src/unbound-queue.h
index 443d5ce..59a426b 100644
--- a/src/unbound-queue.h
+++ b/src/unbound-queue.h
@@ -28,6 +28,8 @@
 #ifndef V8_UNBOUND_QUEUE_
 #define V8_UNBOUND_QUEUE_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/uri.js b/src/uri.js
index e94b3fe..c910d75 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -166,7 +166,10 @@
 // ECMA-262, section 15.1.3
 function Encode(uri, unescape) {
   var uriLength = uri.length;
-  var result = new $Array(uriLength);
+  // We are going to pass result to %StringFromCharCodeArray
+  // which does not expect any getters/setters installed
+  // on the incoming array.
+  var result = new InternalArray(uriLength);
   var index = 0;
   for (var k = 0; k < uriLength; k++) {
     var cc1 = uri.charCodeAt(k);
@@ -192,7 +195,10 @@
 // ECMA-262, section 15.1.3
 function Decode(uri, reserved) {
   var uriLength = uri.length;
-  var result = new $Array(uriLength);
+  // We are going to pass result to %StringFromCharCodeArray
+  // which does not expect any getters/setters installed
+  // on the incoming array.
+  var result = new InternalArray(uriLength);
   var index = 0;
   for (var k = 0; k < uriLength; k++) {
     var ch = uri.charAt(k);
@@ -386,8 +392,9 @@
 
 // -------------------------------------------------------------------
 
-function SetupURI() {
-  // Setup non-enumerable URI functions on the global object and set
+function SetUpUri() {
+  %CheckIsBootstrapping();
+  // Set up non-enumerable URI functions on the global object and set
   // their names.
   InstallFunctions(global, DONT_ENUM, $Array(
     "escape", URIEscape,
@@ -399,4 +406,4 @@
   ));
 }
 
-SetupURI();
+SetUpUri();
diff --git a/src/frame-element.cc b/src/utils-inl.h
similarity index 79%
copy from src/frame-element.cc
copy to src/utils-inl.h
index f629900..76a3c10 100644
--- a/src/frame-element.cc
+++ b/src/utils-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_UTILS_INL_H_
+#define V8_UTILS_INL_H_
 
-#include "frame-element.h"
-#include "zone-inl.h"
+#include "list-inl.h"
 
 namespace v8 {
 namespace internal {
 
+template<typename T, int growth_factor, int max_growth>
+void Collector<T, growth_factor, max_growth>::Reset() {
+  for (int i = chunks_.length() - 1; i >= 0; i--) {
+    chunks_.at(i).Dispose();
+  }
+  chunks_.Rewind(0);
+  index_ = 0;
+  size_ = 0;
+}
 
 } }  // namespace v8::internal
+
+#endif  // V8_UTILS_INL_H_
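For context on the Collector::Reset() template just added: each backing chunk is disposed in reverse order and the bookkeeping is zeroed, leaving the collector empty but reusable. A standalone analogue built on standard containers (a sketch, not the V8 Collector):

#include <vector>

template <typename T>
class ChunkedCollector {
 public:
  void Reset() {
    // Dispose chunks newest-first, then drop the chunk list itself.
    for (auto it = chunks_.rbegin(); it != chunks_.rend(); ++it) {
      it->clear();
      it->shrink_to_fit();  // actually release the chunk's memory
    }
    chunks_.clear();
    index_ = 0;
    size_ = 0;
  }

 private:
  std::vector<std::vector<T>> chunks_;
  int index_ = 0;  // write position within the current chunk
  int size_ = 0;   // total number of collected elements
};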
diff --git a/src/utils.cc b/src/utils.cc
index b466301..89ef4c6 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,211 +26,26 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdarg.h>
-
-#include "v8.h"
-
-#include "platform.h"
-
-#include "sys/stat.h"
+#include "../include/v8stdint.h"
+#include "checks.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
 
-void PrintF(const char* format, ...) {
-  va_list arguments;
-  va_start(arguments, format);
-  OS::VPrint(format, arguments);
-  va_end(arguments);
-}
-
-
-void PrintF(FILE* out, const char* format, ...) {
-  va_list arguments;
-  va_start(arguments, format);
-  OS::VFPrint(out, format, arguments);
-  va_end(arguments);
-}
-
-
-void Flush(FILE* out) {
-  fflush(out);
-}
-
-
-char* ReadLine(const char* prompt) {
-  char* result = NULL;
-  char line_buf[256];
-  int offset = 0;
-  bool keep_going = true;
-  fprintf(stdout, "%s", prompt);
-  fflush(stdout);
-  while (keep_going) {
-    if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
-      // fgets got an error. Just give up.
-      if (result != NULL) {
-        DeleteArray(result);
-      }
-      return NULL;
-    }
-    int len = StrLength(line_buf);
-    if (len > 1 &&
-        line_buf[len - 2] == '\\' &&
-        line_buf[len - 1] == '\n') {
-      // When we read a line that ends with a "\" we remove the escape and
-      // append the remainder.
-      line_buf[len - 2] = '\n';
-      line_buf[len - 1] = 0;
-      len -= 1;
-    } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
-      // Since we read a new line we are done reading the line. This
-      // will exit the loop after copying this buffer into the result.
-      keep_going = false;
-    }
-    if (result == NULL) {
-      // Allocate the initial result and make room for the terminating '\0'
-      result = NewArray<char>(len + 1);
-    } else {
-      // Allocate a new result with enough room for the new addition.
-      int new_len = offset + len + 1;
-      char* new_result = NewArray<char>(new_len);
-      // Copy the existing input into the new array and set the new
-      // array as the result.
-      memcpy(new_result, result, offset * kCharSize);
-      DeleteArray(result);
-      result = new_result;
-    }
-    // Copy the newly read line into the result.
-    memcpy(result + offset, line_buf, len * kCharSize);
-    offset += len;
-  }
-  ASSERT(result != NULL);
-  result[offset] = '\0';
-  return result;
-}
-
-
-char* ReadCharsFromFile(const char* filename,
-                        int* size,
-                        int extra_space,
-                        bool verbose) {
-  FILE* file = OS::FOpen(filename, "rb");
-  if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
-    if (verbose) {
-      OS::PrintError("Cannot read from file %s.\n", filename);
-    }
-    return NULL;
-  }
-
-  // Get the size of the file and rewind it.
-  *size = ftell(file);
-  rewind(file);
-
-  char* result = NewArray<char>(*size + extra_space);
-  for (int i = 0; i < *size;) {
-    int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
-    if (read <= 0) {
-      fclose(file);
-      DeleteArray(result);
-      return NULL;
-    }
-    i += read;
-  }
-  fclose(file);
-  return result;
-}
-
-
-byte* ReadBytes(const char* filename, int* size, bool verbose) {
-  char* chars = ReadCharsFromFile(filename, size, 0, verbose);
-  return reinterpret_cast<byte*>(chars);
-}
-
-
-Vector<const char> ReadFile(const char* filename,
-                            bool* exists,
-                            bool verbose) {
-  int size;
-  char* result = ReadCharsFromFile(filename, &size, 1, verbose);
-  if (!result) {
-    *exists = false;
-    return Vector<const char>::empty();
-  }
-  result[size] = '\0';
-  *exists = true;
-  return Vector<const char>(result, size);
-}
-
-
-int WriteCharsToFile(const char* str, int size, FILE* f) {
-  int total = 0;
-  while (total < size) {
-    int write = static_cast<int>(fwrite(str, 1, size - total, f));
-    if (write == 0) {
-      return total;
-    }
-    total += write;
-    str += write;
-  }
-  return total;
-}
-
-
-int AppendChars(const char* filename,
-                const char* str,
-                int size,
-                bool verbose) {
-  FILE* f = OS::FOpen(filename, "ab");
-  if (f == NULL) {
-    if (verbose) {
-      OS::PrintError("Cannot open file %s for writing.\n", filename);
-    }
-    return 0;
-  }
-  int written = WriteCharsToFile(str, size, f);
-  fclose(f);
-  return written;
-}
-
-
-int WriteChars(const char* filename,
-               const char* str,
-               int size,
-               bool verbose) {
-  FILE* f = OS::FOpen(filename, "wb");
-  if (f == NULL) {
-    if (verbose) {
-      OS::PrintError("Cannot open file %s for writing.\n", filename);
-    }
-    return 0;
-  }
-  int written = WriteCharsToFile(str, size, f);
-  fclose(f);
-  return written;
-}
-
-
-int WriteBytes(const char* filename,
-               const byte* bytes,
-               int size,
-               bool verbose) {
-  const char* str = reinterpret_cast<const char*>(bytes);
-  return WriteChars(filename, str, size, verbose);
-}
-
-
-StringBuilder::StringBuilder(int size) {
+SimpleStringBuilder::SimpleStringBuilder(int size) {
   buffer_ = Vector<char>::New(size);
   position_ = 0;
 }
 
 
-void StringBuilder::AddString(const char* s) {
+void SimpleStringBuilder::AddString(const char* s) {
   AddSubstring(s, StrLength(s));
 }
 
 
-void StringBuilder::AddSubstring(const char* s, int n) {
+void SimpleStringBuilder::AddSubstring(const char* s, int n) {
   ASSERT(!is_finalized() && position_ + n < buffer_.length());
   ASSERT(static_cast<size_t>(n) <= strlen(s));
   memcpy(&buffer_[position_], s, n * kCharSize);
@@ -238,33 +53,32 @@
 }
 
 
-void StringBuilder::AddFormatted(const char* format, ...) {
-  va_list arguments;
-  va_start(arguments, format);
-  AddFormattedList(format, arguments);
-  va_end(arguments);
-}
-
-
-void StringBuilder::AddFormattedList(const char* format, va_list list) {
-  ASSERT(!is_finalized() && position_ < buffer_.length());
-  int n = OS::VSNPrintF(buffer_ + position_, format, list);
-  if (n < 0 || n >= (buffer_.length() - position_)) {
-    position_ = buffer_.length();
-  } else {
-    position_ += n;
-  }
-}
-
-
-void StringBuilder::AddPadding(char c, int count) {
+void SimpleStringBuilder::AddPadding(char c, int count) {
   for (int i = 0; i < count; i++) {
     AddCharacter(c);
   }
 }
 
 
-char* StringBuilder::Finalize() {
+void SimpleStringBuilder::AddDecimalInteger(int32_t value) {
+  uint32_t number = static_cast<uint32_t>(value);
+  if (value < 0) {
+    AddCharacter('-');
+    number = static_cast<uint32_t>(-value);
+  }
+  int digits = 1;
+  for (uint32_t factor = 10; digits < 10; digits++, factor *= 10) {
+    if (factor > number) break;
+  }
+  position_ += digits;
+  for (int i = 1; i <= digits; i++) {
+    buffer_[position_ - i] = '0' + static_cast<char>(number % 10);
+    number /= 10;
+  }
+}
+
+
+char* SimpleStringBuilder::Finalize() {
   ASSERT(!is_finalized() && position_ < buffer_.length());
   buffer_[position_] = '\0';
   // Make sure nobody managed to add a 0-character to the
@@ -275,97 +89,4 @@
   return buffer_.start();
 }
 
-
-MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
-    : filename_(NULL),
-      data_(NULL),
-      length_(0),
-      remove_file_on_cleanup_(false) {
-  Init(filename);
-}
-
-
-MemoryMappedExternalResource::
-    MemoryMappedExternalResource(const char* filename,
-                                 bool remove_file_on_cleanup)
-    : filename_(NULL),
-      data_(NULL),
-      length_(0),
-      remove_file_on_cleanup_(remove_file_on_cleanup) {
-  Init(filename);
-}
-
-
-MemoryMappedExternalResource::~MemoryMappedExternalResource() {
-  // Release the resources if we had successfully acquired them:
-  if (file_ != NULL) {
-    delete file_;
-    if (remove_file_on_cleanup_) {
-      OS::Remove(filename_);
-    }
-    DeleteArray<char>(filename_);
-  }
-}
-
-
-void MemoryMappedExternalResource::Init(const char* filename) {
-  file_ = OS::MemoryMappedFile::open(filename);
-  if (file_ != NULL) {
-    filename_ = StrDup(filename);
-    data_ = reinterpret_cast<char*>(file_->memory());
-    length_ = file_->size();
-  }
-}
-
-
-bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
-  bool is_ascii = true;
-
-  int line_no = 1;
-  const char* start_of_line = data_;
-  const char* end = data_ + length_;
-  for (const char* p = data_; p < end; p++) {
-    char c = *p;
-    if ((c & 0x80) != 0) {
-      // Non-ascii detected:
-      is_ascii = false;
-
-      // Report the error and abort if appropriate:
-      if (abort_if_failed) {
-        int char_no = static_cast<int>(p - start_of_line) - 1;
-
-        ASSERT(filename_ != NULL);
-        PrintF("\n\n\n"
-               "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
-               c, filename_, line_no, char_no);
-
-        // Allow for some context up to kNumberOfLeadingContextChars chars
-        // before the offending non-ascii char to help the user see where
-        // the offending char is.
-        const int kNumberOfLeadingContextChars = 10;
-        const char* err_context = p - kNumberOfLeadingContextChars;
-        if (err_context < data_) {
-          err_context = data_;
-        }
-        // Compute the length of the error context and print it.
-        int err_context_length = static_cast<int>(p - err_context);
-        if (err_context_length != 0) {
-          PrintF(" after \"%.*s\"", err_context_length, err_context);
-        }
-        PrintF(".\n\n\n");
-        OS::Abort();
-      }
-
-      break;  // Non-ascii detected.  No need to continue scanning.
-    }
-    if (c == '\n') {
-      start_of_line = p;
-      line_no++;
-    }
-  }
-
-  return is_ascii;
-}
-
-
 } }  // namespace v8::internal
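
The new SimpleStringBuilder::AddDecimalInteger above first counts how many decimal digits the value needs, then writes them into the buffer back to front. A minimal self-contained sketch of the same count-then-fill technique, writing into a plain char array instead of the builder's Vector<char> (the helper name is illustrative, not part of V8):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Writes the decimal representation of |value| into |out| (no terminator) and
// returns the number of characters written, mirroring the count-then-fill
// approach of SimpleStringBuilder::AddDecimalInteger.
static int WriteDecimal(int32_t value, char* out) {
  int pos = 0;
  uint32_t number = static_cast<uint32_t>(value);
  if (value < 0) {
    out[pos++] = '-';
    number = static_cast<uint32_t>(-value);  // like the original, INT32_MIN is not handled
  }
  // Count the decimal digits first (a 32-bit value has at most 10).
  int digits = 1;
  for (uint32_t factor = 10; digits < 10; digits++, factor *= 10) {
    if (factor > number) break;
  }
  // Fill the digits back to front.
  pos += digits;
  for (int i = 1; i <= digits; i++) {
    out[pos - i] = '0' + static_cast<char>(number % 10);
    number /= 10;
  }
  return pos;
}

int main() {
  char buffer[16];
  int n = WriteDecimal(-4096, buffer);
  buffer[n] = '\0';
  assert(strcmp(buffer, "-4096") == 0);
  printf("%s\n", buffer);
  return 0;
}
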
diff --git a/src/utils.h b/src/utils.h
index b89f284..cf7819e 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include <stdlib.h>
 #include <string.h>
+#include <climits>
 
 #include "globals.h"
 #include "checks.h"
@@ -52,11 +53,9 @@
 
 
 // X must be a power of 2.  Returns the number of trailing zeros.
-template <typename T>
-static inline int WhichPowerOf2(T x) {
+static inline int WhichPowerOf2(uint32_t x) {
   ASSERT(IsPowerOf2(x));
   ASSERT(x != 0);
-  if (x < 0) return 31;
   int bits = 0;
 #ifdef DEBUG
   int original_x = x;
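
WhichPowerOf2 is now restricted to uint32_t, so the old `if (x < 0) return 31;` escape hatch disappears. For reference, a standalone sketch of the trailing-zero count it performs on a power of two (an illustrative helper, not the V8 function itself):

#include <cassert>
#include <cstdint>

// Returns the number of trailing zeros of x, which must be a power of 2.
static int TrailingZerosOfPowerOf2(uint32_t x) {
  assert(x != 0 && (x & (x - 1)) == 0);  // IsPowerOf2(x)
  int bits = 0;
  while ((x & 1) == 0) {
    x >>= 1;
    bits++;
  }
  return bits;
}

int main() {
  assert(TrailingZerosOfPowerOf2(1u) == 0);
  assert(TrailingZerosOfPowerOf2(1u << 20) == 20);
  return 0;
}
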
@@ -204,16 +203,17 @@
 template<class T, int shift, int size>
 class BitField {
  public:
+  // A uint32_t mask of the bit field.  To use all bits of a uint32 in a
+  // bitfield without compiler warnings we have to compute 2^32 without
+  // using a shift count of 32.
+  static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
+
+  // Value for the field with all bits set.
+  static const T kMax = static_cast<T>((1U << size) - 1);
+
   // Tells whether the provided value fits into the bit field.
   static bool is_valid(T value) {
-    return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
-  }
-
-  // Returns a uint32_t mask of bit field.
-  static uint32_t mask() {
-    // To use all bits of a uint32 in a bitfield without compiler warnings we
-    // have to compute 2^32 without using a shift count of 32.
-    return ((1U << shift) << size) - (1U << shift);
+    return (static_cast<uint32_t>(value) & ~static_cast<uint32_t>(kMax)) == 0;
   }
 
   // Returns a uint32_t with the bit field value encoded.
@@ -222,14 +222,14 @@
     return static_cast<uint32_t>(value) << shift;
   }
 
-  // Extracts the bit field from the value.
-  static T decode(uint32_t value) {
-    return static_cast<T>((value & mask()) >> shift);
+  // Returns a uint32_t with the bit field value updated.
+  static uint32_t update(uint32_t previous, T value) {
+    return (previous & ~kMask) | encode(value);
   }
 
-  // Value for the field with all bits set.
-  static T max() {
-    return decode(mask());
+  // Extracts the bit field from the value.
+  static T decode(uint32_t value) {
+    return static_cast<T>((value & kMask) >> shift);
   }
 };
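
BitField's mask and maximum value become compile-time constants (kMask, kMax), and the new update() rewrites a single field inside an existing word without disturbing the other bits. A minimal standalone template in the same spirit (a simplified sketch, not the V8 class):

#include <cassert>
#include <cstdint>

// Encodes a value of type T into bits [shift, shift + size) of a uint32_t.
template <class T, int shift, int size>
struct SimpleBitField {
  // 2^32 cannot be formed with a shift count of 32, hence the two-step shift.
  static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
  static const uint32_t kMax = (1U << size) - 1;

  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~kMax) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
  // Replace just this field inside an existing word.
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};

typedef SimpleBitField<unsigned, 4, 3> Flags;  // occupies bits 4..6

int main() {
  uint32_t word = Flags::encode(5u);
  assert(Flags::decode(word) == 5u);
  word = Flags::update(word | 0x1, 2u);  // keep bit 0, rewrite only the field
  assert(Flags::decode(word) == 2u && (word & 1u) == 1u);
  return 0;
}
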
 
@@ -237,10 +237,13 @@
 // ----------------------------------------------------------------------------
 // Hash function.
 
+static const uint32_t kZeroHashSeed = 0;
+
 // Thomas Wang, Integer Hash Functions.
 // http://www.concentric.net/~Ttwang/tech/inthash.htm
-static inline uint32_t ComputeIntegerHash(uint32_t key) {
+static inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
   uint32_t hash = key;
+  hash = hash ^ seed;
   hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
   hash = hash ^ (hash >> 12);
   hash = hash + (hash << 2);
@@ -251,6 +254,13 @@
 }
 
 
+static inline uint32_t ComputePointerHash(void* ptr) {
+  return ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
+      v8::internal::kZeroHashSeed);
+}
+
+
 // ----------------------------------------------------------------------------
 // Miscellaneous
 
@@ -487,9 +497,6 @@
  public:
   explicit Collector(int initial_capacity = kMinCapacity)
       : index_(0), size_(0) {
-    if (initial_capacity < kMinCapacity) {
-      initial_capacity = kMinCapacity;
-    }
     current_chunk_ = Vector<T>::New(initial_capacity);
   }
 
@@ -576,14 +583,7 @@
   }
 
   // Resets the collector to be empty.
-  virtual void Reset() {
-    for (int i = chunks_.length() - 1; i >= 0; i--) {
-      chunks_.at(i).Dispose();
-    }
-    chunks_.Rewind(0);
-    index_ = 0;
-    size_ = 0;
-  }
+  virtual void Reset();
 
   // Total number of elements added to collector so far.
   inline int size() { return size_; }
@@ -598,25 +598,23 @@
   // Creates a new current chunk, and stores the old chunk in the chunks_ list.
   void Grow(int min_capacity) {
     ASSERT(growth_factor > 1);
-    int growth = current_chunk_.length() * (growth_factor - 1);
-    if (growth > max_growth) {
-      growth = max_growth;
-    }
-    int new_capacity = current_chunk_.length() + growth;
-    if (new_capacity < min_capacity) {
-      new_capacity = min_capacity + growth;
-    }
-    Vector<T> new_chunk = Vector<T>::New(new_capacity);
-    int new_index = PrepareGrow(new_chunk);
-    if (index_ > 0) {
-      chunks_.Add(current_chunk_.SubVector(0, index_));
+    int new_capacity;
+    int current_length = current_chunk_.length();
+    if (current_length < kMinCapacity) {
+      // The collector started out as empty.
+      new_capacity = min_capacity * growth_factor;
+      if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
     } else {
-      // Can happen if the call to PrepareGrow moves everything into
-      // the new chunk.
-      current_chunk_.Dispose();
+      int growth = current_length * (growth_factor - 1);
+      if (growth > max_growth) {
+        growth = max_growth;
+      }
+      new_capacity = current_length + growth;
+      if (new_capacity < min_capacity) {
+        new_capacity = min_capacity + growth;
+      }
     }
-    current_chunk_ = new_chunk;
-    index_ = new_index;
+    NewChunk(new_capacity);
     ASSERT(index_ + min_capacity <= current_chunk_.length());
   }
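
Grow now only computes the new capacity and hands chunk creation to NewChunk, which SequenceCollector can override. The capacity rule itself can be read in isolation; the sketch below mirrors it with illustrative parameter values:

#include <cassert>

// Mirrors the capacity rule in Collector::Grow: scale the current chunk by
// growth_factor, cap the increment at max_growth, and always leave room for
// min_capacity new elements. min_chunk_capacity stands in for the class
// constant kMinCapacity.
static int NextCapacity(int current_length, int min_capacity,
                        int growth_factor, int max_growth,
                        int min_chunk_capacity) {
  if (current_length < min_chunk_capacity) {
    // The collector started out (nearly) empty.
    int new_capacity = min_capacity * growth_factor;
    return new_capacity < min_chunk_capacity ? min_chunk_capacity
                                             : new_capacity;
  }
  int growth = current_length * (growth_factor - 1);
  if (growth > max_growth) growth = max_growth;
  int new_capacity = current_length + growth;
  if (new_capacity < min_capacity) new_capacity = min_capacity + growth;
  return new_capacity;
}

int main() {
  // Illustrative values: growth_factor 2, max_growth 1024, kMinCapacity 16.
  assert(NextCapacity(16, 1, 2, 1024, 16) == 32);      // double a small chunk
  assert(NextCapacity(4096, 1, 2, 1024, 16) == 5120);  // increment capped
  return 0;
}
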
 
@@ -624,8 +622,15 @@
   // some of the current data into the new chunk. The function may update
   // the current index_ value to represent data no longer in the current chunk.
   // Returns the initial index of the new chunk (after copied data).
-  virtual int PrepareGrow(Vector<T> new_chunk)  {
-    return 0;
+  virtual void NewChunk(int new_capacity) {
+    Vector<T> new_chunk = Vector<T>::New(new_capacity);
+    if (index_ > 0) {
+      chunks_.Add(current_chunk_.SubVector(0, index_));
+    } else {
+      current_chunk_.Dispose();
+    }
+    current_chunk_ = new_chunk;
+    index_ = 0;
   }
 };
 
@@ -680,20 +685,26 @@
   int sequence_start_;
 
   // Move the currently active sequence to the new chunk.
-  virtual int PrepareGrow(Vector<T> new_chunk) {
-    if (sequence_start_ != kNoSequence) {
-      int sequence_length = this->index_ - sequence_start_;
-      // The new chunk is always larger than the current chunk, so there
-      // is room for the copy.
-      ASSERT(sequence_length < new_chunk.length());
-      for (int i = 0; i < sequence_length; i++) {
-        new_chunk[i] = this->current_chunk_[sequence_start_ + i];
-      }
-      this->index_ = sequence_start_;
-      sequence_start_ = 0;
-      return sequence_length;
+  virtual void NewChunk(int new_capacity) {
+    if (sequence_start_ == kNoSequence) {
+      // Fall back on default behavior if no sequence has been started.
+      this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
+      return;
     }
-    return 0;
+    int sequence_length = this->index_ - sequence_start_;
+    Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
+    ASSERT(sequence_length < new_chunk.length());
+    for (int i = 0; i < sequence_length; i++) {
+      new_chunk[i] = this->current_chunk_[sequence_start_ + i];
+    }
+    if (sequence_start_ > 0) {
+      this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
+    } else {
+      this->current_chunk_.Dispose();
+    }
+    this->current_chunk_ = new_chunk;
+    this->index_ = sequence_length;
+    sequence_start_ = 0;
   }
 };
 
@@ -791,6 +802,123 @@
   return BitCastHelper<Dest, Source>::cast(source);
 }
 
+
+template<typename ElementType, int NumElements>
+class EmbeddedContainer {
+ public:
+  EmbeddedContainer() : elems_() { }
+
+  int length() { return NumElements; }
+  ElementType& operator[](int i) {
+    ASSERT(i < length());
+    return elems_[i];
+  }
+
+ private:
+  ElementType elems_[NumElements];
+};
+
+
+template<typename ElementType>
+class EmbeddedContainer<ElementType, 0> {
+ public:
+  int length() { return 0; }
+  ElementType& operator[](int i) {
+    UNREACHABLE();
+    static ElementType t = 0;
+    return t;
+  }
+};
+
+
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that check the
+// buffer bounds on all operations in debug mode.
+// This simple base class does not allow formatted output.
+class SimpleStringBuilder {
+ public:
+  // Create a string builder with a buffer of the given size. The
+  // buffer is allocated through NewArray<char> and must be
+  // deallocated by the caller of Finalize().
+  explicit SimpleStringBuilder(int size);
+
+  SimpleStringBuilder(char* buffer, int size)
+      : buffer_(buffer, size), position_(0) { }
+
+  ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }
+
+  int size() const { return buffer_.length(); }
+
+  // Get the current position in the builder.
+  int position() const {
+    ASSERT(!is_finalized());
+    return position_;
+  }
+
+  // Reset the position.
+  void Reset() { position_ = 0; }
+
+  // Add a single character to the builder. It is not allowed to add
+  // 0-characters; use the Finalize() method to terminate the string
+  // instead.
+  void AddCharacter(char c) {
+    ASSERT(c != '\0');
+    ASSERT(!is_finalized() && position_ < buffer_.length());
+    buffer_[position_++] = c;
+  }
+
+  // Add an entire string to the builder. Uses strlen() internally to
+  // compute the length of the input string.
+  void AddString(const char* s);
+
+  // Add the first 'n' characters of the given string 's' to the
+  // builder. The input string must have enough characters.
+  void AddSubstring(const char* s, int n);
+
+  // Add character padding to the builder. If count is non-positive,
+  // nothing is added to the builder.
+  void AddPadding(char c, int count);
+
+  // Add the decimal representation of the value.
+  void AddDecimalInteger(int value);
+
+  // Finalize the string by 0-terminating it and returning the buffer.
+  char* Finalize();
+
+ protected:
+  Vector<char> buffer_;
+  int position_;
+
+  bool is_finalized() const { return position_ < 0; }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
+};
+
+
+// A poor man's version of STL's bitset: A bit set of enums E (without explicit
+// values), fitting into an integral type T.
+template <class E, class T = int>
+class EnumSet {
+ public:
+  explicit EnumSet(T bits = 0) : bits_(bits) {}
+  bool IsEmpty() const { return bits_ == 0; }
+  bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
+  void Add(E element) { bits_ |= Mask(element); }
+  void Remove(E element) { bits_ &= ~Mask(element); }
+  T ToIntegral() const { return bits_; }
+
+ private:
+  T Mask(E element) const {
+    // The strange typing in ASSERT is necessary to avoid stupid warnings, see:
+    // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
+    ASSERT(element < static_cast<int>(sizeof(T) * CHAR_BIT));
+    return 1 << element;
+  }
+
+  T bits_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_UTILS_H_
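
EnumSet stores a set of enumerators as bits of an integral type. A trimmed-down copy with a usage example (the Feature enum and its values are illustrative, not from V8):

#include <cassert>

// A reduced EnumSet in the same spirit as the one added to utils.h.
template <class E, class T = int>
class SmallEnumSet {
 public:
  explicit SmallEnumSet(T bits = 0) : bits_(bits) {}
  bool IsEmpty() const { return bits_ == 0; }
  bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
  void Add(E element) { bits_ |= Mask(element); }
  void Remove(E element) { bits_ &= ~Mask(element); }
  T ToIntegral() const { return bits_; }

 private:
  T Mask(E element) const { return static_cast<T>(1) << element; }
  T bits_;
};

enum Feature { kProxies, kWeakMaps, kTypedArrays };  // illustrative enumerators

int main() {
  SmallEnumSet<Feature> features;
  features.Add(kProxies);
  features.Add(kTypedArrays);
  assert(features.Contains(kProxies));
  assert(!features.Contains(kWeakMaps));
  features.Remove(kProxies);
  assert(features.ToIntegral() == (1 << kTypedArrays));
  return 0;
}
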
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 5e765b2..2de8303 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -126,14 +126,16 @@
      V8.GCCompactorCausedByWeakHandles)                               \
   SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
   SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
-  SC(map_slow_to_fast_elements, V8.MapSlowToFastElements)             \
-  SC(map_fast_to_slow_elements, V8.MapFastToSlowElements)             \
+  SC(map_to_fast_elements, V8.MapToFastElements)                      \
+  SC(map_to_fast_double_elements, V8.MapToFastDoubleElements)         \
+  SC(map_to_slow_elements, V8.MapToSlowElements)                      \
   SC(map_to_external_array_elements, V8.MapToExternalArrayElements)   \
   /* How is the generic keyed-load stub used? */                      \
   SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                  \
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
   SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
   SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)                \
+  SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs)      \
   SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow)   \
   /* How is the generic keyed-call stub used? */                      \
   SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast)         \
@@ -168,17 +170,15 @@
   SC(named_load_inline_field, V8.NamedLoadInlineFast)                 \
   SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric)            \
   SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast)                  \
-  SC(named_load_full, V8.NamedLoadFull)                               \
-  SC(keyed_load_full, V8.KeyedLoadFull)                               \
   SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric)          \
   SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast)                \
   SC(named_store_inline_generic, V8.NamedStoreInlineGeneric)          \
   SC(named_store_inline_fast, V8.NamedStoreInlineFast)                \
-  SC(keyed_store_full, V8.KeyedStoreFull)                             \
-  SC(named_store_full, V8.NamedStoreFull)                             \
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
   SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
   SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
+  SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs)    \
+  SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
   SC(store_normal_miss, V8.StoreNormalMiss)                           \
   SC(store_normal_hit, V8.StoreNormalHit)                             \
   SC(cow_arrays_created_stub, V8.COWArraysCreatedStub)                \
diff --git a/src/v8.cc b/src/v8.cc
index 0b562fc..1e9b5dc 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "isolate.h"
+#include "elements.h"
 #include "bootstrapper.h"
 #include "debug.h"
 #include "deoptimizer.h"
@@ -50,6 +51,9 @@
 bool V8::has_fatal_error_ = false;
 bool V8::use_crankshaft_ = true;
 
+static Mutex* entropy_mutex = OS::CreateMutex();
+static EntropySource entropy_source;
+
 
 bool V8::Initialize(Deserializer* des) {
   InitializeOncePerProcess();
@@ -100,42 +104,45 @@
 }
 
 
-static uint32_t random_seed() {
-  if (FLAG_random_seed == 0) {
-    return random();
+static void seed_random(uint32_t* state) {
+  for (int i = 0; i < 2; ++i) {
+    if (FLAG_random_seed != 0) {
+      state[i] = FLAG_random_seed;
+    } else if (entropy_source != NULL) {
+      uint32_t val;
+      ScopedLock lock(entropy_mutex);
+      entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
+      state[i] = val;
+    } else {
+      state[i] = random();
+    }
   }
-  return FLAG_random_seed;
 }
 
 
-typedef struct {
-  uint32_t hi;
-  uint32_t lo;
-} random_state;
-
-
 // Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(random_state *state) {
-  // Initialize seed using the system random(). If one of the seeds
-  // should ever become zero again, or if random() returns zero, we
-  // avoid getting stuck with zero bits in hi or lo by re-initializing
-  // them on demand.
-  if (state->hi == 0) state->hi = random_seed();
-  if (state->lo == 0) state->lo = random_seed();
+static uint32_t random_base(uint32_t* state) {
+  // Initialize seed using the system random().
+  // No non-zero seed will ever become zero again.
+  if (state[0] == 0) seed_random(state);
 
-  // Mix the bits.
-  state->hi = 36969 * (state->hi & 0xFFFF) + (state->hi >> 16);
-  state->lo = 18273 * (state->lo & 0xFFFF) + (state->lo >> 16);
-  return (state->hi << 16) + (state->lo & 0xFFFF);
+  // Mix the bits.  Never replaces state[i] with 0 if it is nonzero.
+  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
+  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
+
+  return (state[0] << 14) + (state[1] & 0x3FFFF);
+}
+
+
+void V8::SetEntropySource(EntropySource source) {
+  entropy_source = source;
 }
 
 
 // Used by JavaScript APIs
 uint32_t V8::Random(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
-  // TODO(isolates): move lo and hi to isolate
-  static random_state state = {0, 0};
-  return random_base(&state);
+  return random_base(isolate->random_seed());
 }
 
 
@@ -144,9 +151,7 @@
 // leaks that could be used in an exploit.
 uint32_t V8::RandomPrivate(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
-  // TODO(isolates): move lo and hi to isolate
-  static random_state state = {0, 0};
-  return random_base(&state);
+  return random_base(isolate->private_random_seed());
 }
 
 
@@ -208,6 +213,8 @@
 
   // Peephole optimization might interfere with deoptimization.
   FLAG_peephole_optimization = !use_crankshaft_;
+
+  ElementsAccessor::InitializeOncePerProcess();
 }
 
 } }  // namespace v8::internal
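
The random state is now a two-element uint32_t array owned by the isolate; random_base runs one multiply-with-carry step per element and combines the results. A standalone sketch of that combination step (the seed values below are placeholders, not how V8 seeds the state):

#include <cstdint>
#include <cstdio>

// One step of the two-stream multiply-with-carry generator used by
// random_base: each 32-bit state word carries a 16-bit value plus its carry.
static uint32_t MwcStep(uint32_t state[2]) {
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
  return (state[0] << 14) + (state[1] & 0x3FFFF);
}

int main() {
  uint32_t state[2] = {0x12345678, 0x9ABCDEF0};  // stand-in for a real seed
  for (int i = 0; i < 4; i++) {
    printf("%08x\n", MwcStep(state));
  }
  return 0;
}
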
diff --git a/src/v8.h b/src/v8.h
index 776fa9c..e565ca5 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,13 +35,10 @@
 #if defined(GOOGLE3)
 // Google3 special flag handling.
 #if defined(DEBUG) && defined(NDEBUG)
-// If both are defined in Google3, then we are building an optimized v8 with
-// assertions enabled.
+// V8 only uses DEBUG and whenever it is set we are building a debug
+// version of V8. We do not use NDEBUG and simply undef it here for
+// consistency.
 #undef NDEBUG
-#elif !defined(DEBUG) && !defined(NDEBUG)
-// If neither is defined in Google3, then we are building a debug v8. Mark it
-// as such.
-#define DEBUG
 #endif
 #endif  // defined(GOOGLE3)
 
@@ -66,6 +63,7 @@
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
+#include "isolate-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -93,6 +91,9 @@
   static void FatalProcessOutOfMemory(const char* location,
                                       bool take_snapshot = false);
 
+  // Allows an entropy source to be provided for use in random number
+  // generation.
+  static void SetEntropySource(EntropySource source);
   // Random number generation support. Not cryptographically safe.
   static uint32_t Random(Isolate* isolate);
   // We use random numbers internally in memory allocation and in the
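
SetEntropySource lets the embedder supply the bytes used to seed that random state. A hedged sketch of what such a callback could look like, assuming it matches the call site in v8.cc (a buffer pointer plus a byte count, returning a success flag); the function name and the /dev/urandom source are illustrative:

#include <cstddef>
#include <cstdio>

// Illustrative entropy callback: fill |buffer| with |length| random bytes and
// report success. The exact EntropySource signature is assumed from the way
// v8.cc invokes the stored callback.
static bool FillWithUrandom(unsigned char* buffer, size_t length) {
  FILE* f = fopen("/dev/urandom", "rb");
  if (f == NULL) return false;
  size_t read = fread(buffer, 1, length, f);
  fclose(f);
  return read == length;
}

int main() {
  unsigned char bytes[4];
  if (FillWithUrandom(bytes, sizeof(bytes))) {
    // An embedder would instead register the callback once at startup, e.g.
    //   v8::internal::V8::SetEntropySource(&FillWithUrandom);
    printf("%02x%02x%02x%02x\n", bytes[0], bytes[1], bytes[2], bytes[3]);
  }
  return 0;
}
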
diff --git a/src/v8conversions.cc b/src/v8conversions.cc
new file mode 100644
index 0000000..bf175e5
--- /dev/null
+++ b/src/v8conversions.cc
@@ -0,0 +1,128 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+#include <limits.h>
+
+#include "v8.h"
+
+#include "conversions-inl.h"
+#include "v8conversions.h"
+#include "dtoa.h"
+#include "factory.h"
+#include "strtod.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// C++-style iterator adaptor for StringInputBuffer
+// (unlike C++ iterators the end-marker has different type).
+class StringInputBufferIterator {
+ public:
+  class EndMarker {};
+
+  explicit StringInputBufferIterator(StringInputBuffer* buffer);
+
+  int operator*() const;
+  void operator++();
+  bool operator==(EndMarker const&) const { return end_; }
+  bool operator!=(EndMarker const& m) const { return !end_; }
+
+ private:
+  StringInputBuffer* const buffer_;
+  int current_;
+  bool end_;
+};
+
+
+StringInputBufferIterator::StringInputBufferIterator(
+    StringInputBuffer* buffer) : buffer_(buffer) {
+  ++(*this);
+}
+
+int StringInputBufferIterator::operator*() const {
+  return current_;
+}
+
+
+void StringInputBufferIterator::operator++() {
+  end_ = !buffer_->has_more();
+  if (!end_) {
+    current_ = buffer_->GetNext();
+  }
+}
+}  // End anonymous namespace.
+
+
+double StringToDouble(UnicodeCache* unicode_cache,
+                      String* str, int flags, double empty_string_val) {
+  StringShape shape(str);
+  if (shape.IsSequentialAscii()) {
+    const char* begin = SeqAsciiString::cast(str)->GetChars();
+    const char* end = begin + str->length();
+    return InternalStringToDouble(unicode_cache, begin, end, flags,
+                                  empty_string_val);
+  } else if (shape.IsSequentialTwoByte()) {
+    const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+    const uc16* end = begin + str->length();
+    return InternalStringToDouble(unicode_cache, begin, end, flags,
+                                  empty_string_val);
+  } else {
+    StringInputBuffer buffer(str);
+    return InternalStringToDouble(unicode_cache,
+                                  StringInputBufferIterator(&buffer),
+                                  StringInputBufferIterator::EndMarker(),
+                                  flags,
+                                  empty_string_val);
+  }
+}
+
+
+double StringToInt(UnicodeCache* unicode_cache,
+                   String* str,
+                   int radix) {
+  StringShape shape(str);
+  if (shape.IsSequentialAscii()) {
+    const char* begin = SeqAsciiString::cast(str)->GetChars();
+    const char* end = begin + str->length();
+    return InternalStringToInt(unicode_cache, begin, end, radix);
+  } else if (shape.IsSequentialTwoByte()) {
+    const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+    const uc16* end = begin + str->length();
+    return InternalStringToInt(unicode_cache, begin, end, radix);
+  } else {
+    StringInputBuffer buffer(str);
+    return InternalStringToInt(unicode_cache,
+                               StringInputBufferIterator(&buffer),
+                               StringInputBufferIterator::EndMarker(),
+                               radix);
+  }
+}
+
+} }  // namespace v8::internal
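
StringToDouble and StringToInt pick the cheapest way to walk the string: raw character pointers for sequential strings, and an iterator adaptor with a distinct end-marker type for everything else. The end-marker trick can be shown in isolation (a generic sketch, not the V8 classes):

#include <cassert>

// Iterator with a sentinel of a different type: comparing against EndMarker
// asks "are we exhausted?" instead of comparing positions, which is how
// StringInputBufferIterator terminates the templated conversion loops.
class CountdownIterator {
 public:
  class EndMarker {};

  explicit CountdownIterator(int n) : remaining_(n) {}
  int operator*() const { return remaining_; }
  void operator++() { --remaining_; }
  bool operator==(const EndMarker&) const { return remaining_ == 0; }
  bool operator!=(const EndMarker&) const { return remaining_ != 0; }

 private:
  int remaining_;
};

// A generic consumer only needs a begin/end pair, whatever their types.
template <class Iterator, class EndMark>
int Sum(Iterator current, EndMark end) {
  int total = 0;
  for (; current != end; ++current) total += *current;
  return total;
}

int main() {
  assert(Sum(CountdownIterator(4), CountdownIterator::EndMarker()) == 10);
  return 0;
}
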
diff --git a/src/frame-element.cc b/src/v8conversions.h
similarity index 64%
copy from src/frame-element.cc
copy to src/v8conversions.h
index f629900..1840e3a 100644
--- a/src/frame-element.cc
+++ b/src/v8conversions.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,36 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_V8CONVERSIONS_H_
+#define V8_V8CONVERSIONS_H_
 
-#include "frame-element.h"
-#include "zone-inl.h"
+#include "conversions.h"
 
 namespace v8 {
 namespace internal {
 
+// Convert from Number object to C integer.
+static inline int32_t NumberToInt32(Object* number) {
+  if (number->IsSmi()) return Smi::cast(number)->value();
+  return DoubleToInt32(number->Number());
+}
+
+
+static inline uint32_t NumberToUint32(Object* number) {
+  if (number->IsSmi()) return Smi::cast(number)->value();
+  return DoubleToUint32(number->Number());
+}
+
+
+// Converts a string into a double value according to ECMA-262 9.3.1
+double StringToDouble(UnicodeCache* unicode_cache,
+                      String* str,
+                      int flags,
+                      double empty_string_val = 0);
+
+// Converts a string into an integer.
+double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
 
 } }  // namespace v8::internal
+
+#endif  // V8_V8CONVERSIONS_H_
diff --git a/src/v8dll-main.cc b/src/v8dll-main.cc
index 3d4b3a3..49d8689 100644
--- a/src/v8dll-main.cc
+++ b/src/v8dll-main.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <windows.h>
-
+// The GYP based build ends up defining USING_V8_SHARED when compiling this
+// file.
+#undef USING_V8_SHARED
 #include "../include/v8.h"
 
+#ifdef WIN32
+#include <windows.h>  // NOLINT
+
 extern "C" {
 BOOL WINAPI DllMain(HANDLE hinstDLL,
                     DWORD dwReason,
@@ -37,3 +41,4 @@
   return TRUE;
 }
 }
+#endif
diff --git a/src/v8globals.h b/src/v8globals.h
index 2a01dfd..bf843e5 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -131,7 +131,8 @@
 class FunctionEntry;
 class FunctionLiteral;
 class FunctionTemplateInfo;
-class NumberDictionary;
+class SeededNumberDictionary;
+class UnseededNumberDictionary;
 class StringDictionary;
 template <typename T> class Handle;
 class Heap;
@@ -154,7 +155,7 @@
 class MaybeObject;
 class OldSpace;
 class Property;
-class Proxy;
+class Foreign;
 class RegExpNode;
 struct RegExpCompileData;
 class RegExpTree;
@@ -185,6 +186,8 @@
 
 typedef bool (*WeakSlotCallback)(Object** pointer);
 
+typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
+
 // -----------------------------------------------------------------------------
 // Miscellaneous
 
@@ -218,7 +221,12 @@
 
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
 
-enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
+enum VisitMode {
+  VISIT_ALL,
+  VISIT_ALL_IN_SCAVENGE,
+  VISIT_ALL_IN_SWEEP_NEWSPACE,
+  VISIT_ONLY_STRONG
+};
 
 // Flag indicating whether code is built into the VM (one of the natives files).
 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
@@ -295,15 +303,11 @@
 };
 
 
-enum InLoopFlag {
-  NOT_IN_LOOP,
-  IN_LOOP
-};
-
-
 enum CallFunctionFlags {
   NO_CALL_FUNCTION_FLAGS = 0,
-  RECEIVER_MIGHT_BE_VALUE = 1 << 0  // Receiver might not be a JSObject.
+  // Receiver might implicitly be the global object. If it is, the
+  // hole is passed to the call function stub.
+  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
 };
 
 
@@ -322,11 +326,12 @@
   FIELD                     = 1,  // only in fast mode
   CONSTANT_FUNCTION         = 2,  // only in fast mode
   CALLBACKS                 = 3,
-  INTERCEPTOR               = 4,  // only in lookup results, not in descriptors.
-  MAP_TRANSITION            = 5,  // only in fast mode
-  EXTERNAL_ARRAY_TRANSITION = 6,
-  CONSTANT_TRANSITION       = 7,  // only in fast mode
-  NULL_DESCRIPTOR           = 8,  // only in fast mode
+  HANDLER                   = 4,  // only in lookup results, not in descriptors
+  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
+  MAP_TRANSITION            = 6,  // only in fast mode
+  ELEMENTS_TRANSITION       = 7,
+  CONSTANT_TRANSITION       = 8,  // only in fast mode
+  NULL_DESCRIPTOR           = 9,  // only in fast mode
   // All properties before MAP_TRANSITION are real.
   FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
   // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
@@ -385,12 +390,11 @@
 };
 
 
-// Logging and profiling.
-// A StateTag represents a possible state of the VM.  When compiled with
-// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
-// Creating a VMState object enters a state by pushing on the stack, and
-// destroying a VMState object leaves a state by popping the current state
-// from the stack.
+// Logging and profiling.  A StateTag represents a possible state of
+// the VM. The logger maintains a stack of these. Creating a VMState
+// object enters a state by pushing on the stack, and destroying a
+// VMState object leaves a state by popping the current state from the
+// stack.
 
 #define STATE_TAG_LIST(V) \
   V(JS)                   \
@@ -481,6 +485,32 @@
   kInvalidStrictFlag
 };
 
+
+// Used to specify if a macro instruction must perform a smi check on tagged
+// values.
+enum SmiCheckType {
+  DONT_DO_SMI_CHECK = 0,
+  DO_SMI_CHECK
+};
+
+
+// Used to specify whether a receiver is implicitly or explicitly
+// provided to a call.
+enum CallKind {
+  CALL_AS_METHOD = 0,
+  CALL_AS_FUNCTION
+};
+
+
+static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
+static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
+static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+
+const uint64_t kHoleNanInt64 =
+    (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
+const uint64_t kLastNonNaNInt64 =
+    (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
+
 } }  // namespace v8::internal
 
 #endif  // V8_V8GLOBALS_H_
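
The hole NaN constants above encode a specific NaN bit pattern used to mark holes in arrays of unboxed doubles, and kLastNonNaNInt64 places the NaN/Infinity boundary at exponent 0x7FF in the upper 32 bits. A small check of the arithmetic, computed exactly as in the diff:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // Recompute the constants exactly as in v8globals.h.
  const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
  const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
  const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
  const uint64_t kHoleNanInt64 =
      (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
  const uint64_t kLastNonNaNInt64 =
      static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32;

  assert(kHoleNanInt64 == 0x7FFFFFFFFFFFFFFFULL);
  assert(kLastNonNaNInt64 == 0x7FF0000000000000ULL);

  // The hole pattern really is a NaN when reinterpreted as a double.
  double d;
  memcpy(&d, &kHoleNanInt64, sizeof(d));
  assert(d != d);  // only NaN compares unequal to itself
  return 0;
}
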
diff --git a/src/v8natives.js b/src/v8natives.js
index 429cea5..588bdb2 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,7 +41,6 @@
 const $isNaN = GlobalIsNaN;
 const $isFinite = GlobalIsFinite;
 
-
 // ----------------------------------------------------------------------------
 
 
@@ -56,6 +55,7 @@
     %FunctionSetName(f, key);
     %FunctionRemovePrototype(f);
     %SetProperty(object, key, f, attributes);
+    %SetNativeFlag(f);
   }
   %ToFastProperties(object);
 }
@@ -65,28 +65,56 @@
 // functions on String.prototype etc. and then restore the old function
 // with delete.  See http://code.google.com/p/chromium/issues/detail?id=1717
 function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
+  %CheckIsBootstrapping();
   var hidden_prototype = new $Object();
   %SetHiddenPrototype(object, hidden_prototype);
   InstallFunctions(hidden_prototype, attributes, functions);
 }
 
 
+// Prevents changes to the prototype of a built-in function.
+// The "prototype" property of the function object is made non-configurable,
+// and the prototype object is made non-extensible. The latter prevents
+// changing the __proto__ property.
+function SetUpLockedPrototype(constructor, fields, methods) {
+  %CheckIsBootstrapping();
+  var prototype = constructor.prototype;
+  // Install functions first, because this function is used to initialize
+  // PropertyDescriptor itself.
+  var property_count = (methods.length >> 1) + (fields ? fields.length : 0);
+  if (property_count >= 4) {
+    %OptimizeObjectForAddingMultipleProperties(prototype, property_count);
+  }
+  if (fields) {
+    for (var i = 0; i < fields.length; i++) {
+      %SetProperty(prototype, fields[i], void 0, DONT_ENUM | DONT_DELETE);
+    }
+  }
+  for (var i = 0; i < methods.length; i += 2) {
+    var key = methods[i];
+    var f = methods[i + 1];
+    %SetProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
+    %SetNativeFlag(f);
+  }
+  prototype.__proto__ = null;
+  %ToFastProperties(prototype);
+}
+
+
 // ----------------------------------------------------------------------------
 
 
 // ECMA 262 - 15.1.4
 function GlobalIsNaN(number) {
-  var n = ToNumber(number);
-  return NUMBER_IS_NAN(n);
+  if (!IS_NUMBER(number)) number = NonNumberToNumber(number);
+  return NUMBER_IS_NAN(number);
 }
 
 
 // ECMA 262 - 15.1.5
 function GlobalIsFinite(number) {
   if (!IS_NUMBER(number)) number = NonNumberToNumber(number);
-
-  // NaN - NaN == NaN, Infinity - Infinity == NaN, -Infinity - -Infinity == NaN.
-  return %_IsSmi(number) || number - number == 0;
+  return NUMBER_IS_FINITE(number);
 }
 
 
@@ -105,13 +133,16 @@
       // Truncate number.
       return string | 0;
     }
-    if (IS_UNDEFINED(radix)) radix = 0;
+    string = TO_STRING_INLINE(string);
+    radix = radix | 0;
   } else {
+    // The spec says ToString should be evaluated before ToInt32.
+    string = TO_STRING_INLINE(string);
     radix = TO_INT32(radix);
     if (!(radix == 0 || (2 <= radix && radix <= 36)))
       return $NaN;
   }
-  string = TO_STRING_INLINE(string);
+
   if (%_HasCachedArrayIndex(string) &&
       (radix == 0 || radix == 10)) {
     return %_GetCachedArrayIndex(string);
@@ -131,10 +162,19 @@
 function GlobalEval(x) {
   if (!IS_STRING(x)) return x;
 
+  var receiver = this;
   var global_receiver = %GlobalReceiver(global);
-  var this_is_global_receiver = (this === global_receiver);
+
+  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+    receiver = global_receiver;
+  }
+
+  var this_is_global_receiver = (receiver === global_receiver);
   var global_is_detached = (global === global_receiver);
 
+  // For consistency with JSC we require the global object passed to
+  // eval to be the global object from which 'eval' originated. This
+  // is not mandated by the spec.
   if (!this_is_global_receiver || global_is_detached) {
     throw new $EvalError('The "this" object passed to eval must ' +
                          'be the global object from which eval originated');
@@ -143,14 +183,15 @@
   var f = %CompileString(x);
   if (!IS_FUNCTION(f)) return f;
 
-  return %_CallFunction(this, f);
+  return %_CallFunction(receiver, f);
 }
 
 
 // ----------------------------------------------------------------------------
 
-
-function SetupGlobal() {
+// Set up global object.
+function SetUpGlobal() {
+  %CheckIsBootstrapping();
   // ECMA 262 - 15.1.1.1.
   %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
 
@@ -160,7 +201,7 @@
   // ECMA-262 - 15.1.1.3.
   %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
 
-  // Setup non-enumerable function on the global object.
+  // Set up non-enumerable function on the global object.
   InstallFunctions(global, DONT_ENUM, $Array(
     "isNaN", GlobalIsNaN,
     "isFinite", GlobalIsFinite,
@@ -170,8 +211,7 @@
   ));
 }
 
-SetupGlobal();
-
+SetUpGlobal();
 
 // ----------------------------------------------------------------------------
 // Boolean (first part of definition)
@@ -196,12 +236,20 @@
 
 // ECMA-262 - 15.2.4.2
 function ObjectToString() {
+  if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    return '[object Undefined]';
+  }
+  if (IS_NULL(this)) return '[object Null]';
   return "[object " + %_ClassOf(ToObject(this)) + "]";
 }
 
 
 // ECMA-262 - 15.2.4.3
 function ObjectToLocaleString() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Object.prototype.toLocaleString"]);
+  }
   return this.toString();
 }
 
@@ -214,12 +262,20 @@
 
 // ECMA-262 - 15.2.4.5
 function ObjectHasOwnProperty(V) {
-  return %HasLocalProperty(ToObject(this), ToString(V));
+  if (%IsJSProxy(this)) {
+    var handler = %GetHandler(this);
+    return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, TO_STRING_INLINE(V));
+  }
+  return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V));
 }
 
 
 // ECMA-262 - 15.2.4.6
 function ObjectIsPrototypeOf(V) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Object.prototype.isPrototypeOf"]);
+  }
   if (!IS_SPEC_OBJECT(V)) return false;
   return %IsInPrototypeChain(this, V);
 }
@@ -227,39 +283,47 @@
 
 // ECMA-262 - 15.2.4.6
 function ObjectPropertyIsEnumerable(V) {
-  return %IsPropertyEnumerable(ToObject(this), ToString(V));
+  var P = ToString(V);
+  if (%IsJSProxy(this)) {
+    var desc = GetOwnProperty(this, P);
+    return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
+  }
+  return %IsPropertyEnumerable(ToObject(this), P);
 }
 
 
 // Extensions for providing property getters and setters.
 function ObjectDefineGetter(name, fun) {
-  if (this == null && !IS_UNDETECTABLE(this)) {
-    throw new $TypeError('Object.prototype.__defineGetter__: this is Null');
+  var receiver = this;
+  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+    receiver = %GlobalReceiver(global);
   }
-  if (!IS_FUNCTION(fun)) {
+  if (!IS_SPEC_FUNCTION(fun)) {
     throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
   }
   var desc = new PropertyDescriptor();
   desc.setGet(fun);
   desc.setEnumerable(true);
   desc.setConfigurable(true);
-  DefineOwnProperty(ToObject(this), ToString(name), desc, false);
+  DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
 }
 
 
 function ObjectLookupGetter(name) {
-  if (this == null && !IS_UNDETECTABLE(this)) {
-    throw new $TypeError('Object.prototype.__lookupGetter__: this is Null');
+  var receiver = this;
+  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+    receiver = %GlobalReceiver(global);
   }
-  return %LookupAccessor(ToObject(this), ToString(name), GETTER);
+  return %LookupAccessor(ToObject(receiver), ToString(name), GETTER);
 }
 
 
 function ObjectDefineSetter(name, fun) {
-  if (this == null && !IS_UNDETECTABLE(this)) {
-    throw new $TypeError('Object.prototype.__defineSetter__: this is Null');
+  var receiver = this;
+  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+    receiver = %GlobalReceiver(global);
   }
-  if (!IS_FUNCTION(fun)) {
+  if (!IS_SPEC_FUNCTION(fun)) {
     throw new $TypeError(
         'Object.prototype.__defineSetter__: Expecting function');
   }
@@ -267,21 +331,27 @@
   desc.setSet(fun);
   desc.setEnumerable(true);
   desc.setConfigurable(true);
-  DefineOwnProperty(ToObject(this), ToString(name), desc, false);
+  DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
 }
 
 
 function ObjectLookupSetter(name) {
-  if (this == null && !IS_UNDETECTABLE(this)) {
-    throw new $TypeError('Object.prototype.__lookupSetter__: this is Null');
+  var receiver = this;
+  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+    receiver = %GlobalReceiver(global);
   }
-  return %LookupAccessor(ToObject(this), ToString(name), SETTER);
+  return %LookupAccessor(ToObject(receiver), ToString(name), SETTER);
 }
 
 
 function ObjectKeys(obj) {
   if (!IS_SPEC_OBJECT(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
+  if (%IsJSProxy(obj)) {
+    var handler = %GetHandler(obj);
+    var names = CallTrap0(handler, "keys", DerivedKeysTrap);
+    return ToStringArray(names);
+  }
   return %LocalKeys(obj);
 }
 
@@ -289,14 +359,14 @@
 // ES5 8.10.1.
 function IsAccessorDescriptor(desc) {
   if (IS_UNDEFINED(desc)) return false;
-  return desc.hasGetter_ || desc.hasSetter_;
+  return desc.hasGetter() || desc.hasSetter();
 }
 
 
 // ES5 8.10.2.
 function IsDataDescriptor(desc) {
   if (IS_UNDEFINED(desc)) return false;
-  return desc.hasValue_ || desc.hasWritable_;
+  return desc.hasValue() || desc.hasWritable();
 }
 
 
@@ -310,23 +380,54 @@
   return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
 }
 
+
 // ES5 8.10.4
 function FromPropertyDescriptor(desc) {
   if (IS_UNDEFINED(desc)) return desc;
-  var obj = new $Object();
+
   if (IsDataDescriptor(desc)) {
-    obj.value = desc.getValue();
-    obj.writable = desc.isWritable();
+    return { value: desc.getValue(),
+             writable: desc.isWritable(),
+             enumerable: desc.isEnumerable(),
+             configurable: desc.isConfigurable() };
   }
-  if (IsAccessorDescriptor(desc)) {
-    obj.get = desc.getGet();
-    obj.set = desc.getSet();
+  // Must be an AccessorDescriptor then. We never return a generic descriptor.
+  return { get: desc.getGet(),
+           set: desc.getSet(),
+           enumerable: desc.isEnumerable(),
+           configurable: desc.isConfigurable() };
+}
+
+
+// Harmony Proxies
+function FromGenericPropertyDescriptor(desc) {
+  if (IS_UNDEFINED(desc)) return desc;
+  var obj = new $Object();
+
+  if (desc.hasValue()) {
+    %IgnoreAttributesAndSetProperty(obj, "value", desc.getValue(), NONE);
   }
-  obj.enumerable = desc.isEnumerable();
-  obj.configurable = desc.isConfigurable();
+  if (desc.hasWritable()) {
+    %IgnoreAttributesAndSetProperty(obj, "writable", desc.isWritable(), NONE);
+  }
+  if (desc.hasGetter()) {
+    %IgnoreAttributesAndSetProperty(obj, "get", desc.getGet(), NONE);
+  }
+  if (desc.hasSetter()) {
+    %IgnoreAttributesAndSetProperty(obj, "set", desc.getSet(), NONE);
+  }
+  if (desc.hasEnumerable()) {
+    %IgnoreAttributesAndSetProperty(obj, "enumerable",
+                                    desc.isEnumerable(), NONE);
+  }
+  if (desc.hasConfigurable()) {
+    %IgnoreAttributesAndSetProperty(obj, "configurable",
+                                    desc.isConfigurable(), NONE);
+  }
   return obj;
 }
 
+
 // ES5 8.10.5.
 function ToPropertyDescriptor(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
@@ -352,7 +453,7 @@
 
   if ("get" in obj) {
     var get = obj.get;
-    if (!IS_UNDEFINED(get) && !IS_FUNCTION(get)) {
+    if (!IS_UNDEFINED(get) && !IS_SPEC_FUNCTION(get)) {
       throw MakeTypeError("getter_must_be_callable", [get]);
     }
     desc.setGet(get);
@@ -360,7 +461,7 @@
 
   if ("set" in obj) {
     var set = obj.set;
-    if (!IS_UNDEFINED(set) && !IS_FUNCTION(set)) {
+    if (!IS_UNDEFINED(set) && !IS_SPEC_FUNCTION(set)) {
       throw MakeTypeError("setter_must_be_callable", [set]);
     }
     desc.setSet(set);
@@ -373,6 +474,23 @@
 }
 
 
+// For Harmony proxies.
+function ToCompletePropertyDescriptor(obj) {
+  var desc = ToPropertyDescriptor(obj);
+  if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
+    if (!desc.hasValue()) desc.setValue(void 0);
+    if (!desc.hasWritable()) desc.setWritable(false);
+  } else {
+    // Is accessor descriptor.
+    if (!desc.hasGetter()) desc.setGet(void 0);
+    if (!desc.hasSetter()) desc.setSet(void 0);
+  }
+  if (!desc.hasEnumerable()) desc.setEnumerable(false);
+  if (!desc.hasConfigurable()) desc.setConfigurable(false);
+  return desc;
+}
+
+
 function PropertyDescriptor() {
   // Initialize here so they are all in-object and have the same map.
   // Default values from ES5 8.6.1.
@@ -390,105 +508,83 @@
   this.hasSetter_ = false;
 }
 
-PropertyDescriptor.prototype.__proto__ = null;
-PropertyDescriptor.prototype.toString = function() {
-  return "[object PropertyDescriptor]";
-};
-
-PropertyDescriptor.prototype.setValue = function(value) {
-  this.value_ = value;
-  this.hasValue_ = true;
-}
-
-
-PropertyDescriptor.prototype.getValue = function() {
-  return this.value_;
-}
-
-
-PropertyDescriptor.prototype.hasValue = function() {
-  return this.hasValue_;
-}
-
-
-PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
-  this.enumerable_ = enumerable;
-  this.hasEnumerable_ = true;
-}
-
-
-PropertyDescriptor.prototype.isEnumerable = function () {
-  return this.enumerable_;
-}
-
-
-PropertyDescriptor.prototype.hasEnumerable = function() {
-  return this.hasEnumerable_;
-}
-
-
-PropertyDescriptor.prototype.setWritable = function(writable) {
-  this.writable_ = writable;
-  this.hasWritable_ = true;
-}
-
-
-PropertyDescriptor.prototype.isWritable = function() {
-  return this.writable_;
-}
-
-
-PropertyDescriptor.prototype.hasWritable = function() {
-  return this.hasWritable_;
-}
-
-
-PropertyDescriptor.prototype.setConfigurable = function(configurable) {
-  this.configurable_ = configurable;
-  this.hasConfigurable_ = true;
-}
-
-
-PropertyDescriptor.prototype.hasConfigurable = function() {
-  return this.hasConfigurable_;
-}
-
-
-PropertyDescriptor.prototype.isConfigurable = function() {
-  return this.configurable_;
-}
-
-
-PropertyDescriptor.prototype.setGet = function(get) {
-  this.get_ = get;
-  this.hasGetter_ = true;
-}
-
-
-PropertyDescriptor.prototype.getGet = function() {
-  return this.get_;
-}
-
-
-PropertyDescriptor.prototype.hasGetter = function() {
-  return this.hasGetter_;
-}
-
-
-PropertyDescriptor.prototype.setSet = function(set) {
-  this.set_ = set;
-  this.hasSetter_ = true;
-}
-
-
-PropertyDescriptor.prototype.getSet = function() {
-  return this.set_;
-}
-
-
-PropertyDescriptor.prototype.hasSetter = function() {
-  return this.hasSetter_;
-}
+SetUpLockedPrototype(PropertyDescriptor, $Array(
+    "value_",
+    "hasValue_",
+    "writable_",
+    "hasWritable_",
+    "enumerable_",
+    "hasEnumerable_",
+    "configurable_",
+    "hasConfigurable_",
+    "get_",
+    "hasGetter_",
+    "set_",
+    "hasSetter_"
+  ), $Array(
+    "toString", function() {
+      return "[object PropertyDescriptor]";
+    },
+    "setValue", function(value) {
+      this.value_ = value;
+      this.hasValue_ = true;
+    },
+    "getValue", function() {
+      return this.value_;
+    },
+    "hasValue", function() {
+      return this.hasValue_;
+    },
+    "setEnumerable", function(enumerable) {
+      this.enumerable_ = enumerable;
+      this.hasEnumerable_ = true;
+    },
+    "isEnumerable", function () {
+      return this.enumerable_;
+    },
+    "hasEnumerable", function() {
+      return this.hasEnumerable_;
+    },
+    "setWritable", function(writable) {
+      this.writable_ = writable;
+      this.hasWritable_ = true;
+    },
+    "isWritable", function() {
+      return this.writable_;
+    },
+    "hasWritable", function() {
+      return this.hasWritable_;
+    },
+    "setConfigurable", function(configurable) {
+      this.configurable_ = configurable;
+      this.hasConfigurable_ = true;
+    },
+    "hasConfigurable", function() {
+      return this.hasConfigurable_;
+    },
+    "isConfigurable", function() {
+      return this.configurable_;
+    },
+    "setGet", function(get) {
+      this.get_ = get;
+      this.hasGetter_ = true;
+    },
+    "getGet", function() {
+      return this.get_;
+    },
+    "hasGetter", function() {
+      return this.hasGetter_;
+    },
+    "setSet", function(set) {
+      this.set_ = set;
+      this.hasSetter_ = true;
+    },
+    "getSet", function() {
+      return this.set_;
+    },
+    "hasSetter", function() {
+      return this.hasSetter_;
+  }));
 
 
 // Converts an array returned from Runtime_GetOwnProperty to an actual
@@ -519,29 +615,55 @@
 }
 
 
-// ES5 section 8.12.2.
-function GetProperty(obj, p) {
-  var prop = GetOwnProperty(obj);
-  if (!IS_UNDEFINED(prop)) return prop;
-  var proto = obj.__proto__;
-  if (IS_NULL(proto)) return void 0;
-  return GetProperty(proto, p);
+// For Harmony proxies.
+function GetTrap(handler, name, defaultTrap) {
+  var trap = handler[name];
+  if (IS_UNDEFINED(trap)) {
+    if (IS_UNDEFINED(defaultTrap)) {
+      throw MakeTypeError("handler_trap_missing", [handler, name]);
+    }
+    trap = defaultTrap;
+  } else if (!IS_SPEC_FUNCTION(trap)) {
+    throw MakeTypeError("handler_trap_must_be_callable", [handler, name]);
+  }
+  return trap;
 }
 
 
-// ES5 section 8.12.6
-function HasProperty(obj, p) {
-  var desc = GetProperty(obj, p);
-  return IS_UNDEFINED(desc) ? false : true;
+function CallTrap0(handler, name, defaultTrap) {
+  return %_CallFunction(handler, GetTrap(handler, name, defaultTrap));
+}
+
+
+function CallTrap1(handler, name, defaultTrap, x) {
+  return %_CallFunction(handler, x, GetTrap(handler, name, defaultTrap));
+}
+
+
+function CallTrap2(handler, name, defaultTrap, x, y) {
+  return %_CallFunction(handler, x, y, GetTrap(handler, name, defaultTrap));
 }
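// Editor's illustrative sketch (not part of the patch): how the GetTrap /
// CallTrap helpers above surface to script, assuming this era's
// --harmony_proxies API in which Proxy.create(handler) builds a proxy from a
// handler object. GetOwnProperty below routes through the
// getOwnPropertyDescriptor trap via CallTrap1.
var handler = {
  getOwnPropertyDescriptor: function(name) {
    return {value: 'trapped:' + name,
            writable: true, enumerable: true, configurable: true};
  }
};
var proxy = Proxy.create(handler);
// Object.getOwnPropertyDescriptor(proxy, 'x').value === 'trapped:x'
// A missing trap throws "handler_trap_missing"; a present but non-callable
// trap throws "handler_trap_must_be_callable", exactly as GetTrap checks.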
 
 
 // ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
+function GetOwnProperty(obj, v) {
+  var p = ToString(v);
+  if (%IsJSProxy(obj)) {
+    var handler = %GetHandler(obj);
+    var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p);
+    if (IS_UNDEFINED(descriptor)) return descriptor;
+    var desc = ToCompletePropertyDescriptor(descriptor);
+    if (!desc.isConfigurable()) {
+      throw MakeTypeError("proxy_prop_not_configurable",
+                          [handler, "getOwnPropertyDescriptor", p, descriptor]);
+    }
+    return desc;
+  }
+
   // GetOwnProperty returns an array indexed by the constants
   // defined in macros.py.
   // If p is not a property on obj undefined is returned.
-  var props = %GetOwnProperty(ToObject(obj), ToString(p));
+  var props = %GetOwnProperty(ToObject(obj), ToString(v));
 
   // A false value here means that access checks failed.
   if (props === false) return void 0;
@@ -550,8 +672,29 @@
 }
 
 
+// Harmony proxies.
+function DefineProxyProperty(obj, p, attributes, should_throw) {
+  var handler = %GetHandler(obj);
+  var result = CallTrap2(handler, "defineProperty", void 0, p, attributes);
+  if (!ToBoolean(result)) {
+    if (should_throw) {
+      throw MakeTypeError("handler_returned_false",
+                          [handler, "defineProperty"]);
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
+
+
 // ES5 8.12.9.
 function DefineOwnProperty(obj, p, desc, should_throw) {
+  if (%IsJSProxy(obj)) {
+    var attributes = FromGenericPropertyDescriptor(desc);
+    return DefineProxyProperty(obj, p, attributes, should_throw);
+  }
+
   var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
   // A false value here means that access checks failed.
   if (current_or_access === false) return void 0;
@@ -563,7 +706,7 @@
   // Step 3
   if (IS_UNDEFINED(current) && !extensible) {
     if (should_throw) {
-      throw MakeTypeError("define_disallowed", ["defineProperty"]);
+      throw MakeTypeError("define_disallowed", [p]);
     } else {
       return;
     }
@@ -593,7 +736,7 @@
           (desc.hasEnumerable() &&
            desc.isEnumerable() != current.isEnumerable())) {
         if (should_throw) {
-          throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          throw MakeTypeError("redefine_disallowed", [p]);
         } else {
           return;
         }
@@ -603,7 +746,7 @@
         // Step 9a
         if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
           if (should_throw) {
-            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            throw MakeTypeError("redefine_disallowed", [p]);
           } else {
             return;
           }
@@ -612,7 +755,7 @@
         if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
           if (!current.isWritable() && desc.isWritable()) {
             if (should_throw) {
-              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+              throw MakeTypeError("redefine_disallowed", [p]);
             } else {
               return;
             }
@@ -620,7 +763,7 @@
           if (!current.isWritable() && desc.hasValue() &&
               !SameValue(desc.getValue(), current.getValue())) {
             if (should_throw) {
-              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+              throw MakeTypeError("redefine_disallowed", [p]);
             } else {
               return;
             }
@@ -630,14 +773,14 @@
         if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
           if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
             if (should_throw) {
-              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+              throw MakeTypeError("redefine_disallowed", [p]);
             } else {
               return;
             }
           }
           if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
             if (should_throw) {
-              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+              throw MakeTypeError("redefine_disallowed", [p]);
             } else {
               return;
             }
@@ -719,24 +862,52 @@
 function ObjectGetPrototypeOf(obj) {
   if (!IS_SPEC_OBJECT(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
-  return obj.__proto__;
+  return %GetPrototype(obj);
 }
 
 
 // ES5 section 15.2.3.3
 function ObjectGetOwnPropertyDescriptor(obj, p) {
   if (!IS_SPEC_OBJECT(obj))
-    throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
+    throw MakeTypeError("obj_ctor_property_non_object",
+                        ["getOwnPropertyDescriptor"]);
   var desc = GetOwnProperty(obj, p);
   return FromPropertyDescriptor(desc);
 }
 
 
+// For Harmony proxies.
+function ToStringArray(obj, trap) {
+  if (!IS_SPEC_OBJECT(obj)) {
+    throw MakeTypeError("proxy_non_object_prop_names", [obj, trap]);
+  }
+  var n = ToUint32(obj.length);
+  var array = new $Array(n);
+  var names = {};
+  for (var index = 0; index < n; index++) {
+    var s = ToString(obj[index]);
+    if (s in names) {
+      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
+    }
+    array[index] = s;
+    names[s] = 0;
+  }
+  return array;
+}
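// Editor's illustrative sketch (not part of the patch): ToStringArray guards
// the getOwnPropertyNames trap result (era Proxy.create API assumed).
var namesProxy = Proxy.create({
  getOwnPropertyNames: function() { return ['a', 'b', 'a']; }
});
// Object.getOwnPropertyNames(namesProxy) throws "proxy_repeated_prop_name"
// because 'a' appears twice; a non-object trap result throws
// "proxy_non_object_prop_names".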
+
+
 // ES5 section 15.2.3.4.
 function ObjectGetOwnPropertyNames(obj) {
   if (!IS_SPEC_OBJECT(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
 
+  // Special handling for proxies.
+  if (%IsJSProxy(obj)) {
+    var handler = %GetHandler(obj);
+    var names = CallTrap0(handler, "getOwnPropertyNames", void 0);
+    return ToStringArray(names, "getOwnPropertyNames");
+  }
+
   // Find all the indexed properties.
 
   // Get the local element names.
@@ -802,48 +973,107 @@
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
   }
   var name = ToString(p);
-  var desc = ToPropertyDescriptor(attributes);
-  DefineOwnProperty(obj, name, desc, true);
+  if (%IsJSProxy(obj)) {
+    // Clone the attributes object for protection.
+    // TODO(rossberg): not spec'ed yet, so not sure if this should involve
+    // non-own properties as it does (or non-enumerable ones, as it doesn't?).
+    var attributesClone = {};
+    for (var a in attributes) {
+      attributesClone[a] = attributes[a];
+    }
+    DefineProxyProperty(obj, name, attributesClone, true);
+    // The following would implement the spec as in the current proposal,
+    // but after recent comments on es-discuss, is most likely obsolete.
+    /*
+    var descObj = FromGenericPropertyDescriptor(desc);
+    var names = ObjectGetOwnPropertyNames(attributes);
+    var standardNames =
+      {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
+    for (var i = 0; i < names.length; i++) {
+      var N = names[i];
+      if (!(%HasLocalProperty(standardNames, N))) {
+        var attr = GetOwnProperty(attributes, N);
+        DefineOwnProperty(descObj, N, attr, true);
+      }
+    }
+    // This is really confusing the types, but it is what the proxies spec
+    // currently requires:
+    desc = descObj;
+    */
+  } else {
+    var desc = ToPropertyDescriptor(attributes);
+    DefineOwnProperty(obj, name, desc, true);
+  }
   return obj;
 }
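// Editor's illustrative sketch (not part of the patch): for proxies,
// Object.defineProperty clones the attributes object and hands the clone to
// the handler's defineProperty trap (era Proxy.create API assumed).
var log = [];
var p = Proxy.create({
  defineProperty: function(name, attrs) {
    log.push([name, attrs.value]);
    return true;  // a falsy result would raise "handler_returned_false"
  }
});
Object.defineProperty(p, 'x', {value: 42, configurable: true});
// log is now [['x', 42]].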
 
 
+function GetOwnEnumerablePropertyNames(properties) {
+  var names = new InternalArray();
+  for (var key in properties) {
+    if (%HasLocalProperty(properties, key)) {
+      names.push(key);
+    }
+  }
+  return names;
+}
+
+
 // ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
   if (!IS_SPEC_OBJECT(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
   var props = ToObject(properties);
-  var key_values = [];
-  for (var key in props) {
-    if (%HasLocalProperty(props, key)) {
-      key_values.push(key);
-      var value = props[key];
-      var desc = ToPropertyDescriptor(value);
-      key_values.push(desc);
-    }
-  }
-  for (var i = 0; i < key_values.length; i += 2) {
-    var key = key_values[i];
-    var desc = key_values[i + 1];
-    DefineOwnProperty(obj, key, desc, true);
+  var names = GetOwnEnumerablePropertyNames(props);
+  for (var i = 0; i < names.length; i++) {
+    var name = names[i];
+    var desc = ToPropertyDescriptor(props[name]);
+    DefineOwnProperty(obj, name, desc, true);
   }
   return obj;
 }
 
 
+// Harmony proxies.
+function ProxyFix(obj) {
+  var handler = %GetHandler(obj);
+  var props = CallTrap0(handler, "fix", void 0);
+  if (IS_UNDEFINED(props)) {
+    throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
+  }
+
+  if (IS_SPEC_FUNCTION(obj)) {
+    var callTrap = %GetCallTrap(obj);
+    var constructTrap = %GetConstructTrap(obj);
+    var code = DelegateCallAndConstruct(callTrap, constructTrap);
+    %Fix(obj);  // becomes a regular function
+    %SetCode(obj, code);
+  } else {
+    %Fix(obj);
+  }
+  ObjectDefineProperties(obj, props);
+}
+
+
 // ES5 section 15.2.3.8.
 function ObjectSeal(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
   }
+  if (%IsJSProxy(obj)) {
+    ProxyFix(obj);
+  }
   var names = ObjectGetOwnPropertyNames(obj);
   for (var i = 0; i < names.length; i++) {
     var name = names[i];
     var desc = GetOwnProperty(obj, name);
-    if (desc.isConfigurable()) desc.setConfigurable(false);
-    DefineOwnProperty(obj, name, desc, true);
+    if (desc.isConfigurable()) {
+      desc.setConfigurable(false);
+      DefineOwnProperty(obj, name, desc, true);
+    }
   }
-  return ObjectPreventExtension(obj);
+  %PreventExtensions(obj);
+  return obj;
 }
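// Editor's illustrative sketch (not part of the patch): observable behavior of
// Object.seal, which the rewrite above preserves while skipping redefinition
// of properties that are already non-configurable.
var o = {a: 1};
Object.seal(o);
o.a = 2;        // still allowed: seal leaves [[Writable]] alone
o.b = 3;        // silently ignored (throws in strict mode): not extensible
delete o.a;     // fails: 'a' is now non-configurable
// Object.isSealed(o) === true. For a proxy, seal/freeze first run the
// handler's "fix" trap via ProxyFix and then operate on the fixed object.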
 
 
@@ -852,15 +1082,21 @@
   if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
   }
+  if (%IsJSProxy(obj)) {
+    ProxyFix(obj);
+  }
   var names = ObjectGetOwnPropertyNames(obj);
   for (var i = 0; i < names.length; i++) {
     var name = names[i];
     var desc = GetOwnProperty(obj, name);
-    if (IsDataDescriptor(desc)) desc.setWritable(false);
-    if (desc.isConfigurable()) desc.setConfigurable(false);
-    DefineOwnProperty(obj, name, desc, true);
+    if (desc.isWritable() || desc.isConfigurable()) {
+      if (IsDataDescriptor(desc)) desc.setWritable(false);
+      desc.setConfigurable(false);
+      DefineOwnProperty(obj, name, desc, true);
+    }
   }
-  return ObjectPreventExtension(obj);
+  %PreventExtensions(obj);
+  return obj;
 }
 
 
@@ -869,6 +1105,9 @@
   if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
   }
+  if (%IsJSProxy(obj)) {
+    ProxyFix(obj);
+  }
   %PreventExtensions(obj);
   return obj;
 }
@@ -879,6 +1118,9 @@
   if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
   }
+  if (%IsJSProxy(obj)) {
+    return false;
+  }
   var names = ObjectGetOwnPropertyNames(obj);
   for (var i = 0; i < names.length; i++) {
     var name = names[i];
@@ -897,6 +1139,9 @@
   if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
   }
+  if (%IsJSProxy(obj)) {
+    return false;
+  }
   var names = ObjectGetOwnPropertyNames(obj);
   for (var i = 0; i < names.length; i++) {
     var name = names[i];
@@ -914,7 +1159,10 @@
 // ES5 section 15.2.3.13
 function ObjectIsExtensible(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["isExtensible"]);
+  }
+  if (%IsJSProxy(obj)) {
+    return true;
   }
   return %IsExtensible(obj);
 }
@@ -933,10 +1181,11 @@
 %SetExpectedNumberOfProperties($Object, 4);
 
 // ----------------------------------------------------------------------------
+// Object
 
-
-function SetupObject() {
-  // Setup non-enumerable functions on the Object.prototype object.
+function SetUpObject() {
+  %CheckIsBootstrapping();
+  // Set up non-enumerable functions on the Object.prototype object.
   InstallFunctions($Object.prototype, DONT_ENUM, $Array(
     "toString", ObjectToString,
     "toLocaleString", ObjectToLocaleString,
@@ -966,8 +1215,7 @@
   ));
 }
 
-SetupObject();
-
+SetUpObject();
 
 // ----------------------------------------------------------------------------
 // Boolean
@@ -998,14 +1246,16 @@
 // ----------------------------------------------------------------------------
 
 
-function SetupBoolean() {
+function SetUpBoolean() {
+  %CheckIsBootstrapping();
   InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
     "toString", BooleanToString,
     "valueOf", BooleanValueOf
   ));
 }
 
-SetupBoolean();
+SetUpBoolean();
+
 
 // ----------------------------------------------------------------------------
 // Number
@@ -1050,6 +1300,10 @@
 
 // ECMA-262 section 15.7.4.3
 function NumberToLocaleString() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toLocaleString"]);
+  }
   return this.toString();
 }
 
@@ -1070,6 +1324,10 @@
   if (f < 0 || f > 20) {
     throw new $RangeError("toFixed() digits argument must be between 0 and 20");
   }
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toFixed"]);
+  }
   var x = ToNumber(this);
   return %NumberToFixed(x, f);
 }
@@ -1084,6 +1342,10 @@
       throw new $RangeError("toExponential() argument must be between 0 and 20");
     }
   }
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toExponential"]);
+  }
   var x = ToNumber(this);
   return %NumberToExponential(x, f);
 }
@@ -1091,6 +1353,10 @@
 
 // ECMA-262 section 15.7.4.7
 function NumberToPrecision(precision) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toPrecision"]);
+  }
   if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
   var p = TO_INTEGER(precision);
   if (p < 1 || p > 21) {
@@ -1103,9 +1369,10 @@
 
 // ----------------------------------------------------------------------------
 
-function SetupNumber() {
+function SetUpNumber() {
+  %CheckIsBootstrapping();
   %OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
-  // Setup the constructor property on the Number prototype object.
+  // Set up the constructor property on the Number prototype object.
   %SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
 
   %OptimizeObjectForAddingMultipleProperties($Number, 5);
@@ -1134,7 +1401,7 @@
                DONT_ENUM | DONT_DELETE | READ_ONLY);
   %ToFastProperties($Number);
 
-  // Setup non-enumerable functions on the Number prototype object.
+  // Set up non-enumerable functions on the Number prototype object.
   InstallFunctions($Number.prototype, DONT_ENUM, $Array(
     "toString", NumberToString,
     "toLocaleString", NumberToLocaleString,
@@ -1145,7 +1412,7 @@
   ));
 }
 
-SetupNumber();
+SetUpNumber();
 
 
 // ----------------------------------------------------------------------------
@@ -1154,6 +1421,10 @@
 $Function.prototype.constructor = $Function;
 
 function FunctionSourceString(func) {
+  while (%IsJSFunctionProxy(func)) {
+    func = %GetCallTrap(func);
+  }
+
   if (!IS_FUNCTION(func)) {
     throw new $TypeError('Function.prototype.toString is not generic');
   }
@@ -1169,7 +1440,9 @@
     }
   }
 
-  var name = %FunctionGetName(func);
+  var name = %FunctionNameShouldPrintAsAnonymous(func)
+      ? 'anonymous'
+      : %FunctionGetName(func);
   return 'function ' + name + source;
 }
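// Editor's illustrative sketch (not part of the patch): effect of the
// anonymous-name handling in FunctionSourceString above, paired with
// %FunctionMarkNameShouldPrintAsAnonymous in NewFunction further down.
var f = new Function('a', 'b', 'return a + b');
// f.toString() now begins with "function anonymous(a, b)", while f.name is
// left untouched rather than forcibly set to the string "anonymous".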
 
@@ -1181,12 +1454,13 @@
 
 // ES5 15.3.4.5
 function FunctionBind(this_arg) { // Length is 1.
-  if (!IS_FUNCTION(this)) {
+  if (!IS_SPEC_FUNCTION(this)) {
       throw new $TypeError('Bind must be called on a function');
   }
   // this_arg is not an argument that should be bound.
   var argc_bound = (%_ArgumentsLength() || 1) - 1;
   var fn = this;
+
   if (argc_bound == 0) {
     var result = function() {
       if (%_IsConstructCall()) {
@@ -1195,8 +1469,7 @@
         // materializing it and guarantee that this function will be optimized.
         return %NewObjectFromBound(fn, null);
       }
-
-      return fn.apply(this_arg, arguments);
+      return %Apply(fn, this_arg, arguments, 0, %_ArgumentsLength());
     };
   } else {
     var bound_args = new InternalArray(argc_bound);
@@ -1226,7 +1499,7 @@
       for (var i = 0; i < argc; i++) {
         args[argc_bound + i] = %_Arguments(i);
       }
-      return fn.apply(this_arg, args);
+      return %Apply(fn, this_arg, args, 0, argc + argc_bound);
     };
   }
 
@@ -1237,10 +1510,16 @@
   // is called and make them non-enumerable and non-configurable.
   // To be consistent with our normal functions we leave this as it is.
 
-  // Set the correct length.
-  var length = (this.length - argc_bound) > 0 ? this.length - argc_bound : 0;
-  %FunctionSetLength(result, length);
-
+  %FunctionRemovePrototype(result);
+  %FunctionSetBound(result);
+  // Set the correct length. If this is a function proxy, this.length might
+  // throw, or return a bogus result. Leave length alone in that case.
+  // TODO(rossberg): This is underspecified in the current proxy proposal.
+  try {
+    var old_length = ToInteger(this.length);
+    var length = (old_length - argc_bound) > 0 ? old_length - argc_bound : 0;
+    %BoundFunctionSetLength(result, length);
+  } catch(x) {}
   return result;
 }
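// Editor's illustrative sketch (not part of the patch): bound-function length
// after the change above, and why the try/catch guard exists.
function add(a, b, c) { return a + b + c; }
var bound = add.bind(null, 1);
// bound.length === 2 (original length 3 minus one bound argument), and the
// result has its 'prototype' property removed via %FunctionRemovePrototype.
// For a function proxy, reading this.length may throw; in that case the
// length is simply left alone, as the TODO above notes.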
 
@@ -1263,7 +1542,7 @@
   // The call to SetNewFunctionAttributes will ensure the prototype
   // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
   var f = %CompileString(source)();
-  %FunctionSetName(f, "anonymous");
+  %FunctionMarkNameShouldPrintAsAnonymous(f);
   return %SetNewFunctionAttributes(f);
 }
 
@@ -1271,11 +1550,12 @@
 
 // ----------------------------------------------------------------------------
 
-function SetupFunction() {
+function SetUpFunction() {
+  %CheckIsBootstrapping();
   InstallFunctions($Function.prototype, DONT_ENUM, $Array(
     "bind", FunctionBind,
     "toString", FunctionToString
   ));
 }
 
-SetupFunction();
+SetUpFunction();
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 4b033fc..3881d66 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -43,90 +43,99 @@
 
 
 // Constructor for the Locker object.  Once the Locker is constructed the
-// current thread will be guaranteed to have the big V8 lock.
-Locker::Locker() : has_lock_(false), top_level_(true) {
-  // TODO(isolates): When Locker has Isolate parameter and it is provided, grab
-  // that one instead of using the current one.
-  // We pull default isolate for Locker constructor w/o p[arameter.
-  // A thread should not enter an isolate before acquiring a lock,
-  // in cases which mandate using Lockers.
-  // So getting a lock is the first thing threads do in a scenario where
-  // multple threads share an isolate. Hence, we need to access
-  // 'locking isolate' before we can actually enter into default isolate.
-  internal::Isolate* isolate = internal::Isolate::GetDefaultIsolateForLocking();
-  ASSERT(isolate != NULL);
-
+// current thread will be guaranteed to have the lock for a given isolate.
+Locker::Locker(v8::Isolate* isolate)
+  : has_lock_(false),
+    top_level_(true),
+    isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+  if (isolate_ == NULL) {
+    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
+  }
   // Record that the Locker has been used at least once.
   active_ = true;
   // Get the big lock if necessary.
-  if (!isolate->thread_manager()->IsLockedByCurrentThread()) {
-    isolate->thread_manager()->Lock();
+  if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
+    isolate_->thread_manager()->Lock();
     has_lock_ = true;
 
-    if (isolate->IsDefaultIsolate()) {
-      // This only enters if not yet entered.
-      internal::Isolate::EnterDefaultIsolate();
-    }
-
-    ASSERT(internal::Thread::HasThreadLocal(
-        internal::Isolate::thread_id_key()));
-
     // Make sure that V8 is initialized.  Archiving of threads interferes
     // with deserialization by adding additional root pointers, so we must
     // initialize here, before anyone can call ~Locker() or Unlocker().
-    if (!isolate->IsInitialized()) {
+    if (!isolate_->IsInitialized()) {
+      isolate_->Enter();
       V8::Initialize();
+      isolate_->Exit();
     }
+
     // This may be a locker within an unlocker in which case we have to
     // get the saved state for this thread and restore it.
-    if (isolate->thread_manager()->RestoreThread()) {
+    if (isolate_->thread_manager()->RestoreThread()) {
       top_level_ = false;
     } else {
-      internal::ExecutionAccess access(isolate);
-      isolate->stack_guard()->ClearThread(access);
-      isolate->stack_guard()->InitThread(access);
+      internal::ExecutionAccess access(isolate_);
+      isolate_->stack_guard()->ClearThread(access);
+      isolate_->stack_guard()->InitThread(access);
+    }
+    if (isolate_->IsDefaultIsolate()) {
+      // This only enters if not yet entered.
+      internal::Isolate::EnterDefaultIsolate();
     }
   }
-  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
+  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
 }
 
 
-bool Locker::IsLocked() {
-  return internal::Isolate::Current()->thread_manager()->
-      IsLockedByCurrentThread();
+bool Locker::IsLocked(v8::Isolate* isolate) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  if (internal_isolate == NULL) {
+    internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
+  }
+  return internal_isolate->thread_manager()->IsLockedByCurrentThread();
+}
+
+
+bool Locker::IsActive() {
+  return active_;
 }
 
 
 Locker::~Locker() {
-  // TODO(isolate): this should use a field storing the isolate it
-  // locked instead.
-  internal::Isolate* isolate = internal::Isolate::Current();
-  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
+  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
   if (has_lock_) {
-    if (top_level_) {
-      isolate->thread_manager()->FreeThreadResources();
-    } else {
-      isolate->thread_manager()->ArchiveThread();
+    if (isolate_->IsDefaultIsolate()) {
+      isolate_->Exit();
     }
-    isolate->thread_manager()->Unlock();
+    if (top_level_) {
+      isolate_->thread_manager()->FreeThreadResources();
+    } else {
+      isolate_->thread_manager()->ArchiveThread();
+    }
+    isolate_->thread_manager()->Unlock();
   }
 }
 
 
-Unlocker::Unlocker() {
-  internal::Isolate* isolate = internal::Isolate::Current();
-  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
-  isolate->thread_manager()->ArchiveThread();
-  isolate->thread_manager()->Unlock();
+Unlocker::Unlocker(v8::Isolate* isolate)
+  : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+  if (isolate_ == NULL) {
+    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
+  }
+  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
+  if (isolate_->IsDefaultIsolate()) {
+    isolate_->Exit();
+  }
+  isolate_->thread_manager()->ArchiveThread();
+  isolate_->thread_manager()->Unlock();
 }
 
 
 Unlocker::~Unlocker() {
-  // TODO(isolates): check it's the isolate we unlocked.
-  internal::Isolate* isolate = internal::Isolate::Current();
-  ASSERT(!isolate->thread_manager()->IsLockedByCurrentThread());
-  isolate->thread_manager()->Lock();
-  isolate->thread_manager()->RestoreThread();
+  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
+  isolate_->thread_manager()->Lock();
+  isolate_->thread_manager()->RestoreThread();
+  if (isolate_->IsDefaultIsolate()) {
+    isolate_->Enter();
+  }
 }
 
 
@@ -144,17 +153,20 @@
 
 
 bool ThreadManager::RestoreThread() {
+  ASSERT(IsLockedByCurrentThread());
   // First check whether the current thread has been 'lazily archived', ie
   // not archived at all.  If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
   if (lazily_archived_thread_.Equals(ThreadId::Current())) {
     lazily_archived_thread_ = ThreadId::Invalid();
-    ASSERT(Isolate::CurrentPerIsolateThreadData()->thread_state() ==
-           lazily_archived_thread_state_);
+    Isolate::PerIsolateThreadData* per_thread =
+        isolate_->FindPerThreadDataForThisThread();
+    ASSERT(per_thread != NULL);
+    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
     lazily_archived_thread_state_->set_id(ThreadId::Invalid());
     lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
     lazily_archived_thread_state_ = NULL;
-    Isolate::CurrentPerIsolateThreadData()->set_thread_state(NULL);
+    per_thread->set_thread_state(NULL);
     return true;
   }
 
@@ -168,7 +180,7 @@
     EagerlyArchiveThread();
   }
   Isolate::PerIsolateThreadData* per_thread =
-      Isolate::CurrentPerIsolateThreadData();
+      isolate_->FindPerThreadDataForThisThread();
   if (per_thread == NULL || per_thread->thread_state() == NULL) {
     // This is a new thread.
     isolate_->stack_guard()->InitThread(access);
@@ -178,7 +190,7 @@
   char* from = state->data();
   from = isolate_->handle_scope_implementer()->RestoreThread(from);
   from = isolate_->RestoreThread(from);
-  from = Relocatable::RestoreState(from);
+  from = Relocatable::RestoreState(isolate_, from);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   from = isolate_->debug()->RestoreDebug(from);
 #endif
@@ -293,16 +305,21 @@
 
 
 ThreadManager::~ThreadManager() {
-  // TODO(isolates): Destroy mutexes.
+  delete mutex_;
+  delete free_anchor_;
+  delete in_use_anchor_;
 }
 
 
 void ThreadManager::ArchiveThread() {
   ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
   ASSERT(!IsArchived());
+  ASSERT(IsLockedByCurrentThread());
   ThreadState* state = GetFreeThreadState();
   state->Unlink();
-  Isolate::CurrentPerIsolateThreadData()->set_thread_state(state);
+  Isolate::PerIsolateThreadData* per_thread =
+      isolate_->FindOrAllocatePerThreadDataForThisThread();
+  per_thread->set_thread_state(state);
   lazily_archived_thread_ = ThreadId::Current();
   lazily_archived_thread_state_ = state;
   ASSERT(state->id().Equals(ThreadId::Invalid()));
@@ -312,6 +329,7 @@
 
 
 void ThreadManager::EagerlyArchiveThread() {
+  ASSERT(IsLockedByCurrentThread());
   ThreadState* state = lazily_archived_thread_state_;
   state->LinkInto(ThreadState::IN_USE_LIST);
   char* to = state->data();
@@ -319,7 +337,7 @@
   // in ThreadManager::Iterate(ObjectVisitor*).
   to = isolate_->handle_scope_implementer()->ArchiveThread(to);
   to = isolate_->ArchiveThread(to);
-  to = Relocatable::ArchiveState(to);
+  to = Relocatable::ArchiveState(isolate_, to);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   to = isolate_->debug()->ArchiveDebug(to);
 #endif
@@ -344,11 +362,11 @@
 
 
 bool ThreadManager::IsArchived() {
-  Isolate::PerIsolateThreadData* data = Isolate::CurrentPerIsolateThreadData();
+  Isolate::PerIsolateThreadData* data =
+      isolate_->FindPerThreadDataForThisThread();
   return data != NULL && data->thread_state() != NULL;
 }
 
-
 void ThreadManager::Iterate(ObjectVisitor* v) {
   // Expecting no threads during serialization/deserialization
   for (ThreadState* state = FirstThreadStateInUse();
@@ -390,9 +408,10 @@
 
 
 ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
-  : Thread(isolate, "v8:CtxtSwitcher"),
+  : Thread("v8:CtxtSwitcher"),
     keep_going_(true),
-    sleep_ms_(every_n_ms) {
+    sleep_ms_(every_n_ms),
+    isolate_(isolate) {
 }
 
 
@@ -400,7 +419,7 @@
 // ContextSwitcher thread if needed.
 void ContextSwitcher::StartPreemption(int every_n_ms) {
   Isolate* isolate = Isolate::Current();
-  ASSERT(Locker::IsLocked());
+  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
   if (isolate->context_switcher() == NULL) {
     // If the ContextSwitcher thread is not running at the moment start it now.
     isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
@@ -417,7 +436,7 @@
 // must cooperatively schedule amongst them from this point on.
 void ContextSwitcher::StopPreemption() {
   Isolate* isolate = Isolate::Current();
-  ASSERT(Locker::IsLocked());
+  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
   if (isolate->context_switcher() != NULL) {
     // The ContextSwitcher thread is running. We need to stop it and release
     // its resources.
diff --git a/src/v8threads.h b/src/v8threads.h
index d8a923e..a2aee4e 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -54,6 +54,7 @@
 
   // Get data area for archiving a thread.
   char* data() { return data_; }
+
  private:
   explicit ThreadState(ThreadManager* thread_manager);
 
@@ -71,7 +72,7 @@
 };
 
 
-// Defined in top.h
+// Defined in isolate.h.
 class ThreadLocalTop;
 
 
@@ -152,12 +153,15 @@
   static void PreemptionReceived();
 
  private:
-  explicit ContextSwitcher(Isolate* isolate, int every_n_ms);
+  ContextSwitcher(Isolate* isolate, int every_n_ms);
+
+  Isolate* isolate() const { return isolate_; }
 
   void Run();
 
   bool keep_going_;
   int sleep_ms_;
+  Isolate* isolate_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/v8utils.cc b/src/v8utils.cc
new file mode 100644
index 0000000..bf0e05d
--- /dev/null
+++ b/src/v8utils.cc
@@ -0,0 +1,360 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "platform.h"
+
+#include "sys/stat.h"
+
+namespace v8 {
+namespace internal {
+
+
+void PrintF(const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  OS::VPrint(format, arguments);
+  va_end(arguments);
+}
+
+
+void PrintF(FILE* out, const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  OS::VFPrint(out, format, arguments);
+  va_end(arguments);
+}
+
+
+void Flush(FILE* out) {
+  fflush(out);
+}
+
+
+char* ReadLine(const char* prompt) {
+  char* result = NULL;
+  char line_buf[256];
+  int offset = 0;
+  bool keep_going = true;
+  fprintf(stdout, "%s", prompt);
+  fflush(stdout);
+  while (keep_going) {
+    if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
+      // fgets got an error. Just give up.
+      if (result != NULL) {
+        DeleteArray(result);
+      }
+      return NULL;
+    }
+    int len = StrLength(line_buf);
+    if (len > 1 &&
+        line_buf[len - 2] == '\\' &&
+        line_buf[len - 1] == '\n') {
+      // When we read a line that ends with a "\" we remove the escape and
+      // append the remainder.
+      line_buf[len - 2] = '\n';
+      line_buf[len - 1] = 0;
+      len -= 1;
+    } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
+      // Since we read a new line we are done reading the line. This
+      // will exit the loop after copying this buffer into the result.
+      keep_going = false;
+    }
+    if (result == NULL) {
+      // Allocate the initial result and make room for the terminating '\0'
+      result = NewArray<char>(len + 1);
+    } else {
+      // Allocate a new result with enough room for the new addition.
+      int new_len = offset + len + 1;
+      char* new_result = NewArray<char>(new_len);
+      // Copy the existing input into the new array and set the new
+      // array as the result.
+      memcpy(new_result, result, offset * kCharSize);
+      DeleteArray(result);
+      result = new_result;
+    }
+    // Copy the newly read line into the result.
+    memcpy(result + offset, line_buf, len * kCharSize);
+    offset += len;
+  }
+  ASSERT(result != NULL);
+  result[offset] = '\0';
+  return result;
+}
+
+
+char* ReadCharsFromFile(FILE* file,
+                        int* size,
+                        int extra_space,
+                        bool verbose,
+                        const char* filename) {
+  if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
+    if (verbose) {
+      OS::PrintError("Cannot read from file %s.\n", filename);
+    }
+    return NULL;
+  }
+
+  // Get the size of the file and rewind it.
+  *size = ftell(file);
+  rewind(file);
+
+  char* result = NewArray<char>(*size + extra_space);
+  for (int i = 0; i < *size && feof(file) == 0;) {
+    int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
+    if (read != (*size - i) && ferror(file) != 0) {
+      fclose(file);
+      DeleteArray(result);
+      return NULL;
+    }
+    i += read;
+  }
+  return result;
+}
+
+
+char* ReadCharsFromFile(const char* filename,
+                        int* size,
+                        int extra_space,
+                        bool verbose) {
+  FILE* file = OS::FOpen(filename, "rb");
+  char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
+  if (file != NULL) fclose(file);
+  return result;
+}
+
+
+byte* ReadBytes(const char* filename, int* size, bool verbose) {
+  char* chars = ReadCharsFromFile(filename, size, 0, verbose);
+  return reinterpret_cast<byte*>(chars);
+}
+
+
+static Vector<const char> SetVectorContents(char* chars,
+                                            int size,
+                                            bool* exists) {
+  if (!chars) {
+    *exists = false;
+    return Vector<const char>::empty();
+  }
+  chars[size] = '\0';
+  *exists = true;
+  return Vector<const char>(chars, size);
+}
+
+
+Vector<const char> ReadFile(const char* filename,
+                            bool* exists,
+                            bool verbose) {
+  int size;
+  char* result = ReadCharsFromFile(filename, &size, 1, verbose);
+  return SetVectorContents(result, size, exists);
+}
+
+
+Vector<const char> ReadFile(FILE* file,
+                            bool* exists,
+                            bool verbose) {
+  int size;
+  char* result = ReadCharsFromFile(file, &size, 1, verbose, "");
+  return SetVectorContents(result, size, exists);
+}
+
+
+int WriteCharsToFile(const char* str, int size, FILE* f) {
+  int total = 0;
+  while (total < size) {
+    int write = static_cast<int>(fwrite(str, 1, size - total, f));
+    if (write == 0) {
+      return total;
+    }
+    total += write;
+    str += write;
+  }
+  return total;
+}
+
+
+int AppendChars(const char* filename,
+                const char* str,
+                int size,
+                bool verbose) {
+  FILE* f = OS::FOpen(filename, "ab");
+  if (f == NULL) {
+    if (verbose) {
+      OS::PrintError("Cannot open file %s for writing.\n", filename);
+    }
+    return 0;
+  }
+  int written = WriteCharsToFile(str, size, f);
+  fclose(f);
+  return written;
+}
+
+
+int WriteChars(const char* filename,
+               const char* str,
+               int size,
+               bool verbose) {
+  FILE* f = OS::FOpen(filename, "wb");
+  if (f == NULL) {
+    if (verbose) {
+      OS::PrintError("Cannot open file %s for writing.\n", filename);
+    }
+    return 0;
+  }
+  int written = WriteCharsToFile(str, size, f);
+  fclose(f);
+  return written;
+}
+
+
+int WriteBytes(const char* filename,
+               const byte* bytes,
+               int size,
+               bool verbose) {
+  const char* str = reinterpret_cast<const char*>(bytes);
+  return WriteChars(filename, str, size, verbose);
+}
+
+
+
+void StringBuilder::AddFormatted(const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  AddFormattedList(format, arguments);
+  va_end(arguments);
+}
+
+
+void StringBuilder::AddFormattedList(const char* format, va_list list) {
+  ASSERT(!is_finalized() && position_ < buffer_.length());
+  int n = OS::VSNPrintF(buffer_ + position_, format, list);
+  if (n < 0 || n >= (buffer_.length() - position_)) {
+    position_ = buffer_.length();
+  } else {
+    position_ += n;
+  }
+}
+
+
+MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
+    : filename_(NULL),
+      data_(NULL),
+      length_(0),
+      remove_file_on_cleanup_(false) {
+  Init(filename);
+}
+
+
+MemoryMappedExternalResource::
+    MemoryMappedExternalResource(const char* filename,
+                                 bool remove_file_on_cleanup)
+    : filename_(NULL),
+      data_(NULL),
+      length_(0),
+      remove_file_on_cleanup_(remove_file_on_cleanup) {
+  Init(filename);
+}
+
+
+MemoryMappedExternalResource::~MemoryMappedExternalResource() {
+  // Release the resources if we had successfully acquired them:
+  if (file_ != NULL) {
+    delete file_;
+    if (remove_file_on_cleanup_) {
+      OS::Remove(filename_);
+    }
+    DeleteArray<char>(filename_);
+  }
+}
+
+
+void MemoryMappedExternalResource::Init(const char* filename) {
+  file_ = OS::MemoryMappedFile::open(filename);
+  if (file_ != NULL) {
+    filename_ = StrDup(filename);
+    data_ = reinterpret_cast<char*>(file_->memory());
+    length_ = file_->size();
+  }
+}
+
+
+bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
+  bool is_ascii = true;
+
+  int line_no = 1;
+  const char* start_of_line = data_;
+  const char* end = data_ + length_;
+  for (const char* p = data_; p < end; p++) {
+    char c = *p;
+    if ((c & 0x80) != 0) {
+      // Non-ascii detected:
+      is_ascii = false;
+
+      // Report the error and abort if appropriate:
+      if (abort_if_failed) {
+        int char_no = static_cast<int>(p - start_of_line) - 1;
+
+        ASSERT(filename_ != NULL);
+        PrintF("\n\n\n"
+               "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
+               c, filename_, line_no, char_no);
+
+        // Allow for some context up to kNumberOfLeadingContextChars chars
+        // before the offending non-ascii char to help the user see where
+        // the offending char is.
+        const int kNumberOfLeadingContextChars = 10;
+        const char* err_context = p - kNumberOfLeadingContextChars;
+        if (err_context < data_) {
+          err_context = data_;
+        }
+        // Compute the length of the error context and print it.
+        int err_context_length = static_cast<int>(p - err_context);
+        if (err_context_length != 0) {
+          PrintF(" after \"%.*s\"", err_context_length, err_context);
+        }
+        PrintF(".\n\n\n");
+        OS::Abort();
+      }
+
+      break;  // Non-ascii detected.  No need to continue scanning.
+    }
+    if (c == '\n') {
+      start_of_line = p;
+      line_no++;
+    }
+  }
+
+  return is_ascii;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/v8utils.h b/src/v8utils.h
index 93fc1fd..aada521 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -188,73 +188,11 @@
 Vector<const char> ReadFile(const char* filename,
                             bool* exists,
                             bool verbose = true);
+Vector<const char> ReadFile(FILE* file,
+                            bool* exists,
+                            bool verbose = true);
 
 
-// Helper class for building result strings in a character buffer. The
-// purpose of the class is to use safe operations that checks the
-// buffer bounds on all operations in debug mode.
-class StringBuilder {
- public:
-  // Create a string builder with a buffer of the given size. The
-  // buffer is allocated through NewArray<char> and must be
-  // deallocated by the caller of Finalize().
-  explicit StringBuilder(int size);
-
-  StringBuilder(char* buffer, int size)
-      : buffer_(buffer, size), position_(0) { }
-
-  ~StringBuilder() { if (!is_finalized()) Finalize(); }
-
-  int size() const { return buffer_.length(); }
-
-  // Get the current position in the builder.
-  int position() const {
-    ASSERT(!is_finalized());
-    return position_;
-  }
-
-  // Reset the position.
-  void Reset() { position_ = 0; }
-
-  // Add a single character to the builder. It is not allowed to add
-  // 0-characters; use the Finalize() method to terminate the string
-  // instead.
-  void AddCharacter(char c) {
-    ASSERT(c != '\0');
-    ASSERT(!is_finalized() && position_ < buffer_.length());
-    buffer_[position_++] = c;
-  }
-
-  // Add an entire string to the builder. Uses strlen() internally to
-  // compute the length of the input string.
-  void AddString(const char* s);
-
-  // Add the first 'n' characters of the given string 's' to the
-  // builder. The input string must have enough characters.
-  void AddSubstring(const char* s, int n);
-
-  // Add formatted contents to the builder just like printf().
-  void AddFormatted(const char* format, ...);
-
-  // Add formatted contents like printf based on a va_list.
-  void AddFormattedList(const char* format, va_list list);
-
-  // Add character padding to the builder. If count is non-positive,
-  // nothing is added to the builder.
-  void AddPadding(char c, int count);
-
-  // Finalize the string by 0-terminating it and returning the buffer.
-  char* Finalize();
-
- private:
-  Vector<char> buffer_;
-  int position_;
-
-  bool is_finalized() const { return position_ < 0; }
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
-};
-
 
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
@@ -313,6 +251,19 @@
   bool remove_file_on_cleanup_;
 };
 
+class StringBuilder : public SimpleStringBuilder {
+ public:
+  explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
+  StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
+
+  // Add formatted contents to the builder just like printf().
+  void AddFormatted(const char* format, ...);
+
+  // Add formatted contents like printf based on a va_list.
+  void AddFormattedList(const char* format, va_list list);
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+};
 
 } }  // namespace v8::internal
 
diff --git a/src/variables.cc b/src/variables.cc
index 0502722..971061b 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -41,6 +41,7 @@
   switch (mode) {
     case VAR: return "VAR";
     case CONST: return "CONST";
+    case LET: return "LET";
     case DYNAMIC: return "DYNAMIC";
     case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
     case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
@@ -52,40 +53,6 @@
 }
 
 
-Property* Variable::AsProperty() const {
-  return rewrite_ == NULL ? NULL : rewrite_->AsProperty();
-}
-
-
-Slot* Variable::AsSlot() const {
-  return rewrite_ == NULL ? NULL : rewrite_->AsSlot();
-}
-
-
-bool Variable::IsStackAllocated() const {
-  Slot* slot = AsSlot();
-  return slot != NULL && slot->IsStackAllocated();
-}
-
-
-bool Variable::IsParameter() const {
-  Slot* s = AsSlot();
-  return s != NULL && s->type() == Slot::PARAMETER;
-}
-
-
-bool Variable::IsStackLocal() const {
-  Slot* s = AsSlot();
-  return s != NULL && s->type() == Slot::LOCAL;
-}
-
-
-bool Variable::IsContextSlot() const {
-  Slot* s = AsSlot();
-  return s != NULL && s->type() == Slot::CONTEXT;
-}
-
-
 Variable::Variable(Scope* scope,
                    Handle<String> name,
                    Mode mode,
@@ -95,8 +62,9 @@
     name_(name),
     mode_(mode),
     kind_(kind),
+    location_(UNALLOCATED),
+    index_(-1),
     local_if_not_shadowed_(NULL),
-    rewrite_(NULL),
     is_valid_LHS_(is_valid_LHS),
     is_accessed_from_inner_scope_(false),
     is_used_(false) {
diff --git a/src/variables.h b/src/variables.h
index b1ff0db..56c8dab 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -46,6 +46,8 @@
 
     CONST,     // declared via 'const' declarations
 
+    LET,       // declared via 'let' declarations
+
     // Variables introduced by the compiler:
     DYNAMIC,         // always require dynamic lookup (we don't know
                      // the declaration)
@@ -72,6 +74,33 @@
     ARGUMENTS
   };
 
+  enum Location {
+    // Before and during variable allocation, a variable whose location is
+    // not yet determined.  After allocation, a variable looked up as a
+    // property on the global object (and possibly absent).  name() is the
+    // variable name, index() is invalid.
+    UNALLOCATED,
+
+    // A slot in the parameter section on the stack.  index() is the
+    // parameter index, counting left-to-right.  The receiver is index -1;
+    // the first parameter is index 0.
+    PARAMETER,
+
+    // A slot in the local section on the stack.  index() is the variable
+    // index in the stack frame, starting at 0.
+    LOCAL,
+
+    // An indexed slot in a heap context.  index() is the variable index in
+    // the context object on the heap, starting at 0.  scope() is the
+    // corresponding scope.
+    CONTEXT,
+
+    // A named slot in a heap context.  name() is the variable name in the
+    // context object on the heap, with lookup starting at the current
+    // context.  index() is invalid.
+    LOOKUP
+  };
+
   Variable(Scope* scope,
            Handle<String> name,
            Mode mode,
@@ -81,10 +110,6 @@
   // Printing support
   static const char* Mode2String(Mode mode);
 
-  // Type testing & conversion
-  Property* AsProperty() const;
-  Slot* AsSlot() const;
-
   bool IsValidLeftHandSide() { return is_valid_LHS_; }
 
   // The source code for an eval() call may refer to a variable that is
@@ -99,6 +124,7 @@
     return is_accessed_from_inner_scope_;
   }
   void MarkAsAccessedFromInnerScope() {
+    ASSERT(mode_ != TEMPORARY);
     is_accessed_from_inner_scope_ = true;
   }
   bool is_used() { return is_used_; }
@@ -108,10 +134,12 @@
     return !is_this() && name().is_identical_to(n);
   }
 
-  bool IsStackAllocated() const;
-  bool IsParameter() const;  // Includes 'this'.
-  bool IsStackLocal() const;
-  bool IsContextSlot() const;
+  bool IsUnallocated() const { return location_ == UNALLOCATED; }
+  bool IsParameter() const { return location_ == PARAMETER; }
+  bool IsStackLocal() const { return location_ == LOCAL; }
+  bool IsStackAllocated() const { return IsParameter() || IsStackLocal(); }
+  bool IsContextSlot() const { return location_ == CONTEXT; }
+  bool IsLookupSlot() const { return location_ == LOOKUP; }
 
   bool is_dynamic() const {
     return (mode_ == DYNAMIC ||
@@ -138,21 +166,24 @@
     local_if_not_shadowed_ = local;
   }
 
-  Expression* rewrite() const { return rewrite_; }
-  void set_rewrite(Expression* expr) { rewrite_ = expr; }
+  Location location() const { return location_; }
+  int index() const { return index_; }
+
+  void AllocateTo(Location location, int index) {
+    location_ = location;
+    index_ = index;
+  }
 
  private:
   Scope* scope_;
   Handle<String> name_;
   Mode mode_;
   Kind kind_;
+  Location location_;
+  int index_;
 
   Variable* local_if_not_shadowed_;
 
-  // Code generation.
-  // rewrite_ is usually a Slot or a Property, but may be any expression.
-  Expression* rewrite_;
-
   // Valid as a LHS? (const and this are not valid LHS, for example)
   bool is_valid_LHS_;
 
diff --git a/src/version.cc b/src/version.cc
index 47e7fe2..2c21152 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     2
-#define BUILD_NUMBER      10
-#define PATCH_LEVEL       40
+#define MINOR_VERSION     6
+#define BUILD_NUMBER      6
+#define PATCH_LEVEL       19
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 1f363de..c647e56 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -39,7 +39,6 @@
 // logger and partially threaded through the call stack.  States are pushed by
 // VMState construction and popped by destruction.
 //
-#ifdef ENABLE_VMSTATE_TRACKING
 inline const char* StateToString(StateTag state) {
   switch (state) {
     case JS:
@@ -61,32 +60,16 @@
 
 VMState::VMState(Isolate* isolate, StateTag tag)
     : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
     LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag)));
     LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
   }
-#endif
 
   isolate_->SetCurrentVMState(tag);
-
-#ifdef ENABLE_HEAP_PROTECTION
-  if (FLAG_protect_heap) {
-    if (tag == EXTERNAL) {
-      // We are leaving V8.
-      ASSERT(previous_tag_ != EXTERNAL);
-      isolate_->heap()->Protect();
-    } else if (previous_tag_ = EXTERNAL) {
-      // We are entering V8.
-      isolate_->heap()->Unprotect();
-    }
-  }
-#endif
 }
 
 
 VMState::~VMState() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
     LOG(isolate_,
         UncheckedStringEvent("Leaving",
@@ -94,32 +77,10 @@
     LOG(isolate_,
         UncheckedStringEvent("To", StateToString(previous_tag_)));
   }
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
-#ifdef ENABLE_HEAP_PROTECTION
-  StateTag tag = isolate_->current_vm_state();
-#endif
 
   isolate_->SetCurrentVMState(previous_tag_);
-
-#ifdef ENABLE_HEAP_PROTECTION
-  if (FLAG_protect_heap) {
-    if (tag == EXTERNAL) {
-      // We are reentering V8.
-      ASSERT(previous_tag_ != EXTERNAL);
-      isolate_->heap()->Unprotect();
-    } else if (previous_tag_ == EXTERNAL) {
-      // We are leaving V8.
-      isolate_->heap()->Protect();
-    }
-  }
-#endif  // ENABLE_HEAP_PROTECTION
 }
 
-#endif  // ENABLE_VMSTATE_TRACKING
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
 
 ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
     : isolate_(isolate), previous_callback_(isolate->external_callback()) {
@@ -130,8 +91,6 @@
   isolate_->set_external_callback(previous_callback_);
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
 
 } }  // namespace v8::internal
 
diff --git a/src/vm-state.h b/src/vm-state.h
index 11fc6d6..831e2d3 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -28,13 +28,13 @@
 #ifndef V8_VM_STATE_H_
 #define V8_VM_STATE_H_
 
+#include "allocation.h"
 #include "isolate.h"
 
 namespace v8 {
 namespace internal {
 
 class VMState BASE_EMBEDDED {
-#ifdef ENABLE_VMSTATE_TRACKING
  public:
   inline VMState(Isolate* isolate, StateTag tag);
   inline ~VMState();
@@ -42,26 +42,16 @@
  private:
   Isolate* isolate_;
   StateTag previous_tag_;
-
-#else
- public:
-  VMState(Isolate* isolate, StateTag state) {}
-#endif
 };
 
 
 class ExternalCallbackScope BASE_EMBEDDED {
-#ifdef ENABLE_LOGGING_AND_PROFILING
  public:
   inline ExternalCallbackScope(Isolate* isolate, Address callback);
   inline ~ExternalCallbackScope();
  private:
   Isolate* isolate_;
   Address previous_callback_;
-#else
- public:
-  ExternalCallbackScope(Isolate* isolate, Address callback) {}
-#endif
 };
 
 } }  // namespace v8::internal
diff --git a/src/weakmap.js b/src/weakmap.js
new file mode 100644
index 0000000..5fb5151
--- /dev/null
+++ b/src/weakmap.js
@@ -0,0 +1,98 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// const $Object = global.Object;
+const $WeakMap = global.WeakMap;
+
+// -------------------------------------------------------------------
+
+function WeakMapConstructor() {
+  if (%_IsConstructCall()) {
+    %WeakMapInitialize(this);
+  } else {
+    return new $WeakMap();
+  }
+}
+
+
+function WeakMapGet(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return !IS_UNDEFINED(%WeakMapGet(this, key));
+}
+
+
+function WeakMapDelete(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
+    %WeakMapSet(this, key, void 0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// -------------------------------------------------------------------
+
+(function () {
+  %CheckIsBootstrapping();
+  // Set up the WeakMap constructor function.
+  %SetCode($WeakMap, WeakMapConstructor);
+
+  // Set up the constructor property on the WeakMap prototype object.
+  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the WeakMap prototype object.
+  InstallFunctionsOnHiddenPrototype($WeakMap.prototype, DONT_ENUM, $Array(
+    "get", WeakMapGet,
+    "set", WeakMapSet,
+    "has", WeakMapHas,
+    "delete", WeakMapDelete
+  ));
+})();
diff --git a/src/win32-math.cc b/src/win32-math.cc
new file mode 100644
index 0000000..3410872
--- /dev/null
+++ b/src/win32-math.cc
@@ -0,0 +1,106 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+#ifdef _MSC_VER
+
+#undef V8_WIN32_LEAN_AND_MEAN
+#define V8_WIN32_HEADERS_FULL
+#include "win32-headers.h"
+#include <limits.h>        // Required for INT_MAX etc.
+#include <math.h>
+#include <float.h>         // Required for DBL_MAX and on Win32 for finite()
+#include "win32-math.h"
+
+#include "checks.h"
+
+namespace v8 {
+
+// Test for finite value - usually defined in math.h
+int isfinite(double x) {
+  return _finite(x);
+}
+
+}  // namespace v8
+
+
+// Test for a NaN (not a number) value - usually defined in math.h
+int isnan(double x) {
+  return _isnan(x);
+}
+
+
+// Test for infinity - usually defined in math.h
+int isinf(double x) {
+  return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
+}
+
+
+// Test if x is less than y and both nominal - usually defined in math.h
+int isless(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x < y;
+}
+
+
+// Test if x is greater than y and both nominal - usually defined in math.h
+int isgreater(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x > y;
+}
+
+
+// Classify floating point number - usually defined in math.h
+int fpclassify(double x) {
+  // Use the MS-specific _fpclass() for classification.
+  int flags = _fpclass(x);
+
+  // Determine class. We cannot use a switch statement because
+  // the _FPCLASS_ constants are defined as flags.
+  if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
+  if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
+  if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
+  if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
+
+  // All cases should be covered by the code above.
+  ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
+  return FP_NAN;
+}
+
+
+// Test sign - usually defined in math.h
+int signbit(double x) {
+  // We need to take care of the special case of both positive
+  // and negative versions of zero.
+  if (x == 0)
+    return _fpclass(x) & _FPCLASS_NZ;
+  else
+    return x < 0;
+}
+
+#endif  // _MSC_VER
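
These Win32 shims reproduce the C99 classification functions that MSVC did not provide. Their intended behaviour can be sanity-checked against the standard <cmath> versions on a conforming toolchain (an illustrative check, not part of the patch):

  #include <cassert>
  #include <cmath>
  #include <limits>

  int main() {
    const double nan = std::numeric_limits<double>::quiet_NaN();
    const double inf = std::numeric_limits<double>::infinity();

    assert(std::fpclassify(1.5) == FP_NORMAL);
    assert(std::fpclassify(0.0) == FP_ZERO);
    assert(std::fpclassify(inf) == FP_INFINITE);
    assert(std::fpclassify(nan) == FP_NAN);

    // signbit must distinguish -0.0 from +0.0, which a plain x < 0 cannot do;
    // that is why the shim special-cases x == 0 via _FPCLASS_NZ.
    assert(std::signbit(-0.0) && !std::signbit(0.0));

    // isless/isgreater are false (without raising) when either operand is NaN.
    assert(!std::isless(nan, 1.0) && !std::isgreater(nan, 1.0));
    return 0;
  }
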
diff --git a/test/mjsunit/override-eval-with-non-function.js b/src/win32-math.h
similarity index 69%
copy from test/mjsunit/override-eval-with-non-function.js
copy to src/win32-math.h
index aa93b25..6875999 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/src/win32-math.h
@@ -25,12 +25,37 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+#ifndef V8_WIN32_MATH_H_
+#define V8_WIN32_MATH_H_
 
-test();
+#ifndef _MSC_VER
+#error Wrong environment, expected MSVC.
+#endif  // _MSC_VER
+
+enum {
+  FP_NAN,
+  FP_INFINITE,
+  FP_ZERO,
+  FP_SUBNORMAL,
+  FP_NORMAL
+};
+
+namespace v8 {
+
+int isfinite(double x);
+
+}  // namespace v8
+
+int isnan(double x);
+int isinf(double x);
+int isless(double x, double y);
+int isgreater(double x, double y);
+int fpclassify(double x);
+int signbit(double x);
+
+#endif  // V8_WIN32_MATH_H_
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 9541a58..8db54f0 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -61,9 +61,15 @@
 }
 
 
-void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::emit_code_target(Handle<Code> target,
+                                 RelocInfo::Mode rmode,
+                                 unsigned ast_id) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
-  RecordRelocInfo(rmode);
+  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+  } else {
+    RecordRelocInfo(rmode);
+  }
   int current = code_targets_.length();
   if (current > 0 && code_targets_.last().is_identical_to(target)) {
     // Optimization if we keep jumping to the same code target.
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 6e4f005..745fdae 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -458,6 +458,20 @@
     int last_imm32 = pos - (current + sizeof(int32_t));
     long_at_put(current, last_imm32);
   }
+  while (L->is_near_linked()) {
+    int fixup_pos = L->near_link_pos();
+    int offset_to_next =
+        static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+    ASSERT(offset_to_next <= 0);
+    int disp = pos - (fixup_pos + sizeof(int8_t));
+    ASSERT(is_int8(disp));
+    set_byte_at(fixup_pos, disp);
+    if (offset_to_next < 0) {
+      L->link_to(fixup_pos + offset_to_next, Label::kNear);
+    } else {
+      L->UnuseNear();
+    }
+  }
   L->bind_to(pos);
 }
 
@@ -467,19 +481,6 @@
 }
 
 
-void Assembler::bind(NearLabel* L) {
-  ASSERT(!L->is_bound());
-  while (L->unresolved_branches_ > 0) {
-    int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
-    int disp = pc_offset() - branch_pos;
-    ASSERT(is_int8(disp));
-    set_byte_at(branch_pos - sizeof(int8_t), disp);
-    L->unresolved_branches_--;
-  }
-  L->bind_to(pc_offset());
-}
-
-
 void Assembler::GrowBuffer() {
   ASSERT(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
@@ -869,12 +870,14 @@
 }
 
 
-void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> target,
+                     RelocInfo::Mode rmode,
+                     unsigned ast_id) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // 1110 1000 #32-bit disp.
   emit(0xE8);
-  emit_code_target(target, rmode);
+  emit_code_target(target, rmode, ast_id);
 }
 
 
@@ -1212,7 +1215,7 @@
 }
 
 
-void Assembler::j(Condition cc, Label* L) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
   if (cc == always) {
     jmp(L);
     return;
@@ -1236,6 +1239,17 @@
       emit(0x80 | cc);
       emitl(offs - long_size);
     }
+  } else if (distance == Label::kNear) {
+    // 0111 tttn #8-bit disp
+    emit(0x70 | cc);
+    byte disp = 0x00;
+    if (L->is_near_linked()) {
+      int offset = L->near_link_pos() - pc_offset();
+      ASSERT(is_int8(offset));
+      disp = static_cast<byte>(offset & 0xFF);
+    }
+    L->link_to(pc_offset(), Label::kNear);
+    emit(disp);
   } else if (L->is_linked()) {
     // 0000 1111 1000 tttn #32-bit disp.
     emit(0x0F);
@@ -1265,27 +1279,7 @@
 }
 
 
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
-  EnsureSpace ensure_space(this);
-  ASSERT(0 <= cc && cc < 16);
-  if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 0111 tttn #8-bit disp
-    emit(0x70 | cc);
-    emit((offs - short_size) & 0xFF);
-  } else {
-    emit(0x70 | cc);
-    emit(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
-void Assembler::jmp(Label* L) {
+void Assembler::jmp(Label* L, Label::Distance distance) {
   EnsureSpace ensure_space(this);
   const int short_size = sizeof(int8_t);
   const int long_size = sizeof(int32_t);
@@ -1301,7 +1295,17 @@
       emit(0xE9);
       emitl(offs - long_size);
     }
-  } else  if (L->is_linked()) {
+  } else if (distance == Label::kNear) {
+    emit(0xEB);
+    byte disp = 0x00;
+    if (L->is_near_linked()) {
+      int offset = L->near_link_pos() - pc_offset();
+      ASSERT(is_int8(offset));
+      disp = static_cast<byte>(offset & 0xFF);
+    }
+    L->link_to(pc_offset(), Label::kNear);
+    emit(disp);
+  } else if (L->is_linked()) {
     // 1110 1001 #32-bit disp.
     emit(0xE9);
     emitl(L->pos());
@@ -1325,24 +1329,6 @@
 }
 
 
-void Assembler::jmp(NearLabel* L) {
-  EnsureSpace ensure_space(this);
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 1110 1011 #8-bit disp.
-    emit(0xEB);
-    emit((offs - short_size) & 0xFF);
-  } else {
-    emit(0xEB);
-    emit(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
   // Opcode FF/4 r64.
@@ -2540,6 +2526,24 @@
 }
 
 
+void Assembler::movq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (dst.low_bits() == 4) {
+    // Avoid unnecessary SIB byte.
+    emit(0xf3);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x7e);
+    emit_sse_operand(dst, src);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0xD6);
+    emit_sse_operand(src, dst);
+  }
+}
+
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit(0x66);
@@ -2603,6 +2607,42 @@
 }
 
 
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
+void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
 void Assembler::movss(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   emit(0xF3);  // single
@@ -2833,6 +2873,15 @@
 }
 
 
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit(0xF2);
@@ -2863,6 +2912,21 @@
 }
 
 
+void Assembler::roundsd(XMMRegister dst, XMMRegister src,
+                        Assembler::RoundingMode mode) {
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x3a);
+  emit(0x0b);
+  emit_sse_operand(dst, src);
+  // Mask precision exception.
+  emit(static_cast<byte>(mode) | 0x8);
+}
+
+
 void Assembler::movmskpd(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit(0x66);
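
With NearLabel removed, callers request the short form via Label::kNear and the assembler chooses between the two-byte short jump (0x70|cc, or 0xEB for jmp, plus an 8-bit displacement) and the long form (0x0F 0x80|cc, or 0xE9, plus a 32-bit displacement). A standalone sketch of just that encoding decision for an already-bound label, using a hypothetical helper rather than the real Assembler:

  #include <cstdint>
  #include <vector>

  enum Distance { kFar, kNear };

  // Emit a conditional jump from 'pc' to the bound position 'target'.
  void EmitJcc(std::vector<uint8_t>* code, int pc, int target,
               uint8_t cc, Distance distance) {
    if (distance == kNear) {
      int disp = target - (pc + 2);                  // rel8 from end of 2-byte insn
      code->push_back(static_cast<uint8_t>(0x70 | cc));   // short Jcc
      code->push_back(static_cast<uint8_t>(disp & 0xFF));
    } else {
      int disp = target - (pc + 6);                  // rel32 from end of 6-byte insn
      code->push_back(0x0F);                         // long Jcc prefix
      code->push_back(static_cast<uint8_t>(0x80 | cc));
      for (int i = 0; i < 4; i++) {                  // little-endian displacement
        code->push_back(static_cast<uint8_t>((disp >> (8 * i)) & 0xFF));
      }
    }
  }
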
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 9453277..2e373fa 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -125,7 +125,7 @@
     return names[index];
   }
 
-  static Register toRegister(int code) {
+  static Register from_code(int code) {
     Register r = { code };
     return r;
   }
@@ -327,22 +327,6 @@
 }
 
 
-enum Hint {
-  no_hint = 0,
-  not_taken = 0x2e,
-  taken = 0x3e
-};
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition.  That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
-  return (hint == no_hint)
-      ? no_hint
-      : ((hint == not_taken) ? taken : not_taken);
-}
-
-
 // -----------------------------------------------------------------------------
 // Machine instruction Immediates
 
@@ -469,6 +453,7 @@
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
+
    public:
     explicit Scope(CpuFeature f) {
       uint64_t mask = V8_UINT64_C(1) << f;
@@ -488,10 +473,12 @@
         isolate_->set_enabled_cpu_features(old_enabled_);
       }
     }
+
    private:
     Isolate* isolate_;
     uint64_t old_enabled_;
 #else
+
    public:
     explicit Scope(CpuFeature f) {}
 #endif
@@ -656,6 +643,7 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
+  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -1178,12 +1166,13 @@
   // but it may be bound only once.
 
   void bind(Label* L);  // binds an unbound label L to the current code position
-  void bind(NearLabel* L);
 
   // Calls
   // Call near relative 32-bit displacement, relative to next instruction.
   void call(Label* L);
-  void call(Handle<Code> target, RelocInfo::Mode rmode);
+  void call(Handle<Code> target,
+            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            unsigned ast_id = kNoASTId);
 
   // Calls directly to the given address using a relative offset.
   // Should only ever be used in Code objects for calls within the
@@ -1200,7 +1189,8 @@
   // Jumps
   // Jump short or near relative.
   // Use a 32-bit signed displacement.
-  void jmp(Label* L);  // unconditional jump to L
+  // Unconditional jump to L
+  void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(Handle<Code> target, RelocInfo::Mode rmode);
 
   // Jump near absolute indirect (r64)
@@ -1209,16 +1199,12 @@
   // Jump near absolute indirect (m64)
   void jmp(const Operand& src);
 
-  // Short jump
-  void jmp(NearLabel* L);
-
   // Conditional jumps
-  void j(Condition cc, Label* L);
+  void j(Condition cc,
+         Label* L,
+         Label::Distance distance = Label::kFar);
   void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
 
-  // Conditional short jump
-  void j(Condition cc, NearLabel* L, Hint hint = no_hint);
-
   // Floating-point operations
   void fld(int i);
 
@@ -1291,15 +1277,24 @@
   void movd(Register dst, XMMRegister src);
   void movq(XMMRegister dst, Register src);
   void movq(Register dst, XMMRegister src);
+  void movq(XMMRegister dst, XMMRegister src);
   void extractps(Register dst, XMMRegister src, byte imm8);
 
-  void movsd(const Operand& dst, XMMRegister src);
+  // Don't use this unless it's important to keep the
+  // top half of the destination register unchanged.
+  // Use movaps when moving double values and movq for integer
+  // values in xmm registers.
   void movsd(XMMRegister dst, XMMRegister src);
+
+  void movsd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, const Operand& src);
 
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqa(XMMRegister dst, const Operand& src);
 
+  void movapd(XMMRegister dst, XMMRegister src);
+  void movaps(XMMRegister dst, XMMRegister src);
+
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
 
@@ -1331,11 +1326,21 @@
   void andpd(XMMRegister dst, XMMRegister src);
   void orpd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, const Operand& src);
 
+  enum RoundingMode {
+    kRoundToNearest = 0x0,
+    kRoundDown      = 0x1,
+    kRoundUp        = 0x2,
+    kRoundToZero    = 0x3
+  };
+
+  void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
   void movmskpd(Register dst, XMMRegister src);
 
   // The first argument is the reg field, the second argument is the r/m field.
@@ -1348,7 +1353,9 @@
   void Print();
 
   // Check the code size generated from label to here.
-  int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+  int SizeOfCodeGeneratedSince(Label* label) {
+    return pc_offset() - label->pos();
+  }
 
   // Mark address of the ExitJSFrame code.
   void RecordJSReturn();
@@ -1408,7 +1415,9 @@
   inline void emitl(uint32_t x);
   inline void emitq(uint64_t x, RelocInfo::Mode rmode);
   inline void emitw(uint16_t x);
-  inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
+  inline void emit_code_target(Handle<Code> target,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id = kNoASTId);
   void emit(Immediate x) { emitl(x.value_); }
 
   // Emits a REX prefix that encodes a 64-bit operand size and
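
The immediate byte that the new roundsd() emits combines the rounding-control bits from RoundingMode with bit 3, which masks the precision (inexact) exception; that is what or-ing in 0x8 in the emitter does. A small sketch of that immediate composition (hypothetical helper, mirroring the enum added above):

  #include <cstdint>

  enum RoundingMode {        // mirrors the enum added to Assembler above
    kRoundToNearest = 0x0,
    kRoundDown      = 0x1,
    kRoundUp        = 0x2,
    kRoundToZero    = 0x3
  };

  // ROUNDSD imm8: bits 1:0 select the rounding mode, bit 3 suppresses the
  // precision (inexact) exception, matching emit(static_cast<byte>(mode) | 0x8).
  constexpr uint8_t RoundsdImmediate(RoundingMode mode) {
    return static_cast<uint8_t>(mode) | 0x8;
  }

  static_assert(RoundsdImmediate(kRoundDown) == 0x9,
                "rounding-control bits plus the precision-mask bit");
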
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index a549633..db06909 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -98,6 +98,7 @@
   // Set expected number of arguments to zero (not changing rax).
   __ Set(rbx, 0);
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
 }
@@ -138,7 +139,7 @@
     // rdi: constructor
     __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(rax, &rt_call);
     // rdi: constructor
     // rax: initial map (if proven valid below)
@@ -342,11 +343,12 @@
     Handle<Code> code =
         masm->isolate()->builtins()->HandleApiCallConstruct();
     ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
   } else {
     ParameterCount actual(rax);
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Restore context from the frame.
@@ -360,8 +362,9 @@
   __ JumpIfSmi(rax, &use_receiver);
 
   // If the type of the result (stored in its map) is less than
-  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
   __ j(above_equal, &exit);
 
   // Throw away the result of the constructor invocation and use the
@@ -498,7 +501,8 @@
   } else {
     ParameterCount actual(rax);
     // Function must be in rdi.
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Exit the JS frame. Notice that this also removes the empty
@@ -526,17 +530,23 @@
 
   // Push a copy of the function onto the stack.
   __ push(rdi);
+  // Push call kind information.
+  __ push(rcx);
 
   __ push(rdi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyCompile, 1);
+
+  // Restore call kind information.
+  __ pop(rcx);
+  // Restore receiver.
   __ pop(rdi);
 
   // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rcx);
+  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rax);
 }
 
 
@@ -546,17 +556,23 @@
 
   // Push a copy of the function onto the stack.
   __ push(rdi);
+  // Push call kind information.
+  __ push(rcx);
 
   __ push(rdi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore function and tear down temporary frame.
+  // Restore call kind information.
+  __ pop(rcx);
+  // Restore function.
   __ pop(rdi);
+
+  // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rcx);
+  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rax);
 }
 
 
@@ -576,15 +592,15 @@
   __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
 
   // Switch on the state.
-  NearLabel not_no_registers, not_tos_rax;
+  Label not_no_registers, not_tos_rax;
   __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
-  __ j(not_equal, &not_no_registers);
+  __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
   __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
-  __ j(not_equal, &not_tos_rax);
+  __ j(not_equal, &not_tos_rax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, rax.
 
   __ bind(&not_tos_rax);
@@ -639,15 +655,16 @@
 
   // 2. Get the function to call (passed as receiver) from the stack, check
   //    if it is a function.
-  Label non_function;
+  Label slow, non_function;
   // The function to call is at position n+1 on the stack.
   __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
   __ JumpIfSmi(rdi, &non_function);
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &non_function);
+  __ j(not_equal, &slow);
 
   // 3a. Patch the first argument if necessary when calling a function.
   Label shift_arguments;
+  __ Set(rdx, 0);  // indicate regular JS_FUNCTION
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
@@ -658,19 +675,24 @@
              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
     __ j(not_equal, &shift_arguments);
 
+    // Do not transform the receiver for natives.
+    // SharedFunctionInfo is already loaded into rbx.
+    __ testb(FieldOperand(rbx, SharedFunctionInfo::kNativeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ j(not_zero, &shift_arguments);
+
     // Compute the receiver in non-strict mode.
     __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
-    __ JumpIfSmi(rbx, &convert_to_object);
+    __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
 
     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
     __ j(equal, &use_global_receiver);
     __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
     __ j(equal, &use_global_receiver);
 
-    __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-    __ j(below, &convert_to_object);
-    __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-    __ j(below_equal, &shift_arguments);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
     __ EnterInternalFrame();  // In order to preserve argument count.
@@ -680,13 +702,14 @@
     __ push(rbx);
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
     __ movq(rbx, rax);
+    __ Set(rdx, 0);  // indicate regular JS_FUNCTION
 
     __ pop(rax);
     __ SmiToInteger32(rax, rax);
     __ LeaveInternalFrame();
     // Restore the function to rdi.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
-    __ jmp(&patch_receiver);
+    __ jmp(&patch_receiver, Label::kNear);
 
     // Use the global receiver object from the called function as the
     // receiver.
@@ -704,14 +727,19 @@
     __ jmp(&shift_arguments);
   }
 
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ Set(rdx, 1);  // indicate function proxy
+  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+  __ j(equal, &shift_arguments);
+  __ bind(&non_function);
+  __ Set(rdx, 2);  // indicate non-function
 
-  // 3b. Patch the first argument when calling a non-function.  The
+  // 3c. Patch the first argument when calling a non-function.  The
   //     CALL_NON_FUNCTION builtin expects the non-function callee as
   //     receiver, so overwrite the first argument which will ultimately
   //     become the receiver.
-  __ bind(&non_function);
   __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
-  __ Set(rdi, 0);
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
@@ -728,11 +756,25 @@
     __ decq(rax);  // One fewer argument (first argument is new receiver).
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
-  { Label function;
-    __ testq(rdi, rdi);
-    __ j(not_zero, &function);
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
+  { Label function, non_proxy;
+    __ testq(rdx, rdx);
+    __ j(zero, &function);
     __ Set(rbx, 0);
+    __ SetCallKind(rcx, CALL_AS_METHOD);
+    __ cmpq(rdx, Immediate(1));
+    __ j(not_equal, &non_proxy);
+
+    __ pop(rdx);   // return address
+    __ push(rdi);  // re-add proxy object as additional argument
+    __ push(rdx);
+    __ incq(rax);
+    __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+    __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+           RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
     __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
@@ -747,13 +789,15 @@
              FieldOperand(rdx,
                           SharedFunctionInfo::kFormalParameterCountOffset));
   __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  __ SetCallKind(rcx, CALL_AS_METHOD);
   __ cmpq(rax, rbx);
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
        RelocInfo::CODE_TARGET);
 
   ParameterCount expected(0);
-  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -773,11 +817,12 @@
   static const int kArgumentsOffset = 2 * kPointerSize;
   static const int kReceiverOffset = 3 * kPointerSize;
   static const int kFunctionOffset = 4 * kPointerSize;
+
   __ push(Operand(rbp, kFunctionOffset));
   __ push(Operand(rbp, kArgumentsOffset));
   __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying need to catch
+  // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
@@ -807,23 +852,32 @@
   __ push(rax);  // limit
   __ push(Immediate(0));  // index
 
-  // Change context eagerly to get the right global object if
-  // necessary.
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
-  // Compute the receiver.
-  Label call_to_object, use_global_receiver, push_receiver;
+  // Get the receiver.
   __ movq(rbx, Operand(rbp, kReceiverOffset));
 
+  // Check that the function is a JS function (otherwise it must be a proxy).
+  Label push_receiver;
+  __ movq(rdi, Operand(rbp, kFunctionOffset));
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &push_receiver);
+
+  // Change context eagerly to get the right global object if necessary.
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
   // Do not transform the receiver for strict mode functions.
+  Label call_to_object, use_global_receiver;
   __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
            Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
   __ j(not_equal, &push_receiver);
 
+  // Do not transform the receiver for natives.
+  __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+  __ j(not_equal, &push_receiver);
+
   // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(rbx, &call_to_object);
+  __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
   __ CompareRoot(rbx, Heap::kNullValueRootIndex);
   __ j(equal, &use_global_receiver);
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -831,17 +885,16 @@
 
   // If given receiver is already a JavaScript object then there's no
   // reason for converting it.
-  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-  __ j(below, &call_to_object);
-  __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-  __ j(below_equal, &push_receiver);
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+  __ j(above_equal, &push_receiver);
 
   // Convert the receiver to an object.
   __ bind(&call_to_object);
   __ push(rbx);
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ movq(rbx, rax);
-  __ jmp(&push_receiver);
+  __ jmp(&push_receiver, Label::kNear);
 
   // Use the current global receiver object as the receiver.
   __ bind(&use_global_receiver);
@@ -885,13 +938,30 @@
   __ j(not_equal, &loop);
 
   // Invoke the function.
+  Label call_proxy;
   ParameterCount actual(rax);
   __ SmiToInteger32(rax, rax);
   __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &call_proxy);
+  __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove function, receiver, and arguments
+  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+
+  // Invoke the function proxy.
+  __ bind(&call_proxy);
+  __ push(rdi);  // add function proxy as last argument
+  __ incq(rax);
+  __ Set(rbx, 0);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
 
 
@@ -1254,7 +1324,7 @@
     // Initial map for the builtin Array functions should be maps.
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
     __ Check(not_smi, "Unexpected initial map for Array function");
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
@@ -1288,7 +1358,7 @@
     // Initial map for the builtin Array function should be a map.
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
     __ Check(not_smi, "Unexpected initial map for Array function");
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
@@ -1324,11 +1394,11 @@
   // Push the function on the stack.
   __ push(rdi);
 
-  // Preserve the number of arguments on the stack. Must preserve both
-  // rax and rbx because these registers are used when copying the
+  // Preserve the number of arguments on the stack. Must preserve rax,
+  // rbx and rcx because these registers are used when copying the
   // arguments and the receiver.
-  __ Integer32ToSmi(rcx, rax);
-  __ push(rcx);
+  __ Integer32ToSmi(r8, rax);
+  __ push(r8);
 }
 
 
@@ -1352,6 +1422,7 @@
   // ----------- S t a t e -------------
   //  -- rax : actual number of arguments
   //  -- rbx : expected number of arguments
+  //  -- rcx : call kind information
   //  -- rdx : code entry to call
   // -----------------------------------
 
@@ -1372,14 +1443,14 @@
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
-    __ Set(rcx, -1);  // account for receiver
+    __ Set(r8, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
-    __ incq(rcx);
+    __ incq(r8);
     __ push(Operand(rax, 0));
     __ subq(rax, Immediate(kPointerSize));
-    __ cmpq(rcx, rbx);
+    __ cmpq(r8, rbx);
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1391,23 +1462,23 @@
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
-    __ Set(rcx, -1);  // account for receiver
+    __ Set(r8, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
-    __ incq(rcx);
+    __ incq(r8);
     __ push(Operand(rdi, 0));
     __ subq(rdi, Immediate(kPointerSize));
-    __ cmpq(rcx, rax);
+    __ cmpq(r8, rax);
     __ j(less, &copy);
 
     // Fill remaining expected arguments with undefined values.
     Label fill;
     __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
     __ bind(&fill);
-    __ incq(rcx);
+    __ incq(r8);
     __ push(kScratchRegister);
-    __ cmpq(rcx, rbx);
+    __ cmpq(r8, rbx);
     __ j(less, &fill);
 
     // Restore function pointer.
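
Both copy loops now count in r8 so that rcx stays free for the call kind: the adaptor copies the receiver plus the actual arguments and, when too few were passed, pads the remaining slots with undefined up to the expected count. Reduced to plain C++, the argument shuffling amounts to roughly this (a sketch, not the real adaptor frame code):

  #include <algorithm>
  #include <vector>

  struct Value {};                        // stand-in for a tagged V8 value
  static const Value kUndefined{};

  // Build the argument list an adaptor frame hands on: copy what the caller
  // actually supplied (up to the expected count), then pad with undefined.
  std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
                                    size_t expected) {
    std::vector<Value> adapted;
    size_t copy_count = std::min(actual.size(), expected);
    adapted.insert(adapted.end(), actual.begin(), actual.begin() + copy_count);
    while (adapted.size() < expected) {
      adapted.push_back(kUndefined);      // fill remaining slots with undefined
    }
    return adapted;
  }
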
@@ -1456,17 +1527,17 @@
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
-  NearLabel skip;
+  Label skip;
   __ SmiCompare(rax, Smi::FromInt(-1));
-  __ j(not_equal, &skip);
+  __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
   // If we decide not to perform on-stack replacement we perform a
   // stack guard check to enable interrupts.
   __ bind(&stack_check);
-  NearLabel ok;
+  Label ok;
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &ok);
+  __ j(above_equal, &ok, Label::kNear);
 
   StackCheckStub stub;
   __ TailCallStub(&stub);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index c365385..6499ea0 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -40,15 +40,15 @@
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in eax.
-  NearLabel check_heap_number, call_builtin;
+  Label check_heap_number, call_builtin;
   __ SmiTest(rax);
-  __ j(not_zero, &check_heap_number);
+  __ j(not_zero, &check_heap_number, Label::kNear);
   __ Ret();
 
   __ bind(&check_heap_number);
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_builtin);
+  __ j(not_equal, &call_builtin, Label::kNear);
   __ Ret();
 
   __ bind(&call_builtin);
@@ -125,18 +125,17 @@
   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
 
   // Setup the object header.
-  __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+  __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
   __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
   __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
 
   // Setup the fixed slots.
   __ Set(rbx, 0);  // Set to NULL.
   __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
-  __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
-  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
   __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
 
-  // Copy the global object from the surrounding context.
+  // Copy the global object from the previous context.
   __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
 
@@ -152,7 +151,7 @@
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
-  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
 }
 
 
@@ -231,53 +230,139 @@
 }
 
 
+// The stub expects its argument on the stack and returns its result in tos_:
+// zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  NearLabel false_result, true_result, not_string;
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+  Label patch;
+  const Register argument = rax;
+  const Register map = rdx;
 
-  // 'null' => false.
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  __ j(equal, &false_result);
+  if (!types_.IsEmpty()) {
+    __ movq(argument, Operand(rsp, 1 * kPointerSize));
+  }
 
-  // Get the map and type of the heap object.
-  // We don't use CmpObjectType because we manipulate the type field.
-  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+  // undefined -> false
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
 
-  // Undetectable => false.
-  __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
-  __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
-  __ j(not_zero, &false_result);
+  // Boolean -> its value
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
 
-  // JavaScript object => true.
-  __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
-  __ j(above_equal, &true_result);
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
 
-  // String value => false iff empty.
-  __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
-  __ j(above_equal, &not_string);
-  __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
-  __ SmiTest(rdx);
-  __ j(zero, &false_result);
-  __ jmp(&true_result);
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all other -> true
+    Label not_smi;
+    __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
+    // argument contains the correct return value already
+    if (!tos_.is(argument)) {
+      __ movq(tos_, argument);
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&not_smi);
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(argument, &patch, Label::kNear);
+  }
 
-  __ bind(&not_string);
-  __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &true_result);
-  // HeapNumber => false iff +0, -0, or NaN.
-  // These three cases set the zero flag when compared to zero using ucomisd.
-  __ xorpd(xmm0, xmm0);
-  __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ j(zero, &false_result);
-  // Fall through to |true_result|.
+  if (types_.NeedsMap()) {
+    __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
 
-  // Return 1/0 for true/false in rax.
-  __ bind(&true_result);
-  __ Set(rax, 1);
-  __ ret(1 * kPointerSize);
-  __ bind(&false_result);
-  __ Set(rax, 0);
-  __ ret(1 * kPointerSize);
+    if (types_.CanBeUndetectable()) {
+      __ testb(FieldOperand(map, Map::kBitFieldOffset),
+               Immediate(1 << Map::kIsUndetectable));
+      // Undetectable -> false.
+      Label not_undetectable;
+      __ j(zero, &not_undetectable, Label::kNear);
+      __ Set(tos_, 0);
+      __ ret(1 * kPointerSize);
+      __ bind(&not_undetectable);
+    }
+  }
+
+  if (types_.Contains(SPEC_OBJECT)) {
+    // spec object -> true.
+    Label not_js_object;
+    __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+    __ j(below, &not_js_object, Label::kNear);
+    // argument contains the correct return value already.
+    if (!tos_.is(argument)) {
+      __ Set(tos_, 1);
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&not_js_object);
+  }
+
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    Label not_string;
+    __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+    __ j(above_equal, &not_string, Label::kNear);
+    __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
+    __ ret(1 * kPointerSize);  // the string length is OK as the return value
+    __ bind(&not_string);
+  }
+
+  if (types_.Contains(HEAP_NUMBER)) {
+    // heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number, false_result;
+    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &not_heap_number, Label::kNear);
+    __ xorps(xmm0, xmm0);
+    __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
+    __ j(zero, &false_result, Label::kNear);
+    // argument contains the correct return value already.
+    if (!tos_.is(argument)) {
+      __ Set(tos_, 1);
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&false_result);
+    __ Set(tos_, 0);
+    __ ret(1 * kPointerSize);
+    __ bind(&not_heap_number);
+  }
+
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  const Register argument = rax;
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value tos_.
+    Label different_value;
+    __ CompareRoot(argument, value);
+    __ j(not_equal, &different_value, Label::kNear);
+    if (!result) {
+      // If we have to return zero, there is no way around clearing tos_.
+      __ Set(tos_, 0);
+    } else if (!tos_.is(argument)) {
+      // If we have to return non-zero, we can re-use the argument if it is the
+      // same register as the result, because we never see Smi-zero here.
+      __ Set(tos_, 1);
+    }
+    __ ret(1 * kPointerSize);
+    __ bind(&different_value);
+  }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(rcx);  // Get return address, operand is now on top of stack.
+  __ Push(Smi::FromInt(tos_.code()));
+  __ Push(Smi::FromInt(types_.ToByte()));
+  __ push(rcx);  // Push return address.
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
 }
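
The specialized stub only handles the cases recorded in types_ and patches itself via GenerateTypeTransition for anything else; the semantics it implements are the usual ECMAScript ToBoolean rules. Those rules, stated as a plain C++ reference function over a hypothetical, simplified value model (undetectable objects, which the stub also maps to false, are omitted):

  #include <cmath>
  #include <string>

  // Hypothetical value model, just to spell out the rules the stub specializes.
  struct JSValue {
    enum Kind { kUndefined, kNull, kBoolean, kSmi, kString, kHeapNumber, kObject };
    Kind kind;
    bool boolean = false;
    int smi = 0;
    std::string string;
    double number = 0.0;
  };

  bool ToBoolean(const JSValue& v) {
    switch (v.kind) {
      case JSValue::kUndefined:
      case JSValue::kNull:       return false;
      case JSValue::kBoolean:    return v.boolean;
      case JSValue::kSmi:        return v.smi != 0;         // Smi zero is false
      case JSValue::kString:     return !v.string.empty();  // false iff empty
      case JSValue::kHeapNumber:                            // false for +0, -0, NaN
        return v.number != 0.0 && !std::isnan(v.number);
      case JSValue::kObject:     return true;               // spec objects are truthy
    }
    return false;
  }
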
 
 
@@ -322,15 +407,327 @@
 };
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
-  return stub.GetCode();
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+                    Register result,
+                    Register source) {
+  // Result may be rcx. If result and source are the same register, source will
+  // be overwritten.
+  ASSERT(!result.is(rdi) && !result.is(rbx));
+  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+  // cvttsd2si (32-bit version) directly.
+  Register double_exponent = rbx;
+  Register double_value = rdi;
+  Label done, exponent_63_plus;
+  // Get double and extract exponent.
+  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+  // Clear result preemptively, in case we need to return zero.
+  __ xorl(result, result);
+  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
+  // Double to remove sign bit, shift exponent down to least significant bits,
+  // and subtract bias to get the unshifted, unbiased exponent.
+  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+  // Check whether the exponent is too big for a 63 bit unsigned integer.
+  __ cmpl(double_exponent, Immediate(63));
+  __ j(above_equal, &exponent_63_plus, Label::kNear);
+  // Handle exponent range 0..62.
+  __ cvttsd2siq(result, xmm0);
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&exponent_63_plus);
+  // Exponent negative or 63+.
+  __ cmpl(double_exponent, Immediate(83));
+  // If exponent negative or above 83, number contains no significant bits in
+  // the range 0..2^31, so result is zero, and rcx already holds zero.
+  __ j(above, &done, Label::kNear);
+
+  // Exponent in range 63..83.
+  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+  // the least significant exponent-52 bits.
+
+  // Negate low bits of mantissa if value is negative.
+  __ addq(double_value, double_value);  // Move sign bit to carry.
+  __ sbbl(result, result);  // And convert carry to -1 in result register.
+  // If result is -1 (the value was negative), compute (double_value - 1) ^ -1;
+  // otherwise compute (double_value - 0) ^ 0.
+  __ addl(double_value, result);
+  // Do xor in opposite directions depending on where we want the result
+  // (depending on whether result is rcx or not).
+
+  if (result.is(rcx)) {
+    __ xorl(double_value, result);
+    // Left shift mantissa by (exponent - mantissabits - 1) to save the
+    // bits that have positional values below 2^32 (the extra -1 comes from the
+    // doubling done above to move the sign bit into the carry flag).
+    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+    __ shll_cl(double_value);
+    __ movl(result, double_value);
+  } else {
+    // As the then-branch, but move double-value to result before shifting.
+    __ xorl(result, double_value);
+    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+    __ shll_cl(result);
+  }
+
+  __ bind(&done);
 }
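
IntegerConvert starts by recovering the unbiased exponent: the raw 64-bit pattern is doubled to drop the sign bit, the 11 exponent bits are shifted down, and the bias is subtracted; everything else branches on that exponent. The same extraction written portably (a sketch of the arithmetic, not V8 code):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int UnbiasedExponent(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));    // reinterpret without UB
    const int kExponentBits = 11;
    const int kExponentBias = 1023;
    uint64_t without_sign = bits << 1;           // "double to remove sign bit"
    int biased = static_cast<int>(without_sign >> (64 - kExponentBits));
    return biased - kExponentBias;
  }

  int main() {
    assert(UnbiasedExponent(1.0) == 0);
    assert(UnbiasedExponent(2.0) == 1);
    assert(UnbiasedExponent(0.75) == -1);
    return 0;
  }
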
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(rcx);  // Save return address.
+
+  __ push(rax);  // the operand
+  __ Push(Smi::FromInt(op_));
+  __ Push(Smi::FromInt(mode_));
+  __ Push(Smi::FromInt(operand_type_));
+
+  __ push(rcx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label slow;
+  GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* slow,
+                                     Label::Distance non_smi_near,
+                                     Label::Distance slow_near) {
+  Label done;
+  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+  __ SmiNeg(rax, rax, &done, Label::kNear);
+  __ jmp(slow, slow_near);
+  __ bind(&done);
+  __ ret(0);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                        Label* non_smi,
+                                        Label::Distance non_smi_near) {
+  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+  __ SmiNot(rax, rax);
+  __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(
+    MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  // Check if the operand is a heap number.
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, slow);
+
+  // Operand is a float, negate its value by flipping the sign bit.
+  if (mode_ == UNARY_OVERWRITE) {
+    __ Set(kScratchRegister, 0x01);
+    __ shl(kScratchRegister, Immediate(63));
+    __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
+  } else {
+    // Allocate a heap number before calculating the answer,
+    // so we don't have an untagged double around during GC.
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(rax);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ movq(rcx, rax);
+    __ pop(rax);
+    __ LeaveInternalFrame();
+    __ bind(&heapnumber_allocated);
+    // rcx: allocated 'empty' number
+
+    // Copy the double value to the new heap number, flipping the sign.
+    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Set(kScratchRegister, 0x01);
+    __ shl(kScratchRegister, Immediate(63));
+    __ xor_(rdx, kScratchRegister);  // Flip sign.
+    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+    __ movq(rax, rcx);
+  }
+  __ ret(0);
+}
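
GenerateHeapNumberCodeSub negates the number without touching the FPU: it XORs bit 63 of the IEEE-754 bit pattern (the kScratchRegister value built with shl 63). The same trick in portable C++, for illustration only:

  #include <cassert>
  #include <cmath>
  #include <cstdint>
  #include <cstring>

  // Flip a double's sign by toggling bit 63 of its representation,
  // mirroring the shl(63) / xor_ sequence above.
  double NegateViaSignBit(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits ^= uint64_t{1} << 63;                   // flip only the sign bit
    double result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
  }

  int main() {
    assert(NegateViaSignBit(1.5) == -1.5);
    assert(std::signbit(NegateViaSignBit(0.0)));  // +0.0 becomes -0.0
    return 0;
  }
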
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
+                                               Label* slow) {
+  // Check if the operand is a heap number.
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, slow);
+
+  // Convert the heap number in rax to an untagged integer in rcx.
+  IntegerConvert(masm, rax, rax);
+
+  // Do the bitwise operation and smi tag the result.
+  __ notl(rax);
+  __ Integer32ToSmi(rax, rax);
+  __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
+  __ pop(rcx);  // pop return address
+  __ push(rax);
+  __ push(rcx);  // push return address
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::PrintName(StringStream* stream) {
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+  stream->Add("UnaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              UnaryOpIC::GetName(operand_type_));
+}
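
For reference, the format string above yields stub names of the form shown below; this is an illustration only, using hard-coded strings where the real code queries Token::Name and UnaryOpIC::GetName and writes into a StringStream.

#include <cstdio>

int main() {
  char name[64];
  std::snprintf(name, sizeof(name), "UnaryOpStub_%s_%s_%s",
                "SUB", "Alloc", "Smi");
  std::puts(name);  // prints: UnaryOpStub_SUB_Alloc_Smi
  return 0;
}
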
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ pop(rcx);  // Save return address.
   __ push(rdx);
   __ push(rax);
@@ -346,36 +743,39 @@
   // Patch the caller to an appropriate specialized stub and return the
   // operation result to the caller of the stub.
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
   switch (operands_type_) {
-    case TRBinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
       break;
-    case TRBinaryOpIC::SMI:
+    case BinaryOpIC::SMI:
       GenerateSmiStub(masm);
       break;
-    case TRBinaryOpIC::INT32:
+    case BinaryOpIC::INT32:
       UNREACHABLE();
       // The int32 case is identical to the Smi case.  We avoid creating this
       // ic state on x64.
       break;
-    case TRBinaryOpIC::HEAP_NUMBER:
+    case BinaryOpIC::HEAP_NUMBER:
       GenerateHeapNumberStub(masm);
       break;
-    case TRBinaryOpIC::ODDBALL:
+    case BinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
-    case TRBinaryOpIC::STRING:
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
-    case TRBinaryOpIC::GENERIC:
+    case BinaryOpIC::GENERIC:
       GenerateGeneric(masm);
       break;
     default:
@@ -384,12 +784,7 @@
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name;
   switch (mode_) {
@@ -398,21 +793,19 @@
     case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
     default: overwrite_name = "UnknownOverwrite"; break;
   }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "TypeRecordingBinaryOpStub_%s_%s_%s",
-               op_name,
-               overwrite_name,
-               TRBinaryOpIC::GetName(operands_type_));
-  return name_;
+  stream->Add("BinaryOpStub_%s_%s_%s",
+              op_name,
+              overwrite_name,
+              BinaryOpIC::GetName(operands_type_));
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
 
-  // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+  // Arguments to BinaryOpStub are in rdx and rax.
   Register left = rdx;
   Register right = rax;
 
@@ -558,10 +951,9 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
-    MacroAssembler* masm,
-    Label* allocation_failure,
-    Label* non_numeric_failure) {
+void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
+                                             Label* allocation_failure,
+                                             Label* non_numeric_failure) {
   switch (op_) {
     case Token::ADD:
     case Token::SUB:
@@ -660,32 +1052,32 @@
   // No fall-through from this generated code.
   if (FLAG_debug_code) {
     __ Abort("Unexpected fall-through in "
-             "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+             "BinaryOpStub::GenerateFloatingPointCode.");
   }
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD);
-  NearLabel left_not_string, call_runtime;
+  Label left_not_string, call_runtime;
 
   // Registers containing left and right operands respectively.
   Register left = rdx;
   Register right = rax;
 
   // Test if left operand is a string.
-  __ JumpIfSmi(left, &left_not_string);
+  __ JumpIfSmi(left, &left_not_string, Label::kNear);
   __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &left_not_string);
+  __ j(above_equal, &left_not_string, Label::kNear);
   StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
   GenerateRegisterArgsPush(masm);
   __ TailCallStub(&string_add_left_stub);
 
   // Left operand is not a string, test right.
   __ bind(&left_not_string);
-  __ JumpIfSmi(right, &call_runtime);
+  __ JumpIfSmi(right, &call_runtime, Label::kNear);
   __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime);
+  __ j(above_equal, &call_runtime, Label::kNear);
 
   StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
   GenerateRegisterArgsPush(masm);
@@ -696,7 +1088,7 @@
 }
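
The fast path above just dispatches on which operand is already known to be a string so that StringAddStub can skip the corresponding check; a plain C++ sketch of that decision (the enum names are illustrative, not V8 identifiers):

enum AddPath { kAddLeftIsString, kAddRightIsString, kAddRuntime };

AddPath ChooseStringAddPath(bool left_is_string, bool right_is_string) {
  if (left_is_string) return kAddLeftIsString;    // NO_STRING_CHECK_LEFT_IN_STUB
  if (right_is_string) return kAddRightIsString;  // NO_STRING_CHECK_RIGHT_IN_STUB
  return kAddRuntime;                             // fall through to call_runtime
}
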
 
 
-void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
   GenerateRegisterArgsPush(masm);
   switch (op_) {
     case Token::ADD:
@@ -738,10 +1130,10 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label call_runtime;
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
     // Only allow smi results.
     GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
   } else {
@@ -761,17 +1153,47 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   GenerateStringAddCode(masm);
   // Try to add arguments as strings, otherwise, transition to the generic
-  // TRBinaryOpIC type.
+  // BinaryOpIC type.
   GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   Label call_runtime;
 
   if (op_ == Token::ADD) {
@@ -781,18 +1203,18 @@
   }
 
   // Convert oddball arguments to numbers.
-  NearLabel check, done;
+  Label check, done;
   __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &check);
+  __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(rdx, rdx);
   } else {
     __ LoadRoot(rdx, Heap::kNanValueRootIndex);
   }
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&check);
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(rax, rax);
   } else {
@@ -804,7 +1226,7 @@
 }
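
The conversion above only needs to special-case undefined: ToNumber(undefined) is NaN, and for bitwise operators the subsequent ToInt32 would map NaN to 0, so the stub loads 0 directly. A trivial sketch:

#include <limits>

double ConvertUndefinedOperand(bool is_bit_op) {
  // Bitwise operators apply ToInt32, which maps NaN to 0, so 0 is loaded
  // directly for them; everything else sees ToNumber(undefined) == NaN.
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}
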
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label gc_required, not_number;
   GenerateFloatingPointCode(masm, &gc_required, &not_number);
 
@@ -816,7 +1238,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime, call_string_add_or_runtime;
 
   GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -833,9 +1255,8 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
-    MacroAssembler* masm,
-    Label* alloc_failure) {
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                Label* alloc_failure) {
   Label skip_allocation;
   OverwriteMode mode = mode_;
   switch (mode) {
@@ -873,7 +1294,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   __ pop(rcx);
   __ push(rdx);
   __ push(rax);
@@ -900,11 +1321,10 @@
   Label skip_cache;
   const bool tagged = (argument_type_ == TAGGED);
   if (tagged) {
-    NearLabel input_not_smi;
-    NearLabel loaded;
+    Label input_not_smi, loaded;
     // Test that rax is a number.
     __ movq(rax, Operand(rsp, kPointerSize));
-    __ JumpIfNotSmi(rax, &input_not_smi);
+    __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
     // Input is a smi. Untag and load it onto the FPU stack.
     // Then load the bits of the double into rbx.
     __ SmiToInteger32(rax, rax);
@@ -915,7 +1335,7 @@
     __ movq(rdx, xmm1);
     __ fld_d(Operand(rsp, 0));
     __ addq(rsp, Immediate(kDoubleSize));
-    __ jmp(&loaded);
+    __ jmp(&loaded, Label::kNear);
 
     __ bind(&input_not_smi);
     // Check if input is a HeapNumber.
@@ -990,9 +1410,9 @@
   __ addl(rcx, rcx);
   __ lea(rcx, Operand(rax, rcx, times_8, 0));
   // Check if cache matches: Double value is stored in uint32_t[2] array.
-  NearLabel cache_miss;
+  Label cache_miss;
   __ cmpq(rbx, Operand(rcx, 0));
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
   __ movq(rax, Operand(rcx, 2 * kIntSize));
   if (tagged) {
@@ -1100,8 +1520,8 @@
     __ j(below, &in_range);
     // Check for infinity and NaN. Both return NaN for sin.
     __ cmpl(rdi, Immediate(0x7ff));
-    NearLabel non_nan_result;
-    __ j(not_equal, &non_nan_result);
+    Label non_nan_result;
+    __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
     __ fstp(0);
     __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
@@ -1129,7 +1549,7 @@
 
     // Compute st(0) % st(1)
     {
-      NearLabel partial_remainder_loop;
+      Label partial_remainder_loop;
       __ bind(&partial_remainder_loop);
       __ fprem1();
       __ fwait();
@@ -1166,74 +1586,6 @@
 }
 
 
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
-                    Register result,
-                    Register source) {
-  // Result may be rcx. If result and source are the same register, source will
-  // be overwritten.
-  ASSERT(!result.is(rdi) && !result.is(rbx));
-  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
-  // cvttsd2si (32-bit version) directly.
-  Register double_exponent = rbx;
-  Register double_value = rdi;
-  NearLabel done, exponent_63_plus;
-  // Get double and extract exponent.
-  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
-  // Clear result preemptively, in case we need to return zero.
-  __ xorl(result, result);
-  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
-  // Double to remove sign bit, shift exponent down to least significant bits.
-  // and subtract bias to get the unshifted, unbiased exponent.
-  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
-  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
-  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
-  // Check whether the exponent is too big for a 63 bit unsigned integer.
-  __ cmpl(double_exponent, Immediate(63));
-  __ j(above_equal, &exponent_63_plus);
-  // Handle exponent range 0..62.
-  __ cvttsd2siq(result, xmm0);
-  __ jmp(&done);
-
-  __ bind(&exponent_63_plus);
-  // Exponent negative or 63+.
-  __ cmpl(double_exponent, Immediate(83));
-  // If exponent negative or above 83, number contains no significant bits in
-  // the range 0..2^31, so result is zero, and rcx already holds zero.
-  __ j(above, &done);
-
-  // Exponent in rage 63..83.
-  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
-  // the least significant exponent-52 bits.
-
-  // Negate low bits of mantissa if value is negative.
-  __ addq(double_value, double_value);  // Move sign bit to carry.
-  __ sbbl(result, result);  // And convert carry to -1 in result register.
-  // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
-  __ addl(double_value, result);
-  // Do xor in opposite directions depending on where we want the result
-  // (depending on whether result is rcx or not).
-
-  if (result.is(rcx)) {
-    __ xorl(double_value, result);
-    // Left shift mantissa by (exponent - mantissabits - 1) to save the
-    // bits that have positional values below 2^32 (the extra -1 comes from the
-    // doubling done above to move the sign bit into the carry flag).
-    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
-    __ shll_cl(double_value);
-    __ movl(result, double_value);
-  } else {
-    // As the then-branch, but move double-value to result before shifting.
-    __ xorl(result, double_value);
-    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
-    __ shll_cl(result);
-  }
-
-  __ bind(&done);
-}
-
-
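
The removed helper above (the new unary-op code earlier in this diff still calls IntegerConvert, so a copy presumably lives elsewhere in the file now) extracts the low 32 bits of the truncated double by inspecting the exponent. A stand-alone C++ sketch of the same idea, assuming IEEE-754 doubles and ignoring the register-allocation concerns discussed in the comments:

#include <cstdint>
#include <cstring>

// Returns the low 32 bits of trunc(value), which is all a bit operation needs.
uint32_t DoubleToInt32Bits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;  // |value| < 1 truncates to zero
  if (exponent < 63) {
    // Same as the cvttsd2siq branch: the truncated value fits in 64 bits.
    return static_cast<uint32_t>(static_cast<int64_t>(value));
  }
  if (exponent > 83) return 0;  // no bits with positional value below 2^32 survive
  // Exponent 63..83: shift the 53-bit mantissa so that the bits with
  // positional values below 2^32 land in the low word.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
  return (bits >> 63) ? 0u - low : low;  // two's-complement negate if negative
}
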
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
@@ -1390,8 +1742,8 @@
 
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
-  NearLabel first_smi, check_second;
-  __ JumpIfSmi(first, &first_smi);
+  Label first_smi;
+  __ JumpIfSmi(first, &first_smi, Label::kNear);
   __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, on_not_smis);
   // Convert HeapNumber to smi if possible.
@@ -1406,7 +1758,6 @@
   __ j(not_equal, on_not_smis);
   __ Integer32ToSmi(first, smi_result);
 
-  __ bind(&check_second);
   __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
   __ bind(&first_smi);
   if (FLAG_debug_code) {
@@ -1432,91 +1783,6 @@
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  Label slow, done;
-
-  if (op_ == Token::SUB) {
-    if (include_smi_code_) {
-      // Check whether the value is a smi.
-      Label try_float;
-      __ JumpIfNotSmi(rax, &try_float);
-      if (negative_zero_ == kIgnoreNegativeZero) {
-        __ SmiCompare(rax, Smi::FromInt(0));
-        __ j(equal, &done);
-      }
-      __ SmiNeg(rax, rax, &done);
-      __ jmp(&slow);  // zero, if not handled above, and Smi::kMinValue.
-
-      // Try floating point case.
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(rax);
-    }
-
-    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &slow);
-    // Operand is a float, negate its value by flipping sign bit.
-    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ Set(kScratchRegister, 0x01);
-    __ shl(kScratchRegister, Immediate(63));
-    __ xor_(rdx, kScratchRegister);  // Flip sign.
-    // rdx is value to store.
-    if (overwrite_ == UNARY_OVERWRITE) {
-      __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
-    } else {
-      __ AllocateHeapNumber(rcx, rbx, &slow);
-      // rcx: allocated 'empty' number
-      __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
-      __ movq(rax, rcx);
-    }
-  } else if (op_ == Token::BIT_NOT) {
-    if (include_smi_code_) {
-      Label try_float;
-      __ JumpIfNotSmi(rax, &try_float);
-      __ SmiNot(rax, rax);
-      __ jmp(&done);
-      // Try floating point case.
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(rax);
-    }
-
-    // Check if the operand is a heap number.
-    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &slow);
-
-    // Convert the heap number in rax to an untagged integer in rcx.
-    IntegerConvert(masm, rax, rax);
-
-    // Do the bitwise operation and smi tag the result.
-    __ notl(rax);
-    __ Integer32ToSmi(rax, rax);
-  }
-
-  // Return from the stub.
-  __ bind(&done);
-  __ StubReturn(1);
-
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ bind(&slow);
-  __ pop(rcx);  // pop return address
-  __ push(rax);
-  __ push(rcx);  // push return address
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
   // Registers are used as follows:
   // rdx = base
@@ -1562,20 +1828,20 @@
   __ movq(rdx, rax);
 
   // Get absolute value of exponent.
-  NearLabel no_neg;
+  Label no_neg;
   __ cmpl(rax, Immediate(0));
-  __ j(greater_equal, &no_neg);
+  __ j(greater_equal, &no_neg, Label::kNear);
   __ negl(rax);
   __ bind(&no_neg);
 
   // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
-  NearLabel while_true;
-  NearLabel no_multiply;
+  __ movaps(xmm1, xmm3);
+  Label while_true;
+  Label no_multiply;
 
   __ bind(&while_true);
   __ shrl(rax, Immediate(1));
-  __ j(not_carry, &no_multiply);
+  __ j(not_carry, &no_multiply, Label::kNear);
   __ mulsd(xmm1, xmm0);
   __ bind(&no_multiply);
   __ mulsd(xmm0, xmm0);
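
The loop above is binary exponentiation on the absolute value of the integer exponent: each bit shifted out of rax decides whether the accumulator in xmm1 picks up the current power of the base in xmm0. An equivalent C++ sketch for a non-negative exponent (the stub handles a negative exponent afterwards by dividing 1 by the result):

#include <cstdint>

double PowByPositiveInt(double base, uint32_t exponent) {
  double result = 1.0;                 // xmm1 is preloaded with 1
  while (exponent != 0) {
    if (exponent & 1) result *= base;  // carry set by shrl -> mulsd xmm1, xmm0
    base *= base;                      // mulsd xmm0, xmm0
    exponent >>= 1;                    // shrl rax, 1
  }
  return result;
}
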
@@ -1587,8 +1853,8 @@
   __ j(positive, &allocate_return);
   // Special case if xmm1 has reached infinity.
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ xorpd(xmm0, xmm0);
+  __ movaps(xmm1, xmm3);
+  __ xorps(xmm0, xmm0);
   __ ucomisd(xmm0, xmm1);
   __ j(equal, &call_runtime);
 
@@ -1605,12 +1871,11 @@
   __ ucomisd(xmm1, xmm1);
   __ j(parity_even, &call_runtime);
 
-  NearLabel base_not_smi;
-  NearLabel handle_special_cases;
-  __ JumpIfNotSmi(rdx, &base_not_smi);
+  Label base_not_smi, handle_special_cases;
+  __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
   __ SmiToInteger32(rdx, rdx);
   __ cvtlsi2sd(xmm0, rdx);
-  __ jmp(&handle_special_cases);
+  __ jmp(&handle_special_cases, Label::kNear);
 
   __ bind(&base_not_smi);
   __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
@@ -1625,22 +1890,22 @@
 
   // base is in xmm0 and exponent is in xmm1.
   __ bind(&handle_special_cases);
-  NearLabel not_minus_half;
+  Label not_minus_half;
   // Test for -0.5.
   // Load xmm2 with -0.5.
   __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
   __ movq(xmm2, rcx);
   // xmm2 now has -0.5.
   __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half);
+  __ j(not_equal, &not_minus_half, Label::kNear);
 
   // Calculates reciprocal of square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
+  __ movaps(xmm1, xmm3);
   __ jmp(&allocate_return);
 
   // Test for 0.5.
@@ -1653,8 +1918,8 @@
   __ j(not_equal, &call_runtime);
   // Calculates square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);  // Convert -0 to 0.
   __ sqrtsd(xmm1, xmm1);
 
   __ bind(&allocate_return);
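
Both square-root special cases above add +0 to the base first so that a -0 input becomes +0 before sqrtsd, which is what the surrounding comments say the ECMA spec requires. A hedged C++ sketch of the two branches, using std::sqrt in place of sqrtsd and assuming the exponent has already been matched against +/-0.5:

#include <cmath>

double PowHalf(double base, double exponent) {
  double x = 0.0 + base;  // xorps/addsd: converts -0.0 to +0.0
  double root = std::sqrt(x);
  return exponent < 0.0 ? 1.0 / root : root;  // -0.5 case: reciprocal sqrt
}
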
@@ -1728,49 +1993,288 @@
 }
 
 
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+  // Stack layout:
+  //  rsp[0] : return address
+  //  rsp[8] : number of parameters (tagged)
+  //  rsp[16] : receiver displacement
+  //  rsp[24] : function
+  // Registers used over the whole function:
+  //  rbx: the mapped parameter count (untagged)
+  //  rax: the allocated object (tagged).
+
+  Factory* factory = masm->isolate()->factory();
+
+  __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+  // rbx = parameter count (untagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  Label adaptor_frame, try_allocate;
+  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(equal, &adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+  __ movq(rcx, rbx);
+  __ jmp(&try_allocate, Label::kNear);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ SmiToInteger64(rcx,
+                    Operand(rdx,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+                      StandardFrameConstants::kCallerSPOffset));
+  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+  // rbx = parameter count (untagged)
+  // rcx = argument count (untagged)
+  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
+  __ cmpq(rbx, rcx);
+  __ j(less_equal, &try_allocate, Label::kNear);
+  __ movq(rbx, rcx);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  Label no_parameter_map;
+  __ Set(r8, 0);  // No parameter map when there are no mapped parameters.
+  __ testq(rbx, rbx);
+  __ j(zero, &no_parameter_map, Label::kNear);
+  __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+  __ bind(&no_parameter_map);
+
+  // 2. Backing store.
+  __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+
+  // Do the allocation of all three objects in one go.
+  __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
+
+  // rax = address of new object(s) (tagged)
+  // rcx = argument count (untagged)
+  // Get the arguments boilerplate from the current (global) context into rdi.
+  Label has_mapped_parameters, copy;
+  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+  __ testq(rbx, rbx);
+  __ j(not_zero, &has_mapped_parameters, Label::kNear);
+
+  const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
+  __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+  __ jmp(&copy, Label::kNear);
+
+  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
+  __ bind(&has_mapped_parameters);
+  __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+  __ bind(&copy);
+
+  // rax = address of new object (tagged)
+  // rbx = mapped parameter count (untagged)
+  // rcx = argument count (untagged)
+  // rdi = address of boilerplate object (tagged)
+  // Copy the JS object part.
+  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+    __ movq(rdx, FieldOperand(rdi, i));
+    __ movq(FieldOperand(rax, i), rdx);
+  }
+
+  // Setup the callee in-object property.
+  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+  __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+                       Heap::kArgumentsCalleeIndex * kPointerSize),
+          rdx);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  // Note: rcx is tagged from here on.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  __ Integer32ToSmi(rcx, rcx);
+  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+                       Heap::kArgumentsLengthIndex * kPointerSize),
+          rcx);
+
+  // Setup the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, rdi will point there, otherwise to the
+  // backing store.
+  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+
+  // rax = address of new object (tagged)
+  // rbx = mapped parameter count (untagged)
+  // rcx = argument count (tagged)
+  // rdi = address of parameter map or backing store (tagged)
+
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ testq(rbx, rbx);
+  __ j(zero, &skip_parameter_map);
+
+  __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+  // rbx contains the untagged mapped parameter count. Add 2 and tag to write.
+  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+  __ Integer64PlusConstantToSmi(r9, rbx, 2);
+  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+  __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+
+  // Fill in the mapped parameter slots and put holes into the corresponding
+  // slots of the arguments backing store. We need to fill in
+  // mapped_parameter_count slots. They index the context, where parameters
+  // are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+
+  // Load the tagged mapped parameter count into r9 and compute the first
+  // mapping index, MIN_CONTEXT_SLOTS + parameter_count - mapped_count, in r8.
+  __ Integer32ToSmi(r9, rbx);
+  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
+  __ addq(r8, Operand(rsp, 1 * kPointerSize));
+  __ subq(r8, r9);
+  __ Move(r11, factory->the_hole_value());
+  __ movq(rdx, rdi);
+  __ SmiToInteger64(kScratchRegister, r9);
+  __ lea(rdi, Operand(rdi, kScratchRegister,
+                      times_pointer_size,
+                      kParameterMapHeaderSize));
+  // r9 = loop variable (tagged)
+  // r8 = mapping index (tagged)
+  // r11 = the hole value
+  // rdx = address of parameter map (tagged)
+  // rdi = address of backing store (tagged)
+  __ jmp(&parameters_test, Label::kNear);
+
+  __ bind(&parameters_loop);
+  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
+  __ SmiToInteger64(kScratchRegister, r9);
+  __ movq(FieldOperand(rdx, kScratchRegister,
+                       times_pointer_size,
+                       kParameterMapHeaderSize),
+          r8);
+  __ movq(FieldOperand(rdi, kScratchRegister,
+                       times_pointer_size,
+                       FixedArray::kHeaderSize),
+          r11);
+  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
+  __ bind(&parameters_test);
+  __ SmiTest(r9);
+  __ j(not_zero, &parameters_loop, Label::kNear);
+
+  __ bind(&skip_parameter_map);
+
+  // rcx = argument count (tagged)
+  // rdi = address of backing store (tagged)
+  // Copy arguments header and remaining slots (if there are any).
+  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
+          factory->fixed_array_map());
+  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+
+  Label arguments_loop, arguments_test;
+  __ movq(r8, rbx);
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  // Untag rcx and r8 for the loop below.
+  __ SmiToInteger64(rcx, rcx);
+  __ SmiToInteger64(r8, r8);
+  __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
+  __ subq(rdx, kScratchRegister);
+  __ jmp(&arguments_test, Label::kNear);
+
+  __ bind(&arguments_loop);
+  __ subq(rdx, Immediate(kPointerSize));
+  __ movq(r9, Operand(rdx, 0));
+  __ movq(FieldOperand(rdi, r8,
+                       times_pointer_size,
+                       FixedArray::kHeaderSize),
+          r9);
+  __ addq(r8, Immediate(1));
+
+  __ bind(&arguments_test);
+  __ cmpq(r8, rcx);
+  __ j(less, &arguments_loop, Label::kNear);
+
+  // Return and remove the on-stack parameters.
+  __ ret(3 * kPointerSize);
+
+  // Do the runtime call to allocate the arguments object.
+  // rcx = argument count (untagged)
+  __ bind(&runtime);
+  __ Integer32ToSmi(rcx, rcx);
+  __ movq(Operand(rsp, 1 * kPointerSize), rcx);  // Patch argument count.
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
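
The try_allocate sequence above sizes one contiguous new-space allocation holding all three parts of the non-strict (aliased) arguments object. A stand-alone sketch of that size computation:

#include <cstddef>

// Stand-ins for the V8 constants used above (x64 values; in particular the
// value of Heap::kArgumentsObjectSize is an assumption, not taken from this
// diff).
constexpr std::size_t kPointerSize = 8;
constexpr std::size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
constexpr std::size_t kParameterMapHeaderSize =
    kFixedArrayHeaderSize + 2 * kPointerSize;  // + context + backing store
constexpr std::size_t kArgumentsObjectSize = 5 * kPointerSize;

std::size_t NonStrictArgumentsAllocationSize(std::size_t mapped_count,
                                             std::size_t argument_count) {
  std::size_t size = 0;
  // 1. Parameter map (skipped entirely when no parameters are mapped).
  if (mapped_count > 0) {
    size += mapped_count * kPointerSize + kParameterMapHeaderSize;
  }
  // 2. Backing store for the arguments.
  size += argument_count * kPointerSize + kFixedArrayHeaderSize;
  // 3. The arguments object itself.
  size += kArgumentsObjectSize;
  return size;
}
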
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+  // rsp[0] : return address
+  // rsp[8] : number of parameters
+  // rsp[16] : receiver displacement
+  // rsp[24] : function
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &runtime);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ SmiToInteger64(rcx, rcx);
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+              StandardFrameConstants::kCallerSPOffset));
+  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   // rsp[0] : return address
   // rsp[8] : number of parameters
   // rsp[16] : receiver displacement
   // rsp[24] : function
 
-  // The displacement is used for skipping the return address and the
-  // frame pointer on the stack. It is the offset of the last
-  // parameter (if any) relative to the frame pointer.
-  static const int kDisplacement = 2 * kPointerSize;
-
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor_frame);
 
   // Get the length from the frame.
-  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger64(rcx, rcx);
   __ jmp(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
-  __ SmiToInteger32(rcx,
-                    Operand(rdx,
-                            ArgumentsAdaptorFrameConstants::kLengthOffset));
-  // Space on stack must already hold a smi.
-  __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
-  // Do not clobber the length index for the indexing operation since
-  // it is used compute the size for allocation later.
-  __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ SmiToInteger64(rcx, rcx);
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+                      StandardFrameConstants::kCallerSPOffset));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ testl(rcx, rcx);
-  __ j(zero, &add_arguments_object);
-  __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+  __ testq(rcx, rcx);
+  __ j(zero, &add_arguments_object, Label::kNear);
+  __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addl(rcx, Immediate(GetArgumentsObjectSize()));
+  __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -1778,59 +2282,51 @@
   // Get the arguments boilerplate from the current (global) context.
   __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
-  __ movq(rdi, Operand(rdi,
-                       Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+  const int offset =
+      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+  __ movq(rdi, Operand(rdi, offset));
 
   // Copy the JS object part.
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
-  __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
-  __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
-  __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
-  __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
-  __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
-  if (type_ == NEW_NON_STRICT) {
-    // Setup the callee in-object property.
-    ASSERT(Heap::kArgumentsCalleeIndex == 1);
-    __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
-    __ movq(FieldOperand(rax, JSObject::kHeaderSize +
-                              Heap::kArgumentsCalleeIndex * kPointerSize),
-            kScratchRegister);
+  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+    __ movq(rbx, FieldOperand(rdi, i));
+    __ movq(FieldOperand(rax, i), rbx);
   }
 
   // Get the length (smi tagged) and set that as an in-object property too.
-  ASSERT(Heap::kArgumentsLengthIndex == 0);
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
   __ movq(FieldOperand(rax, JSObject::kHeaderSize +
-                            Heap::kArgumentsLengthIndex * kPointerSize),
+                       Heap::kArgumentsLengthIndex * kPointerSize),
           rcx);
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ SmiTest(rcx);
+  __ testq(rcx, rcx);
   __ j(zero, &done);
 
-  // Get the parameters pointer from the stack and untag the length.
+  // Get the parameters pointer from the stack.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
 
   // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
-  __ lea(rdi, Operand(rax, GetArgumentsObjectSize()));
+  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
   __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
   __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+
   __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
-  __ SmiToInteger32(rcx, rcx);  // Untag length for the loop below.
+  // Untag the length for the loop below.
+  __ SmiToInteger64(rcx, rcx);
 
   // Copy the fixed array slots.
   Label loop;
   __ bind(&loop);
-  __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
-  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+  __ movq(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
+  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
   __ addq(rdi, Immediate(kPointerSize));
   __ subq(rdx, Immediate(kPointerSize));
-  __ decl(rcx);
+  __ decq(rcx);
   __ j(not_zero, &loop);
 
   // Return and remove the on-stack parameters.
@@ -1839,7 +2335,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
 }
 
 
@@ -1878,7 +2374,6 @@
   __ testq(kScratchRegister, kScratchRegister);
   __ j(zero, &runtime);
 
-
   // Check that the first argument is a JSRegExp object.
   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   __ JumpIfSmi(rax, &runtime);
@@ -1949,44 +2444,60 @@
   __ cmpl(rdx, rdi);
   __ j(greater, &runtime);
 
+  // Reset offset for possibly sliced string.
+  __ Set(r14, 0);
   // rax: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
-  NearLabel seq_ascii_string, seq_two_byte_string, check_code;
+  Label seq_ascii_string, seq_two_byte_string, check_code;
   __ movq(rdi, Operand(rsp, kSubjectOffset));
+  // Make a copy of the original subject string.
+  __ movq(r15, rdi);
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
   __ andb(rbx, Immediate(
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be a flat ascii string.
-  __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
-  __ j(zero, &seq_ascii_string);
+  __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+  __ j(zero, &seq_ascii_string, Label::kNear);
 
-  // Check for flat cons string.
+  // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  STATIC_ASSERT(kExternalStringTag !=0);
-  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
-  __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
-  __ j(not_zero, &runtime);
-  // String is a cons string.
+  // In the case of a sliced string its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmpq(rbx, Immediate(kExternalStringTag));
+  __ j(less, &cons_string, Label::kNear);
+  __ j(equal, &runtime);
+
+  // String is sliced.
+  __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
+  __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+  // r14: slice offset
+  // r15: original subject string
+  // rdi: parent string
+  __ jmp(&check_encoding, Label::kNear);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
   __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
                  Heap::kEmptyStringRootIndex);
   __ j(not_equal, &runtime);
   __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+  // rdi: first part of cons string or parent of sliced string.
+  // rbx: map of first part of cons string or map of parent of sliced string.
+  // Is first part of cons or parent of slice a flat two byte string?
+  __ bind(&check_encoding);
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  // String is a cons string with empty second part.
-  // rdi: first part of cons string.
-  // rbx: map of first part of cons string.
-  // Is first part a flat two byte string?
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be ascii.
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask));
@@ -1997,7 +2508,7 @@
   // rax: RegExp data (FixedArray)
   __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
   __ Set(rcx, 1);  // Type is ascii.
-  __ jmp(&check_code);
+  __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
   // rdi: subject string (flat two-byte)
@@ -2008,9 +2519,8 @@
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
-  // the hole.
-  __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
-  __ j(not_equal, &runtime);
+  // a smi (code flushing support).
+  __ JumpIfSmi(r11, &runtime);
 
   // rdi: subject string
   // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
@@ -2080,32 +2590,40 @@
   // rbx: previous index
   // rcx: encoding of subject string (1 if ascii 0 if two_byte);
   // r11: code
+  // r14: slice offset
+  // r15: original subject string
 
-  // Argument 4: End of string data
-  // Argument 3: Start of string data
-  NearLabel setup_two_byte, setup_rest;
-  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
-  __ j(zero, &setup_two_byte);
-  __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
-  __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
-  __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
-  __ jmp(&setup_rest);
-  __ bind(&setup_two_byte);
-  __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
-  __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
-  __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
-
-  __ bind(&setup_rest);
   // Argument 2: Previous index.
   __ movq(arg2, rbx);
 
-  // Argument 1: Subject string.
-#ifdef _WIN64
-  __ movq(arg1, rdi);
-#else
-  // Already there in AMD64 calling convention.
-  ASSERT(arg1.is(rdi));
-#endif
+  // Argument 4: End of string data
+  // Argument 3: Start of string data
+  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
+  // Prepare start and end index of the input.
+  // Load the length from the original sliced string if that is the case.
+  __ addq(rbx, r14);
+  __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
+  __ addq(r14, arg3);  // Using arg3 as scratch.
+
+  // rbx: start index of the input
+  // r14: end index of the input
+  // r15: original subject string
+  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
+  __ j(zero, &setup_two_byte, Label::kNear);
+  __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
+  __ jmp(&setup_rest, Label::kNear);
+  __ bind(&setup_two_byte);
+  __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
+  __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
+  __ bind(&setup_rest);
+
+  // Argument 1: Original subject string.
+  // The original subject string was saved in r15 earlier and is passed on
+  // directly; the slice offset in r14 has already been folded into the
+  // start and end indices above.
+  __ movq(arg1, r15);
 
   // Locate the code entry and call it.
   __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
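
Because the irregexp code runs over the parent string when the subject was sliced, both indices computed above are shifted by the slice offset (zero for non-sliced subjects, see the earlier Set(r14, 0)). A small sketch of that arithmetic; the struct and function names are illustrative only:

struct MatchRange { int start; int end; };

// previous_index and subject_length refer to the original (possibly sliced)
// subject; slice_offset is its offset into the parent string, or 0.
MatchRange ComputeInputRange(int previous_index, int slice_offset,
                             int subject_length) {
  MatchRange range;
  range.start = previous_index + slice_offset;  // addq(rbx, r14)
  range.end = slice_offset + subject_length;    // addq(r14, arg3)
  return range;
}
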
@@ -2114,10 +2632,10 @@
   __ LeaveApiExitFrame();
 
   // Check the result.
-  NearLabel success;
+  Label success;
   Label exception;
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
-  __ j(equal, &success);
+  __ j(equal, &success, Label::kNear);
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
   __ j(equal, &exception);
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
@@ -2166,12 +2684,12 @@
   // rbx: last_match_info backing store (FixedArray)
   // rcx: offsets vector
   // rdx: number of capture registers
-  NearLabel next_capture, done;
+  Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
   __ subq(rdx, Immediate(1));
-  __ j(negative, &done);
+  __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer and make it a smi.
   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
   __ Integer32ToSmi(rdi, rdi);
@@ -2194,7 +2712,7 @@
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   ExternalReference pending_exception_address(
-      Isolate::k_pending_exception_address, isolate);
+      Isolate::kPendingExceptionAddress, isolate);
   Operand pending_exception_operand =
       masm->ExternalOperand(pending_exception_address, rbx);
   __ movq(rax, pending_exception_operand);
@@ -2204,8 +2722,8 @@
   __ movq(pending_exception_operand, rdx);
 
   __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
-  NearLabel termination_exception;
-  __ j(equal, &termination_exception);
+  Label termination_exception;
+  __ j(equal, &termination_exception, Label::kNear);
   __ Throw(rax);
 
   __ bind(&termination_exception);
@@ -2330,9 +2848,13 @@
   // Heap::GetNumberStringCache.
   Label is_smi;
   Label load_result_from_cache;
+  Factory* factory = masm->isolate()->factory();
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    __ CheckMap(object, FACTORY->heap_number_map(), not_found, true);
+    __ CheckMap(object,
+                factory->heap_number_map(),
+                not_found,
+                DONT_DO_SMI_CHECK);
 
     STATIC_ASSERT(8 == kDoubleSize);
     __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
@@ -2419,6 +2941,7 @@
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
   Label check_unequal_objects, done;
+  Factory* factory = masm->isolate()->factory();
 
   // Compare two smis if required.
   if (include_smi_compare_) {
@@ -2446,16 +2969,16 @@
 
   // Two identical objects are equal unless they are both NaN or undefined.
   {
-    NearLabel not_identical;
+    Label not_identical;
     __ cmpq(rax, rdx);
-    __ j(not_equal, &not_identical);
+    __ j(not_equal, &not_identical, Label::kNear);
 
     if (cc_ != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
-      NearLabel check_for_nan;
+      Label check_for_nan;
       __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-      __ j(not_equal, &check_for_nan);
+      __ j(not_equal, &check_for_nan, Label::kNear);
       __ Set(rax, NegativeComparisonResult(cc_));
       __ ret(0);
       __ bind(&check_for_nan);
@@ -2466,20 +2989,19 @@
     // Note: if cc_ != equal, never_nan_nan_ is not used.
     // We cannot set rax to EQUAL until just before return because
     // rax must be unchanged on jump to not_identical.
-
     if (never_nan_nan_ && (cc_ == equal)) {
       __ Set(rax, EQUAL);
       __ ret(0);
     } else {
-      NearLabel heap_number;
+      Label heap_number;
       // If it's not a heap number, then return equal for (in)equality operator.
       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-             FACTORY->heap_number_map());
-      __ j(equal, &heap_number);
+             factory->heap_number_map());
+      __ j(equal, &heap_number, Label::kNear);
       if (cc_ != equal) {
-        // Call runtime on identical JSObjects.  Otherwise return equal.
-        __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-        __ j(above_equal, &not_identical);
+        // Call runtime on identical objects.  Otherwise return equal.
+        __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+        __ j(above_equal, &not_identical, Label::kNear);
       }
       __ Set(rax, EQUAL);
       __ ret(0);
@@ -2519,7 +3041,7 @@
 
         // Check if the non-smi operand is a heap number.
         __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
+               factory->heap_number_map());
         // If heap number, handle it in the slow case.
         __ j(equal, &slow);
         // Return non-equal.  ebx (the lower half of rbx) is not zero.
@@ -2534,10 +3056,10 @@
       // There is no test for undetectability in strict equality.
 
       // If the first object is a JS object, we have done pointer comparison.
-      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      NearLabel first_non_object;
-      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-      __ j(below, &first_non_object);
+      STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+      Label first_non_object;
+      __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+      __ j(below, &first_non_object, Label::kNear);
       // Return non-zero (eax (not rax) is not zero)
       Label return_not_equal;
       STATIC_ASSERT(kHeapObjectTag != 0);
@@ -2549,7 +3071,7 @@
       __ CmpInstanceType(rcx, ODDBALL_TYPE);
       __ j(equal, &return_not_equal);
 
-      __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+      __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
       __ j(above_equal, &return_not_equal);
 
       // Check for oddballs: true, false, null, undefined.
@@ -2564,14 +3086,14 @@
   // Generate the number comparison code.
   if (include_number_compare_) {
     Label non_number_comparison;
-    NearLabel unordered;
+    Label unordered;
     FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
     __ xorl(rax, rax);
     __ xorl(rcx, rcx);
     __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
-    __ j(parity_even, &unordered);
+    __ j(parity_even, &unordered, Label::kNear);
     // Return a result of -1, 0, or 1, based on EFLAGS.
     __ setcc(above, rax);
     __ setcc(below, rcx);
@@ -2611,13 +3133,21 @@
       rdx, rax, rcx, rbx, &check_unequal_objects);
 
   // Inline comparison of ascii strings.
-  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+  if (cc_ == equal) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      rdx,
                                                      rax,
                                                      rcx,
-                                                     rbx,
-                                                     rdi,
-                                                     r8);
+                                                     rbx);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       rdx,
+                                                       rax,
+                                                       rcx,
+                                                       rbx,
+                                                       rdi,
+                                                       r8);
+  }
 
 #ifdef DEBUG
   __ Abort("Unexpected fall-through from string comparison");
@@ -2628,7 +3158,7 @@
     // Not strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    NearLabel not_both_objects, return_unequal;
+    Label not_both_objects, return_unequal;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -2636,17 +3166,17 @@
     STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(rcx, Operand(rax, rdx, times_1, 0));
     __ testb(rcx, Immediate(kSmiTagMask));
-    __ j(not_zero, &not_both_objects);
-    __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
-    __ j(below, &not_both_objects);
-    __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
-    __ j(below, &not_both_objects);
+    __ j(not_zero, &not_both_objects, Label::kNear);
+    __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
+    __ j(below, &not_both_objects, Label::kNear);
+    __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(below, &not_both_objects, Label::kNear);
     __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     // The objects are both undetectable, so they both compare as the value
     // undefined, and are equal.
     __ Set(rax, EQUAL);
@@ -2702,32 +3232,24 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  Label slow;
+  Label slow, non_function;
 
-  // If the receiver might be a value (string, number or boolean) check for this
-  // and box it if it is.
-  if (ReceiverMightBeValue()) {
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
     // Get the receiver from the stack.
     // +1 ~ return address
-    Label receiver_is_value, receiver_is_js_object;
     __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
-    // Check if receiver is a smi (which is a number value).
-    __ JumpIfSmi(rax, &receiver_is_value);
-
-    // Check if the receiver is a valid JS object.
-    __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
-    __ j(above_equal, &receiver_is_js_object);
-
-    // Call the runtime to box the value.
-    __ bind(&receiver_is_value);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ LeaveInternalFrame();
-    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
-    __ bind(&receiver_is_js_object);
+    // Call as function is indicated with the hole.
+    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &call, Label::kNear);
+    // Patch the receiver on the stack with the global receiver object.
+    __ movq(rbx, GlobalObjectOperand());
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rbx);
+    __ bind(&call);
   }
 
   // Get the function to call from the stack.
@@ -2735,22 +3257,56 @@
   __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
 
   // Check that the function really is a JavaScript function.
-  __ JumpIfSmi(rdi, &slow);
+  __ JumpIfSmi(rdi, &non_function);
   // Goto slow case if we do not have a function.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &slow);
 
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
-  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+    __ j(equal, &call_as_function);
+    __ InvokeFunction(rdi,
+                      actual,
+                      JUMP_FUNCTION,
+                      NullCallWrapper(),
+                      CALL_AS_METHOD);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(rdi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // Check for function proxy.
+  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function);
+  __ pop(rcx);
+  __ push(rdi);  // Put proxy as additional argument under return address.
+  __ push(rcx);
+  __ Set(rax, argc_ + 1);
+  __ Set(rbx, 0);
+  __ SetCallKind(rcx, CALL_AS_FUNCTION);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+  {
+    Handle<Code> adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ jmp(adaptor, RelocInfo::CODE_TARGET);
+  }
+
   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   // of the original receiver from the call site).
+  __ bind(&non_function);
   __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
   __ Set(rax, argc_);
   __ Set(rbx, 0);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
   Handle<Code> adaptor =
       Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
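
For orientation, the dispatch performed by the rewritten CallFunctionStub boils down to a few cases: an implicit (hole) receiver is replaced by the global receiver, a plain JSFunction is invoked as a function or a method depending on that check, a JSFunctionProxy is routed to CALL_FUNCTION_PROXY, and anything else to CALL_NON_FUNCTION. A hedged C++ sketch of that decision tree; CalleeKind and the puts() placeholders are illustrative, not V8 API:

#include <cstdio>

// Hypothetical stand-ins for the cases the stub distinguishes.
enum class CalleeKind { kJSFunction, kFunctionProxy, kOther };

// receiver_is_hole models the implicit-receiver marker pushed by the caller.
void CallFunctionSketch(CalleeKind callee,
                        bool receiver_might_be_implicit,
                        bool receiver_is_hole) {
  bool implicit = receiver_might_be_implicit && receiver_is_hole;
  if (implicit) {
    std::puts("patch receiver slot with the global receiver object");
  }
  switch (callee) {
    case CalleeKind::kJSFunction:
      // Only an explicit receiver in the might-be-implicit variant is a
      // method call; everything else is invoked CALL_AS_FUNCTION.
      if (receiver_might_be_implicit && !implicit) {
        std::puts("InvokeFunction, CALL_AS_METHOD");
      } else {
        std::puts("InvokeFunction, CALL_AS_FUNCTION");
      }
      break;
    case CalleeKind::kFunctionProxy:
      // The proxy goes under the return address as an extra argument.
      std::puts("push proxy, CALL_FUNCTION_PROXY via adaptor trampoline");
      break;
    case CalleeKind::kOther:
      // A non-function callee becomes the receiver of CALL_NON_FUNCTION.
      std::puts("store callee as receiver, CALL_NON_FUNCTION via adaptor");
      break;
  }
}
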
@@ -2876,11 +3432,11 @@
   // Handling of failure.
   __ bind(&failure_returned);
 
-  NearLabel retry;
+  Label retry;
   // If the returned exception is RETRY_AFTER_GC continue at retry label
   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
-  __ j(zero, &retry);
+  __ j(zero, &retry, Label::kNear);
 
   // Special handling of out of memory exceptions.
   __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
@@ -2889,7 +3445,7 @@
 
   // Retrieve the pending exception and clear the variable.
   ExternalReference pending_exception_address(
-      Isolate::k_pending_exception_address, masm->isolate());
+      Isolate::kPendingExceptionAddress, masm->isolate());
   Operand pending_exception_operand =
       masm->ExternalOperand(pending_exception_address);
   __ movq(rax, pending_exception_operand);
@@ -2990,9 +3546,7 @@
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
   Label not_outermost_js, not_outermost_js_2;
-#endif
   {  // NOLINT. Scope block confuses linter.
     MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
     // Setup frame.
@@ -3031,15 +3585,14 @@
   Isolate* isolate = masm->isolate();
 
   // Save copies of the top frame descriptor on the stack.
-  ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, isolate);
+  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
   {
     Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
     __ push(c_entry_fp_operand);
   }
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // If this is the outermost JS call, set js_entry_sp value.
-  ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
   __ Load(rax, js_entry_sp);
   __ testq(rax, rax);
   __ j(not_zero, &not_outermost_js);
@@ -3051,14 +3604,13 @@
   __ bind(&not_outermost_js);
   __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
-#endif
 
   // Call a faked try-block that does the invoke.
   __ call(&invoke);
 
   // Caught exception: Store result (exception) in the pending
   // exception field in the JSEnv and return a failure sentinel.
-  ExternalReference pending_exception(Isolate::k_pending_exception_address,
+  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       isolate);
   __ Store(pending_exception, rax);
   __ movq(rax, Failure::Exception(), RelocInfo::NONE);
@@ -3095,7 +3647,6 @@
   __ PopTryHandler();
 
   __ bind(&exit);
-#ifdef ENABLE_LOGGING_AND_PROFILING
   // Check if the current stack frame is marked as the outermost JS frame.
   __ pop(rbx);
   __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
@@ -3103,7 +3654,6 @@
   __ movq(kScratchRegister, js_entry_sp);
   __ movq(Operand(kScratchRegister, 0), Immediate(0));
   __ bind(&not_outermost_js_2);
-#endif
 
   // Restore the top frame descriptor from the stack.
   { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
@@ -3167,9 +3717,9 @@
   __ JumpIfSmi(rax, &slow);
 
   // Check that the left hand is a JS object. Leave its map in rax.
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
   __ j(below, &slow);
-  __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
   __ j(above, &slow);
 
   // Get the prototype of the function.
@@ -3180,11 +3730,11 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     // Look up the function and the map in the instanceof cache.
-    NearLabel miss;
+    Label miss;
     __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
     __ ret(2 * kPointerSize);
     __ bind(&miss);
@@ -3194,9 +3744,9 @@
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(rbx, &slow);
-  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
   __ j(below, &slow);
-  __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+  __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
   __ j(above, &slow);
 
   // Register mapping:
@@ -3220,15 +3770,15 @@
   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
 
   // Loop through the prototype chain looking for the function prototype.
-  NearLabel loop, is_instance, is_not_instance;
+  Label loop, is_instance, is_not_instance;
   __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
   __ bind(&loop);
   __ cmpq(rcx, rbx);
-  __ j(equal, &is_instance);
+  __ j(equal, &is_instance, Label::kNear);
   __ cmpq(rcx, kScratchRegister);
   // The code at is_not_instance assumes that kScratchRegister contains a
   // non-zero GCable value (the null object in this case).
-  __ j(equal, &is_not_instance);
+  __ j(equal, &is_not_instance, Label::kNear);
   __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
   __ jmp(&loop);
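
The loop above is the usual prototype-chain walk for instanceof: follow Map::kPrototypeOffset from the object's prototype until either the function's prototype or null is reached. A minimal C++ sketch of the same walk, using a hypothetical ProtoObject node rather than V8's heap objects:

// Illustrative sketch of the instanceof walk; ProtoObject is hypothetical.
struct ProtoObject {
  const ProtoObject* prototype;  // next object on the prototype chain, or null
};

// Returns true if `candidate` appears on `obj`'s prototype chain.
bool IsInstance(const ProtoObject* obj, const ProtoObject* candidate) {
  for (const ProtoObject* p = obj->prototype; p != nullptr; p = p->prototype) {
    if (p == candidate) return true;  // corresponds to the is_instance label
  }
  return false;                       // corresponds to is_not_instance (hit null)
}
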
@@ -3312,15 +3862,8 @@
 
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-
   const char* cc_name;
   switch (cc_) {
     case less: cc_name = "LT"; break;
@@ -3331,35 +3874,12 @@
     case not_equal: cc_name = "NE"; break;
     default: cc_name = "UnknownCondition"; break;
   }
-
-  const char* strict_name = "";
-  if (strict_ && (cc_ == equal || cc_ == not_equal)) {
-    strict_name = "_STRICT";
-  }
-
-  const char* never_nan_nan_name = "";
-  if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
-    never_nan_nan_name = "_NO_NAN";
-  }
-
-  const char* include_number_compare_name = "";
-  if (!include_number_compare_) {
-    include_number_compare_name = "_NO_NUMBER";
-  }
-
-  const char* include_smi_compare_name = "";
-  if (!include_smi_compare_) {
-    include_smi_compare_name = "_NO_SMI";
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "CompareStub_%s%s%s%s",
-               cc_name,
-               strict_name,
-               never_nan_nan_name,
-               include_number_compare_name,
-               include_smi_compare_name);
-  return name_;
+  bool is_equality = cc_ == equal || cc_ == not_equal;
+  stream->Add("CompareStub_%s", cc_name);
+  if (strict_ && is_equality) stream->Add("_STRICT");
+  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+  if (!include_number_compare_) stream->Add("_NO_NUMBER");
+  if (!include_smi_compare_) stream->Add("_NO_SMI");
 }
 
 
@@ -3370,6 +3890,7 @@
   Label flat_string;
   Label ascii_string;
   Label got_char_code;
+  Label sliced_string;
 
   // If the receiver is a smi trigger the non-string case.
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -3398,29 +3919,44 @@
   __ j(zero, &flat_string);
 
   // Handle non-flat strings.
-  __ testb(result_, Immediate(kIsConsStringMask));
-  __ j(zero, &call_runtime_);
+  __ and_(result_, Immediate(kStringRepresentationMask));
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmpb(result_, Immediate(kExternalStringTag));
+  __ j(greater, &sliced_string);
+  __ j(equal, &call_runtime_);
 
   // ConsString.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  Label assure_seq_string;
   __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
                  Heap::kEmptyStringRootIndex);
   __ j(not_equal, &call_runtime_);
   // Get the first of the two strings and load its instance type.
   __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string, Label::kNear);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
+  __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
+
+  __ bind(&assure_seq_string);
   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   // If the first cons component is also non-flat, then go to runtime.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ testb(result_, Immediate(kStringRepresentationMask));
   __ j(not_zero, &call_runtime_);
+  __ jmp(&flat_string);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
-  STATIC_ASSERT(kAsciiStringTag != 0);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ testb(result_, Immediate(kStringEncodingMask));
   __ j(not_zero, &ascii_string);
 
@@ -3449,10 +3985,14 @@
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
+  Factory* factory = masm->isolate()->factory();
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
-  __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
+  __ CheckMap(index_,
+              factory->heap_number_map(),
+              index_not_number_,
+              DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
   __ push(index_);
@@ -3561,15 +4101,12 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    Condition is_smi;
-    is_smi = masm->CheckSmi(rax);
-    __ j(is_smi, &string_add_runtime);
+    __ JumpIfSmi(rax, &string_add_runtime);
     __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
     __ j(above_equal, &string_add_runtime);
 
     // First argument is a string, test second.
-    is_smi = masm->CheckSmi(rdx);
-    __ j(is_smi, &string_add_runtime);
+    __ JumpIfSmi(rdx, &string_add_runtime);
     __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
     __ j(above_equal, &string_add_runtime);
   } else {
@@ -3592,10 +4129,10 @@
   // rax: first string
   // rdx: second string
   // Check if either of the strings is empty. In that case return the other.
-  NearLabel second_not_zero_length, both_not_zero_length;
+  Label second_not_zero_length, both_not_zero_length;
   __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
   __ SmiTest(rcx);
-  __ j(not_zero, &second_not_zero_length);
+  __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in rax.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -3603,7 +4140,7 @@
   __ bind(&second_not_zero_length);
   __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
   __ SmiTest(rbx);
-  __ j(not_zero, &both_not_zero_length);
+  __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in rdx.
   __ movq(rax, rdx);
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -3676,8 +4213,9 @@
   Label non_ascii, allocated, ascii_data;
   __ movl(rcx, r8);
   __ and_(rcx, r9);
-  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
-  __ testl(rcx, Immediate(kAsciiStringTag));
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ testl(rcx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
   // Allocate an ASCII cons string.
@@ -3706,7 +4244,7 @@
   __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
-  __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
+  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
 
   // Handle creating a flat result. First check that both strings are not
@@ -3726,6 +4264,8 @@
   __ and_(rcx, Immediate(kStringRepresentationMask));
   __ cmpl(rcx, Immediate(kExternalStringTag));
   __ j(equal, &string_add_runtime);
+  // We cannot encounter sliced strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
   // Now check if both strings are ascii strings.
   // rax: first string
   // rbx: length of resulting flat string
@@ -3733,10 +4273,11 @@
   // r8: instance type of first string
   // r9: instance type of second string
   Label non_ascii_string_add_flat_result;
-  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
-  __ testl(r8, Immediate(kAsciiStringTag));
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ testl(r8, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii_string_add_flat_result);
-  __ testl(r9, Immediate(kAsciiStringTag));
+  __ testl(r9, Immediate(kStringEncodingMask));
   __ j(zero, &string_add_runtime);
 
   __ bind(&make_flat_ascii_string);
@@ -3774,7 +4315,9 @@
   // r8: instance type of first string
   // r9: instance type of first string
   __ bind(&non_ascii_string_add_flat_result);
-  __ and_(r9, Immediate(kAsciiStringTag));
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ and_(r9, Immediate(kStringEncodingMask));
   __ j(not_zero, &string_add_runtime);
   // Both strings are two byte strings. As they are short they are both
   // flat.
@@ -3897,9 +4440,9 @@
   ASSERT(count.is(rcx));  // rep movs count
 
   // Nothing to do for zero characters.
-  NearLabel done;
+  Label done;
   __ testl(count, count);
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // Make count the number of bytes to copy.
   if (!ascii) {
@@ -3908,9 +4451,9 @@
   }
 
   // Don't enter the rep movs if there are less than 4 bytes to copy.
-  NearLabel last_bytes;
+  Label last_bytes;
   __ testl(count, Immediate(~7));
-  __ j(zero, &last_bytes);
+  __ j(zero, &last_bytes, Label::kNear);
 
   // Copy from edi to esi using rep movs instruction.
   __ movl(kScratchRegister, count);
@@ -3924,7 +4467,7 @@
   // Check if there are more bytes to copy.
   __ bind(&last_bytes);
   __ testl(count, count);
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // Copy remaining characters.
   Label loop;
@@ -3952,10 +4495,10 @@
 
   // Make sure that both characters are not digits as such strings have a
   // different hash algorithm. Don't try to look for these in the symbol table.
-  NearLabel not_array_index;
+  Label not_array_index;
   __ leal(scratch, Operand(c1, -'0'));
   __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
-  __ j(above, &not_array_index);
+  __ j(above, &not_array_index, Label::kNear);
   __ leal(scratch, Operand(c2, -'0'));
   __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(below_equal, not_found);
@@ -3999,6 +4542,7 @@
   static const int kProbes = 4;
   Label found_in_symbol_table;
   Label next_probe[kProbes];
+  Register candidate = scratch;  // Scratch register contains candidate.
   for (int i = 0; i < kProbes; i++) {
     // Calculate entry in symbol table.
     __ movl(scratch, hash);
@@ -4008,7 +4552,6 @@
     __ andl(scratch, mask);
 
     // Load the entry from the symbol table.
-    Register candidate = scratch;  // Scratch register contains candidate.
     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
     __ movq(candidate,
             FieldOperand(symbol_table,
@@ -4017,9 +4560,9 @@
                          SymbolTable::kElementsStartOffset));
 
     // If entry is undefined no string with this hash can be found.
-    NearLabel is_string;
+    Label is_string;
     __ CmpObjectType(candidate, ODDBALL_TYPE, map);
-    __ j(not_equal, &is_string);
+    __ j(not_equal, &is_string, Label::kNear);
 
     __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
     __ j(equal, not_found);
@@ -4054,7 +4597,7 @@
   __ jmp(not_found);
 
   // Scratch register contains result when we fall through to here.
-  Register result = scratch;
+  Register result = candidate;
   __ bind(&found_in_symbol_table);
   if (!result.is(rax)) {
     __ movq(rax, result);
@@ -4066,13 +4609,16 @@
                                     Register hash,
                                     Register character,
                                     Register scratch) {
-  // hash = character + (character << 10);
-  __ movl(hash, character);
-  __ shll(hash, Immediate(10));
-  __ addl(hash, character);
+  // hash = (seed + character) + ((seed + character) << 10);
+  __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+  __ SmiToInteger32(scratch, scratch);
+  __ addl(scratch, character);
+  __ movl(hash, scratch);
+  __ shll(scratch, Immediate(10));
+  __ addl(hash, scratch);
   // hash ^= hash >> 6;
   __ movl(scratch, hash);
-  __ sarl(scratch, Immediate(6));
+  __ shrl(scratch, Immediate(6));
   __ xorl(hash, scratch);
 }
 
@@ -4089,7 +4635,7 @@
   __ addl(hash, scratch);
   // hash ^= hash >> 6;
   __ movl(scratch, hash);
-  __ sarl(scratch, Immediate(6));
+  __ shrl(scratch, Immediate(6));
   __ xorl(hash, scratch);
 }
 
@@ -4101,17 +4647,19 @@
   __ leal(hash, Operand(hash, hash, times_8, 0));
   // hash ^= hash >> 11;
   __ movl(scratch, hash);
-  __ sarl(scratch, Immediate(11));
+  __ shrl(scratch, Immediate(11));
   __ xorl(hash, scratch);
   // hash += hash << 15;
   __ movl(scratch, hash);
   __ shll(scratch, Immediate(15));
   __ addl(hash, scratch);
 
+  __ andl(hash, Immediate(String::kHashBitMask));
+
   // if (hash == 0) hash = 27;
   Label hash_not_zero;
   __ j(not_zero, &hash_not_zero);
-  __ Set(hash, 27);
+  __ Set(hash, StringHasher::kZeroHash);
   __ bind(&hash_not_zero);
 }
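
Taken together, GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash compute a seeded Jenkins-style one-at-a-time hash, now using logical right shifts and masking the result to the hash bit width with a fixed fallback for zero. An equivalent scalar computation in C++; the mask width and zero fallback below are illustrative stand-ins for String::kHashBitMask and StringHasher::kZeroHash:

#include <cstdint>
#include <string>

// Illustrative values; V8 defines its own kHashBitMask and kZeroHash.
constexpr uint32_t kHashBitMask = (1u << 30) - 1;
constexpr uint32_t kZeroHash = 27;

uint32_t StringHash(const std::string& chars, uint32_t seed) {
  uint32_t hash = seed;
  for (unsigned char c : chars) {
    hash += c;
    hash += hash << 10;
    hash ^= hash >> 6;   // logical shift, matching the shrl above
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;
  return hash == 0 ? kZeroHash : hash;
}
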
 
@@ -4183,7 +4731,82 @@
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   __ Set(rcx, 2);
 
-  __ bind(&result_longer_than_two);
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    // If coming from the make_two_character_string path, the string
+    // is too short to be sliced anyway.
+    STATIC_ASSERT(2 < SlicedString::kMinLength);
+    __ jmp(&copy_routine);
+    __ bind(&result_longer_than_two);
+
+    // rax: string
+    // rbx: instance type
+    // rcx: sub string length
+    // rdx: from index (smi)
+    Label allocate_slice, sliced_string, seq_string;
+    __ cmpq(rcx, Immediate(SlicedString::kMinLength));
+    // Short slice.  Copy instead of slicing.
+    __ j(less, &copy_routine);
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ testb(rbx, Immediate(kStringRepresentationMask));
+    __ j(zero, &seq_string, Label::kNear);
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ testb(rbx, Immediate(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ j(zero, &runtime);
+
+    __ testb(rbx, Immediate(kSlicedNotConsMask));
+    __ j(not_zero, &sliced_string, Label::kNear);
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
+                   Heap::kEmptyStringRootIndex);
+    __ j(not_equal, &runtime);
+    __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+    __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ movq(rdi, rax);
+
+    __ bind(&allocate_slice);
+    // rdi: underlying subject string
+    // rbx: instance type of original subject string
+    // rdx: offset
+    // rcx: length
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ testb(rbx, Immediate(kStringEncodingMask));
+    __ j(zero, &two_byte_slice, Label::kNear);
+    __ AllocateAsciiSlicedString(rax, rbx, no_reg, &runtime);
+    __ jmp(&set_slice_header, Label::kNear);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(rax, rbx, no_reg, &runtime);
+    __ bind(&set_slice_header);
+    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
+    __ Integer32ToSmi(rcx, rcx);
+    __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
+    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
+    __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
+           Immediate(String::kEmptyHashField));
+    __ jmp(&return_rax);
+
+    __ bind(&copy_routine);
+  } else {
+    __ bind(&result_longer_than_two);
+  }
 
   // rax: string
   // rbx: instance type
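
With FLAG_string_slices enabled, a long enough substring is not copied: the stub allocates a SlicedString that records the parent, the start offset (folded through any pre-existing slice) and the length. A tiny C++ model of that representation and the length cut-off; the constant and names below are illustrative, not V8's:

#include <memory>
#include <string>

// Hypothetical model of the slice optimisation; not V8's object layout.
struct Slice {
  std::shared_ptr<const std::string> parent;  // always a non-slice string
  int offset;
  int length;
};

constexpr int kSliceMinLength = 13;  // illustrative stand-in for kMinLength

// Returns true and fills *out when slicing pays off; false means "copy".
bool TrySubstringAsSlice(const Slice& subject, int from, int length,
                         Slice* out) {
  if (length < kSliceMinLength) return false;  // short result: copying is cheaper
  // Fold through an existing slice so the new slice points at the real parent.
  *out = Slice{subject.parent, subject.offset + from, length};
  return true;
}
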
@@ -4263,6 +4886,47 @@
 }
 
 
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label check_zero_length;
+  __ movq(length, FieldOperand(left, String::kLengthOffset));
+  __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
+  __ j(equal, &check_zero_length, Label::kNear);
+  __ Move(rax, Smi::FromInt(NOT_EQUAL));
+  __ ret(0);
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ SmiTest(length);
+  __ j(not_zero, &compare_chars, Label::kNear);
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ ret(0);
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  Label strings_not_equal;
+  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+                                &strings_not_equal, Label::kNear);
+
+  // Characters are equal.
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ ret(0);
+
+  // Characters are not equal.
+  __ bind(&strings_not_equal);
+  __ Move(rax, Smi::FromInt(NOT_EQUAL));
+  __ ret(0);
+}
+
+
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                         Register left,
                                                         Register right,
@@ -4282,8 +4946,8 @@
             FieldOperand(right, String::kLengthOffset));
   // Register scratch4 now holds left.length - right.length.
   const Register length_difference = scratch4;
-  NearLabel left_shorter;
-  __ j(less, &left_shorter);
+  Label left_shorter;
+  __ j(less, &left_shorter, Label::kNear);
   // The right string isn't longer than the left one.
   // Get the right string's length by subtracting the (non-negative) difference
   // from the left string's length.
@@ -4292,54 +4956,30 @@
   // Register scratch1 now holds Min(left.length, right.length).
   const Register min_length = scratch1;
 
-  NearLabel compare_lengths;
+  Label compare_lengths;
   // If min-length is zero, go directly to comparing lengths.
   __ SmiTest(min_length);
-  __ j(zero, &compare_lengths);
+  __ j(zero, &compare_lengths, Label::kNear);
 
-  __ SmiToInteger32(min_length, min_length);
+  // Compare loop.
+  Label result_not_equal;
+  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                &result_not_equal, Label::kNear);
 
-  // Registers scratch2 and scratch3 are free.
-  NearLabel result_not_equal;
-  Label loop;
-  {
-    // Check characters 0 .. min_length - 1 in a loop.
-    // Use scratch3 as loop index, min_length as limit and scratch2
-    // for computation.
-    const Register index = scratch3;
-    __ Set(index, 0);  // Index into strings.
-    __ bind(&loop);
-    // Compare characters.
-    // TODO(lrn): Could we load more than one character at a time?
-    __ movb(scratch2, FieldOperand(left,
-                                   index,
-                                   times_1,
-                                   SeqAsciiString::kHeaderSize));
-    // Increment index and use -1 modifier on next load to give
-    // the previous load extra time to complete.
-    __ addl(index, Immediate(1));
-    __ cmpb(scratch2, FieldOperand(right,
-                                   index,
-                                   times_1,
-                                   SeqAsciiString::kHeaderSize - 1));
-    __ j(not_equal, &result_not_equal);
-    __ cmpl(index, min_length);
-    __ j(not_equal, &loop);
-  }
   // Completed loop without finding different characters.
   // Compare lengths (precomputed).
   __ bind(&compare_lengths);
   __ SmiTest(length_difference);
-  __ j(not_zero, &result_not_equal);
+  __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
   __ Move(rax, Smi::FromInt(EQUAL));
   __ ret(0);
 
-  NearLabel result_greater;
+  Label result_greater;
   __ bind(&result_not_equal);
   // Unequal comparison of left to right, either character or length.
-  __ j(greater, &result_greater);
+  __ j(greater, &result_greater, Label::kNear);
 
   // Result is LESS.
   __ Move(rax, Smi::FromInt(LESS));
@@ -4352,6 +4992,36 @@
 }
 
 
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch,
+    Label* chars_not_equal,
+    Label::Distance near_jump) {
+  // Change the index to run from -length to -1 by adding length to the
+  // string start. The loop then ends when the index reaches zero, so no
+  // additional compare is needed.
+  __ SmiToInteger32(length, length);
+  __ lea(left,
+         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(right,
+         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+  __ neg(length);
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ movb(scratch, Operand(left, index, times_1, 0));
+  __ cmpb(scratch, Operand(right, index, times_1, 0));
+  __ j(not_equal, chars_not_equal, near_jump);
+  __ addq(index, Immediate(1));
+  __ j(not_zero, &loop);
+}
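
GenerateAsciiCharsCompareLoop uses the classic biased-pointer trick: both strings are advanced past their last character and the index runs from -length up to zero, so the addq's zero flag terminates the loop without a separate bounds compare. The same shape in C++:

// Byte-wise comparison with a negative index counting up to zero, mirroring
// GenerateAsciiCharsCompareLoop above.
bool AsciiCharsEqual(const char* left, const char* right, long length) {
  left += length;   // point one past the last character
  right += length;
  for (long index = -length; index != 0; ++index) {
    if (left[index] != right[index]) return false;  // -> chars_not_equal
  }
  return true;
}
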
+
+
 void StringCompareStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -4364,9 +5034,9 @@
   __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
 
   // Check for identity.
-  NearLabel not_same;
+  Label not_same;
   __ cmpq(rdx, rax);
-  __ j(not_equal, &not_same);
+  __ j(not_equal, &not_same, Label::kNear);
   __ Move(rax, Smi::FromInt(EQUAL));
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_compare_native(), 1);
@@ -4394,16 +5064,16 @@
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
-  NearLabel miss;
-  __ JumpIfNotBothSmi(rdx, rax, &miss);
+  Label miss;
+  __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
     __ subq(rax, rdx);
   } else {
-    NearLabel done;
+    Label done;
     __ subq(rdx, rax);
-    __ j(no_overflow, &done);
+    __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ SmiNot(rdx, rdx);
     __ bind(&done);
@@ -4419,16 +5089,16 @@
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
-  NearLabel generic_stub;
-  NearLabel unordered;
-  NearLabel miss;
+  Label generic_stub;
+  Label unordered;
+  Label miss;
   Condition either_smi = masm->CheckEitherSmi(rax, rdx);
-  __ j(either_smi, &generic_stub);
+  __ j(either_smi, &generic_stub, Label::kNear);
 
   __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &miss);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &miss);
+  __ j(not_equal, &miss, Label::kNear);
 
   // Load left and right operand
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
@@ -4438,7 +5108,7 @@
   __ ucomisd(xmm0, xmm1);
 
   // Don't base result on EFLAGS when a NaN is involved.
-  __ j(parity_even, &unordered);
+  __ j(parity_even, &unordered, Label::kNear);
 
   // Return a result of -1, 0, or 1, based on EFLAGS.
   // Performing mov, because xor would destroy the flag register.
@@ -4459,16 +5129,133 @@
 }
 
 
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  ASSERT(GetCondition() == equal);
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+  Register tmp1 = rcx;
+  Register tmp2 = rbx;
+
+  // Check that both operands are heap objects.
+  Label miss;
+  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+  __ j(cond, &miss, Label::kNear);
+
+  // Check that both operands are symbols.
+  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, tmp2);
+  __ testb(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &miss, Label::kNear);
+
+  // Symbols are compared by identity.
+  Label done;
+  __ cmpq(left, right);
+  // Make sure rax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(rax));
+  __ j(not_equal, &done, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ bind(&done);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
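
Symbols are interned strings, so equality of two symbols reduces to pointer identity; the stub then reuses the already non-zero right operand in rax to encode "not equal". A small C++ illustration of why interning makes the identity check sufficient; std::unordered_set here is only a stand-in for V8's symbol table:

#include <string>
#include <unordered_set>

// Interning maps equal contents to one storage location, so equality becomes
// a pointer identity check, which is what the SYMBOLS compare stub relies on.
const std::string* Intern(std::unordered_set<std::string>& table,
                          const std::string& s) {
  return &*table.insert(s).first;
}

bool SymbolsEqual(const std::string* a, const std::string* b) {
  return a == b;  // same pointer <=> same interned content
}
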
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(GetCondition() == equal);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+  Register tmp1 = rcx;
+  Register tmp2 = rbx;
+  Register tmp3 = rdi;
+
+  // Check that both operands are heap objects.
+  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+  __ j(cond, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  __ movq(tmp3, tmp1);
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ or_(tmp3, tmp2);
+  __ testb(tmp3, Immediate(kIsNotStringMask));
+  __ j(not_zero, &miss);
+
+  // Fast check for identical strings.
+  Label not_same;
+  __ cmpq(left, right);
+  __ j(not_equal, &not_same, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ ret(0);
+
+  // Handle not identical strings.
+  __ bind(&not_same);
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  Label do_compare;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, tmp2);
+  __ testb(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &do_compare, Label::kNear);
+  // Make sure rax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(rax));
+  __ ret(0);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ bind(&do_compare);
+  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ pop(tmp1);  // Return address.
+  __ push(left);
+  __ push(right);
+  __ push(tmp1);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
-  NearLabel miss;
+  Label miss;
   Condition either_smi = masm->CheckEitherSmi(rdx, rax);
-  __ j(either_smi, &miss);
+  __ j(either_smi, &miss, Label::kNear);
 
   __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
   __ subq(rax, rdx);
@@ -4510,6 +5297,206 @@
 }
 
 
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register properties,
+    String* name,
+    Register r0) {
+  // If the names of the slots in the range 1 to kProbes - 1 for the hash
+  // value are not equal to the name, and the kProbes-th slot is not used
+  // (its name is the undefined value), then the hash table is guaranteed not
+  // to contain the property. This holds even if some slots represent deleted
+  // properties (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // r0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = r0;
+    // Capacity is smi 2^n.
+    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
+    __ decl(index);
+    __ and_(index,
+            Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
+
+    Register entity_name = r0;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ movq(entity_name, Operand(properties,
+                                 index,
+                                 times_pointer_size,
+                                 kElementsStartOffset - kHeapObjectTag));
+    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
+    __ j(equal, done);
+
+    // Stop if we found the property.
+    __ Cmp(entity_name, Handle<String>(name));
+    __ j(equal, miss);
+
+    // Check if the entry name is not a symbol.
+    __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+    __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+             Immediate(kIsSymbolMask));
+    __ j(zero, miss);
+  }
+
+  StringDictionaryLookupStub stub(properties,
+                                  r0,
+                                  r0,
+                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+  __ Push(Handle<Object>(name));
+  __ push(Immediate(name->Hash()));
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ testq(r0, r0);
+  __ j(not_zero, miss);
+  __ jmp(done);
+  return result;
+}
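
Both lookup helpers probe the dictionary with the quadratic sequence described in the comments, (hash + i + i*i) & mask, over a power-of-two capacity; the negative variant stops at an undefined slot and must bail out if it crosses a non-symbol key. A C++ sketch of the probing and of that decision rule; the exact probe offset is whatever StringDictionary::GetProbeOffset computes, i + i*i is used here only for illustration:

#include <cstdint>

// Slot to inspect on probe i for a power-of-two capacity table.
uint32_t ProbeSlot(uint32_t hash, uint32_t i, uint32_t capacity) {
  uint32_t mask = capacity - 1;        // capacity is a smi 2^n in the stub
  return (hash + i + i * i) & mask;    // illustrative quadratic offset
}

enum class Slot { kUndefined, kTheName, kOtherSymbol, kNonSymbol };

// Negative lookup: true means the table provably does not contain the name;
// false means it might, so the caller has to treat the probe as a miss.
bool ProvablyAbsent(const Slot* slots, uint32_t capacity, uint32_t hash,
                    int probes) {
  for (int i = 0; i < probes; ++i) {
    switch (slots[ProbeSlot(hash, static_cast<uint32_t>(i), capacity)]) {
      case Slot::kUndefined:   return true;   // free slot ends the probe chain
      case Slot::kTheName:     return false;  // the property is present
      case Slot::kNonSymbol:   return false;  // might equal the name: bail out
      case Slot::kOtherSymbol: break;         // keep probing
    }
  }
  return false;  // probes exhausted without hitting a free slot
}
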
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found, leaving the
+// index into the dictionary in |r1|. Jump to the |miss| label
+// otherwise.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register r0,
+                                                        Register r1) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
+  __ decl(r0);
+
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
+    __ shrl(r1, Immediate(String::kHashShift));
+    if (i > 0) {
+      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r1, r0);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
+
+    // Check if the key is identical to the name.
+    __ cmpq(name, Operand(elements, r1, times_pointer_size,
+                          kElementsStartOffset - kHeapObjectTag));
+    __ j(equal, done);
+  }
+
+  StringDictionaryLookupStub stub(elements,
+                                  r0,
+                                  r1,
+                                  POSITIVE_LOOKUP);
+  __ push(name);
+  __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
+  __ shrl(r0, Immediate(String::kHashShift));
+  __ push(r0);
+  __ CallStub(&stub);
+
+  __ testq(r0, r0);
+  __ j(zero, miss);
+  __ jmp(done);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Stack frame on entry:
+  //  rsp[0 * kPointerSize]: return address.
+  //  rsp[1 * kPointerSize]: key's hash.
+  //  rsp[2 * kPointerSize]: key.
+  // Registers:
+  //  dictionary_: StringDictionary to probe.
+  //  result_: used as scratch.
+  //  index_: will hold an index of entry if lookup is successful.
+  //          might alias with result_.
+  // Returns:
+  //  result_ is zero if lookup failed, non-zero otherwise.
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  Register scratch = result_;
+
+  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
+  __ decl(scratch);
+  __ push(scratch);
+
+  // If the names of the slots in the range 1 to kProbes - 1 for the hash
+  // value are not equal to the name, and the kProbes-th slot is not used
+  // (its name is the undefined value), then the hash table is guaranteed not
+  // to contain the property. This holds even if some slots represent deleted
+  // properties (their names are the null value).
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+    if (i > 0) {
+      __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(scratch, Operand(rsp, 0));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
+
+    // Having undefined at this place means the name is not contained.
+    __ movq(scratch, Operand(dictionary_,
+                             index_,
+                             times_pointer_size,
+                             kElementsStartOffset - kHeapObjectTag));
+
+    __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
+    __ j(equal, &not_in_dictionary);
+
+    // Stop if we found the property.
+    __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+    __ j(equal, &in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // If we hit a non-symbol key during negative lookup,
+      // we have to bail out as this key might be equal to the
+      // key we are looking for.
+
+      // Check if the entry name is not a symbol.
+      __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+      __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+               Immediate(kIsSymbolMask));
+      __ j(zero, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ movq(scratch, Immediate(0));
+    __ Drop(1);
+    __ ret(2 * kPointerSize);
+  }
+
+  __ bind(&in_dictionary);
+  __ movq(scratch, Immediate(1));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&not_in_dictionary);
+  __ movq(scratch, Immediate(0));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index f97d099..4058118 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -59,38 +59,95 @@
 };
 
 
-class ToBooleanStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  ToBooleanStub() { }
-
-  void Generate(MacroAssembler* masm);
+  UnaryOpStub(Token::Value op,
+              UnaryOverwriteMode mode,
+              UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
+      : op_(op),
+        mode_(mode),
+        operand_type_(operand_type) {
+  }
 
  private:
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return 0; }
+  Token::Value op_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
+  virtual void PrintName(StringStream* stream);
+
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
+  }
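
The minor key packs the stub's parameters into disjoint bit ranges so that one integer both keys the code cache and reconstructs the stub through its key-based constructor. A generic sketch of that encode/decode pattern; the BitField below is a simplified stand-in for V8's template:

#include <cstdint>

// Minimal stand-in for V8's BitField<T, shift, size> helper.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;  // assumes value fits kSize
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> kShift);
  }
};

enum Mode { kOverwriteLeft = 0, kOverwriteRight = 1 };

// Mirrors the MinorKey()/constructor pair: encode fields into one integer,
// then pull them back out when the stub is recreated from its key.
using ModeBits = BitField<Mode, 0, 1>;
using OpBits = BitField<int, 1, 7>;

uint32_t MakeMinorKey(Mode mode, int op) {
  return ModeBits::encode(mode) | OpBits::encode(op);
}
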
+
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switches more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm,
+                          Label* non_smi,
+                          Label* slow,
+                          Label::Distance non_smi_near = Label::kFar,
+                          Label::Distance slow_near = Label::kFar);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm,
+                             Label* non_smi,
+                             Label::Distance non_smi_near);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
 };
 
 
-class TypeRecordingBinaryOpStub: public CodeStub {
+class BinaryOpStub: public CodeStub {
  public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
-        name_(NULL) {
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED) {
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         operands_type_(operands_type),
-        result_type_(result_type),
-        name_(NULL) { }
+        result_type_(result_type) { }
 
  private:
   enum SmiCodeGenerateHeapNumberResults {
@@ -102,31 +159,18 @@
   OverwriteMode mode_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
-           "(mode %d, runtime_type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
-  }
-#endif
+  virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 9, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 12, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -152,6 +196,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
 
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
@@ -159,15 +204,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -276,10 +321,9 @@
 
 class StringCompareStub: public CodeStub {
  public:
-  explicit StringCompareStub() {}
+  StringCompareStub() {}
 
-  // Compare two flat ascii strings and returns result in rax after popping two
-  // arguments from the stack.
+  // Compares two flat ASCII strings and returns result in rax.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -288,11 +332,27 @@
                                               Register scratch3,
                                               Register scratch4);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns result
+  // in rax.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(
+      MacroAssembler* masm,
+      Register left,
+      Register right,
+      Register length,
+      Register scratch,
+      Label* chars_not_equal,
+      Label::Distance near_jump = Label::kFar);
 };
 
 
@@ -322,14 +382,67 @@
   int MinorKey() { return 0; }
 
   void Generate(MacroAssembler* masm);
+};
 
-  const char* GetName() { return "NumberToStringStub"; }
 
-#ifdef DEBUG
-  void Print() {
-    PrintF("NumberToStringStub\n");
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  StringDictionaryLookupStub(Register dictionary,
+                             Register result,
+                             Register index,
+                             LookupMode mode)
+      : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register properties,
+      String* name,
+      Register r0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return DictionaryBits::encode(dictionary_.code()) |
+        ResultBits::encode(result_.code()) |
+        IndexBits::encode(index_.code()) |
+        LookupModeBits::encode(mode_);
   }
-#endif
+
+  class DictionaryBits: public BitField<int, 0, 4> {};
+  class ResultBits: public BitField<int, 4, 4> {};
+  class IndexBits: public BitField<int, 8, 4> {};
+  class LookupModeBits: public BitField<LookupMode, 12, 1> {};
+
+  Register dictionary_;
+  Register result_;
+  Register index_;
+  LookupMode mode_;
 };
 
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index f8f2d6e..507bbd4 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -132,6 +132,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
+  OS::ProtectCode(buffer, actual_size);
   // Call the function from C++ through this pointer.
   return FUNCTION_CAST<ModuloFunction>(buffer);
 }
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 94c7850..a0648ce 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -58,9 +58,7 @@
   // Print the code after compiling it.
   static void PrintCode(Handle<Code> code, CompilationInfo* info);
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
-#endif
 
   static bool RecordPositions(MacroAssembler* masm,
                               int pos,
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index e637ba1..ae5045f 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -67,7 +67,8 @@
   // solution is to run valgrind with --smc-check=all, but this comes at a big
   // performance cost.  We can notify valgrind to invalidate its cache.
 #ifdef VALGRIND_DISCARD_TRANSLATIONS
-  VALGRIND_DISCARD_TRANSLATIONS(start, size);
+  unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+  USE(res);
 #endif
 }
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index abac2b6..f322312 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -38,71 +38,11 @@
 namespace internal {
 
 
-int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 10;
 
 
 int Deoptimizer::patch_size() {
-  return MacroAssembler::kCallInstructionLength;
-}
-
-
-#ifdef DEBUG
-// Overwrites code with int3 instructions.
-static void ZapCodeRange(Address from, Address to) {
-  CHECK(from <= to);
-  int length = static_cast<int>(to - from);
-  CodePatcher destroyer(from, length);
-  while (length-- > 0) {
-    destroyer.masm()->int3();
-  }
-}
-#endif
-
-
-// Iterate through the entries of a SafepointTable that corresponds to
-// deoptimization points.
-class SafepointTableDeoptimiztionEntryIterator {
- public:
-  explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
-      : code_(code), table_(code), index_(-1), limit_(table_.length()) {
-    FindNextIndex();
-  }
-
-  SafepointEntry Next(Address* pc) {
-    if (index_ >= limit_) {
-      *pc = NULL;
-      return SafepointEntry();  // Invalid entry.
-    }
-    *pc = code_->instruction_start() + table_.GetPcOffset(index_);
-    SafepointEntry entry = table_.GetEntry(index_);
-    FindNextIndex();
-    return entry;
-  }
-
- private:
-  void FindNextIndex() {
-    ASSERT(index_ < limit_);
-    while (++index_ < limit_) {
-      if (table_.GetEntry(index_).deoptimization_index() !=
-          Safepoint::kNoDeoptimizationIndex) {
-        return;
-      }
-    }
-  }
-
-  Code* code_;
-  SafepointTable table_;
-  // Index of next deoptimization entry. If negative after calling
-  // FindNextIndex, there are no more, and Next will return an invalid
-  // SafepointEntry.
-  int index_;
-  // Table length.
-  int limit_;
-};
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
-  // TODO(1276): Implement.
+  return Assembler::kCallInstructionLength;
 }
 
 
@@ -119,76 +59,34 @@
   // code patching below, and is not needed any more.
   code->InvalidateRelocation();
 
-  // For each return after a safepoint insert a absolute call to the
+  // For each LLazyBailout instruction insert an absolute call to the
   // corresponding deoptimization entry, or a short call to an absolute
   // jump if space is short. The absolute jumps are put in a table just
   // before the safepoint table (space was allocated there when the Code
   // object was created, if necessary).
 
   Address instruction_start = function->code()->instruction_start();
-  Address jump_table_address =
-      instruction_start + function->code()->safepoint_table_offset();
-  Address previous_pc = instruction_start;
-
-  SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
-  Address entry_pc = NULL;
-
-  SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
-  while (current_entry.is_valid()) {
-    int gap_code_size = current_entry.gap_code_size();
-    unsigned deoptimization_index = current_entry.deoptimization_index();
-
 #ifdef DEBUG
-    // Destroy the code which is not supposed to run again.
-    ZapCodeRange(previous_pc, entry_pc);
+  Address prev_call_address = NULL;
 #endif
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
     // Position where Call will be patched in.
-    Address call_address = entry_pc + gap_code_size;
-    // End of call instruction, if using a direct call to a 64-bit address.
-    Address call_end_address =
-        call_address + MacroAssembler::kCallInstructionLength;
-
-    // Find next deoptimization entry, if any.
-    Address next_pc = NULL;
-    SafepointEntry next_entry = deoptimizations.Next(&next_pc);
-
-    if (!next_entry.is_valid() || next_pc >= call_end_address) {
-      // Room enough to write a long call instruction.
-      CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
-      patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
-                           RelocInfo::NONE);
-      previous_pc = call_end_address;
-    } else {
-      // Not room enough for a long Call instruction. Write a short call
-      // instruction to a long jump placed elsewhere in the code.
-      Address short_call_end_address =
-          call_address + MacroAssembler::kShortCallInstructionLength;
-      ASSERT(next_pc >= short_call_end_address);
-
-      // Write jump in jump-table.
-      jump_table_address -= MacroAssembler::kJumpInstructionLength;
-      CodePatcher jump_patcher(jump_table_address,
-                               MacroAssembler::kJumpInstructionLength);
-      jump_patcher.masm()->Jump(
-          GetDeoptimizationEntry(deoptimization_index, LAZY),
-          RelocInfo::NONE);
-
-      // Write call to jump at call_offset.
-      CodePatcher call_patcher(call_address,
-                               MacroAssembler::kShortCallInstructionLength);
-      call_patcher.masm()->call(jump_table_address);
-      previous_pc = short_call_end_address;
-    }
-
-    // Continue with next deoptimization entry.
-    current_entry = next_entry;
-    entry_pc = next_pc;
+    Address call_address = instruction_start + deopt_data->Pc(i)->value();
+    // There is room enough to write a long call instruction because we pad
+    // LLazyBailout instructions with nops if necessary.
+    CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
+    patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
+    ASSERT(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
   }
 
-#ifdef DEBUG
-  // Destroy the code which is not supposed to run again.
-  ZapCodeRange(previous_pc, jump_table_address);
-#endif
 
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
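
Editor's note: the rewritten loop above no longer walks the safepoint table; lazy deoptimization walks the pc offsets recorded in the deoptimization input data and patches a fixed-size call at each site. A self-contained sketch of that shape follows; DeoptPoint and PatchLazyDeoptSites are hypothetical names, the 13-byte call length is an assumption, and the real code emits the call through CodePatcher/MacroAssembler rather than writing marker bytes.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for Assembler::kCallInstructionLength (assumed: movq scratch, imm64
// plus an indirect call, 13 bytes on x64).
constexpr int kCallInstructionLength = 13;

struct DeoptPoint { int pc_offset; };  // -1 means "no lazy bailout at this entry"

// Overwrite each recorded LLazyBailout site with a fixed-size call sequence.
void PatchLazyDeoptSites(uint8_t* instruction_start,
                         size_t code_size,
                         const std::vector<DeoptPoint>& deopt_data) {
  uint8_t* prev_call_address = nullptr;
  for (size_t i = 0; i < deopt_data.size(); ++i) {
    if (deopt_data[i].pc_offset == -1) continue;  // entry has no lazy bailout
    uint8_t* call_address = instruction_start + deopt_data[i].pc_offset;
    // The sites are padded with nops, so a full-length call always fits.
    // Here we just fill marker bytes to keep the sketch self-contained.
    for (int b = 0; b < kCallInstructionLength; ++b) call_address[b] = 0x90;
    assert(prev_call_address == nullptr ||
           call_address >= prev_call_address + kCallInstructionLength);
    assert(call_address + kCallInstructionLength <=
           instruction_start + code_size);
    prev_call_address = call_address;
  }
}
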
@@ -203,11 +101,6 @@
     PrintF("[forced deoptimization: ");
     function->PrintName();
     PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
-#ifdef DEBUG
-    if (FLAG_print_code) {
-      code->PrintLn();
-    }
-#endif
   }
 }
 
@@ -316,7 +209,7 @@
   USE(height_in_bytes);
 
   unsigned fixed_size = ComputeFixedSize(function_);
-  unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+  unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
   unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
@@ -340,6 +233,9 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -448,12 +344,15 @@
   // The 'fixed' part of the frame consists of the incoming parameters and
   // the part described by JavaScriptFrameConstants.
   unsigned fixed_frame_size = ComputeFixedSize(function);
-  unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+  unsigned input_frame_size = input_->GetFrameSize();
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -584,15 +483,33 @@
   output_frame->SetState(Smi::FromInt(state));
 
   // Set the continuation for the topmost frame.
-  if (is_topmost) {
+  if (is_topmost && bailout_type_ != DEBUGGER) {
     Code* continuation = (bailout_type_ == EAGER)
         ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
         : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
     output_frame->SetContinuation(
         reinterpret_cast<intptr_t>(continuation->entry()));
   }
+}
 
-  if (output_count_ - 1 == frame_index) iterator->Done();
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  // Set the register values. The values are not important as there are no
+  // callee saved registers in JavaScript frames, so all registers are
+  // spilled. Registers rbp and rsp are set to the correct values though.
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+  }
 }
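
Editor's note: FillInputFrame above seeds the deoptimizer's input frame: general register values are mostly arbitrary because everything is spilled, only rsp/rbp matter, double registers are zeroed, and the frame slots are copied straight off the live stack. A simplified model with a hypothetical InputFrame struct; register codes 4 and 5 for rsp/rbp follow the usual x64 encoding.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct InputFrame {
  uint64_t registers[16];
  double double_registers[16];
  std::vector<uint64_t> slots;
};

void FillInputFrameSketch(InputFrame* input, const uint64_t* tos,
                          uint64_t sp, uint64_t fp, size_t frame_bytes) {
  // No callee-saved registers in JavaScript frames, so the values are
  // placeholders except for the stack and frame pointers.
  for (int i = 0; i < 16; ++i) input->registers[i] = i * 4;
  input->registers[4] = sp;   // rsp
  input->registers[5] = fp;   // rbp
  for (int i = 0; i < 16; ++i) input->double_registers[i] = 0.0;
  // Copy the live frame content verbatim, one word at a time.
  input->slots.resize(frame_bytes / sizeof(uint64_t));
  std::memcpy(input->slots.data(), tos, frame_bytes);
}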
 
 
@@ -617,7 +534,7 @@
   // We push all registers onto the stack, even though we do not need
   // to restore all later.
   for (int i = 0; i < kNumberOfRegisters; i++) {
-    Register r = Register::toRegister(i);
+    Register r = Register::from_code(i);
     __ push(r);
   }
 
@@ -775,12 +692,12 @@
 
   // Restore the registers from the stack.
   for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) {
-    Register r = Register::toRegister(i);
+    Register r = Register::from_code(i);
     // Do not restore rsp, simply pop the value into the next register
     // and overwrite this afterwards.
     if (r.is(rsp)) {
       ASSERT(i > 0);
-      r = Register::toRegister(i - 1);
+      r = Register::from_code(i - 1);
     }
     __ pop(r);
   }
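
Editor's note: the pop loop above now maps loop indices to registers with Register::from_code(); the interesting wrinkle is rsp, whose saved slot is popped into the neighbouring register and then simply overwritten on the next iteration. A small array-based simulation of that policy (not V8 types) follows.

#include <array>
#include <cassert>
#include <cstdint>
#include <vector>

constexpr int kNumberOfRegisters = 16;
constexpr int kRspCode = 4;  // rsp's encoding on x64

// Pop saved values back into registers, highest code first. The slot that
// would restore rsp is popped into the preceding register instead, and that
// register is then overwritten with its own value on the next iteration.
void RestoreRegisters(std::array<uint64_t, kNumberOfRegisters>* regs,
                      std::vector<uint64_t>* stack) {
  for (int i = kNumberOfRegisters - 1; i >= 0; --i) {
    int r = i;
    if (r == kRspCode) {
      assert(i > 0);
      r = i - 1;
    }
    (*regs)[r] = stack->back();
    stack->pop_back();
  }
}
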
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 2b7b7b7..1b8871f 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -58,7 +58,7 @@
 };
 
 
-static ByteMnemonic two_operands_instr[] = {
+static const ByteMnemonic two_operands_instr[] = {
   { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
   { 0x01, OPER_REG_OP_ORDER,      "add" },
   { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
@@ -105,7 +105,7 @@
 };
 
 
-static ByteMnemonic zero_operands_instr[] = {
+static const ByteMnemonic zero_operands_instr[] = {
   { 0xC3, UNSET_OP_ORDER, "ret" },
   { 0xC9, UNSET_OP_ORDER, "leave" },
   { 0xF4, UNSET_OP_ORDER, "hlt" },
@@ -125,14 +125,14 @@
 };
 
 
-static ByteMnemonic call_jump_instr[] = {
+static const ByteMnemonic call_jump_instr[] = {
   { 0xE8, UNSET_OP_ORDER, "call" },
   { 0xE9, UNSET_OP_ORDER, "jmp" },
   { -1, UNSET_OP_ORDER, "" }
 };
 
 
-static ByteMnemonic short_immediate_instr[] = {
+static const ByteMnemonic short_immediate_instr[] = {
   { 0x05, UNSET_OP_ORDER, "add" },
   { 0x0D, UNSET_OP_ORDER, "or" },
   { 0x15, UNSET_OP_ORDER, "adc" },
@@ -145,7 +145,7 @@
 };
 
 
-static const char* conditional_code_suffix[] = {
+static const char* const conditional_code_suffix[] = {
   "o", "no", "c", "nc", "z", "nz", "na", "a",
   "s", "ns", "pe", "po", "l", "ge", "le", "g"
 };
@@ -193,7 +193,7 @@
   InstructionDesc instructions_[256];
   void Clear();
   void Init();
-  void CopyTable(ByteMnemonic bm[], InstructionType type);
+  void CopyTable(const ByteMnemonic bm[], InstructionType type);
   void SetTableRange(InstructionType type, byte start, byte end, bool byte_size,
                      const char* mnem);
   void AddJumpConditionalShort();
@@ -228,7 +228,8 @@
 }
 
 
-void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
+void InstructionTable::CopyTable(const ByteMnemonic bm[],
+                                 InstructionType type) {
   for (int i = 0; bm[i].b >= 0; i++) {
     InstructionDesc* id = &instructions_[bm[i].b];
     id->mnem = bm[i].mnem;
@@ -1021,12 +1022,26 @@
         current += PrintRightOperand(current);
         AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x0b) {
+        get_modrm(*current, &mod, &regop, &rm);
+        // roundsd xmm, xmm/m64, imm8
+        AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(", %d", (*current) & 3);
+        current += 1;
       } else {
         UnimplementedInstruction();
       }
     } else {
       get_modrm(*current, &mod, &regop, &rm);
-      if (opcode == 0x6E) {
+      if (opcode == 0x28) {
+        AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+      } else if (opcode == 0x29) {
+        AppendToBuffer("movapd ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0x6E) {
         AppendToBuffer("mov%c %s,",
                        rex_w() ? 'q' : 'd',
                        NameOfXMMRegister(regop));
@@ -1044,11 +1059,16 @@
         AppendToBuffer("movdqa ");
         current += PrintRightXMMOperand(current);
         AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0xD6) {
+        AppendToBuffer("movq ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0x50) {
+        AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
+        current += PrintRightXMMOperand(current);
       } else {
         const char* mnemonic = "?";
-        if (opcode == 0x50) {
-          mnemonic = "movmskpd";
-        } else  if (opcode == 0x54) {
+        if (opcode == 0x54) {
           mnemonic = "andpd";
         } else  if (opcode == 0x56) {
           mnemonic = "orpd";
@@ -1145,6 +1165,11 @@
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
       current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x7E) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else {
       UnimplementedInstruction();
     }
@@ -1162,6 +1187,22 @@
       current += 4;
     }  // else no immediate displacement.
     AppendToBuffer("nop");
+
+  } else if (opcode == 0x28) {
+    // movaps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
+  } else if (opcode == 0x29) {
+    // movaps xmm/m128, xmm
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps ");
+    current += PrintRightXMMOperand(current);
+    AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
   } else if (opcode == 0xA2 || opcode == 0x31) {
     // RDTSC or CPUID
     AppendToBuffer("%s", mnemonic);
@@ -1173,6 +1214,13 @@
     byte_size_operand_ = idesc.byte_size_operation;
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
 
+  } else if (opcode == 0x57) {
+    // xorps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
   } else if ((opcode & 0xF0) == 0x80) {
     // Jcc: Conditional jump (branch).
     current = data + JumpConditional(data);
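
Editor's note: the disassembler changes above are mostly const-correctness (tables moved into read-only data) plus a handful of new SSE mnemonics. For readers unfamiliar with the table scheme, here is a minimal standalone version of the sentinel-terminated mnemonic arrays being copied into a dense 256-entry opcode table; the types and the tiny sample table are stand-ins.

struct ByteMnemonic { int b; const char* mnem; };  // b == -1 terminates the array

static const ByteMnemonic two_operands_instr[] = {
  { 0x00, "add" }, { 0x29, "sub" }, { 0x3B, "cmp" }, { -1, "" }
};

struct InstructionDesc { const char* mnem; };

class InstructionTable {
 public:
  InstructionTable() { CopyTable(two_operands_instr); }
  const char* MnemonicFor(unsigned char opcode) const {
    return instructions_[opcode].mnem ? instructions_[opcode].mnem : "?";
  }
 private:
  // Accepting const ByteMnemonic[] is what lets the source tables be const.
  void CopyTable(const ByteMnemonic bm[]) {
    for (int i = 0; bm[i].b >= 0; i++) instructions_[bm[i].b].mnem = bm[i].mnem;
  }
  InstructionDesc instructions_[256] = {};
};
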
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index b14267c..7012c76 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,12 +50,13 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset  = 0 * kPointerSize;
-  static const int kFPOffset    = 1 * kPointerSize;
-  static const int kStateOffset = 2 * kPointerSize;
-  static const int kPCOffset    = 3 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kContextOffset = 1 * kPointerSize;
+  static const int kFPOffset      = 2 * kPointerSize;
+  static const int kStateOffset   = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
-  static const int kSize = 4 * kPointerSize;
+  static const int kSize = kPCOffset + kPointerSize;
 };
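
Editor's note: the new handler layout adds a context slot between the next-handler link and the saved frame pointer, so a handler now occupies five words instead of four. A compile-time mirror of the arithmetic follows; the mirror itself is not V8 code, and kPointerSize == 8 is the x64 assumption.

#include <cstddef>

constexpr std::size_t kPointerSize   = 8;  // x64
constexpr std::size_t kNextOffset    = 0 * kPointerSize;
constexpr std::size_t kContextOffset = 1 * kPointerSize;  // the new slot
constexpr std::size_t kFPOffset      = 2 * kPointerSize;
constexpr std::size_t kStateOffset   = 3 * kPointerSize;
constexpr std::size_t kPCOffset      = 4 * kPointerSize;
constexpr std::size_t kSize          = kPCOffset + kPointerSize;

static_assert(kSize == 5 * kPointerSize,
              "a stack handler is now five words: next, context, fp, state, pc");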
 
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 6933d78..556523f 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -44,10 +44,14 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
 class JumpPatchSite BASE_EMBEDDED {
  public:
-  explicit JumpPatchSite(MacroAssembler* masm)
-      : masm_(masm) {
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
 #ifdef DEBUG
     info_emitted_ = false;
 #endif
@@ -57,34 +61,40 @@
     ASSERT(patch_site_.is_bound() == info_emitted_);
   }
 
-  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfNotSmi(Register reg,
+                        Label* target,
+                        Label::Distance near_jump = Label::kFar) {
     __ testb(reg, Immediate(kSmiTagMask));
-    EmitJump(not_carry, target);   // Always taken before patched.
+    EmitJump(not_carry, target, near_jump);   // Always taken before patched.
   }
 
-  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfSmi(Register reg,
+                     Label* target,
+                     Label::Distance near_jump = Label::kFar) {
     __ testb(reg, Immediate(kSmiTagMask));
-    EmitJump(carry, target);  // Never taken before patched.
+    EmitJump(carry, target, near_jump);  // Never taken before patched.
   }
 
   void EmitPatchInfo() {
-    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
-    ASSERT(is_int8(delta_to_patch_site));
-    __ testl(rax, Immediate(delta_to_patch_site));
+    if (patch_site_.is_bound()) {
+      int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+      ASSERT(is_int8(delta_to_patch_site));
+      __ testl(rax, Immediate(delta_to_patch_site));
 #ifdef DEBUG
-    info_emitted_ = true;
+      info_emitted_ = true;
 #endif
+    } else {
+      __ nop();  // Signals no inlined code.
+    }
   }
 
-  bool is_bound() const { return patch_site_.is_bound(); }
-
  private:
   // jc will be patched with jz, jnc will become jnz.
-  void EmitJump(Condition cc, NearLabel* target) {
+  void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
     ASSERT(!patch_site_.is_bound() && !info_emitted_);
     ASSERT(cc == carry || cc == not_carry);
     __ bind(&patch_site_);
-    __ j(cc, target);
+    __ j(cc, target, near_jump);
   }
 
   MacroAssembler* masm_;
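
Editor's note: much of the mechanical churn in this file replaces NearLabel with Label plus an explicit Label::kNear distance hint, which merely tells the assembler it may use the 2-byte rel8 form of a conditional branch rather than the 6-byte rel32 form. A toy encoder showing the size difference (not V8's Assembler API) follows.

#include <cstdint>
#include <vector>

enum class Distance { kNear, kFar };

// Appends a jcc with an 8-bit or 32-bit displacement depending on the hint.
void EmitJcc(std::vector<uint8_t>* code, uint8_t cc, int32_t disp, Distance d) {
  if (d == Distance::kNear) {
    code->push_back(0x70 | cc);                 // jcc rel8: 2 bytes
    code->push_back(static_cast<uint8_t>(disp));
  } else {
    code->push_back(0x0F);                      // jcc rel32: 6 bytes
    code->push_back(0x80 | cc);
    for (int i = 0; i < 4; ++i)
      code->push_back(static_cast<uint8_t>(disp >> (8 * i)));
  }
}
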
@@ -111,6 +121,7 @@
 void FullCodeGenerator::Generate(CompilationInfo* info) {
   ASSERT(info_ == NULL);
   info_ = info;
+  scope_ = info->scope();
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -120,13 +131,29 @@
     __ int3();
   }
 #endif
+
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). rcx is zero for method calls and non-zero for
+  // function calls.
+  if (info->is_strict_mode() || info->is_native()) {
+    Label ok;
+    __ testq(rcx, rcx);
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+    __ bind(&ok);
+  }
+
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
   __ push(rdi);  // Callee's JS Function.
 
   { Comment cmnt(masm_, "[ Allocate locals");
-    int locals_count = scope()->num_stack_slots();
+    int locals_count = info->scope()->num_stack_slots();
     if (locals_count == 1) {
       __ PushRoot(Heap::kUndefinedValueRootIndex);
     } else if (locals_count > 1) {
@@ -140,7 +167,7 @@
   bool function_in_register = true;
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
@@ -149,7 +176,7 @@
       FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
-      __ CallRuntime(Runtime::kNewContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in both rax and rsi.  It replaces the context
@@ -157,16 +184,16 @@
     __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
 
     // Copy any necessary parameters into the context.
-    int num_parameters = scope()->num_parameters();
+    int num_parameters = info->scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->AsSlot();
-      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
         // Load parameter from stack.
         __ movq(rax, Operand(rbp, parameter_offset));
         // Store it in the context.
-        int context_offset = Context::SlotOffset(slot->index());
+        int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
         // Update the write barrier. This clobbers all involved
         // registers, so we have use a third register to avoid
@@ -189,27 +216,22 @@
       __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     }
     // The receiver is just before the parameters on the caller's stack.
-    int offset = scope()->num_parameters() * kPointerSize;
+    int num_parameters = info->scope()->num_parameters();
+    int offset = num_parameters * kPointerSize;
     __ lea(rdx,
            Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
     __ push(rdx);
-    __ Push(Smi::FromInt(scope()->num_parameters()));
+    __ Push(Smi::FromInt(num_parameters));
     // Arguments to ArgumentsAccessStub:
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub stub(
         is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
-                         : ArgumentsAccessStub::NEW_NON_STRICT);
+                         : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
     __ CallStub(&stub);
 
-    Variable* arguments_shadow = scope()->arguments_shadow();
-    if (arguments_shadow != NULL) {
-      // Store new arguments object in both "arguments" and ".arguments" slots.
-      __ movq(rcx, rax);
-      Move(arguments_shadow->AsSlot(), rcx, rbx, rdx);
-    }
-    Move(arguments->AsSlot(), rax, rbx, rdx);
+    SetVar(arguments, rax, rbx, rdx);
   }
 
   if (FLAG_trace) {
@@ -221,21 +243,24 @@
   if (scope()->HasIllegalRedeclaration()) {
     Comment cmnt(masm_, "[ Declarations");
     scope()->VisitIllegalRedeclaration(this);
+
   } else {
+    PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
     { Comment cmnt(masm_, "[ Declarations");
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
-      NearLabel ok;
+      PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+      Label ok;
       __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-      __ j(above_equal, &ok);
+      __ j(above_equal, &ok, Label::kNear);
       StackCheckStub stub;
       __ CallStub(&stub);
       __ bind(&ok);
@@ -264,9 +289,9 @@
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
-  NearLabel ok;
+  Label ok;
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &ok);
+  __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
@@ -312,7 +337,7 @@
     __ movq(rsp, rbp);
     __ pop(rbp);
 
-    int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+    int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
     __ Ret(arguments_bytes, rcx);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -332,26 +357,28 @@
 }
 
 
-void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
-void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
-  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
-  __ movq(result_register(), slot_operand);
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
 }
 
 
-void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
-  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
-  __ push(slot_operand);
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  MemOperand operand = codegen()->VarOperand(var, result_register());
+  __ push(operand);
 }
 
 
-void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
-  codegen()->Move(result_register(), slot);
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+  codegen()->GetVar(result_register(), var);
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
 }
 
 
@@ -384,7 +411,7 @@
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
     __ LoadRoot(result_register(), index);
-    codegen()->DoTest(true_label_, false_label_, fall_through_);
+    codegen()->DoTest(this);
   }
 }
 
@@ -429,7 +456,7 @@
   } else {
     // For simplicity we always test the accumulator register.
     __ Move(result_register(), lit);
-    codegen()->DoTest(true_label_, false_label_, fall_through_);
+    codegen()->DoTest(this);
   }
 }
 
@@ -465,7 +492,7 @@
   __ Drop(count);
   __ Move(result_register(), reg);
   codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
-  codegen()->DoTest(true_label_, false_label_, fall_through_);
+  codegen()->DoTest(this);
 }
 
 
@@ -479,10 +506,10 @@
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ Move(result_register(), isolate()->factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ Move(result_register(), isolate()->factory()->false_value());
   __ bind(&done);
@@ -492,10 +519,10 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ Push(isolate()->factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ Push(isolate()->factory()->false_value());
   __ bind(&done);
@@ -540,28 +567,14 @@
 }
 
 
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+                               Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  // Emit the inlined tests assumed by the stub.
-  __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
-  __ j(equal, if_false);
-  __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-  __ j(equal, if_true);
-  __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
-  __ j(equal, if_false);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Cmp(result_register(), Smi::FromInt(0));
-  __ j(equal, if_false);
-  Condition is_smi = masm_->CheckSmi(result_register());
-  __ j(is_smi, if_true);
-
-  // Call the ToBoolean stub for all other cases.
-  ToBooleanStub stub;
+  ToBooleanStub stub(result_register());
   __ push(result_register());
   __ CallStub(&stub);
-  __ testq(rax, rax);
-
+  __ testq(result_register(), result_register());
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
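
Editor's note: the deleted instruction sequence hand-inlined the easy ToBoolean cases before falling back to the stub; the new code simply calls ToBooleanStub and tests its result. For reference, a self-contained mirror of exactly the cases the old inline code decided itself; the tag names and Value struct are illustrative, not V8's.

#include <cstdint>

enum class Tag { kUndefined, kTrue, kFalse, kSmi, kOther };
struct Value { Tag tag; int64_t smi; };

// Returns 1/0 for the cases the old inline sequence decided, and -1 for
// everything it punted to the stub (strings, heap numbers, null, ...).
int FastToBoolean(const Value& v) {
  switch (v.tag) {
    case Tag::kUndefined: return 0;           // undefined is falsy
    case Tag::kTrue:      return 1;
    case Tag::kFalse:     return 0;
    case Tag::kSmi:       return v.smi != 0;  // Smi zero is falsy, others truthy
    case Tag::kOther:     return -1;          // fall through to ToBooleanStub
  }
  return -1;
}
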
@@ -582,43 +595,53 @@
 }
 
 
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-    case Slot::LOCAL:
-      return Operand(rbp, SlotOffset(slot));
-    case Slot::CONTEXT: {
-      int context_chain_length =
-          scope()->ContextChainLength(slot->var()->scope());
-      __ LoadContext(scratch, context_chain_length);
-      return ContextOperand(scratch, slot->index());
-    }
-    case Slot::LOOKUP:
-      UNREACHABLE();
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  ASSERT(var->IsStackAllocated());
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
   }
-  UNREACHABLE();
-  return Operand(rax, 0);
+  return Operand(rbp, offset);
 }
 
 
-void FullCodeGenerator::Move(Register destination, Slot* source) {
-  MemOperand location = EmitSlotSearch(source, destination);
-  __ movq(destination, location);
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
 }
 
 
-void FullCodeGenerator::Move(Slot* dst,
-                             Register src,
-                             Register scratch1,
-                             Register scratch2) {
-  ASSERT(dst->type() != Slot::LOOKUP);  // Not yet implemented.
-  ASSERT(!scratch1.is(src) && !scratch2.is(src));
-  MemOperand location = EmitSlotSearch(dst, scratch1);
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  MemOperand location = VarOperand(var, dest);
+  __ movq(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+                               Register src,
+                               Register scratch0,
+                               Register scratch1) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  ASSERT(!scratch0.is(src));
+  ASSERT(!scratch0.is(scratch1));
+  ASSERT(!scratch1.is(src));
+  MemOperand location = VarOperand(var, scratch0);
   __ movq(location, src);
   // Emit the write barrier code if the location is in the heap.
-  if (dst->type() == Slot::CONTEXT) {
-    int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
-    __ RecordWrite(scratch1, offset, src, scratch2);
+  if (var->IsContextSlot()) {
+    int offset = Context::SlotOffset(var->index());
+    __ RecordWrite(scratch0, offset, src, scratch1);
   }
 }
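
Editor's note: StackOperand() above turns a variable index into an rbp-relative offset: negative index scaling, then a base adjustment that differs for parameters (above the return address) and locals (below the saved frame pointer). A worked example of the same arithmetic follows; kPointerSize == 8 is the x64 value, while kLocal0Offset here is only a placeholder standing in for JavaScriptFrameConstants::kLocal0Offset.

#include <cstdio>

constexpr int kPointerSize  = 8;                  // x64
constexpr int kLocal0Offset = -3 * kPointerSize;  // placeholder; see frames-x64.h

// Mirrors the arithmetic in StackOperand().
int StackSlotOffset(bool is_parameter, int index, int num_parameters) {
  int offset = -index * kPointerSize;  // higher indexes live at lower addresses
  if (is_parameter) {
    offset += (num_parameters + 1) * kPointerSize;  // +1 skips the return address
  } else {
    offset += kLocal0Offset;
  }
  return offset;  // rbp-relative
}

int main() {
  std::printf("param 0 of 2: rbp%+d\n", StackSlotOffset(true, 0, 2));   // above rbp
  std::printf("local 0:      rbp%+d\n", StackSlotOffset(false, 0, 2));  // below rbp
}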
 
@@ -632,8 +655,8 @@
   // preparation to avoid preparing with the same AST id twice.
   if (!context()->IsTest() || !info_->IsOptimizable()) return;
 
-  NearLabel skip;
-  if (should_normalize) __ jmp(&skip);
+  Label skip;
+  if (should_normalize) __ jmp(&skip, Label::kNear);
 
   ForwardBailoutStack* current = forward_bailout_stack_;
   while (current != NULL) {
@@ -649,118 +672,98 @@
 }
 
 
-void FullCodeGenerator::EmitDeclaration(Variable* variable,
+void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
                                         Variable::Mode mode,
-                                        FunctionLiteral* function) {
-  Comment cmnt(masm_, "[ Declaration");
-  ASSERT(variable != NULL);  // Must have been resolved.
-  Slot* slot = variable->AsSlot();
-  Property* prop = variable->AsProperty();
+                                        FunctionLiteral* function,
+                                        int* global_count) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      ++(*global_count);
+      break;
 
-  if (slot != NULL) {
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        if (mode == Variable::CONST) {
-          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-          __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ movq(Operand(rbp, SlotOffset(slot)), result_register());
-        }
-        break;
-
-      case Slot::CONTEXT:
-        // We bypass the general EmitSlotSearch because we know more about
-        // this specific context.
-
-        // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-        if (FLAG_debug_code) {
-          // Check if we have the correct context pointer.
-          __ movq(rbx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
-          __ cmpq(rbx, rsi);
-          __ Check(equal, "Unexpected declaration in current context.");
-        }
-        if (mode == Variable::CONST) {
-          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-          __ movq(ContextOperand(rsi, slot->index()), kScratchRegister);
-          // No write barrier since the hole value is in old space.
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ movq(ContextOperand(rsi, slot->index()), result_register());
-          int offset = Context::SlotOffset(slot->index());
-          __ movq(rbx, rsi);
-          __ RecordWrite(rbx, offset, result_register(), rcx);
-        }
-        break;
-
-      case Slot::LOOKUP: {
-        __ push(rsi);
-        __ Push(variable->name());
-        // Declaration nodes are always introduced in one of two modes.
-        ASSERT(mode == Variable::VAR || mode == Variable::CONST);
-        PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
-        __ Push(Smi::FromInt(attr));
-        // Push initial value, if any.
-        // Note: For variables we must not push an initial value (such as
-        // 'undefined') because we may have a (legal) redeclaration and we
-        // must not destroy the current value.
-        if (mode == Variable::CONST) {
-          __ PushRoot(Heap::kTheHoleValueRootIndex);
-        } else if (function != NULL) {
-          VisitForStackValue(function);
-        } else {
-          __ Push(Smi::FromInt(0));  // no initial value!
-        }
-        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-        break;
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (function != NULL) {
+        Comment cmnt(masm_, "[ Declaration");
+        VisitForAccumulatorValue(function);
+        __ movq(StackOperand(variable), result_register());
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        Comment cmnt(masm_, "[ Declaration");
+        __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+        __ movq(StackOperand(variable), kScratchRegister);
       }
-    }
+      break;
 
-  } else if (prop != NULL) {
-    if (function != NULL || mode == Variable::CONST) {
-      // We are declaring a function or constant that rewrites to a
-      // property.  Use (keyed) IC to set the initial value.  We
-      // cannot visit the rewrite because it's shared and we risk
-      // recording duplicate AST IDs for bailouts from optimized code.
-      ASSERT(prop->obj()->AsVariableProxy() != NULL);
-      { AccumulatorValueContext for_object(this);
-        EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+    case Variable::CONTEXT:
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+        __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
+        __ Check(not_equal, "Declaration in with context.");
+        __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
+        __ Check(not_equal, "Declaration in catch context.");
       }
       if (function != NULL) {
-        __ push(rax);
+        Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
-        __ pop(rdx);
-      } else {
-        __ movq(rdx, rax);
-        __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+        __ movq(ContextOperand(rsi, variable->index()), result_register());
+        int offset = Context::SlotOffset(variable->index());
+        __ movq(rbx, rsi);
+        __ RecordWrite(rbx, offset, result_register(), rcx);
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        Comment cmnt(masm_, "[ Declaration");
+        __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+        __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
+        // No write barrier since the hole value is in old space.
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
       }
-      ASSERT(prop->key()->AsLiteral() != NULL &&
-             prop->key()->AsLiteral()->handle()->IsSmi());
-      __ Move(rcx, prop->key()->AsLiteral()->handle());
+      break;
 
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ Declaration");
+      __ push(rsi);
+      __ Push(variable->name());
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ Push(Smi::FromInt(attr));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (function != NULL) {
+        VisitForStackValue(function);
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
+        __ PushRoot(Heap::kTheHoleValueRootIndex);
+      } else {
+        __ Push(Smi::FromInt(0));  // Indicates no initial value.
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
     }
   }
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
-  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
-}
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
 
 
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
   __ Push(pairs);
-  __ Push(Smi::FromInt(is_eval() ? 1 : 0));
-  __ Push(Smi::FromInt(strict_mode_flag()));
-  __ CallRuntime(Runtime::kDeclareGlobals, 4);
+  __ Push(Smi::FromInt(DeclareGlobalsFlags()));
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
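
Editor's note: EmitDeclaration now dispatches on Variable::location() rather than on Slot types, and unallocated (global) declarations are only counted here so they can be batched into the Runtime::kDeclareGlobals call shown just above. A rough outline of that dispatch in plain C++; the enums and the printed action strings are illustrative stand-ins, not the generated code.

#include <cstdio>

enum class Location { kUnallocated, kParameter, kLocal, kContext, kLookup };
enum class Mode { kVar, kConst, kLet };

void EmitDeclarationSketch(Location where, Mode mode, bool has_function,
                           int* global_count) {
  switch (where) {
    case Location::kUnallocated:
      ++*global_count;  // counted here, declared later in one runtime call
      break;
    case Location::kParameter:
    case Location::kLocal:
      if (has_function) {
        std::puts("evaluate the function literal, store it in the stack slot");
      } else if (mode == Mode::kConst || mode == Mode::kLet) {
        std::puts("store the hole sentinel in the stack slot");
      }  // a plain 'var' needs no code at all here
      break;
    case Location::kContext:
      if (has_function) {
        std::puts("store to the context slot and emit a write barrier");
      } else if (mode == Mode::kConst || mode == Mode::kLet) {
        std::puts("store the hole sentinel (no barrier: the hole is in old space)");
      }
      break;
    case Location::kLookup:
      std::puts("push name and attributes, call Runtime::kDeclareContextSlot");
      break;
  }
}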
 
@@ -801,10 +804,10 @@
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
     JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
-      NearLabel slow_case;
+      Label slow_case;
       __ movq(rcx, rdx);
       __ or_(rcx, rax);
-      patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+      patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
 
       __ cmpq(rdx, rax);
       __ j(not_equal, &next_test);
@@ -816,7 +819,8 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    EmitCallIC(ic, &patch_site);
+    __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    patch_site.EmitPatchInfo();
 
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
@@ -829,7 +833,7 @@
   __ bind(&next_test);
   __ Drop(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
-    __ jmp(nested_statement.break_target());
+    __ jmp(nested_statement.break_label());
   } else {
     __ jmp(default_clause->body_target());
   }
@@ -843,7 +847,7 @@
     VisitStatements(clause->statements());
   }
 
-  __ bind(nested_statement.break_target());
+  __ bind(nested_statement.break_label());
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
@@ -870,7 +874,7 @@
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(rax, &convert);
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
   __ j(above_equal, &done_convert);
   __ bind(&convert);
   __ push(rax);
@@ -901,9 +905,8 @@
   // check for an enum cache.  Leave the map in rbx for the subsequent
   // prototype load.
   __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
-  __ cmpq(rdx, empty_descriptor_array_value);
-  __ j(equal, &call_runtime);
+  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(rdx, &call_runtime);
 
   // Check that there is an enum cache in the non-empty instance
   // descriptors (rdx).  This is the case if the next enumeration
@@ -912,9 +915,9 @@
   __ JumpIfSmi(rdx, &call_runtime);
 
   // For all objects but the receiver, check that the cache is empty.
-  NearLabel check_prototype;
+  Label check_prototype;
   __ cmpq(rcx, rax);
-  __ j(equal, &check_prototype);
+  __ j(equal, &check_prototype, Label::kNear);
   __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   __ cmpq(rdx, empty_fixed_array_value);
   __ j(not_equal, &call_runtime);
@@ -927,9 +930,9 @@
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
-  NearLabel use_cache;
+  Label use_cache;
   __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
-  __ jmp(&use_cache);
+  __ jmp(&use_cache, Label::kNear);
 
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
@@ -939,14 +942,14 @@
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
   // to do a slow check.
-  NearLabel fixed_array;
+  Label fixed_array;
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kMetaMapRootIndex);
-  __ j(not_equal, &fixed_array);
+  __ j(not_equal, &fixed_array, Label::kNear);
 
   // We got a map in register rax. Get the enumeration cache from it.
   __ bind(&use_cache);
-  __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(rax, rcx);
   __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
   __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
@@ -970,7 +973,7 @@
   __ bind(&loop);
   __ movq(rax, Operand(rsp, 0 * kPointerSize));  // Get the current index.
   __ cmpq(rax, Operand(rsp, 1 * kPointerSize));  // Compare to the array length.
-  __ j(above_equal, loop_statement.break_target());
+  __ j(above_equal, loop_statement.break_label());
 
   // Get the current entry of the array into register rbx.
   __ movq(rbx, Operand(rsp, 2 * kPointerSize));
@@ -986,10 +989,10 @@
 
   // Check if the expected map still matches that of the enumerable.
   // If not, we have to filter the key.
-  NearLabel update_each;
+  Label update_each;
   __ movq(rcx, Operand(rsp, 4 * kPointerSize));
   __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ j(equal, &update_each);
+  __ j(equal, &update_each, Label::kNear);
 
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
@@ -998,7 +1001,7 @@
   __ push(rbx);  // Current entry.
   __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
   __ Cmp(rax, Smi::FromInt(0));
-  __ j(equal, loop_statement.continue_target());
+  __ j(equal, loop_statement.continue_label());
   __ movq(rbx, rax);
 
   // Update the 'each' property or variable from the possibly filtered
@@ -1015,14 +1018,14 @@
 
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
-  __ bind(loop_statement.continue_target());
+  __ bind(loop_statement.continue_label());
   __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
 
   EmitStackCheck(stmt);
   __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
-  __ bind(loop_statement.break_target());
+  __ bind(loop_statement.break_label());
   __ addq(rsp, Immediate(5 * kPointerSize));
 
   // Exit and decrement the loop depth.
@@ -1061,14 +1064,13 @@
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  EmitVariableLoad(expr->var());
+  EmitVariableLoad(expr);
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow) {
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+                                                      TypeofState typeof_state,
+                                                      Label* slow) {
   Register context = rsi;
   Register temp = rdx;
 
@@ -1082,8 +1084,7 @@
         __ j(not_equal, slow);
       }
       // Load next context in chain.
-      __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+      __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
       // Walk the rest of the chain without clobbering rsi.
       context = temp;
     }
@@ -1097,7 +1098,7 @@
   if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
-    NearLabel next, fast;
+    Label next, fast;
     if (!context.is(temp)) {
       __ movq(temp, context);
     }
@@ -1106,13 +1107,12 @@
     __ bind(&next);
     // Terminate at global context.
     __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
-    __ j(equal, &fast);
+    __ j(equal, &fast, Label::kNear);
     // Check that extension is NULL.
     __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
     __ j(not_equal, slow);
     // Load next context in chain.
-    __ movq(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
-    __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+    __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
     __ jmp(&next);
     __ bind(&fast);
   }
@@ -1120,23 +1120,22 @@
   // All extension objects were empty and it is safe to use a global
   // load IC call.
   __ movq(rax, GlobalObjectOperand());
-  __ Move(rcx, slot->var()->name());
+  __ Move(rcx, var->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  EmitCallIC(ic, mode);
+  __ call(ic, mode);
 }
 
 
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Label* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+                                                                Label* slow) {
+  ASSERT(var->IsContextSlot());
   Register context = rsi;
   Register temp = rbx;
 
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
       if (s->calls_eval()) {
         // Check that extension is NULL.
@@ -1144,8 +1143,7 @@
                 Immediate(0));
         __ j(not_equal, slow);
       }
-      __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+      __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
       // Walk the rest of the chain without clobbering rsi.
       context = temp;
     }
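
Editor's note: both extension-check walks above now step through Context::PREVIOUS_INDEX directly instead of reloading the previous context via the closure. A small model of the walk, where hitting a non-null extension (eval-introduced bindings) forces the slow path; the Context struct here is a stand-in and collapses scopes and contexts for brevity.

// Hypothetical stand-in for v8::internal::Context; only the fields the walk
// needs are modelled.
struct Context {
  Context* previous;   // what Context::PREVIOUS_INDEX now loads directly
  void* extension;     // non-null when eval introduced bindings at this level
  bool calls_eval;
};

// Returns the context that should hold the slot, or nullptr when an extension
// object is found and the lookup must take the slow runtime path.
Context* WalkContextChain(Context* current, int hops) {
  for (int i = 0; i < hops; ++i) {
    if (current->calls_eval && current->extension != nullptr) return nullptr;
    current = current->previous;  // previously: closure -> closure's context
  }
  return current;
}
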
@@ -1157,143 +1155,93 @@
   // This function is used only for loads, not stores, so it's safe to
   // return an rsi-based operand (the write barrier cannot be allowed to
   // destroy the rsi register).
-  return ContextOperand(context, slot->index());
+  return ContextOperand(context, var->index());
 }
 
 
-void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow,
-    Label* done) {
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+                                                  TypeofState typeof_state,
+                                                  Label* slow,
+                                                  Label* done) {
   // Generate fast-case code for variables that might be shadowed by
   // eval-introduced variables.  Eval is used a lot without
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      __ movq(rax,
-              ContextSlotOperandCheckExtensions(potential_slot, slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-        __ j(not_equal, done);
-        __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-      }
-      __ jmp(done);
-    } else if (rewrite != NULL) {
-      // Generate fast case for calls of an argument function.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          __ movq(rdx,
-                  ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                    slow));
-          __ Move(rax, key_literal->handle());
-          Handle<Code> ic =
-              isolate()->builtins()->KeyedLoadIC_Initialize();
-          EmitCallIC(ic, RelocInfo::CODE_TARGET);
-          __ jmp(done);
-        }
-      }
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+    Variable* local = var->local_if_not_shadowed();
+    __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
+    if (local->mode() == Variable::CONST) {
+      __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+      __ j(not_equal, done);
+      __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
     }
+    __ jmp(done);
   }
 }
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
-  // Four cases: non-this global variables, lookup slots, all other
-  // types of slots, and parameters that rewrite to explicit property
-  // accesses on the arguments object.
-  Slot* slot = var->AsSlot();
-  Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+  // Record position before possible IC call.
+  SetSourcePosition(proxy->position());
+  Variable* var = proxy->var();
 
-  if (var->is_global() && !var->is_this()) {
-    Comment cmnt(masm_, "Global variable");
-    // Use inline caching. Variable name is passed in rcx and the global
-    // object on the stack.
-    __ Move(rcx, var->name());
-    __ movq(rax, GlobalObjectOperand());
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    context()->Plug(rax);
-
-  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    Label done, slow;
-
-    // Generate code for loading from variables potentially shadowed
-    // by eval-introduced variables.
-    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    Comment cmnt(masm_, "Lookup slot");
-    __ push(rsi);  // Context.
-    __ Push(var->name());
-    __ CallRuntime(Runtime::kLoadContextSlot, 2);
-    __ bind(&done);
-
-    context()->Plug(rax);
-
-  } else if (slot != NULL) {
-    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
-                            ? "Context slot"
-                            : "Stack slot");
-    if (var->mode() == Variable::CONST) {
-      // Constants may be the hole value if they have not been initialized.
-      // Unhole them.
-      NearLabel done;
-      MemOperand slot_operand = EmitSlotSearch(slot, rax);
-      __ movq(rax, slot_operand);
-      __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &done);
-      __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-      __ bind(&done);
+  // Three cases: global variables, lookup variables, and all other types of
+  // variables.
+  switch (var->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "Global variable");
+      // Use inline caching. Variable name is passed in rcx and the global
+      // object on the stack.
+      __ Move(rcx, var->name());
+      __ movq(rax, GlobalObjectOperand());
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(rax);
-    } else {
-      context()->Plug(slot);
+      break;
     }
 
-  } else {
-    Comment cmnt(masm_, "Rewritten parameter");
-    ASSERT_NOT_NULL(property);
-    // Rewritten parameter accesses are of the form "slot[literal]".
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        Label done;
+        GetVar(rax, var);
+        __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+        __ j(not_equal, &done, Label::kNear);
+        if (var->mode() == Variable::LET) {
+          __ Push(var->name());
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+        } else {  // Variable::CONST
+          __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+        }
+        __ bind(&done);
+        context()->Plug(rax);
+      }
+      break;
+    }
 
-    // Assert that the object is in a slot.
-    Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
-    ASSERT_NOT_NULL(object_var);
-    Slot* object_slot = object_var->AsSlot();
-    ASSERT_NOT_NULL(object_slot);
-
-    // Load the object.
-    MemOperand object_loc = EmitSlotSearch(object_slot, rax);
-    __ movq(rdx, object_loc);
-
-    // Assert that the key is a smi.
-    Literal* key_literal = property->key()->AsLiteral();
-    ASSERT_NOT_NULL(key_literal);
-    ASSERT(key_literal->handle()->IsSmi());
-
-    // Load the key.
-    __ Move(rax, key_literal->handle());
-
-    // Do a keyed property load.
-    Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
-    context()->Plug(rax);
+    case Variable::LOOKUP: {
+      Label done, slow;
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      __ bind(&slow);
+      Comment cmnt(masm_, "Lookup slot");
+      __ push(rsi);  // Context.
+      __ Push(var->name());
+      __ CallRuntime(Runtime::kLoadContextSlot, 2);
+      __ bind(&done);
+      context()->Plug(rax);
+      break;
+    }
   }
 }
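
Editor's note: the LET/CONST branch above is the "read barrier": the declaration stored the hole sentinel, and a read that still sees it either throws (let) or yields undefined (const), matching the old unhole behaviour. A behavioural sketch with a hypothetical Slot/Mode representation follows.

#include <stdexcept>
#include <string>

enum class Mode { kVar, kLet, kConst };

struct Slot { bool is_hole; int value; };  // is_hole models Heap::kTheHoleValue

// Precondition: plain 'var' bindings never hold the hole, so only let/const
// actually reach the barrier.
int LoadVariable(const Slot& slot, Mode mode, const std::string& name) {
  if (!slot.is_hole) return slot.value;
  if (mode == Mode::kLet) {
    // Reads before initialization throw, like Runtime::kThrowReferenceError.
    throw std::runtime_error("ReferenceError: " + name + " is not defined");
  }
  return 0;  // const before initialization reads as undefined (0 stands in)
}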
 
@@ -1312,7 +1260,7 @@
       FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
   __ movq(rbx, FieldOperand(rcx, literal_offset));
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &materialized);
+  __ j(not_equal, &materialized, Label::kNear);
 
   // Create regexp literal using runtime function
   // Result will be in rax.
@@ -1405,7 +1353,7 @@
             Handle<Code> ic = is_strict_mode()
                 ? isolate()->builtins()->StoreIC_Initialize_Strict()
                 : isolate()->builtins()->StoreIC_Initialize();
-            EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            __ call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1526,7 +1474,7 @@
   }
 
   // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* property = expr->target()->AsProperty();
@@ -1552,29 +1500,13 @@
       break;
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
-        if (property->is_arguments_access()) {
-          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-          MemOperand slot_operand =
-              EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
-          __ push(slot_operand);
-          __ Move(rax, property->key()->AsLiteral()->handle());
-        } else {
-          VisitForStackValue(property->obj());
-          VisitForAccumulatorValue(property->key());
-        }
+        VisitForStackValue(property->obj());
+        VisitForAccumulatorValue(property->key());
         __ movq(rdx, Operand(rsp, 0));
         __ push(rax);
       } else {
-        if (property->is_arguments_access()) {
-          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-          MemOperand slot_operand =
-              EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
-          __ push(slot_operand);
-          __ Push(property->key()->AsLiteral()->handle());
-        } else {
-          VisitForStackValue(property->obj());
-          VisitForStackValue(property->key());
-        }
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
       }
       break;
     }
@@ -1586,7 +1518,7 @@
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
-          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          EmitVariableLoad(expr->target()->AsVariableProxy());
           PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
@@ -1610,13 +1542,13 @@
     SetSourcePosition(expr->position() + 1);
     AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
-      EmitInlineSmiBinaryOp(expr,
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
                             op,
                             mode,
                             expr->target(),
                             expr->value());
     } else {
-      EmitBinaryOp(op, mode);
+      EmitBinaryOp(expr->binary_operation(), op, mode);
     }
     // Deoptimization point in case the binary operation may have side effects.
     PrepareForBailout(expr->binary_operation(), TOS_REG);
@@ -1650,18 +1582,18 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Expression* left,
@@ -1669,18 +1601,19 @@
   // Do combined smi check of the operands. Left operand is on the
   // stack (popped into rdx). Right operand is in rax but moved into
   // rcx to make the shifts easier.
-  NearLabel done, stub_call, smi_case;
+  Label done, stub_call, smi_case;
   __ pop(rdx);
   __ movq(rcx, rax);
   __ or_(rax, rdx);
   JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(rax, &smi_case);
+  patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
 
   __ bind(&stub_call);
   __ movq(rax, rcx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
+  BinaryOpStub stub(op, mode);
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
+  __ jmp(&done, Label::kNear);
 
   __ bind(&smi_case);
   switch (op) {
@@ -1721,11 +1654,14 @@
 }
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
   __ pop(rdx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), NULL);  // NULL signals no inlined smi code.
+  BinaryOpStub stub(op, mode);
+  JumpPatchSite patch_site(masm_);    // Unbound; signals no inlined smi code.
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  patch_site.EmitPatchInfo();
   context()->Plug(rax);
 }
 
@@ -1739,7 +1675,7 @@
   }
 
   // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* prop = expr->AsProperty();
@@ -1765,30 +1701,20 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(rax);  // Preserve value.
-      if (prop->is_synthetic()) {
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        { AccumulatorValueContext for_object(this);
-          EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
-        }
-        __ movq(rdx, rax);
-        __ Move(rcx, prop->key()->AsLiteral()->handle());
-      } else {
-        VisitForStackValue(prop->obj());
-        VisitForAccumulatorValue(prop->key());
-        __ movq(rcx, rax);
-        __ pop(rdx);
-      }
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ movq(rcx, rax);
+      __ pop(rdx);
       __ pop(rax);  // Restore value.
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic);
       break;
     }
   }
@@ -1799,94 +1725,88 @@
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
-  // Left-hand sides that rewrite to explicit property accesses do not reach
-  // here.
-  ASSERT(var != NULL);
-  ASSERT(var->is_global() || var->AsSlot() != NULL);
-
-  if (var->is_global()) {
-    ASSERT(!var->is_this());
-    // Assignment to a global variable.  Use inline caching for the
-    // assignment.  Right-hand-side value is passed in rax, variable name in
-    // rcx, and the global object on the stack.
+  if (var->IsUnallocated()) {
+    // Global var, const, or let.
     __ Move(rcx, var->name());
     __ movq(rdx, GlobalObjectOperand());
     Handle<Code> ic = is_strict_mode()
         ? isolate()->builtins()->StoreIC_Initialize_Strict()
         : isolate()->builtins()->StoreIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
+    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
   } else if (op == Token::INIT_CONST) {
-    // Like var declarations, const declarations are hoisted to function
-    // scope.  However, unlike var initializers, const initializers are able
-    // to drill a hole to that function context, even from inside a 'with'
-    // context.  We thus bypass the normal static scope lookup.
-    Slot* slot = var->AsSlot();
-    Label skip;
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-        // No const parameters.
-        UNREACHABLE();
-        break;
-      case Slot::LOCAL:
-        __ movq(rdx, Operand(rbp, SlotOffset(slot)));
-        __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
-        __ j(not_equal, &skip);
-        __ movq(Operand(rbp, SlotOffset(slot)), rax);
-        break;
-      case Slot::CONTEXT: {
-        __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
-        __ movq(rdx, ContextOperand(rcx, slot->index()));
-        __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
-        __ j(not_equal, &skip);
-        __ movq(ContextOperand(rcx, slot->index()), rax);
-        int offset = Context::SlotOffset(slot->index());
-        __ movq(rdx, rax);  // Preserve the stored value in eax.
-        __ RecordWrite(rcx, offset, rdx, rbx);
-        break;
-      }
-      case Slot::LOOKUP:
-        __ push(rax);
-        __ push(rsi);
-        __ Push(var->name());
-        __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-        break;
+    // Const initializers need a write barrier.
+    ASSERT(!var->IsParameter());  // No const parameters.
+    if (var->IsStackLocal()) {
+      Label skip;
+      __ movq(rdx, StackOperand(var));
+      __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+      __ j(not_equal, &skip);
+      __ movq(StackOperand(var), rax);
+      __ bind(&skip);
+    } else {
+      ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+      // Like var declarations, const declarations are hoisted to function
+      // scope.  However, unlike var initializers, const initializers are
+      // able to drill a hole to that function context, even from inside a
+      // 'with' context.  We thus bypass the normal static scope lookup for
+      // var->IsContextSlot().
+      __ push(rax);
+      __ push(rsi);
+      __ Push(var->name());
+      __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
-    __ bind(&skip);
+
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+    // Non-initializing assignment to let variable needs a write barrier.
+    if (var->IsLookupSlot()) {
+      __ push(rax);  // Value.
+      __ push(rsi);  // Context.
+      __ Push(var->name());
+      __ Push(Smi::FromInt(strict_mode_flag()));
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
+    } else {
+      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      Label assign;
+      MemOperand location = VarOperand(var, rcx);
+      __ movq(rdx, location);
+      __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+      __ j(not_equal, &assign, Label::kNear);
+      __ Push(var->name());
+      __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      __ bind(&assign);
+      __ movq(location, rax);
+      if (var->IsContextSlot()) {
+        __ movq(rdx, rax);
+        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+      }
+    }
 
   } else if (var->mode() != Variable::CONST) {
-    // Perform the assignment for non-const variables.  Const assignments
-    // are simply skipped.
-    Slot* slot = var->AsSlot();
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        // Perform the assignment.
-        __ movq(Operand(rbp, SlotOffset(slot)), rax);
-        break;
-
-      case Slot::CONTEXT: {
-        MemOperand target = EmitSlotSearch(slot, rcx);
-        // Perform the assignment and issue the write barrier.
-        __ movq(target, rax);
-        // The value of the assignment is in rax.  RecordWrite clobbers its
-        // register arguments.
-        __ movq(rdx, rax);
-        int offset = Context::SlotOffset(slot->index());
-        __ RecordWrite(rcx, offset, rdx, rbx);
-        break;
+    // Assignment to var or initializing assignment to let.
+    if (var->IsStackAllocated() || var->IsContextSlot()) {
+      MemOperand location = VarOperand(var, rcx);
+      if (FLAG_debug_code && op == Token::INIT_LET) {
+        // Check for an uninitialized let binding.
+        __ movq(rdx, location);
+        __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+        __ Check(equal, "Let binding re-initialization.");
       }
-
-      case Slot::LOOKUP:
-        // Call the runtime for the assignment.
-        __ push(rax);  // Value.
-        __ push(rsi);  // Context.
-        __ Push(var->name());
-        __ Push(Smi::FromInt(strict_mode_flag()));
-        __ CallRuntime(Runtime::kStoreContextSlot, 4);
-        break;
+      // Perform the assignment.
+      __ movq(location, rax);
+      if (var->IsContextSlot()) {
+        __ movq(rdx, rax);
+        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+      }
+    } else {
+      ASSERT(var->IsLookupSlot());
+      __ push(rax);  // Value.
+      __ push(rsi);  // Context.
+      __ Push(var->name());
+      __ Push(Smi::FromInt(strict_mode_flag()));
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
+  // Non-initializing assignments to consts are ignored.
 }
 
 
@@ -1917,7 +1837,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1957,7 +1877,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2006,10 +1926,9 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-  EmitCallIC(ic, mode);
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+  __ call(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2018,8 +1937,7 @@
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
+                                            Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
@@ -2040,11 +1958,10 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize));  // Key.
-  EmitCallIC(ic, mode);
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2052,7 +1969,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2063,8 +1980,7 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arg_count, flags);
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2084,10 +2000,15 @@
   }
 
   // Push the receiver of the enclosing function and do runtime call.
-  __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+  __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
 
-  // Push the strict mode flag.
-  __ Push(Smi::FromInt(strict_mode_flag()));
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ Push(Smi::FromInt(strict_mode));
 
   __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
                  ? Runtime::kResolvePossiblyDirectEvalNoLookup
@@ -2103,18 +2024,18 @@
 #endif
 
   Comment cmnt(masm_, "[ Call");
-  Expression* fun = expr->expression();
-  Variable* var = fun->AsVariableProxy()->AsVariable();
+  Expression* callee = expr->expression();
+  VariableProxy* proxy = callee->AsVariableProxy();
+  Property* property = callee->AsProperty();
 
-  if (var != NULL && var->is_possibly_eval()) {
+  if (proxy != NULL && proxy->var()->is_possibly_eval()) {
     // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
+    // resolve the function we need to call and the receiver of the call.
+    // Then we call the resolved function using the given arguments.
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
     { PreservePositionScope pos_scope(masm()->positions_recorder());
-      VisitForStackValue(fun);
+      VisitForStackValue(callee);
       __ PushRoot(Heap::kUndefinedValueRootIndex);  // Reserved receiver slot.
 
       // Push the arguments.
@@ -2123,15 +2044,14 @@
       }
 
       // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
+      // variables we attempt to load the global eval function directly in
+      // generated code. If we succeed, there is no need to perform a
       // context lookup in the runtime system.
       Label done;
-      if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
         Label slow;
-        EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
-                                          NOT_INSIDE_TYPEOF,
-                                          &slow);
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
         // Push the function and resolve eval.
         __ push(rax);
         EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
@@ -2139,13 +2059,11 @@
         __ bind(&slow);
       }
 
-      // Push copy of the function (found below the arguments) and
-      // resolve eval.
+      // Push a copy of the function (found below the arguments) and resolve
+      // eval.
       __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
       EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      if (done.is_linked()) {
-        __ bind(&done);
-      }
+      __ bind(&done);
 
       // The runtime call returns a pair of values in rax (function) and
       // rdx (receiver). Touch up the stack with the right values.
@@ -2154,111 +2072,74 @@
     }
     // Record source position for debugger.
     SetSourcePosition(expr->position());
-    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, rax);
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // Call to a global variable.
-    // Push global object as receiver for the call IC lookup.
+  } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
+    // Call to a global variable.  Push global object as receiver for the
+    // call IC lookup.
     __ push(GlobalObjectOperand());
-    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
+    EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
     { PreservePositionScope scope(masm()->positions_recorder());
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                      NOT_INSIDE_TYPEOF,
-                                      &slow,
-                                      &done);
-
-      __ bind(&slow);
+      // Generate code for loading from variables potentially shadowed by
+      // eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
     }
-    // Call the runtime to find the function to call (returned in rax)
-    // and the object holding it (returned in rdx).
+    __ bind(&slow);
+    // Call the runtime to find the function to call (returned in rax) and
+    // the object holding it (returned in rdx).
     __ push(context_register());
-    __ Push(var->name());
+    __ Push(proxy->name());
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ push(rax);  // Function.
     __ push(rdx);  // Receiver.
 
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
+    // If fast case code has been generated, emit code to push the function
+    // and receiver and have the slow path jump around this code.
     if (done.is_linked()) {
-      NearLabel call;
-      __ jmp(&call);
+      Label call;
+      __ jmp(&call, Label::kNear);
       __ bind(&done);
       // Push function.
       __ push(rax);
-      // Push global receiver.
-        __ movq(rbx, GlobalObjectOperand());
-        __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-        __ bind(&call);
+      // The receiver is implicitly the global receiver. Indicate this by
+      // passing the hole to the call function stub.
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+      __ bind(&call);
     }
 
-    EmitCallWithStub(expr);
-  } else if (fun->AsProperty() != NULL) {
-    // Call to an object property.
-    Property* prop = fun->AsProperty();
-    Literal* key = prop->key()->AsLiteral();
-    if (key != NULL && key->handle()->IsSymbol()) {
-      // Call to a named property, use call IC.
-      { PreservePositionScope scope(masm()->positions_recorder());
-        VisitForStackValue(prop->obj());
-      }
-      EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+    // The receiver is either the global receiver or an object found by
+    // LoadContextSlot. That object could be the hole if the receiver is
+    // implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+  } else if (property != NULL) {
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(property->obj());
+    }
+    if (property->key()->IsPropertyName()) {
+      EmitCallWithIC(expr,
+                     property->key()->AsLiteral()->handle(),
+                     RelocInfo::CODE_TARGET);
     } else {
-      // Call to a keyed property.
-      // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use keyed EmitCallIC.
-      if (prop->is_synthetic()) {
-        // Do not visit the object and key subexpressions (they are shared
-        // by all occurrences of the same rewritten parameter).
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
-        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
-        MemOperand operand = EmitSlotSearch(slot, rdx);
-        __ movq(rdx, operand);
-
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
-        __ Move(rax, prop->key()->AsLiteral()->handle());
-
-        // Record source code position for IC call.
-        SetSourcePosition(prop->position());
-
-        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        EmitCallIC(ic, RelocInfo::CODE_TARGET);
-        // Push result (function).
-        __ push(rax);
-        // Push Global receiver.
-        __ movq(rcx, GlobalObjectOperand());
-        __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
-        EmitCallWithStub(expr);
-      } else {
-        { PreservePositionScope scope(masm()->positions_recorder());
-          VisitForStackValue(prop->obj());
-        }
-        EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
-      }
+      EmitKeyedCallWithIC(expr, property->key());
     }
   } else {
+    // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(fun);
+      VisitForStackValue(callee);
     }
     // Load global receiver object.
     __ movq(rbx, GlobalObjectOperand());
     __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
     // Emit function call.
-    EmitCallWithStub(expr);
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
 
 #ifdef DEBUG
@@ -2362,9 +2243,9 @@
            Immediate(1 << Map::kIsUndetectable));
   __ j(not_zero, if_false);
   __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(below, if_false);
-  __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
+  __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
@@ -2385,7 +2266,7 @@
                          &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_false);
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
@@ -2447,7 +2328,7 @@
   // Look for valueOf symbol in the descriptor array, and indicate false if
   // found. The type is not checked, so if it is a transition it is a false
   // negative.
-  __ movq(rbx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(rbx, rbx);
   __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
   // rbx: descriptor array
   // rcx: length of descriptor array
@@ -2623,7 +2504,7 @@
   // parameter count in rax.
   VisitForAccumulatorValue(args->at(0));
   __ movq(rdx, rax);
-  __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+  __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
   context()->Plug(rax);
@@ -2633,15 +2514,15 @@
 void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
 
-  NearLabel exit;
+  Label exit;
   // Get the number of formal parameters.
-  __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+  __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
 
   // Check if the calling frame is an arguments adaptor frame.
   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &exit);
+  __ j(not_equal, &exit, Label::kNear);
 
   // Arguments adaptor case: Read the arguments length from the
   // adaptor frame.
@@ -2664,16 +2545,18 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);  // Map is now in rax.
+  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
+  // Map is now in rax.
   __ j(below, &null);
 
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
-  __ j(equal, &function);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+  __ j(above_equal, &function);
 
   // Check if the constructor in the map is a function.
   __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
@@ -2716,13 +2599,11 @@
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
   ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
     VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
   }
-#endif
   // Finally, we're expected to leave a value on the top of the stack.
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
   context()->Plug(rax);
@@ -2762,7 +2643,7 @@
   __ movd(xmm1, rcx);
   __ movd(xmm0, rax);
   __ cvtss2sd(xmm1, xmm1);
-  __ xorpd(xmm0, xmm1);
+  __ xorps(xmm0, xmm1);
   __ subsd(xmm0, xmm1);
   __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
 
@@ -3047,17 +2928,17 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in rdi. Move it in there.
-  if (!result_register().is(rdi)) __ movq(rdi, result_register());
+  // InvokeFunction requires the function in rdi. Move it in there.
+  __ movq(rdi, result_register());
   ParameterCount count(arg_count);
-  __ InvokeFunction(rdi, count, CALL_FUNCTION);
+  __ InvokeFunction(rdi, count, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   context()->Plug(rax);
 }
@@ -3178,9 +3059,9 @@
   __ movq(cache,
           FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
 
-  NearLabel done, not_found;
+  Label done, not_found;
   // tmp now holds finger offset as a smi.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
   SmiIndex index =
       __ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
@@ -3188,12 +3069,12 @@
                             index.reg,
                             index.scale,
                             FixedArray::kHeaderSize));
-  __ j(not_equal, &not_found);
+  __ j(not_equal, &not_found, Label::kNear);
   __ movq(rax, FieldOperand(cache,
                             index.reg,
                             index.scale,
                             FixedArray::kHeaderSize + kPointerSize));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&not_found);
   // Call runtime to perform the lookup.
@@ -3217,25 +3098,25 @@
   VisitForAccumulatorValue(args->at(1));
   __ pop(left);
 
-  NearLabel done, fail, ok;
+  Label done, fail, ok;
   __ cmpq(left, right);
-  __ j(equal, &ok);
+  __ j(equal, &ok, Label::kNear);
   // Fail if either is a non-HeapObject.
   Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
-  __ j(either_smi, &fail);
-  __ j(zero, &fail);
+  __ j(either_smi, &fail, Label::kNear);
+  __ j(zero, &fail, Label::kNear);
   __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
   __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
           Immediate(JS_REGEXP_TYPE));
-  __ j(not_equal, &fail);
+  __ j(not_equal, &fail, Label::kNear);
   __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
-  __ j(not_equal, &fail);
+  __ j(not_equal, &fail, Label::kNear);
   __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
   __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
-  __ j(equal, &ok);
+  __ j(equal, &ok, Label::kNear);
   __ bind(&fail);
   __ Move(rax, isolate()->factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&ok);
   __ Move(rax, isolate()->factory()->true_value());
   __ bind(&done);
@@ -3320,9 +3201,7 @@
   __ j(not_equal, &bailout);
 
   // Check that the array has fast elements.
-  __ testb(FieldOperand(scratch, Map::kBitField2Offset),
-           Immediate(1 << Map::kHasFastElements));
-  __ j(zero, &bailout);
+  __ CheckFastElements(scratch, &bailout);
 
   // Array has fast elements, so its length must be a smi.
   // If the array has length zero, return the empty string.
@@ -3594,10 +3473,10 @@
   if (expr->is_jsruntime()) {
     // Call the JS runtime function using a call IC.
     __ Move(rcx, expr->name());
-    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
-        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
+    __ call(ic, mode, expr->id());
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3611,36 +3490,31 @@
   switch (expr->op()) {
     case Token::DELETE: {
       Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
-      Property* prop = expr->expression()->AsProperty();
-      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
 
-      if (prop != NULL) {
-        if (prop->is_synthetic()) {
-          // Result of deleting parameters is false, even when they rewrite
-          // to accesses on the arguments object.
-          context()->Plug(false);
-        } else {
-          VisitForStackValue(prop->obj());
-          VisitForStackValue(prop->key());
-          __ Push(Smi::FromInt(strict_mode_flag()));
-          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-          context()->Plug(rax);
-        }
-      } else if (var != NULL) {
+      if (property != NULL) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ Push(Smi::FromInt(strict_mode_flag()));
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(rax);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
-        // but "delete this" is.
+        // but "delete this" is allowed.
         ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
-        if (var->is_global()) {
+        if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ Push(var->name());
           __ Push(Smi::FromInt(kNonStrictMode));
           __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
           context()->Plug(rax);
-        } else if (var->AsSlot() != NULL &&
-                   var->AsSlot()->type() != Slot::LOOKUP) {
-          // Result of deleting non-global, non-dynamic variables is false.
-          // The subexpression does not have side effects.
-          context()->Plug(false);
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+          // Result of deleting non-global variables is false.  'this' is
+          // not really a variable, though we implement it as one.  The
+          // subexpression does not have side effects.
+          context()->Plug(var->is_this());
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
@@ -3700,8 +3574,7 @@
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
       VisitForAccumulatorValue(expr->expression());
       Label no_conversion;
-      Condition is_smi = masm_->CheckSmi(result_register());
-      __ j(is_smi, &no_conversion);
+      __ JumpIfSmi(result_register(), &no_conversion);
       ToNumberStub convert_stub;
       __ CallStub(&convert_stub);
       __ bind(&no_conversion);
@@ -3709,46 +3582,13 @@
       break;
     }
 
-    case Token::SUB: {
-      Comment cmt(masm_, "[ UnaryOperation (SUB)");
-      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
-      // GenericUnaryOpStub expects the argument to be in the
-      // accumulator register rax.
-      VisitForAccumulatorValue(expr->expression());
-      __ CallStub(&stub);
-      context()->Plug(rax);
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
       break;
-    }
 
-    case Token::BIT_NOT: {
-      Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
-      // The generic unary operation stub expects the argument to be
-      // in the accumulator register rax.
-      VisitForAccumulatorValue(expr->expression());
-      Label done;
-      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
-      if (inline_smi_case) {
-        Label call_stub;
-        __ JumpIfNotSmi(rax, &call_stub);
-        __ SmiNot(rax, rax);
-        __ jmp(&done);
-        __ bind(&call_stub);
-      }
-      bool overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode mode =
-          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      UnaryOpFlags flags = inline_smi_case
-          ? NO_UNARY_SMI_CODE_IN_STUB
-          : NO_UNARY_FLAGS;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
-      __ CallStub(&stub);
-      __ bind(&done);
-      context()->Plug(rax);
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
       break;
-    }
 
     default:
       UNREACHABLE();
@@ -3756,6 +3596,23 @@
 }
 
 
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in the
+  // accumulator register rax.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  context()->Plug(rax);
+}
+
+
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -3768,7 +3625,7 @@
   }
 
   // Expression can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  // slot.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
   LhsKind assign_type = VARIABLE;
   Property* prop = expr->expression()->AsProperty();
@@ -3783,7 +3640,7 @@
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
-    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3794,16 +3651,8 @@
       __ push(rax);  // Copy of receiver, needed for later store.
       EmitNamedPropertyLoad(prop);
     } else {
-      if (prop->is_arguments_access()) {
-        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
-        MemOperand slot_operand =
-            EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
-        __ push(slot_operand);
-        __ Move(rax, prop->key()->AsLiteral()->handle());
-      } else {
-        VisitForStackValue(prop->obj());
-        VisitForAccumulatorValue(prop->key());
-      }
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ movq(rdx, Operand(rsp, 0));  // Leave receiver on stack
       __ push(rax);  // Copy of key, needed for later store.
       EmitKeyedPropertyLoad(prop);
@@ -3819,10 +3668,8 @@
   }
 
   // Call ToNumber only if operand is not a smi.
-  NearLabel no_conversion;
-  Condition is_smi;
-  is_smi = masm_->CheckSmi(rax);
-  __ j(is_smi, &no_conversion);
+  Label no_conversion;
+  __ JumpIfSmi(rax, &no_conversion, Label::kNear);
   ToNumberStub convert_stub;
   __ CallStub(&convert_stub);
   __ bind(&no_conversion);
@@ -3848,7 +3695,7 @@
   }
 
   // Inline smi case if we are in a loop.
-  NearLabel stub_call, done;
+  Label done, stub_call;
   JumpPatchSite patch_site(masm_);
 
   if (ShouldInlineSmiCase(expr->op())) {
@@ -3857,10 +3704,10 @@
     } else {
       __ SmiSubConstant(rax, rax, Smi::FromInt(1));
     }
-    __ j(overflow, &stub_call);
+    __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    patch_site.EmitJumpIfSmi(rax, &done);
+    patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
 
     __ bind(&stub_call);
     // Call stub. Undo operation first.
@@ -3875,14 +3722,15 @@
   SetSourcePosition(expr->position());
 
   // Call stub for +1/-1.
-  TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+  BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
   if (expr->op() == Token::INC) {
     __ Move(rdx, Smi::FromInt(1));
   } else {
     __ movq(rdx, rax);
     __ Move(rax, Smi::FromInt(1));
   }
-  EmitCallIC(stub.GetCode(), &patch_site);
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  patch_site.EmitPatchInfo();
   __ bind(&done);
 
   // Store the value returned in rax.
@@ -3915,7 +3763,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3932,7 +3780,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3952,25 +3800,22 @@
   ASSERT(!context()->IsEffect());
   ASSERT(!context()->IsTest());
 
-  if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
     __ Move(rcx, proxy->name());
     __ movq(rax, GlobalObjectOperand());
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    __ call(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(rax);
-  } else if (proxy != NULL &&
-             proxy->var()->AsSlot() != NULL &&
-             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     Label done, slow;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    Slot* slot = proxy->var()->AsSlot();
-    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ push(rsi);
@@ -3982,30 +3827,18 @@
     context()->Plug(rax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    context()->HandleExpression(expr);
+    VisitInCurrentContext(expr);
   }
 }
 
 
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
-                                          Expression* left,
-                                          Expression* right,
-                                          Label* if_true,
-                                          Label* if_false,
-                                          Label* fall_through) {
-  if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
-  // Check for the pattern: typeof <expression> == <string literal>.
-  Literal* right_literal = right->AsLiteral();
-  if (right_literal == NULL) return false;
-  Handle<Object> right_literal_value = right_literal->handle();
-  if (!right_literal_value->IsString()) return false;
-  UnaryOperation* left_unary = left->AsUnaryOperation();
-  if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
-  Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(left_unary->expression());
+    VisitForTypeofValue(expr);
   }
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
@@ -4027,6 +3860,10 @@
     __ j(equal, if_true);
     __ CompareRoot(rax, Heap::kFalseValueRootIndex);
     Split(equal, if_true, if_false, fall_through);
+  } else if (FLAG_harmony_typeof &&
+             check->Equals(isolate()->heap()->null_symbol())) {
+    __ CompareRoot(rax, Heap::kNullValueRootIndex);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
     __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
     __ j(equal, if_true);
@@ -4038,16 +3875,19 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(rax, if_false);
-    __ CmpObjectType(rax, FIRST_FUNCTION_CLASS_TYPE, rdx);
+    STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
     Split(above_equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(rax, if_false);
-    __ CompareRoot(rax, Heap::kNullValueRootIndex);
-    __ j(equal, if_true);
-    __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdx);
+    if (!FLAG_harmony_typeof) {
+      __ CompareRoot(rax, Heap::kNullValueRootIndex);
+      __ j(equal, if_true);
+    }
+    __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
     __ j(below, if_false);
-    __ CmpInstanceType(rdx, FIRST_FUNCTION_CLASS_TYPE);
-    __ j(above_equal, if_false);
+    __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ j(above, if_false);
     // Check for undetectable objects => false.
     __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
@@ -4055,8 +3895,18 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
+}
 
-  return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  Split(equal, if_true, if_false, fall_through);
 }
 
 
@@ -4075,14 +3925,12 @@
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
-  Token::Value op = expr->op();
-  Expression* left = expr->left();
-  Expression* right = expr->right();
-  if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
     context()->Plug(if_true, if_false);
     return;
   }
 
+  Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
@@ -4107,11 +3955,8 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cc = no_condition;
-      bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
-          strict = true;
-          // Fall through.
         case Token::EQ:
           cc = equal;
           __ pop(rdx);
@@ -4145,10 +3990,10 @@
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
-        NearLabel slow_case;
+        Label slow_case;
         __ movq(rcx, rdx);
         __ or_(rcx, rax);
-        patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+        patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
         __ cmpq(rdx, rax);
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
@@ -4157,7 +4002,8 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      EmitCallIC(ic, &patch_site);
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      patch_site.EmitPatchInfo();
 
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ testq(rax, rax);
@@ -4189,8 +4035,7 @@
     __ j(equal, if_true);
     __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
     __ j(equal, if_true);
-    Condition is_smi = masm_->CheckSmi(rax);
-    __ j(is_smi, if_false);
+    __ JumpIfSmi(rax, if_false);
     // It can be an undetectable object.
     __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
     __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
@@ -4217,79 +4062,6 @@
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
-  ASSERT(mode == RelocInfo::CODE_TARGET ||
-         mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Counters* counters = isolate()->counters();
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-      __ IncrementCounter(counters->named_load_full(), 1);
-      break;
-    case Code::KEYED_LOAD_IC:
-      __ IncrementCounter(counters->keyed_load_full(), 1);
-      break;
-    case Code::STORE_IC:
-      __ IncrementCounter(counters->named_store_full(), 1);
-      break;
-    case Code::KEYED_STORE_IC:
-      __ IncrementCounter(counters->keyed_store_full(), 1);
-    default:
-      break;
-  }
-
-  __ call(ic, mode);
-
-  // Crankshaft doesn't need patching of inlined loads and stores.
-  // When compiling the snapshot we need to produce code that works
-  // with and without Crankshaft.
-  if (V8::UseCrankshaft() && !Serializer::enabled()) {
-    return;
-  }
-
-  // If we're calling a (keyed) load or store stub, we have to mark
-  // the call as containing no inlined code so we will not attempt to
-  // patch it.
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      __ nop();  // Signals no inlined code.
-      break;
-    default:
-      // Do nothing.
-      break;
-  }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
-  Counters* counters = isolate()->counters();
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-      __ IncrementCounter(counters->named_load_full(), 1);
-      break;
-    case Code::KEYED_LOAD_IC:
-      __ IncrementCounter(counters->keyed_load_full(), 1);
-      break;
-    case Code::STORE_IC:
-      __ IncrementCounter(counters->named_store_full(), 1);
-      break;
-    case Code::KEYED_STORE_IC:
-      __ IncrementCounter(counters->keyed_store_full(), 1);
-    default:
-      break;
-  }
-
-  __ call(ic, RelocInfo::CODE_TARGET);
-  if (patch_site != NULL && patch_site->is_bound()) {
-    patch_site->EmitPatchInfo();
-  } else {
-    __ nop();  // Signals no inlined code.
-  }
-}
-
-
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT(IsAligned(frame_offset, kPointerSize));
   __ movq(Operand(rbp, frame_offset), value);
@@ -4301,6 +4073,26 @@
 }
 
 
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* declaration_scope = scope()->DeclarationScope();
+  if (declaration_scope->is_global_scope()) {
+    // Contexts nested in the global context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code.  Pass a smi sentinel and let the runtime look up the empty
+    // function.
+    __ Push(Smi::FromInt(0));
+  } else if (declaration_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code.  Fetch it from the context.
+    __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+  } else {
+    ASSERT(declaration_scope->is_function_scope());
+    __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+}
+
+
 // ----------------------------------------------------------------------------
 // Non-local control flow support.
 
@@ -4309,11 +4101,11 @@
   ASSERT(!result_register().is(rdx));
   ASSERT(!result_register().is(rcx));
   // Cook return address on top of stack (smi encoded Code* delta)
-  __ movq(rdx, Operand(rsp, 0));
+  __ pop(rdx);
   __ Move(rcx, masm_->CodeObject());
   __ subq(rdx, rcx);
   __ Integer32ToSmi(rdx, rdx);
-  __ movq(Operand(rsp, 0), rdx);
+  __ push(rdx);
   // Store result register while executing finally block.
   __ push(result_register());
 }
@@ -4322,21 +4114,45 @@
 void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(rdx));
   ASSERT(!result_register().is(rcx));
-  // Restore result register from stack.
   __ pop(result_register());
   // Uncook return address.
-  __ movq(rdx, Operand(rsp, 0));
+  __ pop(rdx);
   __ SmiToInteger32(rdx, rdx);
   __ Move(rcx, masm_->CodeObject());
   __ addq(rdx, rcx);
-  __ movq(Operand(rsp, 0), rdx);
-  // And return.
-  __ ret(0);
+  __ jmp(rdx);
 }
 
 
 #undef __
 
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+    int* stack_depth,
+    int* context_length) {
+  // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally
+  // code, we can restore it directly from there for the finally code
+  // rather than iteratively unwinding contexts via their previous
+  // links.
+  __ Drop(*stack_depth);  // Down to the handler block.
+  if (*context_length > 0) {
+    // Restore the context to its dedicated register and the stack.
+    __ movq(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
+    __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+  }
+  __ PopTryHandler();
+  __ call(finally_entry_);
+
+  *stack_depth = 0;
+  *context_length = 0;
+  return previous_;
+}
+
+
+#undef __
 
 } }  // namespace v8::internal
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 5ca56ac..9d55594 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -76,11 +76,11 @@
   // Check that the receiver is a valid JS object.
   __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
   __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
-  __ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
   __ j(below, miss);
 
   // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
 
   GenerateGlobalInstanceTypeCheck(masm, r0, miss);
 
@@ -97,58 +97,6 @@
 }
 
 
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
-                                           Label* miss,
-                                           Label* done,
-                                           Register elements,
-                                           Register name,
-                                           Register r0,
-                                           Register r1) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
-  __ decl(r0);
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-  for (int i = 0; i < kProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
-    __ shrl(r1, Immediate(String::kHashShift));
-    if (i > 0) {
-      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r1, r0);
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
-
-    // Check if the key is identical to the name.
-    __ cmpq(name, Operand(elements, r1, times_pointer_size,
-                          kElementsStartOffset - kHeapObjectTag));
-    if (i != kProbes - 1) {
-      __ j(equal, done);
-    } else {
-      __ j(not_equal, miss);
-    }
-  }
-}
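
The removed GenerateStringDictionaryProbes is replaced below by calls to StringDictionaryLookupStub::GeneratePositiveLookup, which emits the same unrolled probe loop. A C++ sketch of that loop, following the comments above; the flat key/value/details entry layout is collapsed into a plain key vector here, which is why the "r1 = r1 * 3" entry-size scaling does not appear:

    #include <cstdint>
    #include <string>
    #include <vector>

    constexpr int kProbes = 4;  // "2 probes cover ~93% of loads from dictionaries"

    // Returns the matching entry index, or -1 for the miss case, after at most
    // kProbes attempts. The capacity is a power of two, so "& mask" wraps the
    // probe sequence around the table.
    int ProbeStringDictionary(const std::vector<std::string>& keys,
                              const std::string& name,
                              uint32_t hash) {
      const uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;
      for (int i = 0; i < kProbes; ++i) {
        // Masked index: (hash + i + i * i) & mask.
        const uint32_t index = (hash + i + i * i) & mask;
        if (keys[index] == name) return static_cast<int>(index);
      }
      return -1;
    }
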
-
 
 // Helper function used to load a property from a dictionary backing storage.
 // This function may return false negatives, so miss_label
@@ -179,13 +127,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 r0,
-                                 r1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     r0,
+                                                     r1);
 
   // If probing finds an entry in the dictionary, r0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -197,7 +145,7 @@
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ Test(Operand(elements, r1, times_pointer_size,
                   kDetailsOffset - kHeapObjectTag),
-          Smi::FromInt(PropertyDetails::TypeField::mask()));
+          Smi::FromInt(PropertyDetails::TypeField::kMask));
   __ j(not_zero, miss_label);
 
   // Get the value at the masked, scaled index.
@@ -237,13 +185,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 scratch0,
-                                 scratch1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch0,
+                                                     scratch1);
 
   // If probing finds an entry in the dictionary, scratch0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -253,9 +201,9 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask
-      = (PropertyDetails::TypeField::mask() |
-         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
   __ Test(Operand(elements,
                   scratch1,
                   times_pointer_size,
@@ -277,115 +225,6 @@
 }
 
 
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
-                                         Label* miss,
-                                         Register elements,
-                                         Register key,
-                                         Register r0,
-                                         Register r1,
-                                         Register r2,
-                                         Register result) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // Scratch registers:
-  //
-  // r0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // r1 - used to hold the capacity mask of the dictionary
-  //
-  // r2 - used for the index into the dictionary.
-  //
-  // result - holds the result on exit if the load succeeded.
-  //          Allowed to be the same as 'key' or 'result'.
-  //          Unchanged on bailout so 'key' or 'result' can be used
-  //          in further computation.
-
-  Label done;
-
-  // Compute the hash code from the untagged key.  This must be kept in sync
-  // with ComputeIntegerHash in utils.h.
-  //
-  // hash = ~hash + (hash << 15);
-  __ movl(r1, r0);
-  __ notl(r0);
-  __ shll(r1, Immediate(15));
-  __ addl(r0, r1);
-  // hash = hash ^ (hash >> 12);
-  __ movl(r1, r0);
-  __ shrl(r1, Immediate(12));
-  __ xorl(r0, r1);
-  // hash = hash + (hash << 2);
-  __ leal(r0, Operand(r0, r0, times_4, 0));
-  // hash = hash ^ (hash >> 4);
-  __ movl(r1, r0);
-  __ shrl(r1, Immediate(4));
-  __ xorl(r0, r1);
-  // hash = hash * 2057;
-  __ imull(r0, r0, Immediate(2057));
-  // hash = hash ^ (hash >> 16);
-  __ movl(r1, r0);
-  __ shrl(r1, Immediate(16));
-  __ xorl(r0, r1);
-
-  // Compute capacity mask.
-  __ SmiToInteger32(r1,
-                    FieldOperand(elements, NumberDictionary::kCapacityOffset));
-  __ decl(r1);
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
-    // Use r2 for index calculations and keep the hash intact in r0.
-    __ movq(r2, r0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      __ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r2, r1);
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(NumberDictionary::kEntrySize == 3);
-    __ lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
-
-    // Check if the key matches.
-    __ cmpq(key, FieldOperand(elements,
-                              r2,
-                              times_pointer_size,
-                              NumberDictionary::kElementsStartOffset));
-    if (i != (kProbes - 1)) {
-      __ j(equal, &done);
-    } else {
-      __ j(not_equal, miss);
-    }
-  }
-
-  __ bind(&done);
-  // Check that the value is a normal property.
-  const int kDetailsOffset =
-      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  ASSERT_EQ(NORMAL, 0);
-  __ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
-          Smi::FromInt(PropertyDetails::TypeField::mask()));
-  __ j(not_zero, miss);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset =
-      NumberDictionary::kElementsStartOffset + kPointerSize;
-  __ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
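
The removed GenerateNumberDictionaryLoad open-coded this lookup in assembly; it now lives behind MacroAssembler::LoadFromNumberDictionary (see the call sites below). The hash scrambling it performs is spelled out in the comments above and, per those comments, must stay in sync with ComputeIntegerHash in utils.h. Transcribed into C++:

    #include <cstdint>

    // Integer hash used to index the NumberDictionary; each step mirrors one
    // comment in the removed assembly. All arithmetic is unsigned 32-bit.
    uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);  // i.e. (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // i.e. hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }

The dictionary then masks the hash with (capacity - 1) and probes the table in the same way as the string-dictionary sketch earlier.
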
-
-
-// The offset from the inlined patch site to the start of the inlined
-// load instruction.
-const int LoadIC::kOffsetToLoadInstruction = 20;
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
@@ -539,7 +378,7 @@
   __ j(zero, index_string);  // The value in hash is used at jump target.
 
   // Is the string a symbol?
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(kSymbolTag != 0);
   __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
            Immediate(kIsSymbolMask));
   __ j(zero, not_symbol);
@@ -565,11 +404,8 @@
   GenerateKeyedLoadReceiverCheck(
       masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
 
-  // Check the "has fast elements" bit in the receiver's map which is
-  // now in rcx.
-  __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-           Immediate(1 << Map::kHasFastElements));
-  __ j(zero, &check_number_dictionary);
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(rcx, &check_number_dictionary);
 
   GenerateFastArrayLoad(masm,
                         rdx,
@@ -595,7 +431,7 @@
   __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                  Heap::kHashTableMapRootIndex);
   __ j(not_equal, &slow);
-  GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax);
+  __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
   __ ret(0);
 
   __ bind(&slow);
@@ -715,7 +551,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -758,7 +594,7 @@
       1);
 
   __ bind(&slow);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -787,9 +623,13 @@
 
   __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
   __ j(equal, &array);
-  // Check that the object is some kind of JS object.
-  __ CmpInstanceType(rbx, FIRST_JS_OBJECT_TYPE);
+  // Check that the object is some kind of JSObject.
+  __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
   __ j(below, &slow);
+  __ CmpInstanceType(rbx, JS_PROXY_TYPE);
+  __ j(equal, &slow);
+  __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
+  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // rax: value
@@ -852,10 +692,10 @@
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
-  NearLabel non_smi_value;
+  Label non_smi_value;
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
-  __ JumpIfNotSmi(rax, &non_smi_value);
+  __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
   __ ret(0);
   __ bind(&non_smi_value);
   // Slow case that needs to retain rcx for use by RecordWrite.
@@ -870,7 +710,8 @@
 // The generated code falls through if both probes miss.
 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                           int argc,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rdx                      : receiver
@@ -879,9 +720,8 @@
 
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
-                                         NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -948,7 +788,8 @@
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -980,7 +821,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -1037,12 +881,21 @@
   }
 
   // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   ParameterCount actual(argc);
-  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(rdi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
 }
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -1055,8 +908,8 @@
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
-  GenerateMiss(masm, argc);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
@@ -1072,11 +925,13 @@
   // -----------------------------------
 
   GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -1087,7 +942,7 @@
   // rsp[(argc + 1) * 8]      : argument 0 = receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
 }
 
 
@@ -1139,7 +994,7 @@
   __ j(not_equal, &slow_load);
   __ SmiToInteger32(rbx, rcx);
   // ebx: untagged index
-  GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
+  __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
   __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
   __ jmp(&do_call);
 
@@ -1178,7 +1033,10 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
-  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -1231,7 +1089,172 @@
   // rsp[(argc + 1) * 8]      : argument 0 = receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+                                             Register object,
+                                             Register key,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Register scratch3,
+                                             Label* unmapped_case,
+                                             Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the elements
+  // map check later, we do not need to check for interceptors or
+  // whether it requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+  __ j(below, slow_case);
+
+  // Check that the key is a positive smi.
+  Condition check = masm->CheckNonNegativeSmi(key);
+  __ j(NegateCondition(check), slow_case);
+
+  // Load the elements into scratch1 and check its map. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+  __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments.
+  __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+  __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
+  __ cmpq(key, scratch2);
+  __ j(greater_equal, unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+  __ SmiToInteger64(scratch3, key);
+  __ movq(scratch2, FieldOperand(scratch1,
+                                 scratch3,
+                                 times_pointer_size,
+                                 kHeaderSize));
+  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ j(equal, unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+  __ SmiToInteger64(scratch3, scratch2);
+  return FieldOperand(scratch1,
+                      scratch3,
+                      times_pointer_size,
+                      Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                               Register key,
+                                               Register parameter_map,
+                                               Register scratch,
+                                               Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+  __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmpq(key, scratch);
+  __ j(greater_equal, slow_case);
+  __ SmiToInteger64(scratch, key);
+  return FieldOperand(backing_store,
+                      scratch,
+                      times_pointer_size,
+                      FixedArray::kHeaderSize);
+}
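
GenerateMappedArgumentsLookup and GenerateUnmappedArgumentsLookup above encode the aliased ("non-strict") arguments layout: the parameter map's first two slots hold the context and the arguments backing store, and slot key + 2 holds either the hole or the context index that argument is aliased to. A plain-C++ model of the lookup shared by the three callers below; the types and the Lookup helper are illustrative stand-ins, not V8 structures:

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Slot 0 of the parameter map -> context, slot 1 -> backing store,
    // slots 2.. -> per-argument context index or "the hole" (nullopt here).
    struct SloppyArguments {
      std::vector<int> context;
      std::vector<std::optional<int>> backing_store;  // hole-able elements
      std::vector<std::optional<int>> mapped;         // parameter map slots 2..
    };

    // Mirrors the generated code: a mapped hit reads the context slot, a miss
    // falls through to the backing store (&notin), and anything out of range or
    // a hole in the backing store goes to the slow case.
    std::optional<int> Lookup(const SloppyArguments& args, size_t key) {
      if (key < args.mapped.size() && args.mapped[key].has_value()) {
        return args.context[static_cast<size_t>(*args.mapped[key])];
      }
      if (key < args.backing_store.size()) {
        return args.backing_store[key];  // nullopt == the-hole -> slow case
      }
      return std::nullopt;               // out of range -> slow case
    }
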
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  Label slow, notin;
+  Operand mapped_location =
+      GenerateMappedArgumentsLookup(
+          masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
+  __ movq(rax, mapped_location);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in rbx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
+  __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &slow);
+  __ movq(rax, unmapped_location);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  Label slow, notin;
+  Operand mapped_location = GenerateMappedArgumentsLookup(
+      masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
+  __ movq(mapped_location, rax);
+  __ lea(r9, mapped_location);
+  __ movq(r8, rax);
+  __ RecordWrite(rbx, r9, r8);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in rbx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
+  __ movq(unmapped_location, rax);
+  __ lea(r9, unmapped_location);
+  __ movq(r8, rax);
+  __ RecordWrite(rbx, r9, r8);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+                                             int argc) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+  Label slow, notin;
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+  Operand mapped_location = GenerateMappedArgumentsLookup(
+      masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
+  __ movq(rdi, mapped_location);
+  GenerateFunctionTailCall(masm, argc, &slow);
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in rbx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
+  __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &slow);
+  __ movq(rdi, unmapped_location);
+  GenerateFunctionTailCall(masm, argc, &slow);
+  __ bind(&slow);
+  GenerateMiss(masm, argc);
 }
 
 
@@ -1243,9 +1266,7 @@
   // -----------------------------------
 
   // Probe the stub cache.
-  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
-                                         NOT_IN_LOOP,
-                                         MONOMORPHIC);
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
                                                   rdx);
 
@@ -1297,131 +1318,7 @@
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 8 bytes of the 10-byte
-  // immediate move instruction, so we add 2 to get the
-  // offset to the last 8 bytes.
-  Address map_address = test_instruction_address + delta + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The offset is in the 32-bit displacement of a seven byte
-  // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
-  // so we add 3 to get the offset of the displacement.
-  Address offset_address =
-      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-  return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  // TODO(<bug#>): implement this.
-  return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Extract the encoded deltas from the test rax instruction.
-  Address encoded_offsets_address = test_instruction_address + 1;
-  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
-  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-  int delta_to_record_write = encoded_offsets >> 16;
-
-  // Patch the map to check. The map address is the last 8 bytes of
-  // the 10-byte immediate move instruction.
-  Address map_check_address = test_instruction_address + delta_to_map_check;
-  Address map_address = map_check_address + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // Patch the offset in the store instruction. The offset is in the
-  // last 4 bytes of a 7 byte register-to-memory move instruction.
-  Address offset_address =
-      map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
-  // The offset should have initial value (kMaxInt - 1), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  // Patch the offset in the write-barrier code. The offset is the
-  // last 4 bytes of a 7 byte lea instruction.
-  offset_address = map_check_address + delta_to_record_write + 3;
-  // The offset should have initial value (kMaxInt), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Arguments are address of start of call sequence that called
-  // the IC,
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // The keyed load has a fast inlined case if the IC call instruction
-  // is immediately followed by a test instruction.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Fetch the offset from the test instruction to the map compare
-  // instructions (starting with the 64-bit immediate mov of the map
-  // address). This offset is stored in the last 4 bytes of the 5
-  // byte test instruction.
-  Address delta_address = test_instruction_address + 1;
-  int delta = *reinterpret_cast<int*>(delta_address);
-  // Compute the map address.  The map address is in the last 8 bytes
-  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
-  // to the offset to get the map address.
-  Address map_address = test_instruction_address + delta + 2;
-  // Patch the map check.
-  *(reinterpret_cast<Object**>(map_address)) = map;
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -1437,8 +1334,10 @@
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  ExternalReference ref
-      = ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1470,10 +1369,8 @@
   // -----------------------------------
 
   // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
-                                         NOT_IN_LOOP,
-                                         MONOMORPHIC,
-                                         strict_mode);
+  Code::Flags flags =
+      Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
                                                   no_reg);
 
@@ -1503,11 +1400,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.
-const int StoreIC::kOffsetToStoreInstruction = 20;
-
-
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
@@ -1627,7 +1519,7 @@
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rcx     : key
@@ -1642,8 +1534,30 @@
   __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ----------- S t a t e -------------
+  //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+
+  __ pop(rbx);
+  __ push(rdx);  // receiver
+  __ push(rcx);  // key
+  __ push(rax);  // value
+  __ push(rbx);  // return address
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref = force_generic
+    ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                        masm->isolate())
+    : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 822295e..b82dc54 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -43,35 +43,24 @@
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
-                     int deoptimization_index)
+                     Safepoint::DeoptMode mode)
       : codegen_(codegen),
         pointers_(pointers),
-        deoptimization_index_(deoptimization_index) { }
+        deopt_mode_(mode) { }
   virtual ~SafepointGenerator() { }
 
-  virtual void BeforeCall(int call_size) {
-    ASSERT(call_size >= 0);
-    // Ensure that we have enough space after the previous safepoint position
-    // for the jump generated there.
-    int call_end = codegen_->masm()->pc_offset() + call_size;
-    int prev_jump_end = codegen_->LastSafepointEnd() + kMinSafepointSize;
-    if (call_end < prev_jump_end) {
-      int padding_size = prev_jump_end - call_end;
-      STATIC_ASSERT(kMinSafepointSize <= 9);  // One multibyte nop is enough.
-      codegen_->masm()->nop(padding_size);
-    }
+  virtual void BeforeCall(int call_size) const {
+    codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
   }
 
-  virtual void AfterCall() {
-    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
  private:
-  static const int kMinSafepointSize =
-      MacroAssembler::kShortCallInstructionLength;
   LCodeGen* codegen_;
   LPointerMap* pointers_;
-  int deoptimization_index_;
+  Safepoint::DeoptMode deopt_mode_;
 };
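
The new BeforeCall hook above defers the padding decision to LCodeGen::EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size) instead of measuring against the previous safepoint by hand. A rough, self-contained sketch of that padding policy, assuming the code generator tracks the pc of the last lazy-deopt site; the names here are illustrative, not LCodeGen's actual members:

    // Number of nop bytes needed so that patching a full call over the previous
    // lazy-deopt site cannot overlap the call about to be emitted.
    int LazyDeoptPaddingBytes(int last_lazy_deopt_pc, int current_pc, int space_needed) {
      const int gap = current_pc - last_lazy_deopt_pc;
      return gap < space_needed ? space_needed - gap : 0;
    }
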
 
 
@@ -91,16 +80,16 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
-  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
 }
 
 
 void LCodeGen::Abort(const char* format, ...) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
     PrintF("Aborting LCodeGen in @\"%s\": ", *name);
     va_list arguments;
     va_start(arguments, format);
@@ -140,13 +129,28 @@
   }
 #endif
 
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). rcx is zero for method calls and non-zero for function
+  // calls.
+  if (info_->is_strict_mode() || info_->is_native()) {
+    Label ok;
+    __ testq(rcx, rcx);
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+    __ bind(&ok);
+  }
+
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
   __ push(rdi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ Set(rax, slots);
@@ -182,9 +186,9 @@
       FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
-      __ CallRuntime(Runtime::kNewContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
-    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in both rax and rsi.  It replaces the context
     // passed to us.  It's saved in the stack and kept live in rsi.
     __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
@@ -192,14 +196,14 @@
     // Copy any necessary parameters into the context.
     int num_parameters = scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->AsSlot();
-      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
         // Load parameter from stack.
         __ movq(rax, Operand(rbp, parameter_offset));
         // Store it in the context.
-        int context_offset = Context::SlotOffset(slot->index());
+        int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
         // Update the write barrier. This clobbers all involved
         // registers, so we have to use a third register to avoid
@@ -236,19 +240,11 @@
       instr->CompileToNative(this);
     }
   }
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   return !is_aborted();
 }
 
 
-LInstruction* LCodeGen::GetNextInstruction() {
-  if (current_instruction_ < instructions_->length() - 1) {
-    return instructions_->at(current_instruction_ + 1);
-  } else {
-    return NULL;
-  }
-}
-
-
 bool LCodeGen::GenerateJumpTable() {
   for (int i = 0; i < jump_table_.length(); i++) {
     __ bind(&jump_table_[i].label);
@@ -260,11 +256,13 @@
 
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
-  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
-    LDeferredCode* code = deferred_[i];
-    __ bind(code->entry());
-    code->Generate();
-    __ jmp(code->exit());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+      __ bind(code->entry());
+      code->Generate();
+      __ jmp(code->exit());
+    }
   }
 
   // Deferred code is the last part of the instruction sequence. Mark
@@ -276,21 +274,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  // Ensure that there is space at the end of the code to write a number
-  // of jump instructions, as well as to afford writing a call near the end
-  // of the code.
-  // The jumps are used when there isn't room in the code stream to write
-  // a long call instruction. Instead it writes a shorter call to a
-  // jump instruction in the same code object.
-  // The calls are used when lazy deoptimizing a function and calls to a
-  // deoptimization function.
-  int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
-      static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
-  int byte_count = (short_deopts) * MacroAssembler::kJumpInstructionLength;
-  while (byte_count-- > 0) {
-    __ int3();
-  }
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -418,7 +402,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -445,15 +429,16 @@
                                LInstruction* instr,
                                SafepointMode safepoint_mode,
                                int argc) {
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ call(code, mode);
-  RegisterLazyDeoptimization(instr, safepoint_mode, argc);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
-  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+  if (code->kind() == Code::BINARY_OP_IC ||
       code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
@@ -476,7 +461,7 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 }
 
 
@@ -486,39 +471,12 @@
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
-      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
-                                          SafepointMode safepoint_mode,
-                                          int argc) {
-  // Create the environment to bailout to. If the call has side effects
-  // execution has to continue after the call otherwise execution can continue
-  // from a previous bailout point repeating the call.
-  LEnvironment* deoptimization_environment;
-  if (instr->HasDeoptimizationEnvironment()) {
-    deoptimization_environment = instr->deoptimization_environment();
-  } else {
-    deoptimization_environment = instr->environment();
-  }
-
-  RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
-    ASSERT(argc == 0);
-    RecordSafepoint(instr->pointer_map(),
-                    deoptimization_environment->deoptimization_index());
-  } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(),
-        argc,
-        deoptimization_environment->deoptimization_index());
-  }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
   if (!environment->HasBeenRegistered()) {
     // Physical stack frame layout:
     // -x ............. -4  0 ..................................... y
@@ -540,14 +498,17 @@
     Translation translation(&translations_, frame_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
-    environment->Register(deoptimization_index, translation.index());
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
     deoptimizations_.Add(environment);
   }
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -599,6 +560,7 @@
     data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
     data->SetArgumentsStackHeight(i,
                                   Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
   }
   code->set_deoptimization_data(*data);
 }
@@ -630,17 +592,29 @@
 }
 
 
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
+  }
+}
+
+
 void LCodeGen::RecordSafepoint(
     LPointerMap* pointers,
     Safepoint::Kind kind,
     int arguments,
-    int deoptimization_index) {
+    Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
 
   const ZoneList<LOperand*>* operands = pointers->operands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-      kind, arguments, deoptimization_index);
+      kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
@@ -657,27 +631,26 @@
 
 
 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
 }
 
 
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
   LPointerMap empty_pointers(RelocInfo::kNoPosition);
-  RecordSafepoint(&empty_pointers, deoptimization_index);
+  RecordSafepoint(&empty_pointers, deopt_mode);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
-                                            int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
-      deoptimization_index);
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
 }
 
 
 void LCodeGen::RecordPosition(int position) {
-  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
 }
 
@@ -690,7 +663,7 @@
   }
   __ bind(label->label());
   current_block_ = label->block_id();
-  LCodeGen::DoGap(label);
+  DoGap(label);
 }
 
 
@@ -707,12 +680,11 @@
     LParallelMove* move = gap->GetParallelMove(inner_pos);
     if (move != NULL) DoParallelMove(move);
   }
+}
 
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
 }
 
 
@@ -780,27 +752,29 @@
 
     if (divisor < 0) divisor = -divisor;
 
-    NearLabel positive_dividend, done;
+    Label positive_dividend, done;
     __ testl(dividend, dividend);
-    __ j(not_sign, &positive_dividend);
+    __ j(not_sign, &positive_dividend, Label::kNear);
     __ negl(dividend);
     __ andl(dividend, Immediate(divisor - 1));
     __ negl(dividend);
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       DeoptimizeIf(no_condition, instr->environment());
     } else {
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
     __ bind(&positive_dividend);
     __ andl(dividend, Immediate(divisor - 1));
     __ bind(&done);
   } else {
-    LOperand* right = instr->InputAt(1);
-    Register right_reg = ToRegister(right);
+    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+    Register left_reg = ToRegister(instr->InputAt(0));
+    Register right_reg = ToRegister(instr->InputAt(1));
+    Register result_reg = ToRegister(instr->result());
 
-    ASSERT(ToRegister(instr->result()).is(rdx));
-    ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+    ASSERT(left_reg.is(rax));
+    ASSERT(result_reg.is(rdx));
     ASSERT(!right_reg.is(rax));
     ASSERT(!right_reg.is(rdx));
 
@@ -810,21 +784,60 @@
       DeoptimizeIf(zero, instr->environment());
     }
 
+    __ testl(left_reg, left_reg);
+    __ j(zero, &remainder_eq_dividend, Label::kNear);
+    __ j(sign, &slow, Label::kNear);
+
+    __ testl(right_reg, right_reg);
+    __ j(not_sign, &both_positive, Label::kNear);
+    // The sign of the divisor doesn't matter.
+    __ neg(right_reg);
+
+    __ bind(&both_positive);
+    // If the dividend is smaller than the nonnegative
+    // divisor, the dividend is the result.
+    __ cmpl(left_reg, right_reg);
+    __ j(less, &remainder_eq_dividend, Label::kNear);
+
+    // Check if the divisor is a PowerOfTwo integer.
+    Register scratch = ToRegister(instr->TempAt(0));
+    __ movl(scratch, right_reg);
+    __ subl(scratch, Immediate(1));
+    __ testl(scratch, right_reg);
+    __ j(not_zero, &do_subtraction, Label::kNear);
+    __ andl(left_reg, scratch);
+    __ jmp(&remainder_eq_dividend, Label::kNear);
+
+    __ bind(&do_subtraction);
+    const int kUnfolds = 3;
+    // Try a few subtractions of the dividend.
+    __ movl(scratch, left_reg);
+    for (int i = 0; i < kUnfolds; i++) {
+      // Reduce the dividend by the divisor.
+      __ subl(left_reg, right_reg);
+      // Check if the dividend is less than the divisor.
+      __ cmpl(left_reg, right_reg);
+      __ j(less, &remainder_eq_dividend, Label::kNear);
+    }
+    __ movl(left_reg, scratch);
+
+    // Slow case, using idiv instruction.
+    __ bind(&slow);
     // Sign extend eax to edx.
     // (We are using only the low 32 bits of the values.)
     __ cdq();
 
     // Check for (0 % -x) that will produce negative zero.
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      NearLabel positive_left;
-      NearLabel done;
-      __ testl(rax, rax);
-      __ j(not_sign, &positive_left);
+      Label positive_left;
+      Label done;
+      __ testl(left_reg, left_reg);
+      __ j(not_sign, &positive_left, Label::kNear);
       __ idivl(right_reg);
 
       // Test the remainder for 0, because then the result would be -0.
-      __ testl(rdx, rdx);
-      __ j(not_zero, &done);
+      __ testl(result_reg, result_reg);
+      __ j(not_zero, &done, Label::kNear);
 
       DeoptimizeIf(no_condition, instr->environment());
       __ bind(&positive_left);
@@ -833,6 +846,12 @@
     } else {
       __ idivl(right_reg);
     }
+    __ jmp(&done, Label::kNear);
+
+    __ bind(&remainder_eq_dividend);
+    __ movl(result_reg, left_reg);
+
+    __ bind(&done);
   }
 }
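
The expanded DoModI above adds several fast paths in front of the idiv slow case for the register-divisor form. Written out as C++ over int32 values, this is a sketch of the generated control flow, assuming a nonzero divisor (the generated code deoptimizes on division by zero):

    #include <cstdint>

    int32_t FastMod(int32_t dividend, int32_t divisor) {
      if (dividend == 0) return 0;                  // remainder_eq_dividend
      if (dividend < 0) return dividend % divisor;  // slow: let idiv handle the sign
      // The sign of the divisor doesn't matter; work with its magnitude.
      const uint32_t left = static_cast<uint32_t>(dividend);
      const uint32_t right = divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                         : static_cast<uint32_t>(divisor);
      if (left < right) return dividend;            // dividend smaller than divisor
      if ((right & (right - 1)) == 0) {             // power-of-two divisor
        return static_cast<int32_t>(left & (right - 1));
      }
      const int kUnfolds = 3;                       // a few subtractions before giving up
      uint32_t reduced = left;
      for (int i = 0; i < kUnfolds; ++i) {
        reduced -= right;
        if (reduced < right) return static_cast<int32_t>(reduced);
      }
      return dividend % divisor;                    // slow case: idiv
    }

The earlier branch of DoModI (the constant power-of-two path) instead handles negative dividends directly with the neg/and/neg sequence shown above.
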
 
@@ -855,9 +874,9 @@
 
   // Check for (0 / -x) that will produce negative zero.
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    NearLabel left_not_zero;
+    Label left_not_zero;
     __ testl(left_reg, left_reg);
-    __ j(not_zero, &left_not_zero);
+    __ j(not_zero, &left_not_zero, Label::kNear);
     __ testl(right_reg, right_reg);
     DeoptimizeIf(sign, instr->environment());
     __ bind(&left_not_zero);
@@ -865,9 +884,9 @@
 
   // Check for (-kMinInt / -1).
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    NearLabel left_not_min_int;
+    Label left_not_min_int;
     __ cmpl(left_reg, Immediate(kMinInt));
-    __ j(not_zero, &left_not_min_int);
+    __ j(not_zero, &left_not_min_int, Label::kNear);
     __ cmpl(right_reg, Immediate(-1));
     DeoptimizeIf(zero, instr->environment());
     __ bind(&left_not_min_int);
@@ -946,9 +965,9 @@
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Bail out if the result is supposed to be negative zero.
-    NearLabel done;
+    Label done;
     __ testl(left, left);
-    __ j(not_zero, &done);
+    __ j(not_zero, &done, Label::kNear);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
         DeoptimizeIf(no_condition, instr->environment());
@@ -1113,7 +1132,7 @@
   // Use xor to produce +0.0 in a fast and compact way, but avoid to
   // do so if the constant is -0.0.
   if (int_val == 0) {
-    __ xorpd(res, res);
+    __ xorps(res, res);
   } else {
     Register tmp = ToRegister(instr->TempAt(0));
     __ Set(tmp, int_val);
@@ -1135,17 +1154,24 @@
 }
 
 
-void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
+void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
   Register result = ToRegister(instr->result());
   Register array = ToRegister(instr->InputAt(0));
-  __ movq(result, FieldOperand(array, FixedArray::kLengthOffset));
+  __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
 }
 
 
-void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
   Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->InputAt(0));
-  __ movl(result, FieldOperand(array, ExternalPixelArray::kLengthOffset));
+  Register input = ToRegister(instr->InputAt(0));
+
+  // Load map into |result|.
+  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+  // Load the map's "bit field 2" into |result|. We only need the first byte.
+  __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ and_(result, Immediate(Map::kElementsKindMask));
+  __ shr(result, Immediate(Map::kElementsKindShift));
 }
 
 
@@ -1153,13 +1179,13 @@
   Register input = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
   ASSERT(input.is(result));
-  NearLabel done;
+  Label done;
   // If the object is a smi return the object.
-  __ JumpIfSmi(input, &done);
+  __ JumpIfSmi(input, &done, Label::kNear);
 
   // If the object is not a value type, return the object.
   __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   __ movq(result, FieldOperand(input, JSValue::kValueOffset));
 
   __ bind(&done);
@@ -1225,12 +1251,12 @@
       break;
     case Token::MOD:
       __ PrepareCallCFunction(2);
-      __ movsd(xmm0, left);
+      __ movaps(xmm0, left);
       ASSERT(right.is(xmm1));
       __ CallCFunction(
           ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
       __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ movsd(result, xmm0);
+      __ movaps(result, xmm0);
       break;
     default:
       UNREACHABLE();
@@ -1244,7 +1270,7 @@
   ASSERT(ToRegister(instr->InputAt(1)).is(rax));
   ASSERT(ToRegister(instr->result()).is(rax));
 
-  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
@@ -1283,20 +1309,20 @@
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  Representation r = instr->hydrogen()->representation();
+  Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
     Register reg = ToRegister(instr->InputAt(0));
     __ testl(reg, reg);
     EmitBranch(true_block, false_block, not_zero);
   } else if (r.IsDouble()) {
     XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(xmm0, xmm0);
+    __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
     EmitBranch(true_block, false_block, not_equal);
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->InputAt(0));
-    HType type = instr->hydrogen()->type();
+    HType type = instr->hydrogen()->value()->type();
     if (type.IsBoolean()) {
       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
       EmitBranch(true_block, false_block, equal);
@@ -1307,82 +1333,99 @@
       Label* true_label = chunk_->GetAssemblyLabel(true_block);
       Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-      __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
-      __ j(equal, false_label);
-      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-      __ j(equal, true_label);
-      __ CompareRoot(reg, Heap::kFalseValueRootIndex);
-      __ j(equal, false_label);
-      __ Cmp(reg, Smi::FromInt(0));
-      __ j(equal, false_label);
-      __ JumpIfSmi(reg, true_label);
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
 
-      // Test for double values. Plus/minus zero and NaN are false.
-      NearLabel call_stub;
-      __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
-                     Heap::kHeapNumberMapRootIndex);
-      __ j(not_equal, &call_stub);
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+        __ j(equal, false_label);
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // true -> true.
+        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+        __ j(equal, true_label);
+        // false -> false.
+        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+        __ j(equal, false_label);
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ CompareRoot(reg, Heap::kNullValueRootIndex);
+        __ j(equal, false_label);
+      }
 
-      // HeapNumber => false iff +0, -0, or NaN. These three cases set the
-      // zero flag when compared to zero using ucomisd.
-      __ xorpd(xmm0, xmm0);
-      __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
-      __ j(zero, false_label);
-      __ jmp(true_label);
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all others -> true.
+        __ Cmp(reg, Smi::FromInt(0));
+        __ j(equal, false_label);
+        __ JumpIfSmi(reg, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ testb(reg, Immediate(kSmiTagMask));
+        DeoptimizeIf(zero, instr->environment());
+      }
 
-      // The conversion stub doesn't cause garbage collections so it's
-      // safe to not record a safepoint after the call.
-      __ bind(&call_stub);
-      ToBooleanStub stub;
-      __ Pushad();
-      __ push(reg);
-      __ CallStub(&stub);
-      __ testq(rax, rax);
-      __ Popad();
-      EmitBranch(true_block, false_block, not_zero);
+      const Register map = kScratchRegister;
+      if (expected.NeedsMap()) {
+        __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ testb(FieldOperand(map, Map::kBitFieldOffset),
+                   Immediate(1 << Map::kIsUndetectable));
+          __ j(not_zero, false_label);
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+        __ j(above_equal, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+        __ j(above_equal, &not_string, Label::kNear);
+        __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+        __ j(not_zero, true_label);
+        __ jmp(false_label);
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+        __ j(not_equal, &not_heap_number, Label::kNear);
+        __ xorps(xmm0, xmm0);
+        __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+        __ j(zero, false_label);
+        __ jmp(true_label);
+        __ bind(&not_heap_number);
+      }
+
+      // We've seen something for the first time -> deopt.
+      DeoptimizeIf(no_condition, instr->environment());
     }
   }
 }
 
 
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
   block = chunk_->LookupDestination(block);
   int next_block = GetNextEmittedBlock(current_block_);
   if (block != next_block) {
-    // Perform stack overflow check if this goto needs it before jumping.
-    if (deferred_stack_check != NULL) {
-      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-      __ j(above_equal, chunk_->GetAssemblyLabel(block));
-      __ jmp(deferred_stack_check->entry());
-      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
-    } else {
-      __ jmp(chunk_->GetAssemblyLabel(block));
-    }
+    __ jmp(chunk_->GetAssemblyLabel(block));
   }
 }
 
 
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  PushSafepointRegistersScope scope(this);
-  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
 void LCodeGen::DoGoto(LGoto* instr) {
-  class DeferredStackCheck: public LDeferredCode {
-   public:
-    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
-   private:
-    LGoto* instr_;
-  };
-
-  DeferredStackCheck* deferred = NULL;
-  if (instr->include_stack_check()) {
-    deferred = new DeferredStackCheck(this, instr);
-  }
-  EmitGoto(instr->block_id(), deferred);
+  EmitGoto(instr->block_id());
 }
 
 
@@ -1430,32 +1473,6 @@
 }
 
 
-void LCodeGen::DoCmpID(LCmpID* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  LOperand* result = instr->result();
-
-  NearLabel unordered;
-  if (instr->is_double()) {
-    // Don't base result on EFLAGS when a NaN is involved. Instead
-    // jump to the unordered case, which produces a false value.
-    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, &unordered);
-  } else {
-    EmitCmpI(left, right);
-  }
-
-  NearLabel done;
-  Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
-  __ j(cc, &done);
-
-  __ bind(&unordered);
-  __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
@@ -1476,23 +1493,7 @@
 }
 
 
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
-  Register left = ToRegister(instr->InputAt(0));
-  Register right = ToRegister(instr->InputAt(1));
-  Register result = ToRegister(instr->result());
-
-  NearLabel different, done;
-  __ cmpq(left, right);
-  __ j(not_equal, &different);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
-  __ bind(&different);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   Register left = ToRegister(instr->InputAt(0));
   Register right = ToRegister(instr->InputAt(1));
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1503,47 +1504,13 @@
 }
 
 
-void LCodeGen::DoIsNull(LIsNull* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  // If the expression is known to be a smi, then it's
-  // definitely not null. Materialize false.
-  // Consider adding other type and representation tests too.
-  if (instr->hydrogen()->value()->type().IsSmi()) {
-    __ LoadRoot(result, Heap::kFalseValueRootIndex);
-    return;
-  }
-
-  __ CompareRoot(reg, Heap::kNullValueRootIndex);
-  if (instr->is_strict()) {
-    ASSERT(Heap::kTrueValueRootIndex >= 0);
-    __ movl(result, Immediate(Heap::kTrueValueRootIndex));
-    NearLabel load;
-    __ j(equal, &load);
-    __ Set(result, Heap::kFalseValueRootIndex);
-    __ bind(&load);
-    __ LoadRootIndexed(result, result, 0);
-  } else {
-    NearLabel true_value, false_value, done;
-    __ j(equal, &true_value);
-    __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
-    __ j(equal, &true_value);
-    __ JumpIfSmi(reg, &false_value);
-    // Check for undetectable objects by looking in the bit field in
-    // the map. The object has already been smi checked.
-    Register scratch = result;
-    __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
-    __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
-             Immediate(1 << Map::kIsUndetectable));
-    __ j(not_zero, &true_value);
-    __ bind(&false_value);
-    __ LoadRoot(result, Heap::kFalseValueRootIndex);
-    __ jmp(&done);
-    __ bind(&true_value);
-    __ LoadRoot(result, Heap::kTrueValueRootIndex);
-    __ bind(&done);
-  }
+  __ cmpq(left, Immediate(instr->hydrogen()->right()));
+  EmitBranch(true_block, false_block, equal);
 }
 
 
@@ -1602,32 +1569,13 @@
 
   __ movzxbl(kScratchRegister,
              FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
-  __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(below, is_not_object);
-  __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   return below_equal;
 }
 
 
-void LCodeGen::DoIsObject(LIsObject* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Label is_false, is_true, done;
-
-  Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
-  __ j(true_cond, &is_true);
-
-  __ bind(&is_false);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&is_true);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
 
@@ -1642,22 +1590,6 @@
 }
 
 
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
-  LOperand* input_operand = instr->InputAt(0);
-  Register result = ToRegister(instr->result());
-  if (input_operand->IsRegister()) {
-    Register input = ToRegister(input_operand);
-    __ CheckSmiToIndicator(result, input);
-  } else {
-    Operand input = ToOperand(instr->InputAt(0));
-    __ CheckSmiToIndicator(result, input);
-  }
-  // result is zero if input is a smi, and one otherwise.
-  ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
-  __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
-}
-
-
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1674,7 +1606,22 @@
 }
 
 
-static InstanceType TestType(HHasInstanceType* instr) {
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  EmitBranch(true_block, false_block, not_zero);
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
@@ -1683,7 +1630,7 @@
 }
 
 
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == to) return equal;
@@ -1694,24 +1641,6 @@
 }
 
 
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  __ testl(input, Immediate(kSmiTagMask));
-  NearLabel done, is_false;
-  __ j(zero, &is_false);
-  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
-  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
-  __ bind(&is_false);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
 
@@ -1741,21 +1670,6 @@
 }
 
 
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ testl(FieldOperand(input, String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  NearLabel done;
-  __ j(zero, &done);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoHasCachedArrayIndexAndBranch(
     LHasCachedArrayIndexAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
@@ -1777,26 +1691,27 @@
                                Register input,
                                Register temp) {
   __ JumpIfSmi(input, is_false);
-  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
   __ j(below, is_false);
 
   // Map is now in temp.
   // Functions have class 'Function'.
-  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    __ j(equal, is_false);
+    __ j(above_equal, is_false);
   }
 
   // Check if the constructor in the map is a function.
   __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
 
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
 
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
@@ -1823,29 +1738,6 @@
 }
 
 
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  ASSERT(input.is(result));
-  Register temp = ToRegister(instr->TempAt(0));
-  Handle<String> class_name = instr->hydrogen()->class_name();
-  NearLabel done;
-  Label is_true, is_false;
-
-  EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
-
-  __ j(not_equal, &is_false);
-
-  __ bind(&is_true);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&is_false);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
@@ -1878,30 +1770,17 @@
   __ push(ToRegister(instr->InputAt(0)));
   __ push(ToRegister(instr->InputAt(1)));
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  NearLabel true_value, done;
+  Label true_value, done;
   __ testq(rax, rax);
-  __ j(zero, &true_value);
+  __ j(zero, &true_value, Label::kNear);
   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   __ bind(&done);
 }
 
 
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  InstanceofStub stub(InstanceofStub::kNoFlags);
-  __ push(ToRegister(instr->InputAt(0)));
-  __ push(ToRegister(instr->InputAt(1)));
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ testq(rax, rax);
-  EmitBranch(true_block, false_block, zero);
-}
-
-
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   class DeferredInstanceOfKnownGlobal: public LDeferredCode {
    public:
@@ -1909,7 +1788,7 @@
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() {
-      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
 
     Label* map_check() { return &map_check_; }
@@ -1932,7 +1811,7 @@
   // This is the inlined call site instanceof cache. The two occurrences of the
   // hole value will be patched to the last map/result pair generated by the
   // instanceof stub.
-  NearLabel cache_miss;
+  Label cache_miss;
   // Use a temp register to avoid memory operands with variable lengths.
   Register map = ToRegister(instr->TempAt(0));
   __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
@@ -1940,7 +1819,7 @@
   __ movq(kScratchRegister, factory()->the_hole_value(),
           RelocInfo::EMBEDDED_OBJECT);
   __ cmpq(map, kScratchRegister);  // Patched to cached map.
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   // Patched to load either true or false.
   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
 #ifdef DEBUG
@@ -1955,7 +1834,7 @@
   // before calling the deferred code.
   __ bind(&cache_miss);  // Null is not an instance of anything.
   __ CompareRoot(object, Heap::kNullValueRootIndex);
-  __ j(equal, &false_result);
+  __ j(equal, &false_result, Label::kNear);
 
   // String values are not instances of anything.
   __ JumpIfNotString(object, kScratchRegister, deferred->entry());
@@ -1968,8 +1847,8 @@
 }
 
 
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                                Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
   {
     PushSafepointRegistersScope scope(this);
     InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
@@ -1979,7 +1858,6 @@
     __ push(ToRegister(instr->InputAt(0)));
     __ Push(instr->function());
 
-    Register temp = ToRegister(instr->TempAt(0));
     static const int kAdditionalDelta = 10;
     int delta =
         masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
@@ -1996,6 +1874,9 @@
                     RECORD_SAFEPOINT_WITH_REGISTERS,
                     2);
     ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+    ASSERT(instr->HasDeoptimizationEnvironment());
+    LEnvironment* env = instr->deoptimization_environment();
+    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
     // Move result to a register that survives the end of the
     // PushSafepointRegisterScope.
     __ movq(kScratchRegister, rax);
@@ -2022,36 +1903,17 @@
   if (op == Token::GT || op == Token::LTE) {
     condition = ReverseCondition(condition);
   }
-  NearLabel true_value, done;
+  Label true_value, done;
   __ testq(rax, rax);
-  __ j(condition, &true_value);
+  __ j(condition, &true_value, Label::kNear);
   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   __ bind(&done);
 }
 
 
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
-  Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  // The compare stub expects compare condition and the input operands
-  // reversed for GT and LTE.
-  Condition condition = TokenToCondition(op, false);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
-  __ testq(rax, rax);
-  EmitBranch(true_block, false_block, condition);
-}
-
-
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace) {
     // Preserve the return value on the stack and rely on the runtime
@@ -2061,7 +1923,7 @@
   }
   __ movq(rsp, rbp);
   __ pop(rbp);
-  __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
 }
 
 
@@ -2159,23 +2021,29 @@
 }
 
 
-void LCodeGen::EmitLoadField(Register result,
-                             Register object,
-                             Handle<Map> type,
-                             Handle<String> name) {
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
   LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
-  int index = lookup.GetLocalFieldIndexFromMap(*type);
-  int offset = index * kPointerSize;
-  if (index < 0) {
-    // Negative property indices are in-object properties, indexed
-    // from the end of the fixed part of the object.
-    __ movq(result, FieldOperand(object, offset + type->instance_size()));
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ movq(result, FieldOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+      __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    }
   } else {
-    // Non-negative property indices are in the properties array.
-    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2193,30 +2061,30 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     CallCode(ic, RelocInfo::CODE_TARGET, instr);
   } else {
-    NearLabel done;
+    Label done;
     for (int i = 0; i < map_count - 1; ++i) {
       Handle<Map> map = instr->hydrogen()->types()->at(i);
-      NearLabel next;
+      Label next;
       __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-      __ j(not_equal, &next);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      __ j(not_equal, &next, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
     Handle<Map> map = instr->hydrogen()->types()->last();
     __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
     if (instr->hydrogen()->need_generic()) {
-      NearLabel generic;
-      __ j(not_equal, &generic);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      Label generic;
+      __ j(not_equal, &generic, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&generic);
       __ Move(rcx, instr->hydrogen()->name());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallCode(ic, RelocInfo::CODE_TARGET, instr);
     } else {
       DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
     }
     __ bind(&done);
   }
@@ -2242,10 +2110,10 @@
   DeoptimizeIf(not_equal, instr->environment());
 
   // Check whether the function has an instance prototype.
-  NearLabel non_instance;
+  Label non_instance;
   __ testb(FieldOperand(result, Map::kBitFieldOffset),
            Immediate(1 << Map::kHasNonInstancePrototype));
-  __ j(not_zero, &non_instance);
+  __ j(not_zero, &non_instance, Label::kNear);
 
   // Get the prototype or initial map from the function.
   __ movq(result,
@@ -2256,13 +2124,13 @@
   DeoptimizeIf(equal, instr->environment());
 
   // If the function does not have an initial map, we're done.
-  NearLabel done;
+  Label done;
   __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Non-instance prototype: Fetch prototype from constructor field
   // in the function's map.
@@ -2279,21 +2147,29 @@
   Register input = ToRegister(instr->InputAt(0));
   __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
   if (FLAG_debug_code) {
-    NearLabel done;
+    Label done, ok, fail;
     __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                    Heap::kFixedArrayMapRootIndex);
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                    Heap::kFixedCOWArrayMapRootIndex);
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     Register temp((result.is(rax)) ? rbx : rax);
     __ push(temp);
     __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
-    __ movzxbq(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
-    __ subq(temp, Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
-    __ cmpq(temp, Immediate(kExternalArrayTypeCount));
+    __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
+    __ and_(temp, Immediate(Map::kElementsKindMask));
+    __ shr(temp, Immediate(Map::kElementsKindShift));
+    __ cmpl(temp, Immediate(FAST_ELEMENTS));
+    __ j(equal, &ok, Label::kNear);
+    __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ j(less, &fail, Label::kNear);
+    __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ j(less_equal, &ok, Label::kNear);
+    __ bind(&fail);
+    __ Abort("Check for fast or external elements failed");
+    __ bind(&ok);
     __ pop(temp);
-    __ Check(below, "Check for fast elements failed.");
     __ bind(&done);
   }
 }
@@ -2327,60 +2203,111 @@
 
 
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
-  Register elements = ToRegister(instr->elements());
-  Register key = ToRegister(instr->key());
   Register result = ToRegister(instr->result());
-  ASSERT(result.is(elements));
 
   // Load the result.
-  __ movq(result, FieldOperand(elements,
-                               key,
-                               times_pointer_size,
-                               FixedArray::kHeaderSize));
+  __ movq(result,
+          BuildFastArrayOperand(instr->elements(), instr->key(),
+                                FAST_ELEMENTS,
+                                FixedArray::kHeaderSize - kHeapObjectTag));
 
   // Check for the hole value.
-  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  DeoptimizeIf(equal, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+    LLoadKeyedFastDoubleElement* instr) {
+  XMMRegister result(ToDoubleRegister(instr->result()));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+        sizeof(kHoleNanLower32);
+    Operand hole_check_operand = BuildFastArrayOperand(
+        instr->elements(),
+        instr->key(),
+        FAST_DOUBLE_ELEMENTS,
+        offset);
+    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+    DeoptimizeIf(equal, instr->environment());
+  }
+
+  Operand double_load_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+  __ movsd(result, double_load_operand);
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+    LOperand* elements_pointer,
+    LOperand* key,
+    ElementsKind elements_kind,
+    uint32_t offset) {
+  Register elements_pointer_reg = ToRegister(elements_pointer);
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+  if (key->IsConstantOperand()) {
+    int constant_value = ToInteger32(LConstantOperand::cast(key));
+    if (constant_value & 0xF0000000) {
+      Abort("array index constant value too big");
+    }
+    return Operand(elements_pointer_reg,
+                   constant_value * (1 << shift_size) + offset);
+  } else {
+    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+    return Operand(elements_pointer_reg, ToRegister(key),
+                   scale_factor, offset);
+  }
 }
 
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
-  ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
+  ElementsKind elements_kind = instr->elements_kind();
+  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+                                        instr->key(), elements_kind, 0));
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister result(ToDoubleRegister(instr->result()));
-    __ movss(result, Operand(external_pointer, key, times_4, 0));
+    __ movss(result, operand);
     __ cvtss2sd(result, result);
+  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    __ movsd(ToDoubleRegister(instr->result()), operand);
   } else {
     Register result(ToRegister(instr->result()));
-    switch (array_type) {
-      case kExternalByteArray:
-        __ movsxbq(result, Operand(external_pointer, key, times_1, 0));
+    switch (elements_kind) {
+      case EXTERNAL_BYTE_ELEMENTS:
+        __ movsxbq(result, operand);
         break;
-      case kExternalUnsignedByteArray:
-      case kExternalPixelArray:
-        __ movzxbq(result, Operand(external_pointer, key, times_1, 0));
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_PIXEL_ELEMENTS:
+        __ movzxbq(result, operand);
         break;
-      case kExternalShortArray:
-        __ movsxwq(result, Operand(external_pointer, key, times_2, 0));
+      case EXTERNAL_SHORT_ELEMENTS:
+        __ movsxwq(result, operand);
         break;
-      case kExternalUnsignedShortArray:
-        __ movzxwq(result, Operand(external_pointer, key, times_2, 0));
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ movzxwq(result, operand);
         break;
-      case kExternalIntArray:
-        __ movsxlq(result, Operand(external_pointer, key, times_4, 0));
+      case EXTERNAL_INT_ELEMENTS:
+        __ movsxlq(result, operand);
         break;
-      case kExternalUnsignedIntArray:
-        __ movl(result, Operand(external_pointer, key, times_4, 0));
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ movl(result, operand);
         __ testl(result, result);
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
         // happens, and generate code that returns a double rather than int.
         DeoptimizeIf(negative, instr->environment());
         break;
-      case kExternalFloatArray:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -2401,15 +2328,15 @@
   Register result = ToRegister(instr->result());
 
   // Check for arguments adapter frame.
-  NearLabel done, adapted;
+  Label done, adapted;
   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adapted);
+  __ j(equal, &adapted, Label::kNear);
 
   // No arguments adaptor frame.
   __ movq(result, rbp);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Arguments adaptor frame present.
   __ bind(&adapted);
@@ -2424,7 +2351,7 @@
 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   Register result = ToRegister(instr->result());
 
-  NearLabel done;
+  Label done;
 
   // If no arguments adaptor frame the number of arguments is fixed.
   if (instr->InputAt(0)->IsRegister()) {
@@ -2433,7 +2360,7 @@
     __ cmpq(rbp, ToOperand(instr->InputAt(0)));
   }
   __ movl(result, Immediate(scope()->num_parameters()));
-  __ j(equal, &done);
+  __ j(equal, &done, Label::kNear);
 
   // Arguments adaptor frame present. Get argument length from there.
   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2455,27 +2382,46 @@
   ASSERT(function.is(rdi));  // Required by InvokeFunction.
   ASSERT(ToRegister(instr->result()).is(rax));
 
-  // If the receiver is null or undefined, we have to pass the global object
-  // as a receiver.
-  NearLabel global_object, receiver_ok;
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, receiver_ok;
+
+  // Do not transform the receiver to object for strict mode
+  // functions.
+  __ movq(kScratchRegister,
+          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  __ testb(FieldOperand(kScratchRegister,
+                        SharedFunctionInfo::kStrictModeByteOffset),
+           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+  __ j(not_equal, &receiver_ok, Label::kNear);
+
+  // Do not transform the receiver to object for builtins.
+  __ testb(FieldOperand(kScratchRegister,
+                        SharedFunctionInfo::kNativeByteOffset),
+           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+  __ j(not_equal, &receiver_ok, Label::kNear);
+
+  // Normal function. Replace undefined or null with global receiver.
   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
 
   // The receiver should be a JS object.
   Condition is_smi = __ CheckSmi(receiver);
   DeoptimizeIf(is_smi, instr->environment());
-  __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
   DeoptimizeIf(below, instr->environment());
-  __ jmp(&receiver_ok);
+  __ jmp(&receiver_ok, Label::kNear);
 
   __ bind(&global_object);
   // TODO(kmillikin): We have a hydrogen value for the global object.  See
   // if it's better to use it than to explicitly fetch it from the context
   // here.
-  __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+  __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(receiver,
+          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
 
   // Copy the arguments to this function possibly from the
@@ -2489,10 +2435,10 @@
 
   // Loop through the arguments pushing them onto the execution
   // stack.
-  NearLabel invoke, loop;
+  Label invoke, loop;
   // length is a small non-negative integer, due to the test above.
   __ testl(length, length);
-  __ j(zero, &invoke);
+  __ j(zero, &invoke, Label::kNear);
   __ bind(&loop);
   __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
   __ decl(length);
@@ -2502,33 +2448,31 @@
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
   v8::internal::ParameterCount actual(rax);
-  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   LOperand* argument = instr->InputAt(0);
-  if (argument->IsConstantOperand()) {
-    EmitPushConstantOperand(argument);
-  } else if (argument->IsRegister()) {
-    __ push(ToRegister(argument));
-  } else {
-    ASSERT(!argument->IsDoubleRegister());
-    __ push(ToOperand(argument));
-  }
+  EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
 void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ movq(result, rsi);
 }
 
 
@@ -2536,8 +2480,7 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ movq(result,
-          Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
-  __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
+          Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
 }
 
 
@@ -2556,7 +2499,8 @@
 
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
-                                 LInstruction* instr) {
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
   // Change context if needed.
   bool change_context =
       (info()->closure()->context() != function->context()) ||
@@ -2576,6 +2520,7 @@
   RecordPosition(pointers->position());
 
   // Invoke function.
+  __ SetCallKind(rcx, call_kind);
   if (*function == *info()->closure()) {
     __ CallSelf();
   } else {
@@ -2583,7 +2528,7 @@
   }
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 
   // Restore context.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2593,7 +2538,10 @@
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
   __ Move(rdi, instr->function());
-  CallKnownFunction(instr->function(), instr->arity(), instr);
+  CallKnownFunction(instr->function(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_METHOD);
 }
 
 
@@ -2680,7 +2628,7 @@
   if (r.IsDouble()) {
     XMMRegister scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(scratch, scratch);
+    __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
     __ andpd(input_reg, scratch);
   } else if (r.IsInteger32()) {
@@ -2703,21 +2651,45 @@
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-  __ ucomisd(input_reg, xmm_scratch);
+  Label done;
 
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(below_equal, instr->environment());
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatures::Scope scope(SSE4_1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Deoptimize if minus zero.
+      __ movq(output_reg, input_reg);
+      __ subq(output_reg, Immediate(1));
+      DeoptimizeIf(overflow, instr->environment());
+    }
+    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+    __ cvttsd2si(output_reg, xmm_scratch);
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   } else {
+    // Deoptimize on negative inputs.
+    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
     DeoptimizeIf(below, instr->environment());
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Check for negative zero.
+      Label positive_sign;
+      __ j(above, &positive_sign, Label::kNear);
+      __ movmskpd(output_reg, input_reg);
+      __ testq(output_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr->environment());
+      __ Set(output_reg, 0);
+      __ jmp(&done);
+      __ bind(&positive_sign);
+    }
+
+    // Use truncating instruction (OK because input is positive).
+    __ cvttsd2si(output_reg, input_reg);
+
+    // Overflow is signalled with minint.
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   }
-
-  // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, input_reg);
-
-  // Overflow is signalled with minint.
-  __ cmpl(output_reg, Immediate(0x80000000));
-  DeoptimizeIf(equal, instr->environment());
+  __ bind(&done);
 }
 
 
@@ -2726,33 +2698,45 @@
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
 
+  Label done;
   // xmm_scratch = 0.5
   __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
   __ movq(xmm_scratch, kScratchRegister);
-
+  Label below_half;
+  __ ucomisd(xmm_scratch, input_reg);
+  // If input_reg is NaN, this doesn't jump.
+  __ j(above, &below_half, Label::kNear);
   // input = input + 0.5
-  __ addsd(input_reg, xmm_scratch);
-
-  // We need to return -0 for the input range [-0.5, 0[, otherwise
-  // compute Math.floor(value + 0.5).
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below_equal, instr->environment());
-  } else {
-    // If we don't need to bailout on -0, we check only bailout
-    // on negative inputs.
-    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
-  }
-
-  // Compute Math.floor(value + 0.5).
+  // This addition might give a result that isn't correct for
+  // rounding, due to loss of precision, but only for a number that's
+  // so big that the conversion below will overflow anyway.
+  __ addsd(xmm_scratch, input_reg);
+  // Compute Math.floor(input).
   // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, input_reg);
-
+  __ cvttsd2si(output_reg, xmm_scratch);
   // Overflow is signalled with minint.
   __ cmpl(output_reg, Immediate(0x80000000));
   DeoptimizeIf(equal, instr->environment());
+  __ jmp(&done);
+
+  __ bind(&below_half);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bailout if negative (including -0).
+    __ movq(output_reg, input_reg);
+    __ testq(output_reg, output_reg);
+    DeoptimizeIf(negative, instr->environment());
+  } else {
+    // Bailout if below -0.5, otherwise round to (positive) zero, even
+    // if negative.
+    // xmm_scratch = -0.5
+    __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
+    __ movq(xmm_scratch, kScratchRegister);
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+  __ xorl(output_reg, output_reg);
+
+  __ bind(&done);
 }
 
 
@@ -2767,7 +2751,7 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-  __ xorpd(xmm_scratch, xmm_scratch);
+  __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
 }
@@ -2783,7 +2767,7 @@
   if (exponent_type.IsDouble()) {
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     ASSERT(ToDoubleRegister(right).is(xmm1));
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
@@ -2791,7 +2775,7 @@
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers: xmm0 and edi (not rdi).
     // On Windows, the registers are xmm0 and edx.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
 #ifdef _WIN64
     ASSERT(ToRegister(right).is(rdx));
 #else
@@ -2817,13 +2801,13 @@
     __ bind(&call);
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers xmm0 and xmm1.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     // Right argument is already in xmm1.
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
   }
   // Return value is in xmm0.
-  __ movsd(result_reg, xmm0);
+  __ movaps(result_reg, xmm0);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
@@ -2886,13 +2870,26 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(rdi));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
-    arity, NOT_IN_LOOP);
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
@@ -2902,10 +2899,11 @@
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ Move(rcx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, mode, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
@@ -2914,7 +2912,7 @@
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   __ Drop(1);
@@ -2924,10 +2922,11 @@
 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ Move(rcx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  CallCode(ic, mode, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
@@ -2935,7 +2934,7 @@
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
   __ Move(rdi, instr->target());
-  CallKnownFunction(instr->target(), instr->arity(), instr);
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
 
@@ -2998,40 +2997,37 @@
 
 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
-  ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
+  ElementsKind elements_kind = instr->elements_kind();
+  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+                                        instr->key(), elements_kind, 0));
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister value(ToDoubleRegister(instr->value()));
     __ cvtsd2ss(value, value);
-    __ movss(Operand(external_pointer, key, times_4, 0), value);
+    __ movss(operand, value);
+  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    __ movsd(operand, ToDoubleRegister(instr->value()));
   } else {
     Register value(ToRegister(instr->value()));
-    switch (array_type) {
-      case kExternalPixelArray:
-        {  // Clamp the value to [0..255].
-          NearLabel done;
-          __ testl(value, Immediate(0xFFFFFF00));
-          __ j(zero, &done);
-          __ setcc(negative, value);  // 1 if negative, 0 if positive.
-          __ decb(value);  // 0 if negative, 255 if positive.
-          __ bind(&done);
-          __ movb(Operand(external_pointer, key, times_1, 0), value);
-        }
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ movb(operand, value);
         break;
-      case kExternalByteArray:
-      case kExternalUnsignedByteArray:
-        __ movb(Operand(external_pointer, key, times_1, 0), value);
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ movw(operand, value);
         break;
-      case kExternalShortArray:
-      case kExternalUnsignedShortArray:
-        __ movw(Operand(external_pointer, key, times_2, 0), value);
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ movl(operand, value);
         break;
-      case kExternalIntArray:
-      case kExternalUnsignedIntArray:
-        __ movl(Operand(external_pointer, key, times_4, 0), value);
-        break;
-      case kExternalFloatArray:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3040,12 +3036,22 @@
 
 
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  if (instr->length()->IsRegister()) {
-    __ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
+  if (instr->index()->IsConstantOperand()) {
+    if (instr->length()->IsRegister()) {
+      __ cmpq(ToRegister(instr->length()),
+              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+    } else {
+      __ cmpq(ToOperand(instr->length()),
+              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+    }
   } else {
-    __ cmpq(ToRegister(instr->index()), ToOperand(instr->length()));
+    if (instr->length()->IsRegister()) {
+      __ cmpq(ToRegister(instr->length()), ToRegister(instr->index()));
+    } else {
+      __ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
+    }
   }
-  DeoptimizeIf(above_equal, instr->environment());
+  DeoptimizeIf(below_equal, instr->environment());
 }
 
 
@@ -3080,6 +3086,25 @@
 }
 
 
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+    LStoreKeyedFastDoubleElement* instr) {
+  XMMRegister value = ToDoubleRegister(instr->value());
+  Label have_value;
+
+  __ ucomisd(value, value);
+  __ j(parity_odd, &have_value);  // Not NaN: skip canonicalization below.
+
+  __ Set(kScratchRegister, BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  __ movq(value, kScratchRegister);
+
+  __ bind(&have_value);
+  Operand double_store_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+  __ movsd(double_store_operand, value);
+}
+
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   ASSERT(ToRegister(instr->object()).is(rdx));
   ASSERT(ToRegister(instr->key()).is(rcx));
@@ -3092,6 +3117,14 @@
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  EmitPushTaggedOperand(instr->left());
+  EmitPushTaggedOperand(instr->right());
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
@@ -3103,95 +3136,79 @@
   };
 
   Register string = ToRegister(instr->string());
-  Register index = no_reg;
-  int const_index = -1;
-  if (instr->index()->IsConstantOperand()) {
-    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
-    if (!Smi::IsValid(const_index)) {
-      // Guaranteed to be out of bounds because of the assert above.
-      // So the bounds check that must dominate this instruction must
-      // have deoptimized already.
-      if (FLAG_debug_code) {
-        __ Abort("StringCharCodeAt: out of bounds index.");
-      }
-      // No code needs to be generated.
-      return;
-    }
-  } else {
-    index = ToRegister(instr->index());
-  }
+  Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
 
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  NearLabel flat_string, ascii_string, done;
-
   // Fetch the instance type of the receiver into result register.
   __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
 
-  // We need special handling for non-sequential strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ testb(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
 
-  // Handle cons strings and go to deferred code for the rest.
-  __ testb(result, Immediate(kIsConsStringMask));
-  __ j(zero, deferred->entry());
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ testb(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
 
-  // ConsString.
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ addq(index, result);
+  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle conses.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  __ bind(&cons_string);
   __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                  Heap::kEmptyStringRootIndex);
   __ j(not_equal, deferred->entry());
-  // Get the first of the two strings and load its instance type.
   __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
   __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
   STATIC_ASSERT(kSeqStringTag == 0);
   __ testb(result, Immediate(kStringRepresentationMask));
   __ j(not_zero, deferred->entry());
 
-  // Check for ASCII or two-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT(kAsciiStringTag != 0);
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ testb(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
+  __ j(not_zero, &ascii_string, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
+  Label done;
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzxwl(result,
-               FieldOperand(string,
-                            SeqTwoByteString::kHeaderSize +
-                            (kUC16Size * const_index)));
-  } else {
-    __ movzxwl(result, FieldOperand(string,
-                                    index,
-                                    times_2,
-                                    SeqTwoByteString::kHeaderSize));
-  }
-  __ jmp(&done);
+  __ movzxwl(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
   __ bind(&ascii_string);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzxbl(result, FieldOperand(string,
-                                    SeqAsciiString::kHeaderSize + const_index));
-  } else {
-    __ movzxbl(result, FieldOperand(string,
-                                    index,
-                                    times_1,
-                                    SeqAsciiString::kHeaderSize));
-  }
+  __ movzxbl(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
   __ bind(&done);
   __ bind(deferred->exit());
 }
@@ -3369,10 +3386,10 @@
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 LEnvironment* env) {
-  NearLabel load_smi, done;
+  Label load_smi, done;
 
   // Smi check.
-  __ JumpIfSmi(input_reg, &load_smi);
+  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
 
   // Heap number map check.
   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3380,21 +3397,22 @@
   if (deoptimize_on_undefined) {
     DeoptimizeIf(not_equal, env);
   } else {
-    NearLabel heap_number;
-    __ j(equal, &heap_number);
+    Label heap_number;
+    __ j(equal, &heap_number, Label::kNear);
+
     __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
     DeoptimizeIf(not_equal, env);
 
     // Convert undefined to NaN. Compute NaN as 0/0.
-    __ xorpd(result_reg, result_reg);
+    __ xorps(result_reg, result_reg);
     __ divsd(result_reg, result_reg);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
   }
   // Heap number to XMM conversion.
   __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
   __ bind(&load_smi);
@@ -3415,7 +3433,7 @@
 
 
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  NearLabel done, heap_number;
+  Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
 
   // Heap number map check.
@@ -3423,13 +3441,13 @@
                  Heap::kHeapNumberMapRootIndex);
 
   if (instr->truncating()) {
-    __ j(equal, &heap_number);
+    __ j(equal, &heap_number, Label::kNear);
     // Check for undefined. Undefined is converted to zero for truncating
     // conversions.
     __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
     DeoptimizeIf(not_equal, instr->environment());
     __ Set(input_reg, 0);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
 
@@ -3504,7 +3522,7 @@
     __ cvttsd2siq(result_reg, input_reg);
     __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
     __ cmpq(result_reg, kScratchRegister);
-      DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr->environment());
   } else {
     __ cvttsd2si(result_reg, input_reg);
     __ cvtlsi2sd(xmm0, result_reg);
@@ -3512,11 +3530,11 @@
     DeoptimizeIf(not_equal, instr->environment());
     DeoptimizeIf(parity_even, instr->environment());  // NaN.
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      NearLabel done;
+      Label done;
       // The integer converted back is equal to the original. We
       // only have to test if we got -0 as an input.
       __ testl(result_reg, result_reg);
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       __ movmskpd(result_reg, input_reg);
       // Bit 0 contains the sign of the double in input_reg.
       // If input was positive, we are ok and return 0, otherwise
@@ -3545,30 +3563,45 @@
 
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   Register input = ToRegister(instr->InputAt(0));
-  InstanceType first = instr->hydrogen()->first();
-  InstanceType last = instr->hydrogen()->last();
 
   __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
 
-  // If there is only one type in the interval check for equality.
-  if (first == last) {
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
     __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
             Immediate(static_cast<int8_t>(first)));
-    DeoptimizeIf(not_equal, instr->environment());
-  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
-    // String has a dedicated bit in instance type.
-    __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-             Immediate(kIsNotStringMask));
-    DeoptimizeIf(not_zero, instr->environment());
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(not_equal, instr->environment());
+    } else {
+      DeoptimizeIf(below, instr->environment());
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+                Immediate(static_cast<int8_t>(last)));
+        DeoptimizeIf(above, instr->environment());
+      }
+    }
   } else {
-    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-            Immediate(static_cast<int8_t>(first)));
-    DeoptimizeIf(below, instr->environment());
-    // Omit check for the last type.
-    if (last != LAST_TYPE) {
-      __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-              Immediate(static_cast<int8_t>(last)));
-      DeoptimizeIf(above, instr->environment());
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+               Immediate(mask));
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+    } else {
+      __ movzxbl(kScratchRegister,
+                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+      __ andb(kScratchRegister, Immediate(mask));
+      __ cmpb(kScratchRegister, Immediate(tag));
+      DeoptimizeIf(not_equal, instr->environment());
     }
   }
 }
@@ -3592,6 +3625,57 @@
 }
 
 
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  Register temp_reg = ToRegister(instr->TempAt(0));
+  __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register value_reg = ToRegister(instr->result());
+  __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register input_reg = ToRegister(instr->unclamped());
+  Register temp_reg = ToRegister(instr->TempAt(0));
+  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
+  Label is_smi, done, heap_number;
+
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number
+  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  __ j(equal, &heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ Cmp(input_reg, factory()->undefined_value());
+  DeoptimizeIf(not_equal, instr->environment());
+  __ movq(input_reg, Immediate(0));
+  __ jmp(&done, Label::kNear);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
+  __ jmp(&done, Label::kNear);
+
+  // smi
+  __ bind(&is_smi);
+  __ SmiToInteger32(input_reg, input_reg);
+  __ ClampUint8(input_reg);
+
+  __ bind(&done);
+}
+
+
 void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
   if (heap()->InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
@@ -3684,7 +3768,7 @@
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  NearLabel materialized;
+  Label materialized;
   // Registers will be used as follows:
   // rdi = JS function.
   // rcx = literals array.
@@ -3696,7 +3780,7 @@
       instr->hydrogen()->literal_index() * kPointerSize;
   __ movq(rbx, FieldOperand(rcx, literal_offset));
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &materialized);
+  __ j(not_equal, &materialized, Label::kNear);
 
   // Create regexp literal using runtime function
   // Result will be in rax.
@@ -3758,54 +3842,19 @@
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
   LOperand* input = instr->InputAt(0);
-  if (input->IsConstantOperand()) {
-    __ Push(ToHandle(LConstantOperand::cast(input)));
-  } else if (input->IsRegister()) {
-    __ push(ToRegister(input));
-  } else {
-    ASSERT(input->IsStackSlot());
-    __ push(ToOperand(input));
-  }
+  EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
 
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Label true_label;
-  Label false_label;
-  NearLabel done;
-
-  Condition final_branch_condition = EmitTypeofIs(&true_label,
-                                                  &false_label,
-                                                  input,
-                                                  instr->type_literal());
-  __ j(final_branch_condition, &true_label);
-  __ bind(&false_label);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&true_label);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-  __ bind(&done);
-}
-
-
-void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
-  ASSERT(operand->IsConstantOperand());
-  LConstantOperand* const_op = LConstantOperand::cast(operand);
-  Handle<Object> literal = chunk_->LookupLiteral(const_op);
-  Representation r = chunk_->LookupLiteralRepresentation(const_op);
-  if (r.IsInteger32()) {
-    ASSERT(literal->IsNumber());
-    __ push(Immediate(static_cast<int32_t>(literal->Number())));
-  } else if (r.IsDouble()) {
-    Abort("unsupported double immediate");
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  ASSERT(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    __ Push(ToHandle(LConstantOperand::cast(operand)));
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
   } else {
-    ASSERT(r.IsTagged());
-    __ Push(literal);
+    __ push(ToOperand(operand));
   }
 }
 
@@ -3852,6 +3901,10 @@
     __ CompareRoot(input, Heap::kFalseValueRootIndex);
     final_branch_condition = equal;
 
+  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    final_branch_condition = equal;
+
   } else if (type_name->Equals(heap()->undefined_symbol())) {
     __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
     __ j(equal, true_label);
@@ -3864,17 +3917,19 @@
 
   } else if (type_name->Equals(heap()->function_symbol())) {
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
     final_branch_condition = above_equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
-    __ CompareRoot(input, Heap::kNullValueRootIndex);
-    __ j(equal, true_label);
-    __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
+    if (!FLAG_harmony_typeof) {
+      __ CompareRoot(input, Heap::kNullValueRootIndex);
+      __ j(equal, true_label);
+    }
+    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
     __ j(below, false_label);
-    __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
-    __ j(above_equal, false_label);
+    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ j(above, false_label);
     // Check for undetectable objects => false.
     __ testb(FieldOperand(input, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
@@ -3889,26 +3944,6 @@
 }
 
 
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
-  Register result = ToRegister(instr->result());
-  NearLabel true_label;
-  NearLabel false_label;
-  NearLabel done;
-
-  EmitIsConstructCall(result);
-  __ j(equal, &true_label);
-
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&true_label);
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-
-  __ bind(&done);
-}
-
-
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp = ToRegister(instr->TempAt(0));
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -3924,10 +3959,10 @@
   __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
 
   // Skip the arguments adaptor frame if it exists.
-  NearLabel check_frame_marker;
+  Label check_frame_marker;
   __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &check_frame_marker);
+  __ j(not_equal, &check_frame_marker, Label::kNear);
   __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
@@ -3937,9 +3972,28 @@
 }
 
 
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+    while (padding_size > 0) {
+      int nop_size = padding_size > 9 ? 9 : padding_size;
+      __ nop(nop_size);
+      padding_size -= nop_size;
+    }
+  }
+}
+
+
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  // No code for lazy bailout instruction. Used to capture environment after a
-  // call for populating the safepoint data with deoptimization data.
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
@@ -3951,45 +4005,88 @@
 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
-  // Push object.
-  if (obj->IsRegister()) {
-    __ push(ToRegister(obj));
-  } else {
-    __ push(ToOperand(obj));
-  }
-  // Push key.
-  if (key->IsConstantOperand()) {
-    EmitPushConstantOperand(key);
-  } else if (key->IsRegister()) {
-    __ push(ToRegister(key));
-  } else {
-    __ push(ToOperand(key));
-  }
+  EmitPushTaggedOperand(obj);
+  EmitPushTaggedOperand(key);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
   // Create safepoint generator that will also ensure enough space in the
   // reloc info for patching in deoptimization (since this is invoking a
   // builtin)
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
   __ Push(Smi::FromInt(strict_mode_flag()));
-  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  EmitPushTaggedOperand(key);
+  EmitPushTaggedOperand(obj);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  // Perform stack overflow check.
-  NearLabel done;
-  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &done);
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LStackCheck* instr_;
+  };
 
-  StackCheckStub stub;
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ bind(&done);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(above_equal, &done, Label::kNear);
+    StackCheckStub stub;
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    last_lazy_deopt_pc_ = masm()->pc_offset();
+    __ bind(&done);
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  } else {
+    ASSERT(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new DeferredStackCheck(this, instr);
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(below, deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    last_lazy_deopt_pc_ = masm()->pc_offset();
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and the safepoint
+    // in the deferred code.
+  }
 }
 
 
@@ -4004,7 +4101,7 @@
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(osr_pc_offset_ == -1);
   osr_pc_offset_ = masm()->pc_offset();
 }
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index d95ab21..43c045f 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -60,6 +60,7 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
+        last_lazy_deopt_pc_(0),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -94,14 +95,15 @@
   void DoDeferredNumberTagD(LNumberTagD* instr);
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
-  void DoDeferredStackCheck(LGoto* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                        Label* map_check);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
 
   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -133,7 +135,6 @@
   HGraph* graph() const { return chunk_->graph(); }
 
   int GetNextEmittedBlock(int block);
-  LInstruction* GetNextInstruction();
 
   void EmitClassOfTest(Label* if_true,
                        Label* if_false,
@@ -141,8 +142,8 @@
                        Register input,
                        Register temporary);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -193,14 +194,16 @@
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
-                         LInstruction* instr);
+                         LInstruction* instr,
+                         CallKind call_kind);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr,
-                                  SafepointMode safepoint_mode,
-                                  int argc);
-  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode,
+                                    int argc);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
   void AddToTranslation(Translation* translation,
@@ -213,6 +216,11 @@
 
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
+  Operand BuildFastArrayOperand(
+      LOperand* elements_pointer,
+      LOperand* key,
+      ElementsKind elements_kind,
+      uint32_t offset);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -229,19 +237,16 @@
   void RecordSafepoint(LPointerMap* pointers,
                        Safepoint::Kind kind,
                        int arguments,
-                       int deoptimization_index);
-  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
-  void RecordSafepoint(int deoptimization_index);
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
-                                    int deoptimization_index);
+                                    Safepoint::DeoptMode mode);
   void RecordPosition(int position);
-  int LastSafepointEnd() {
-    return static_cast<int>(safepoints_.GetPcAfterGap());
-  }
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
-  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
@@ -266,13 +271,14 @@
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
 
-  void EmitLoadField(Register result,
-                     Register object,
-                     Handle<Map> type,
-                     Handle<String> name);
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
 
-  // Emits code for pushing a constant operand.
-  void EmitPushConstantOperand(LOperand* operand);
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
 
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
@@ -282,6 +288,8 @@
     Address address;
   };
 
+  void EnsureSpaceForLazyDeopt(int space_needed);
+
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -298,6 +306,7 @@
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
+  int last_lazy_deopt_pc_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index cedd025..c3c617c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -214,7 +214,7 @@
   } else if (source->IsDoubleRegister()) {
     XMMRegister src = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
-      __ movsd(cgen_->ToDoubleRegister(destination), src);
+      __ movaps(cgen_->ToDoubleRegister(destination), src);
     } else {
       ASSERT(destination->IsDoubleStackSlot());
       __ movsd(cgen_->ToOperand(destination), src);
@@ -273,9 +273,9 @@
     // Swap two double registers.
     XMMRegister source_reg = cgen_->ToDoubleRegister(source);
     XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
-    __ movsd(xmm0, source_reg);
-    __ movsd(source_reg, destination_reg);
-    __ movsd(destination_reg, xmm0);
+    __ movaps(xmm0, source_reg);
+    __ movaps(source_reg, destination_reg);
+    __ movaps(destination_reg, xmm0);
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // Swap a double register and a double stack slot.
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 4601cd9..5fc5646 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -71,22 +71,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
-  for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
-  for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -114,21 +113,18 @@
 template<int R, int I, int T>
 void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  inputs_.PrintOperandsTo(stream);
+  for (int i = 0; i < inputs_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    inputs_[i]->PrintTo(stream);
+  }
 }
 
 
 template<int R, int I, int T>
 void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
-  for (int i = 0; i < N; i++) {
+  for (int i = 0; i < results_.length(); i++) {
     if (i > 0) stream->Add(" ");
-    elems_[i]->PrintTo(stream);
+    results_[i]->PrintTo(stream);
   }
 }
 
@@ -240,6 +236,13 @@
 }
 
 
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -264,12 +267,6 @@
 }
 
 
-void LTypeofIs::PrintDataTo(StringStream* stream) {
-  InputAt(0)->PrintTo(stream);
-  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
 void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if typeof ");
   InputAt(0)->PrintTo(stream);
@@ -303,19 +300,26 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[rcx] #%d / ", arity());
 }
 
 
 void LCallNamed::PrintDataTo(StringStream* stream) {
-  SmartPointer<char> name_string = name()->ToCString();
+  SmartArrayPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
 
 void LCallGlobal::PrintDataTo(StringStream* stream) {
-  SmartPointer<char> name_string = name()->ToCString();
+  SmartArrayPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
@@ -332,13 +336,6 @@
 }
 
 
-void LClassOfTest::PrintDataTo(StringStream* stream) {
-  stream->Add("= class_of_test(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
 void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
   arguments()->PrintTo(stream);
 
@@ -380,8 +377,7 @@
     LLabel* label = LLabel::cast(first_instr);
     if (last_instr->IsGoto()) {
       LGoto* goto_instr = LGoto::cast(last_instr);
-      if (!goto_instr->include_stack_check() &&
-          label->IsRedundant() &&
+      if (label->IsRedundant() &&
           !label->is_loop_header()) {
         bool can_eliminate = true;
         for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -432,6 +428,15 @@
 }
 
 
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
 void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add("[");
@@ -442,7 +447,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LGap* gap = new LGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -534,7 +539,8 @@
 
 void LChunkBuilder::Abort(const char* format, ...) {
   if (FLAG_trace_bailout) {
-    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
     PrintF("Aborting LChunk building in @\"%s\": ", *name);
     va_list arguments;
     va_start(arguments, format);
@@ -790,6 +796,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new LDeoptimize);
 }
@@ -830,11 +841,11 @@
   }
 
   ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
-  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
-  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->left());
 
-  HValue* right_value = instr->OperandAt(1);
+  HValue* right_value = instr->right();
   LOperand* right = NULL;
   int constant_value = 0;
   if (right_value->IsConstant()) {
@@ -845,24 +856,22 @@
     right = UseFixed(right_value, rcx);
   }
 
-  // Shift operations can only deoptimize if we do a logical shift
-  // by 0 and the result cannot be truncated to int32.
-  bool can_deopt = (op == Token::SHR && constant_value == 0);
-  if (can_deopt) {
-    bool can_truncate = true;
-    for (int i = 0; i < instr->uses()->length(); i++) {
-      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
-        can_truncate = false;
+  // Shift operations can only deoptimize if we do a logical shift by 0 and
+  // the result cannot be truncated to int32.
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
         break;
       }
     }
-    can_deopt = !can_truncate;
   }
 
-  LShiftI* result = new LShiftI(op, left, right, can_deopt);
-  return can_deopt
-      ? AssignEnvironment(DefineSameAsFirst(result))
-      : DefineSameAsFirst(result);
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
 }
 
 
@@ -973,18 +982,7 @@
     if (FLAG_stress_environments && !instr->HasEnvironment()) {
       instr = AssignEnvironment(instr);
     }
-    if (current->IsTest() && !instr->IsGoto()) {
-      ASSERT(instr->IsControl());
-      HTest* test = HTest::cast(current);
-      instr->set_hydrogen_value(test->value());
-      HBasicBlock* first = test->FirstSuccessor();
-      HBasicBlock* second = test->SecondSuccessor();
-      ASSERT(first != NULL && second != NULL);
-      instr->SetBranchTargets(first->block_id(), second->block_id());
-    } else {
-      instr->set_hydrogen_value(current);
-    }
-
+    instr->set_hydrogen_value(current);
     chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
@@ -1006,6 +1004,8 @@
                                           outer);
   int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
     HValue* value = hydrogen_env->values()->at(i);
     LOperand* op = NULL;
     if (value->IsArgumentsObject()) {
@@ -1023,108 +1023,21 @@
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
-                            instr->include_stack_check());
-  return (instr->include_stack_check())
-      ? AssignPointerMap(result)
-      : result;
+  return new LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
   HValue* v = instr->value();
   if (v->EmitAtUses()) {
-    if (v->IsClassOfTest()) {
-      HClassOfTest* compare = HClassOfTest::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
-                                       TempRegister());
-    } else if (v->IsCompare()) {
-      HCompare* compare = HCompare::cast(v);
-      Token::Value op = compare->token();
-      HValue* left = compare->left();
-      HValue* right = compare->right();
-      Representation r = compare->GetInputRepresentation();
-      if (r.IsInteger32()) {
-        ASSERT(left->representation().IsInteger32());
-        ASSERT(right->representation().IsInteger32());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseOrConstantAtStart(right));
-      } else if (r.IsDouble()) {
-        ASSERT(left->representation().IsDouble());
-        ASSERT(right->representation().IsDouble());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else {
-        ASSERT(left->representation().IsTagged());
-        ASSERT(right->representation().IsTagged());
-        bool reversed = op == Token::GT || op == Token::LTE;
-        LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
-        LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
-        LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
-                                                    right_operand);
-        return MarkAsCall(result, instr);
-      }
-    } else if (v->IsIsSmi()) {
-      HIsSmi* compare = HIsSmi::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsSmiAndBranch(Use(compare->value()));
-    } else if (v->IsHasInstanceType()) {
-      HHasInstanceType* compare = HHasInstanceType::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasInstanceTypeAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsHasCachedArrayIndex()) {
-      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasCachedArrayIndexAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsNull()) {
-      HIsNull* compare = HIsNull::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      // We only need a temp register for non-strict compare.
-      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
-      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
-                                  temp);
-    } else if (v->IsIsObject()) {
-      HIsObject* compare = HIsObject::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-      return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
-    } else if (v->IsCompareJSObjectEq()) {
-      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
-      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
-                                         UseRegisterAtStart(compare->right()));
-    } else if (v->IsInstanceOf()) {
-      HInstanceOf* instance_of = HInstanceOf::cast(v);
-      LInstanceOfAndBranch* result =
-          new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
-                                   UseFixed(instance_of->right(), rdx));
-      return MarkAsCall(result, instr);
-    } else if (v->IsTypeofIs()) {
-      HTypeofIs* typeof_is = HTypeofIs::cast(v);
-      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
-    } else if (v->IsIsConstructCall()) {
-      return new LIsConstructCallAndBranch(TempRegister());
-    } else {
-      if (v->IsConstant()) {
-        if (HConstant::cast(v)->ToBoolean()) {
-          return new LGoto(instr->FirstSuccessor()->block_id());
-        } else {
-          return new LGoto(instr->SecondSuccessor()->block_id());
-        }
-      }
-      Abort("Undefined compare before branch");
-      return NULL;
-    }
+    ASSERT(v->IsConstant());
+    ASSERT(!v->representation().IsDouble());
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
   }
-  return new LBranch(UseRegisterAtStart(v));
+  return AssignEnvironment(new LBranch(UseRegister(v)));
 }
 
 
@@ -1156,7 +1069,7 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax),
+      new LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
                                  FixedTemp(rdi));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
@@ -1182,8 +1095,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
@@ -1211,6 +1129,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1464,71 +1390,83 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
   Token::Value op = instr->token();
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  bool reversed = (op == Token::GT || op == Token::LTE);
+  LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+  LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+  LCmpT* result = new LCmpT(left, right);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+    HCompareIDAndBranch* instr) {
   Representation r = instr->GetInputRepresentation();
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(left, right));
-  } else if (r.IsDouble()) {
+    return new LCmpIDAndBranch(left, right);
+  } else {
+    ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(left, right));
-  } else {
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-    bool reversed = (op == Token::GT || op == Token::LTE);
-    LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
-    LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
-    LCmpT* result = new LCmpT(left, right);
-    return MarkAsCall(DefineFixed(result, rax), instr);
+    return new LCmpIDAndBranch(left, right);
   }
 }
 
 
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
-    HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
-  return DefineAsRegister(result);
+  return new LCmpObjectEqAndBranch(left, right);
 }
 
 
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new LIsNull(value));
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+    HCompareConstantEqAndBranch* instr) {
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegister(instr->value());
-
-  return DefineAsRegister(new LIsObject(value));
+  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseAtStart(instr->value());
-
-  return DefineAsRegister(new LIsSmi(value));
+  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
+  return new LIsSmiAndBranch(Use(instr->value()));
+}
 
-  return DefineAsRegister(new LHasInstanceType(value));
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
@@ -1541,17 +1479,17 @@
 }
 
 
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
-    HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegister(instr->value());
-  return DefineAsRegister(new LHasCachedArrayIndex(value));
+  return new LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
-  Abort("Unimplemented: %s", "DoClassOfTest");
-  return NULL;
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister());
 }
 
 
@@ -1561,16 +1499,16 @@
 }
 
 
-LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+    HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayLength(array));
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
 }
 
 
-LInstruction* LChunkBuilder::DoExternalArrayLength(
-    HExternalArrayLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LExternalArrayLength(array));
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+  LOperand* object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LElementsKind(object));
 }
 
 
@@ -1582,8 +1520,9 @@
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
-                                            Use(instr->length())));
+  return AssignEnvironment(new LBoundsCheck(
+      UseRegisterOrConstantAtStart(instr->index()),
+      Use(instr->length())));
 }
 
 
@@ -1600,6 +1539,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
@@ -1613,8 +1565,8 @@
       LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       if (needs_check) {
-        LOperand* xmm_temp = instr->CanTruncateToInt32() ? NULL
-                                                         : FixedTemp(xmm1);
+        bool truncating = instr->CanTruncateToInt32();
+        LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
         LTaggedToI* res = new LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
@@ -1694,6 +1646,53 @@
 }
 
 
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new LClampDToUint8(reg,
+                                               TempRegister()));
+  } else if (input_rep.IsInteger32()) {
+    return DefineSameAsFirst(new LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LClampTToUint8* result = new LClampTToUint8(reg,
+                                                TempRegister(),
+                                                FixedTemp(xmm1));
+    return AssignEnvironment(DefineSameAsFirst(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return AssignEnvironment(DefineAsRegister(new LDoubleToI(reg)));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a noop.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* reg = UseRegister(value);
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LOperand* xmm_temp =
+        CpuFeatures::IsSupported(SSE3)
+        ? NULL
+        : FixedTemp(xmm1);
+    return AssignEnvironment(
+        DefineSameAsFirst(new LTaggedToI(reg, xmm_temp)));
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   return new LReturn(UseFixed(instr->value(), rax));
 }
@@ -1822,27 +1821,44 @@
   ASSERT(instr->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
-  return AssignEnvironment(DefineSameAsFirst(result));
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+    HLoadKeyedFastDoubleElement* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LLoadKeyedFastDoubleElement* result =
+      new LLoadKeyedFastDoubleElement(elements, key);
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
-  ExternalArrayType array_type = instr->array_type();
+  ElementsKind elements_kind = instr->elements_kind();
   Representation representation(instr->representation());
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
       new LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
-  return (array_type == kExternalUnsignedIntArray) ?
+  return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
       AssignEnvironment(load_instr) : load_instr;
 }
 
@@ -1875,22 +1891,42 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+    HStoreKeyedFastDoubleElement* instr) {
+  ASSERT(instr->value()->representation().IsDouble());
+  ASSERT(instr->elements()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* val = UseTempRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
   Representation representation(instr->value()->representation());
-  ExternalArrayType array_type = instr->array_type();
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ElementsKind elements_kind = instr->elements_kind();
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  bool val_is_temp_register = array_type == kExternalPixelArray ||
-      array_type == kExternalFloatArray;
+  bool val_is_temp_register =
+      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
   LOperand* val = val_is_temp_register
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
@@ -1941,9 +1977,16 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseOrConstantAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
   LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
@@ -1984,7 +2027,8 @@
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LDeleteProperty* result =
-      new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+      new LDeleteProperty(UseAtStart(instr->object()),
+                          UseOrConstantAtStart(instr->key()));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2049,13 +2093,14 @@
 }
 
 
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
-  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
-  return DefineAsRegister(new LIsConstructCall);
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2091,7 +2136,12 @@
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
-  return MarkAsCall(new LStackCheck, instr);
+  if (instr->is_function_entry()) {
+    return MarkAsCall(new LStackCheck, instr);
+  } else {
+    ASSERT(instr->is_backwards_branch());
+    return AssignEnvironment(AssignPointerMap(new LStackCheck));
+  }
 }
 
 
@@ -2100,8 +2150,8 @@
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->function(),
-                                               false,
-                                               undefined);
+                                               undefined,
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2114,6 +2164,15 @@
   return NULL;
 }
 
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseOrConstantAtStart(instr->key());
+  LOperand* object = UseOrConstantAtStart(instr->object());
+  LIn* result = new LIn(key, object);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 8e12282..d169bf6 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -32,6 +32,7 @@
 #include "lithium-allocator.h"
 #include "lithium.h"
 #include "safepoint-table.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
@@ -73,15 +74,15 @@
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
-  V(ClassOfTest)                                \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
   V(ClassOfTestAndBranch)                       \
-  V(CmpID)                                      \
+  V(CmpConstantEqAndBranch)                     \
   V(CmpIDAndBranch)                             \
-  V(CmpJSObjectEq)                              \
-  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpObjectEqAndBranch)                       \
   V(CmpMapAndBranch)                            \
   V(CmpT)                                       \
-  V(CmpTAndBranch)                              \
   V(ConstantD)                                  \
   V(ConstantI)                                  \
   V(ConstantT)                                  \
@@ -90,43 +91,42 @@
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
-  V(ExternalArrayLength)                        \
-  V(FixedArrayLength)                           \
+  V(ElementsKind)                               \
+  V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
-  V(Gap)                                        \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
-  V(HasInstanceType)                            \
-  V(HasInstanceTypeAndBranch)                   \
-  V(HasCachedArrayIndex)                        \
   V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
   V(InstanceOf)                                 \
-  V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
-  V(IsNull)                                     \
+  V(InvokeFunction)                             \
+  V(IsConstructCallAndBranch)                   \
   V(IsNullAndBranch)                            \
-  V(IsObject)                                   \
   V(IsObjectAndBranch)                          \
-  V(IsSmi)                                      \
   V(IsSmiAndBranch)                             \
+  V(IsUndetectableAndBranch)                    \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
+  V(LoadFunctionPrototype)                      \
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
+  V(LoadKeyedFastDoubleElement)                 \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
-  V(LoadFunctionPrototype)                      \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -147,42 +147,38 @@
   V(StoreContextSlot)                           \
   V(StoreGlobalCell)                            \
   V(StoreGlobalGeneric)                         \
+  V(StoreKeyedFastDoubleElement)                \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
   V(SubI)                                       \
   V(TaggedToI)                                  \
-  V(ToFastProperties)                           \
+  V(ThisFunction)                               \
   V(Throw)                                      \
+  V(ToFastProperties)                           \
   V(Typeof)                                     \
-  V(TypeofIs)                                   \
   V(TypeofIsAndBranch)                          \
-  V(IsConstructCall)                            \
-  V(IsConstructCallAndBranch)                   \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
   V(ValueOf)
 
 
-#define DECLARE_INSTRUCTION(type)                \
-  virtual bool Is##type() const { return true; } \
-  static L##type* cast(LInstruction* instr) {    \
-    ASSERT(instr->Is##type());                   \
-    return reinterpret_cast<L##type*>(instr);    \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
   }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
-  virtual void CompileToNative(LCodeGen* generator);        \
-  virtual const char* Mnemonic() const { return mnemonic; } \
-  DECLARE_INSTRUCTION(type)
-
-
 #define DECLARE_HYDROGEN_ACCESSOR(type)     \
   H##type* hydrogen() const {               \
     return H##type::cast(hydrogen_value()); \
@@ -205,13 +201,27 @@
   virtual void PrintDataTo(StringStream* stream) = 0;
   virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
 
   virtual bool IsControl() const { return false; }
-  virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
 
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
@@ -266,37 +276,6 @@
 };
 
 
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
-  OperandContainer() {
-    for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
-  }
-  int length() { return NumElements; }
-  ElementType& operator[](int i) {
-    ASSERT(i < length());
-    return elems_[i];
-  }
-  void PrintOperandsTo(StringStream* stream);
-
- private:
-  ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
-  int length() { return 0; }
-  void PrintOperandsTo(StringStream* stream) { }
-  ElementType& operator[](int i) {
-    UNREACHABLE();
-    static ElementType t = 0;
-    return t;
-  }
-};
-
-
 // R = number of result operands (0 or 1).
 // I = number of input operands.
 // T = number of temporary operands.
@@ -319,9 +298,9 @@
   virtual void PrintOutputOperandTo(StringStream* stream);
 
  protected:
-  OperandContainer<LOperand*, R> results_;
-  OperandContainer<LOperand*, I> inputs_;
-  OperandContainer<LOperand*, T> temps_;
+  EmbeddedContainer<LOperand*, R> results_;
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
 };
 
 
@@ -335,8 +314,13 @@
     parallel_moves_[AFTER] = NULL;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  // Can't use the DECLARE-macro here because of sub-classes.
+  virtual bool IsGap() const { return true; }
   virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
 
   bool IsRedundant() const;
 
@@ -366,21 +350,26 @@
 };
 
 
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
 class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
-  LGoto(int block_id, bool include_stack_check = false)
-    : block_id_(block_id), include_stack_check_(include_stack_check) { }
+  explicit LGoto(int block_id) : block_id_(block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
   virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int block_id() const { return block_id_; }
-  bool include_stack_check() const { return include_stack_check_; }
 
  private:
   int block_id_;
-  bool include_stack_check_;
 };
 
 
@@ -454,19 +443,17 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
-  DECLARE_INSTRUCTION(ControlInstruction)
   virtual bool IsControl() const { return true; }
 
-  int true_block_id() const { return true_block_id_; }
-  int false_block_id() const { return false_block_id_; }
-  void SetBranchTargets(int true_block_id, int false_block_id) {
-    true_block_id_ = true_block_id;
-    false_block_id_ = false_block_id;
-  }
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
 
  private:
-  int true_block_id_;
-  int false_block_id_;
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
 };
 
 
@@ -565,23 +552,6 @@
 };
 
 
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
-  LCmpID(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
-
-  Token::Value op() const { return hydrogen()->token(); }
-  bool is_double() const {
-    return hydrogen()->GetInputRepresentation().IsDouble();
-  }
-};
-
-
 class LCmpIDAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -590,7 +560,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
+  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
 
   Token::Value op() const { return hydrogen()->token(); }
   bool is_double() const {
@@ -615,39 +585,27 @@
 };
 
 
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
  public:
-  LCmpJSObjectEq(LOperand* left, LOperand* right) {
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+                               "cmp-object-eq-and-branch")
 };
 
 
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
  public:
-  LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+  explicit LCmpConstantEqAndBranch(LOperand* left) {
     inputs_[0] = left;
-    inputs_[1] = right;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
-                               "cmp-jsobject-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LIsNull(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
-  DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+                               "cmp-constant-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
 };
 
 
@@ -659,7 +617,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNull)
+  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
 
   bool is_strict() const { return hydrogen()->is_strict(); }
 
@@ -667,16 +625,6 @@
 };
 
 
-class LIsObject: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LIsObject(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
 class LIsObjectAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsObjectAndBranch(LOperand* value) {
@@ -684,22 +632,12 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LIsSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
-  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -707,19 +645,24 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
  public:
-  explicit LHasInstanceType(LOperand* value) {
+  explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
 };
 
 
@@ -731,7 +674,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
                                "has-instance-type-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -748,17 +691,6 @@
 };
 
 
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LHasCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
 class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -767,19 +699,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                                "has-cached-array-index-and-branch")
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
-  LClassOfTest(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -794,7 +714,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
                                "class-of-test-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -808,23 +728,23 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
   Token::Value op() const { return hydrogen()->token(); }
 };
 
 
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
+class LIn: public LTemplateInstruction<1, 2, 0> {
  public:
-  LCmpTAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(Compare)
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
 
-  Token::Value op() const { return hydrogen()->token(); }
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
 };
 
 
@@ -839,17 +759,6 @@
 };
 
 
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
-  LInstanceOfAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
 class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
  public:
   LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@@ -965,7 +874,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-  DECLARE_HYDROGEN_ACCESSOR(Value)
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1003,25 +912,26 @@
 };
 
 
-class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LExternalArrayLength(LOperand* value) {
+  explicit LFixedArrayBaseLength(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+                               "fixed-array-base-length")
+  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
 };
 
 
-class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LFixedArrayLength(LOperand* value) {
+  explicit LElementsKind(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
+  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
 };
 
 
@@ -1090,6 +1000,7 @@
 
   Token::Value op() const { return op_; }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1106,6 +1017,7 @@
     inputs_[1] = right;
   }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1213,6 +1125,22 @@
 };
 
 
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+                               "load-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1227,8 +1155,8 @@
 
   LOperand* external_pointer() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  ExternalArrayType array_type() const {
-    return hydrogen()->array_type();
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
   }
 };
 
@@ -1345,6 +1273,11 @@
 };
 
 
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
 class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1393,6 +1326,23 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallKeyed(LOperand* key) {
@@ -1521,7 +1471,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
 
   bool truncating() { return hydrogen()->CanTruncateToInt32(); }
 };
@@ -1536,7 +1486,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
 
   bool truncating() { return hydrogen()->CanTruncateToInt32(); }
 };
@@ -1642,6 +1592,28 @@
 };
 
 
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastDoubleElement(LOperand* elements,
+                               LOperand* key,
+                               LOperand* val) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+                               "store-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
 class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1659,8 +1631,8 @@
   LOperand* external_pointer() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  ExternalArrayType array_type() const {
-    return hydrogen()->array_type();
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
   }
 };
 
@@ -1685,6 +1657,21 @@
 };
 
 
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
 class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharCodeAt(LOperand* string, LOperand* index) {
@@ -1783,6 +1770,47 @@
 };
 
 
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LClampTToUint8(LOperand* value,
+                 LOperand* temp,
+                 LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
 class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
@@ -1844,21 +1872,6 @@
 };
 
 
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LTypeofIs(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
-  Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
@@ -1866,7 +1879,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
@@ -1874,13 +1887,6 @@
 };
 
 
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
-  DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
 class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -1889,6 +1895,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
                                "is-construct-call-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
 };
 
 
@@ -1932,6 +1939,12 @@
 class LStackCheck: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
 };
 
 
@@ -2111,14 +2124,18 @@
   template<int I, int T>
       LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
                                       XMMRegister reg);
+  // Assigns an environment to an instruction.  An instruction which can
+  // deoptimize must have an environment.
   LInstruction* AssignEnvironment(LInstruction* instr);
+  // Assigns a pointer map to an instruction.  An instruction which can
+  // trigger a GC or a lazy deoptimization must have a pointer map.
   LInstruction* AssignPointerMap(LInstruction* instr);
 
   enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
 
-  // By default we assume that instruction sequences generated for calls
-  // cannot deoptimize eagerly and we do not attach environment to this
-  // instruction.
+  // Marks a call for the register allocator.  Assigns a pointer map to
+  // support GC and lazy deoptimization.  Assigns an environment to support
+  // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
   LInstruction* MarkAsCall(
       LInstruction* instr,
       HInstruction* hinstr,
@@ -2158,7 +2175,6 @@
 };
 
 #undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 7f027f7..8fcad23 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -201,8 +201,8 @@
                                        Register scratch) {
   if (emit_debug_code()) {
     // Check that the object is not in new space.
-    NearLabel not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space);
+    Label not_in_new_space;
+    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
   }
@@ -221,6 +221,42 @@
 }
 
 
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch,
+                                Label::Distance near_jump) {
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    if (scratch.is(object)) {
+      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+      and_(scratch, kScratchRegister);
+    } else {
+      movq(scratch, ExternalReference::new_space_mask(isolate()));
+      and_(scratch, object);
+    }
+    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+    cmpq(scratch, kScratchRegister);
+    j(cc, branch, near_jump);
+  } else {
+    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+    intptr_t new_space_start =
+        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+    if (scratch.is(object)) {
+      addq(scratch, kScratchRegister);
+    } else {
+      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+    }
+    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+    j(cc, branch, near_jump);
+  }
+}
+
+
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
@@ -287,8 +323,8 @@
   Label done;
 
   if (emit_debug_code()) {
-    NearLabel okay;
-    JumpIfNotSmi(object, &okay);
+    Label okay;
+    JumpIfNotSmi(object, &okay, Label::kNear);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
 
@@ -344,13 +380,16 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
-    NearLabel ok;
+    Label ok;
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
-    j(equal, &ok);
+    j(equal, &ok, Label::kNear);
+    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+                Heap::kFixedDoubleArrayMapRootIndex);
+    j(equal, &ok, Label::kNear);
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
-    j(equal, &ok);
+    j(equal, &ok, Label::kNear);
     Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
   }
@@ -358,8 +397,8 @@
 
 
 void MacroAssembler::Check(Condition cc, const char* msg) {
-  NearLabel L;
-  j(cc, &L);
+  Label L;
+  j(cc, &L, Label::kNear);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -371,9 +410,9 @@
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
     ASSERT(IsPowerOf2(frame_alignment));
-    NearLabel alignment_as_expected;
+    Label alignment_as_expected;
     testq(rsp, Immediate(frame_alignment_mask));
-    j(zero, &alignment_as_expected);
+    j(zero, &alignment_as_expected, Label::kNear);
     // Abort if stack is not aligned.
     int3();
     bind(&alignment_as_expected);
@@ -384,9 +423,9 @@
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
-  NearLabel ok;
+  Label ok;
   testl(result, result);
-  j(not_zero, &ok);
+  j(not_zero, &ok, Label::kNear);
   testl(op, op);
   j(sign, then_label);
   bind(&ok);
@@ -425,9 +464,9 @@
 }
 
 
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
   ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
@@ -650,6 +689,7 @@
   Label leave_exit_frame;
   Label write_back;
 
+  Factory* factory = isolate()->factory();
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -697,7 +737,7 @@
 
   // Check if the function scheduled an exception.
   movq(rsi, scheduled_exception_address);
-  Cmp(Operand(rsi, 0), FACTORY->the_hole_value());
+  Cmp(Operand(rsi, 0), factory->the_hole_value());
   j(not_equal, &promote_scheduled_exception);
 
   LeaveApiExitFrame();
@@ -712,7 +752,7 @@
 
   bind(&empty_result);
   // It was zero; the result is undefined.
-  Move(rax, FACTORY->undefined_value());
+  Move(rax, factory->undefined_value());
   jmp(&prologue);
 
   // HandleScope limit has changed. Delete allocated extensions.
@@ -754,7 +794,7 @@
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
-                                   CallWrapper* call_wrapper) {
+                                   const CallWrapper& call_wrapper) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -763,7 +803,7 @@
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
   GetBuiltinEntry(rdx, id);
-  InvokeCode(rdx, expected, expected, flag, call_wrapper);
+  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
 
@@ -831,8 +871,8 @@
     if (allow_stub_calls()) {
       Assert(equal, "Uninitialized kSmiConstantRegister");
     } else {
-      NearLabel ok;
-      j(equal, &ok);
+      Label ok;
+      j(equal, &ok, Label::kNear);
       int3();
       bind(&ok);
     }
@@ -883,7 +923,7 @@
 
 
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   if (!dst.is(src)) {
     movl(dst, src);
   }
@@ -894,8 +934,8 @@
 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
   if (emit_debug_code()) {
     testb(dst, Immediate(0x01));
-    NearLabel ok;
-    j(zero, &ok);
+    Label ok;
+    j(zero, &ok, Label::kNear);
     if (allow_stub_calls()) {
       Abort("Integer32ToSmiField writing to non-smi location");
     } else {
@@ -921,7 +961,7 @@
 
 
 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   if (!dst.is(src)) {
     movq(dst, src);
   }
@@ -935,7 +975,7 @@
 
 
 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   if (!dst.is(src)) {
     movq(dst, src);
   }
@@ -1052,22 +1092,40 @@
 }
 
 
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+                                 Label* on_not_smis,
+                                 Label::Distance near_jump) {
+  if (dst.is(src1) || dst.is(src2)) {
+    ASSERT(!src1.is(kScratchRegister));
+    ASSERT(!src2.is(kScratchRegister));
+    movq(kScratchRegister, src1);
+    or_(kScratchRegister, src2);
+    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    or_(dst, src2);
+    JumpIfNotSmi(dst, on_not_smis, near_jump);
+  }
+}
+
+
 Condition MacroAssembler::CheckSmi(Register src) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   testb(src, Immediate(kSmiTagMask));
   return zero;
 }
 
 
 Condition MacroAssembler::CheckSmi(const Operand& src) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   testb(src, Immediate(kSmiTagMask));
   return zero;
 }
 
 
 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   // Test that both bits of the mask 0x8000000000000001 are zero.
   movq(kScratchRegister, src);
   rol(kScratchRegister, Immediate(1));
@@ -1080,7 +1138,7 @@
   if (first.is(second)) {
     return CheckSmi(first);
   }
-  ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
+  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
   leal(kScratchRegister, Operand(first, second, times_1, 0));
   testb(kScratchRegister, Immediate(0x03));
   return zero;
@@ -1162,6 +1220,95 @@
 }
 
 
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+                                            Label* on_invalid,
+                                            Label::Distance near_jump) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                Label* on_invalid,
+                                                Label::Distance near_jump) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register src,
+                               Label* on_smi,
+                               Label::Distance near_jump) {
+  Condition smi = CheckSmi(src);
+  j(smi, on_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src,
+                                  Label* on_not_smi,
+                                  Label::Distance near_jump) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessNonNegativeSmi(
+    Register src, Label* on_not_smi_or_negative,
+    Label::Distance near_jump) {
+  Condition non_negative_smi = CheckNonNegativeSmi(src);
+  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             Smi* constant,
+                                             Label* on_equals,
+                                             Label::Distance near_jump) {
+  SmiCompare(src, constant);
+  j(equal, on_equals, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      Label* on_not_both_smi,
+                                      Label::Distance near_jump) {
+  Condition both_smi = CheckBothSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
+                                                  Register src2,
+                                                  Label* on_not_both_smi,
+                                                  Label::Distance near_jump) {
+  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       Smi* constant,
+                                       Label* on_not_smi_result,
+                                       Label::Distance near_jump) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
+
+  JumpIfNotSmi(src, on_not_smi_result, near_jump);
+  Register tmp = (dst.is(src) ? kScratchRegister : dst);
+  LoadSmiConstant(tmp, constant);
+  addq(tmp, src);
+  j(overflow, on_not_smi_result, near_jump);
+  if (dst.is(src)) {
+    movq(dst, tmp);
+  }
+}
+
+
 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1218,6 +1365,30 @@
 }
 
 
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    Label* on_not_smi_result,
+                                    Label::Distance near_jump) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    LoadSmiConstant(dst, constant);
+    addq(dst, src);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1242,17 +1413,148 @@
 }
 
 
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    Label* on_not_smi_result,
+                                    Label::Distance near_jump) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result, near_jump);
+      LoadSmiConstant(kScratchRegister, constant);
+      subq(dst, kScratchRegister);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+      addq(kScratchRegister, dst);
+      j(overflow, on_not_smi_result, near_jump);
+      movq(dst, kScratchRegister);
+    }
+  } else {
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result, near_jump);
+      LoadSmiConstant(dst, constant);
+      // Adding and subtracting the min-value gives the same result; it only
+      // differs on the overflow bit, which we don't check here.
+      addq(dst, src);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+      addq(dst, src);
+      j(overflow, on_not_smi_result, near_jump);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            Label* on_smi_result,
+                            Label::Distance near_jump) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    movq(kScratchRegister, src);
+    neg(dst);  // Low 32 bits are retained as zero by negation.
+    // Test if result is zero or Smi::kMinValue.
+    cmpq(dst, kScratchRegister);
+    j(not_equal, on_smi_result, near_jump);
+    movq(src, kScratchRegister);
+  } else {
+    movq(dst, src);
+    neg(dst);
+    cmpq(dst, src);
+    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+    j(not_equal, on_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            const Operand& src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    ASSERT(!src2.AddressUsesRegister(dst));
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiAdd(Register dst,
                             Register src1,
                             Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible.
-  ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
-    movq(dst, src1);
+    if (emit_debug_code()) {
+      movq(kScratchRegister, src1);
+      addq(kScratchRegister, src2);
+      Check(no_overflow, "Smi addition overflow");
+    }
+    lea(dst, Operand(src1, src2, times_1, 0));
+  } else {
+    addq(dst, src2);
+    Assert(no_overflow, "Smi addition overflow");
   }
-  addq(dst, src2);
-  Assert(no_overflow, "Smi addition overflow");
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    cmpq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    subq(dst, src2);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
 }
 
 
@@ -1270,6 +1572,25 @@
 
 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
+                            const Operand& src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
+    j(overflow, on_not_smi_result, near_jump);
+    subq(src1, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
                             const Operand& src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible (e.g., subtracting two positive smis).
@@ -1281,6 +1602,180 @@
 }
 
 
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!dst.is(src2));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+
+  if (dst.is(src1)) {
+    Label failure, zero_correct_result;
+    movq(kScratchRegister, src1);  // Create backup for later testing.
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, &failure, Label::kNear);
+
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result, Label::kNear);
+
+    movq(dst, kScratchRegister);
+    xor_(dst, src2);
+    // Result was positive zero.
+    j(positive, &zero_correct_result, Label::kNear);
+
+    bind(&failure);  // Reused failure exit, restores src1.
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+
+    bind(&zero_correct_result);
+    Set(dst, 0);
+
+    bind(&correct_result);
+  } else {
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result, Label::kNear);
+    // One of src1 and src2 is zero; check whether the other is
+    // negative.
+    movq(kScratchRegister, src1);
+    xor_(kScratchRegister, src2);
+    j(negative, on_not_smi_result, near_jump);
+    bind(&correct_result);
+  }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  testq(src2, src2);
+  j(zero, on_not_smi_result, near_jump);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  // We need to rule out dividing Smi::kMinValue by -1, since that would
+  // overflow in idiv and raise an exception.
+  // We combine this with the negative zero test (negative zero only happens
+  // when dividing zero by a negative number).
+
+  // We overshoot a little and go to slow case if we divide min-value
+  // by any negative value, not just -1.
+  Label safe_div;
+  testl(rax, Immediate(0x7fffffff));
+  j(not_zero, &safe_div, Label::kNear);
+  testq(src2, src2);
+  if (src1.is(rax)) {
+    j(positive, &safe_div, Label::kNear);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+  } else {
+    j(negative, on_not_smi_result, near_jump);
+  }
+  bind(&safe_div);
+
+  SmiToInteger32(src2, src2);
+  // Sign extend src1 into edx:eax.
+  cdq();
+  idivl(src2);
+  Integer32ToSmi(src2, src2);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  if (src1.is(rax)) {
+    Label smi_result;
+    j(zero, &smi_result, Label::kNear);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+    bind(&smi_result);
+  } else {
+    j(not_zero, on_not_smi_result, near_jump);
+  }
+  if (!dst.is(src1) && src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+  ASSERT(!src1.is(src2));
+
+  testq(src2, src2);
+  j(zero, on_not_smi_result, near_jump);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  SmiToInteger32(src2, src2);
+
+  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+  Label safe_div;
+  cmpl(rax, Immediate(Smi::kMinValue));
+  j(not_equal, &safe_div, Label::kNear);
+  cmpl(src2, Immediate(-1));
+  j(not_equal, &safe_div, Label::kNear);
+  // Retag inputs and go slow case.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  jmp(on_not_smi_result, near_jump);
+  bind(&safe_div);
+
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Restore smi tags on inputs.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, go slow to return a floating point negative zero.
+  Label smi_result;
+  testl(rdx, rdx);
+  j(not_zero, &smi_result, Label::kNear);
+  testq(src1, src1);
+  j(negative, on_not_smi_result, near_jump);
+  bind(&smi_result);
+  Integer32ToSmi(dst, rdx);
+}
+
+
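// Standalone sketch (not part of the patch) of the SmiMod bailouts, assuming
// the untagged Smi::kMinValue is INT32_MIN on x64: division by zero and
// kMinValue % -1 go to the slow case before idiv, and a zero remainder with a
// negative dividend bails out because JavaScript requires a -0 result there
// (e.g. -4 % 2), which only a heap number can represent.
#include <cstdint>
#include <optional>

std::optional<int32_t> SmiModSketch(int32_t dividend, int32_t divisor) {
  if (divisor == 0) return std::nullopt;                            // NaN result
  if (dividend == INT32_MIN && divisor == -1) return std::nullopt;  // idiv overflow
  int32_t remainder = dividend % divisor;
  if (remainder == 0 && dividend < 0) return std::nullopt;          // would be -0
  return remainder;
}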
 void MacroAssembler::SmiNot(Register dst, Register src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
@@ -1387,11 +1882,28 @@
 }
 
 
+void MacroAssembler::SmiShiftLogicalRightConstant(
+    Register dst, Register src, int shift_value,
+    Label* on_not_smi_result, Label::Distance near_jump) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movq(dst, src);
+    if (shift_value == 0) {
+      testq(dst, dst);
+      j(negative, on_not_smi_result, near_jump);
+    }
+    shr(dst, Immediate(shift_value + kSmiShift));
+    shl(dst, Immediate(kSmiShift));
+  }
+}
+
+
 void MacroAssembler::SmiShiftLeft(Register dst,
                                   Register src1,
                                   Register src2) {
   ASSERT(!dst.is(rcx));
-  NearLabel result_ok;
   // Untag shift amount.
   if (!dst.is(src1)) {
     movq(dst, src1);
@@ -1403,6 +1915,45 @@
 }
 
 
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          Label* on_not_smi_result,
+                                          Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  // dst and src1 can be the same, because the one case that bails out
+  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  orl(rcx, Immediate(kSmiShift));
+  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
+  shl(dst, Immediate(kSmiShift));
+  testq(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    Label positive_result;
+    j(positive, &positive_result, Label::kNear);
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+    jmp(on_not_smi_result, near_jump);
+    bind(&positive_result);
+  } else {
+    // src2 was zero and src1 negative.
+    j(negative, on_not_smi_result, near_jump);
+  }
+}
+
+
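// Standalone sketch (not part of the patch) of the shift trick above, assuming
// the x64 smi layout (untagged value in the upper 32 bits, kSmiShift == 32):
// or'ing the shift count with 32 lets a single shr both untag and shift, and a
// shl by 32 re-tags the result.  Only a zero shift of a negative input leaves
// a value outside the non-negative smi range, which is the bailout case above.
#include <cstdint>
#include <optional>

std::optional<uint64_t> SmiShiftLogicalRightSketch(int32_t value, uint32_t count) {
  const unsigned kSmiShift = 32;
  uint64_t tagged = static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift;
  count &= 0x1f;                                     // assumed: JS shift counts are 0..31
  uint64_t shifted = tagged >> (count | kSmiShift);  // untag and shift in one step
  if (shifted > 0x7fffffffu) return std::nullopt;    // result is not a non-negative smi
  return shifted << kSmiShift;                       // re-tag
}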
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
@@ -1430,6 +1981,45 @@
 }
 
 
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smis,
+                                  Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // Both operands must not be smis.
+#ifdef DEBUG
+  if (allow_stub_calls()) {  // Check contains a stub call.
+    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+  }
+#endif
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movl(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  // If non-zero then neither operand is a smi (both are heap objects).
+  j(not_zero, on_not_smis, near_jump);
+
+  // Exactly one operand is a smi.
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
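// Standalone sketch (not part of the patch) of the branch-free selection in
// SelectNonSmi, assuming kSmiTag == 0 and kSmiTagMask == 1 as the assertions
// above require, and that exactly one of the two words carries the
// heap-object tag bit.
#include <cstdint>

uint64_t SelectNonSmiSketch(uint64_t src1, uint64_t src2) {
  uint64_t mask = (src1 & 1) - 1;       // all 1s if src1 is a smi, all 0s otherwise
  uint64_t dst = (src1 ^ src2) & mask;  // src1 ^ src2 if src1 is a smi, else 0
  return dst ^ src1;                    // src2 if src1 is a smi, else src1 (the non-smi)
}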
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -1471,6 +2061,97 @@
 }
 
 
+void MacroAssembler::JumpIfNotString(Register object,
+                                     Register object_map,
+                                     Label* not_string,
+                                     Label::Distance near_jump) {
+  Condition is_smi = CheckSmi(object);
+  j(is_smi, not_string, near_jump);
+  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+  j(above_equal, not_string, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
+    Register first_object,
+    Register second_object,
+    Register scratch1,
+    Register scratch2,
+    Label* on_fail,
+    Label::Distance near_jump) {
+  // Check that both objects are not smis.
+  Condition either_smi = CheckEitherSmi(first_object, second_object);
+  j(either_smi, on_fail, near_jump);
+
+  // Load instance type for both strings.
+  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail, near_jump);
+}
+
+
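// Standalone sketch (not part of the patch) of the combined instance-type test
// above: because kFlatAsciiStringMask shares no bits with itself shifted left
// by 3 (the ASSERT_EQ), the lea computes t1 + (t2 << 3) without carries between
// the two fields, so one compare checks both strings.  kMaskSketch and
// kTagSketch are illustrative placeholders, not the real V8 constants.
#include <cstdint>

bool BothFlatAsciiSketch(uint32_t type1, uint32_t type2) {
  const uint32_t kMaskSketch = 0x07;  // placeholder mask fitting below bit 3
  const uint32_t kTagSketch = 0x04;   // placeholder expected (tag) value
  static_assert((kMaskSketch & (kMaskSketch << 3)) == 0, "fields must not overlap");
  uint32_t combined = (type1 & kMaskSketch) + ((type2 & kMaskSketch) << 3);
  return combined == kTagSketch + (kTagSketch << 3);
}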
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    Label* failure,
+    Label::Distance near_jump) {
+  if (!scratch.is(instance_type)) {
+    movl(scratch, instance_type);
+  }
+
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+  andl(scratch, Immediate(kFlatAsciiStringMask));
+  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+  j(not_equal, failure, near_jump);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first_object_instance_type,
+    Register second_object_instance_type,
+    Register scratch1,
+    Register scratch2,
+    Label* on_fail,
+    Label::Distance near_jump) {
+  // Load instance type for both strings.
+  movq(scratch1, first_object_instance_type);
+  movq(scratch2, second_object_instance_type);
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail, near_jump);
+}
+
+
 
 void MacroAssembler::Move(Register dst, Register src) {
   if (!dst.is(src)) {
@@ -1604,12 +2285,14 @@
 }
 
 
-void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object,
+                          RelocInfo::Mode rmode,
+                          unsigned ast_id) {
 #ifdef DEBUG
   int end_position = pc_offset() + CallSize(code_object);
 #endif
   ASSERT(RelocInfo::IsCodeTarget(rmode));
-  call(code_object, rmode);
+  call(code_object, rmode, ast_id);
 #ifdef DEBUG
   CHECK_EQ(end_position, pc_offset());
 #endif
@@ -1704,18 +2387,15 @@
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
 
   // The pc (return address) is already on TOS.  This code pushes state,
-  // frame pointer and current handler.  Check that they are expected
-  // next on the stack, in that order.
-  ASSERT_EQ(StackHandlerConstants::kStateOffset,
-            StackHandlerConstants::kPCOffset - kPointerSize);
-  ASSERT_EQ(StackHandlerConstants::kFPOffset,
-            StackHandlerConstants::kStateOffset - kPointerSize);
-  ASSERT_EQ(StackHandlerConstants::kNextOffset,
-            StackHandlerConstants::kFPOffset - kPointerSize);
-
+  // frame pointer, context, and current handler.
   if (try_location == IN_JAVASCRIPT) {
     if (type == TRY_CATCH_HANDLER) {
       push(Immediate(StackHandler::TRY_CATCH));
@@ -1723,6 +2403,7 @@
       push(Immediate(StackHandler::TRY_FINALLY));
     }
     push(rbp);
+    push(rsi);
   } else {
     ASSERT(try_location == IN_JS_ENTRY);
     // The frame pointer does not point to a JS frame so we save NULL
@@ -1730,10 +2411,11 @@
     // before dereferencing it to restore the context.
     push(Immediate(StackHandler::ENTRY));
     push(Immediate(0));  // NULL frame pointer.
+    Push(Smi::FromInt(0));  // No context.
   }
   // Save the current handler.
   Operand handler_operand =
-      ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
+      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
   push(handler_operand);
   // Link this handler.
   movq(handler_operand, rsp);
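// Standalone sketch (not part of the patch) of the five-word stack handler
// layout that the STATIC_ASSERTs above pin down; the context slot is the field
// this change introduces, and PushTryHandler fills it from the highest address
// (pc) down to the lowest (next) as it pushes.
#include <cstdint>

struct StackHandlerSketch {
  uint64_t next;     // kNextOffset    == 0 * kPointerSize
  uint64_t context;  // kContextOffset == 1 * kPointerSize (new in this change)
  uint64_t fp;       // kFPOffset      == 2 * kPointerSize
  uint64_t state;    // kStateOffset   == 3 * kPointerSize
  uint64_t pc;       // kPCOffset      == 4 * kPointerSize
};

static_assert(sizeof(StackHandlerSketch) == 5 * sizeof(uint64_t),
              "mirrors StackHandlerConstants::kSize on x64");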
@@ -1744,7 +2426,7 @@
   ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
   // Unlink this handler.
   Operand handler_operand =
-      ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
+      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
   pop(handler_operand);
   // Remove the remaining fields.
   addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
@@ -1752,54 +2434,64 @@
 
 
 void MacroAssembler::Throw(Register value) {
-  // Check that stack should contain next handler, frame pointer, state and
-  // return address in that order.
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
-            StackHandlerConstants::kStateOffset);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
-            StackHandlerConstants::kPCOffset);
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
   // Keep thrown value in rax.
   if (!value.is(rax)) {
     movq(rax, value);
   }
 
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   Operand handler_operand = ExternalOperand(handler_address);
   movq(rsp, handler_operand);
   // get next in chain
   pop(handler_operand);
-  pop(rbp);  // pop frame pointer
-  pop(rdx);  // remove state
+  pop(rsi);  // Context.
+  pop(rbp);  // Frame pointer.
+  pop(rdx);  // State.
 
-  // Before returning we restore the context from the frame pointer if not NULL.
-  // The frame pointer is NULL in the exception handler of a JS entry frame.
-  Set(rsi, 0);  // Tentatively set context pointer to NULL
-  NearLabel skip;
-  cmpq(rbp, Immediate(0));
-  j(equal, &skip);
-  movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  // If the handler is a JS frame, restore the context to the frame.
+  // (rdx == ENTRY) == (rbp == 0) == (rsi == 0), so we could test any
+  // of them.
+  Label skip;
+  cmpq(rdx, Immediate(StackHandler::ENTRY));
+  j(equal, &skip, Label::kNear);
+  movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
   bind(&skip);
+
   ret(0);
 }
 
 
 void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                       Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
   // Keep thrown value in rax.
   if (!value.is(rax)) {
     movq(rax, value);
   }
   // Fetch top stack handler.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   Load(rsp, handler_address);
 
   // Unwind the handlers until the ENTRY handler is found.
-  NearLabel loop, done;
+  Label loop, done;
   bind(&loop);
   // Load the type of the current stack handler.
   const int kStateOffset = StackHandlerConstants::kStateOffset;
   cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done);
+  j(equal, &done, Label::kNear);
   // Fetch the next handler in the list.
   const int kNextOffset = StackHandlerConstants::kNextOffset;
   movq(rsp, Operand(rsp, kNextOffset));
@@ -1813,30 +2505,24 @@
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
     ExternalReference external_caught(
-        Isolate::k_external_caught_exception_address, isolate());
+        Isolate::kExternalCaughtExceptionAddress, isolate());
     Set(rax, static_cast<int64_t>(false));
     Store(external_caught, rax);
 
     // Set pending exception and rax to out of memory exception.
-    ExternalReference pending_exception(Isolate::k_pending_exception_address,
+    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                         isolate());
     movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
     Store(pending_exception, rax);
   }
 
-  // Clear the context pointer.
+  // Discard the context saved in the handler and clear the context pointer.
+  pop(rdx);
   Set(rsi, 0);
 
-  // Restore registers from handler.
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
-                StackHandlerConstants::kFPOffset);
-  pop(rbp);  // FP
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
-                StackHandlerConstants::kStateOffset);
-  pop(rdx);  // State
+  pop(rbp);  // Restore frame pointer.
+  pop(rdx);  // Discard state.
 
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
-                StackHandlerConstants::kPCOffset);
   ret(0);
 }
 
@@ -1878,11 +2564,21 @@
 }
 
 
+void MacroAssembler::CheckFastElements(Register map,
+                                       Label* fail,
+                                       Label::Distance distance) {
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastElementValue));
+  j(above, fail, distance);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
@@ -1890,19 +2586,75 @@
 }
 
 
+void MacroAssembler::ClampUint8(Register reg) {
+  Label done;
+  testl(reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  setcc(negative, reg);  // 1 if negative, 0 if positive.
+  decb(reg);  // 0 if negative, 255 if positive.
+  bind(&done);
+}
+
+
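// Standalone sketch (not part of the patch) of the setcc/decb trick in
// ClampUint8: if any bit above the low byte is set the value is out of
// [0, 255], and mapping the sign to 1-or-0 followed by a byte decrement
// produces 0 for negative inputs and 255 for too-large ones.
#include <cstdint>

int32_t ClampUint8Sketch(int32_t value) {
  if ((value & ~0xFF) == 0) return value;        // already a byte: testl + j(zero)
  uint8_t is_negative = value < 0 ? 1 : 0;       // setcc(negative, reg)
  return static_cast<uint8_t>(is_negative - 1);  // decb: 1 -> 0, 0 -> 255
}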
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+                                        XMMRegister temp_xmm_reg,
+                                        Register result_reg,
+                                        Register temp_reg) {
+  Label done;
+  Set(result_reg, 0);
+  xorps(temp_xmm_reg, temp_xmm_reg);
+  ucomisd(input_reg, temp_xmm_reg);
+  j(below, &done, Label::kNear);
+  uint64_t one_half = BitCast<uint64_t, double>(0.5);
+  Set(temp_reg, one_half);
+  movq(temp_xmm_reg, temp_reg);
+  addsd(temp_xmm_reg, input_reg);
+  cvttsd2si(result_reg, temp_xmm_reg);
+  testl(result_reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  Set(result_reg, 255);
+  bind(&done);
+}
+
+
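// Standalone sketch (not part of the patch) of ClampDoubleToUint8 above:
// non-positive inputs and NaN clamp to 0 (the ucomisd/j(below) path with the
// result preset to 0), in-range values are rounded by adding 0.5 and
// truncating (addsd + cvttsd2si), and anything that does not fit in a byte
// clamps to 255.
#include <cstdint>

uint8_t ClampDoubleToUint8Sketch(double input) {
  if (!(input > 0.0)) return 0;              // input <= 0, or NaN
  if (input >= 255.0) return 255;            // out of byte range
  return static_cast<uint8_t>(input + 0.5);  // add one half, then truncate
}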
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  movq(descriptors, FieldOperand(map,
+                                 Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
+  Move(descriptors, isolate()->factory()->empty_descriptor_array());
+  bind(&not_smi);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  j(equal, success, RelocInfo::CODE_TARGET);
+
+  bind(&fail);
+}
+
+
 void MacroAssembler::AbortIfNotNumber(Register object) {
-  NearLabel ok;
+  Label ok;
   Condition is_smi = CheckSmi(object);
-  j(is_smi, &ok);
+  j(is_smi, &ok, Label::kNear);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
-      FACTORY->heap_number_map());
+      isolate()->factory()->heap_number_map());
   Assert(equal, "Operand not a number");
   bind(&ok);
 }
 
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   Assert(NegateCondition(is_smi), "Operand is a smi");
 }
@@ -1947,7 +2699,7 @@
                                              Register instance_type) {
   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
-  ASSERT(kNotStringTag != 0);
+  STATIC_ASSERT(kNotStringTag != 0);
   testb(instance_type, Immediate(kIsNotStringMask));
   return zero;
 }
@@ -1965,10 +2717,10 @@
   j(not_equal, miss);
 
   // Make sure that the function has an instance prototype.
-  NearLabel non_instance;
+  Label non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
         Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance);
+  j(not_zero, &non_instance, Label::kNear);
 
   // Get the prototype or initial map from the function.
   movq(result,
@@ -1981,13 +2733,13 @@
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
-  NearLabel done;
+  Label done;
   CmpObjectType(result, MAP_TYPE, kScratchRegister);
-  j(not_equal, &done);
+  j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   movq(result, FieldOperand(result, Map::kPrototypeOffset));
-  jmp(&done);
+  jmp(&done, Label::kNear);
 
   // Non-instance prototype: Fetch prototype from constructor field
   // in initial map.
@@ -2044,25 +2796,44 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be rcx to
+  // follow the calling convention, which requires the call kind to
+  // be in rcx.
+  ASSERT(dst.is(rcx));
+  if (call_kind == CALL_AS_FUNCTION) {
+    LoadSmiConstant(dst, Smi::FromInt(1));
+  } else {
+    LoadSmiConstant(dst, Smi::FromInt(0));
+  }
+}
+
+
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   InvokePrologue(expected,
                  actual,
                  Handle<Code>::null(),
                  code,
                  &done,
                  flag,
-                 call_wrapper);
+                 Label::kNear,
+                 call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
     call(code);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
     jmp(code);
   }
   bind(&done);
@@ -2074,8 +2845,9 @@
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   Register dummy = rax;
   InvokePrologue(expected,
                  actual,
@@ -2083,13 +2855,17 @@
                  dummy,
                  &done,
                  flag,
-                 call_wrapper);
+                 Label::kNear,
+                 call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
     Call(code, rmode);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
     Jump(code, rmode);
   }
   bind(&done);
@@ -2099,7 +2875,8 @@
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2110,14 +2887,15 @@
   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(rbx);
-  InvokeCode(rdx, expected, actual, flag, call_wrapper);
+  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   Move(rdi, Handle<JSFunction>(function));
@@ -2128,7 +2906,7 @@
     // the Code object every time we call the function.
     movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
     ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(rdx, expected, actual, flag, call_wrapper);
+    InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
   } else {
     // Invoke the cached code.
     Handle<Code> code(function->code());
@@ -2138,7 +2916,79 @@
                actual,
                RelocInfo::CODE_TARGET,
                flag,
-               call_wrapper);
+               call_wrapper,
+               call_kind);
+  }
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_register,
+                                    Label* done,
+                                    InvokeFlag flag,
+                                    Label::Distance near_jump,
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      Set(rax, actual.immediate());
+      if (expected.immediate() ==
+              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for built-ins that
+        // don't want that done. Skip adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        Set(rbx, expected.immediate());
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmpq(expected.reg(), Immediate(actual.immediate()));
+      j(equal, &invoke, Label::kNear);
+      ASSERT(expected.reg().is(rbx));
+      Set(rax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmpq(expected.reg(), actual.reg());
+      j(equal, &invoke, Label::kNear);
+      ASSERT(actual.reg().is(rax));
+      ASSERT(expected.reg().is(rbx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    if (!code_constant.is_null()) {
+      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_register.is(rdx)) {
+      movq(rdx, code_register);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      SetCallKind(rcx, call_kind);
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      call_wrapper.AfterCall();
+      jmp(done, near_jump);
+    } else {
+      SetCallKind(rcx, call_kind);
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
   }
 }
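// Standalone sketch (not part of the patch) of the decision InvokePrologue
// encodes: call the target directly when the actual argument count is known
// to match the expected count, or when the callee uses the "don't adapt
// arguments" sentinel, and otherwise route the call through the arguments
// adaptor trampoline.  The enum and parameter names are illustrative only.
enum class InvokePathSketch { kDirect, kThroughArgumentsAdaptor };

InvokePathSketch ChooseInvokePathSketch(int expected, int actual,
                                        int dont_adapt_sentinel) {
  if (expected == actual) return InvokePathSketch::kDirect;
  if (expected == dont_adapt_sentinel) return InvokePathSketch::kDirect;
  return InvokePathSketch::kThroughArgumentsAdaptor;
}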
 
@@ -2152,7 +3002,7 @@
   push(kScratchRegister);
   if (emit_debug_code()) {
     movq(kScratchRegister,
-         FACTORY->undefined_value(),
+         isolate()->factory()->undefined_value(),
          RelocInfo::EMBEDDED_OBJECT);
     cmpq(Operand(rsp, 0), kScratchRegister);
     Check(not_equal, "code object not properly patched");
@@ -2191,8 +3041,8 @@
     movq(r14, rax);  // Backup rax in callee-save register.
   }
 
-  Store(ExternalReference(Isolate::k_c_entry_fp_address, isolate()), rbp);
-  Store(ExternalReference(Isolate::k_context_address, isolate()), rsi);
+  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
+  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
 }
 
 
@@ -2282,7 +3132,7 @@
 
 void MacroAssembler::LeaveExitFrameEpilogue() {
   // Restore current context from top and clear it in debug mode.
-  ExternalReference context_address(Isolate::k_context_address, isolate());
+  ExternalReference context_address(Isolate::kContextAddress, isolate());
   Operand context_operand = ExternalOperand(context_address);
   movq(rsi, context_operand);
 #ifdef DEBUG
@@ -2290,7 +3140,7 @@
 #endif
 
   // Clear the top frame.
-  ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
+  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                        isolate());
   Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
   movq(c_entry_fp_operand, Immediate(0));
@@ -2320,7 +3170,7 @@
   // Check the context is a global context.
   if (emit_debug_code()) {
     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
-        FACTORY->global_context_map());
+        isolate()->factory()->global_context_map());
     Check(equal, "JSGlobalObject::global_context should be a global context.");
   }
 
@@ -2360,6 +3210,122 @@
 }
 
 
+void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
+  // First of all we assign the hash seed to scratch.
+  LoadRoot(scratch, Heap::kHashSeedRootIndex);
+  SmiToInteger32(scratch, scratch);
+
+  // Xor original key with a seed.
+  xorl(r0, scratch);
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  movl(scratch, r0);
+  notl(r0);
+  shll(scratch, Immediate(15));
+  addl(r0, scratch);
+  // hash = hash ^ (hash >> 12);
+  movl(scratch, r0);
+  shrl(scratch, Immediate(12));
+  xorl(r0, scratch);
+  // hash = hash + (hash << 2);
+  leal(r0, Operand(r0, r0, times_4, 0));
+  // hash = hash ^ (hash >> 4);
+  movl(scratch, r0);
+  shrl(scratch, Immediate(4));
+  xorl(r0, scratch);
+  // hash = hash * 2057;
+  imull(r0, r0, Immediate(2057));
+  // hash = hash ^ (hash >> 16);
+  movl(scratch, r0);
+  shrl(scratch, Immediate(16));
+  xorl(r0, scratch);
+}
+
+
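// Standalone sketch (not part of the patch) of the integer hash computed by
// GetNumberHash above; the comments there say it must stay in sync with
// ComputeIntegerHash in utils.h, and each step below corresponds to one block
// of the emitted code.
#include <cstdint>

uint32_t ComputeNumberHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;       // xor the key with the hash seed
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}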
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+                                              Register elements,
+                                              Register key,
+                                              Register r0,
+                                              Register r1,
+                                              Register r2,
+                                              Register result) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // Scratch registers:
+  //
+  // r0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // r1 - used to hold the capacity mask of the dictionary
+  //
+  // r2 - used for the index into the dictionary.
+  //
+  // result - holds the result on exit if the load succeeded.
+  //          Allowed to be the same as 'key' or 'elements'.
+  //          Unchanged on bailout so 'key' or 'elements' can be used
+  //          in further computation.
+
+  Label done;
+
+  GetNumberHash(r0, r1);
+
+  // Compute capacity mask.
+  SmiToInteger32(r1, FieldOperand(elements,
+                                  SeededNumberDictionary::kCapacityOffset));
+  decl(r1);
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use r2 for index calculations and keep the hash intact in r0.
+    movq(r2, r0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+    }
+    and_(r2, r1);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
+
+    // Check if the key matches.
+    cmpq(key, FieldOperand(elements,
+                           r2,
+                           times_pointer_size,
+                           SeededNumberDictionary::kElementsStartOffset));
+    if (i != (kProbes - 1)) {
+      j(equal, &done);
+    } else {
+      j(not_equal, miss);
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a normal property.
+  const int kDetailsOffset =
+      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ASSERT_EQ(NORMAL, 0);
+  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+       Smi::FromInt(PropertyDetails::TypeField::kMask));
+  j(not_zero, miss);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset =
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+  movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
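// Standalone sketch (not part of the patch) of the unrolled probe loop in
// LoadFromNumberDictionary: the backing store is a power-of-two table of
// three-word entries (key, value, details), and each of the kProbes attempts
// looks at (hash + probe offset) & mask before giving up and taking the miss
// path.  EntrySketch and the quadratic probe-offset formula are assumptions
// standing in for SeededNumberDictionary and its GetProbeOffset.
#include <cstdint>
#include <optional>
#include <vector>

struct EntrySketch { uint64_t key; uint64_t value; uint64_t details; };

std::optional<uint64_t> NumberDictionaryLookupSketch(
    const std::vector<EntrySketch>& table, uint32_t hash, uint64_t key) {
  const int kProbes = 4;                                          // same unroll count as above
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity is a power of two
  for (int i = 0; i < kProbes; ++i) {
    uint32_t offset = (i + i * i) / 2;                      // assumed GetProbeOffset(i)
    uint32_t index = (hash + offset) & mask;
    if (table[index].key == key) return table[index].value;  // details check omitted
  }
  return std::nullopt;                                      // miss
}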
+
 void MacroAssembler::LoadAllocationTopHelper(Register result,
                                              Register scratch,
                                              AllocationFlags flags) {
@@ -2670,7 +3636,7 @@
 }
 
 
-void MacroAssembler::AllocateConsString(Register result,
+void MacroAssembler::AllocateTwoByteConsString(Register result,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* gc_required) {
@@ -2706,6 +3672,42 @@
 }
 
 
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                          Register scratch1,
+                                          Register scratch2,
+                                          Label* gc_required) {
+  // Allocate a sliced string object in new space.
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map. The other fields are left uninitialized.
+  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  // Allocate a sliced string object in new space.
+  AllocateInNewSpace(SlicedString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map. The other fields are left uninitialized.
+  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
 // Copy memory, byte-by-byte, from source to destination.  Not optimized for
 // long or aligned copies.  The contents of scratch and length are destroyed.
 // Destination is incremented by length, source, length and scratch are
@@ -2773,15 +3775,10 @@
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
-    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
-    // Load the function context (which is the incoming, outer context).
-    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+    movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     for (int i = 1; i < context_chain_length; i++) {
-      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
-      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+      movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     }
-    // The context may be an intermediate context, not a function context.
-    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
   } else {
     // Slot is in the current function context.  Move it into the
     // destination register in case we store into it (the write barrier
@@ -2789,14 +3786,14 @@
     movq(dst, rsi);
   }
 
-  // We should not have found a 'with' context by walking the context chain
-  // (i.e., the static scope chain and runtime context chain do not agree).
-  // A variable occurring in such a scope should have slot type LOOKUP and
-  // not CONTEXT.
+  // We should not have found a with context by walking the context
+  // chain (i.e., the static scope chain and runtime context chain do
+  // not agree).  A variable occurring in such a scope should have
+  // slot type LOOKUP and not CONTEXT.
   if (emit_debug_code()) {
-    cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-    Check(equal, "Yo dawg, I heard you liked function contexts "
-                 "so I put function contexts in all your contexts");
+    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
+                Heap::kWithContextMapRootIndex);
+    Check(not_equal, "Variable resolved to with context.");
   }
 }
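// Standalone sketch (not part of the patch) of the simplified walk above: with
// the new context layout each context stores its previous context directly in
// the PREVIOUS slot, so reaching a context N levels up is just N loads, with
// no detour through the closure and its function context.
struct ContextSketch {
  ContextSketch* previous;  // stands in for the PREVIOUS_INDEX slot
};

ContextSketch* WalkContextChainSketch(ContextSketch* current, int chain_length) {
  ContextSketch* ctx = current;
  for (int i = 0; i < chain_length; ++i) {
    ctx = ctx->previous;  // one load per level, as emitted above
  }
  return ctx;
}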
 
@@ -2822,7 +3819,7 @@
   movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, FACTORY->meta_map(), &fail, false);
+    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
     jmp(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4c17720..ff6edc5 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -29,6 +29,7 @@
 #define V8_X64_MACRO_ASSEMBLER_X64_H_
 
 #include "assembler.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
@@ -44,6 +45,7 @@
   RESULT_CONTAINS_TOP = 1 << 1
 };
 
+
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
@@ -61,7 +63,6 @@
 
 // Forward declaration.
 class JumpTarget;
-class CallWrapper;
 
 struct SmiIndex {
   SmiIndex(Register index_register, ScaleFactor scale)
@@ -146,11 +147,11 @@
   // Check if object is in new space. The condition cc can be equal or
   // not_equal. If it is equal a jump will be done if the object is on new
   // space. The register scratch can be object itself, but it will be clobbered.
-  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,
-                  LabelType* branch);
+                  Label* branch,
+                  Label::Distance near_jump = Label::kFar);
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -240,37 +241,46 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Set up call kind marking in rcx. The method takes rcx as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  void SetCallKind(Register dst, CallKind kind);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  CallWrapper* call_wrapper = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
                   InvokeFlag flag,
-                  CallWrapper* call_wrapper = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
                      InvokeFlag flag,
-                     CallWrapper* call_wrapper = NULL);
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -327,11 +337,11 @@
   // If either argument is not a smi, jump to on_not_smis and retain
   // the original values of source registers. The destination register
   // may be changed if it's not one of the source registers.
-  template <typename LabelType>
   void SmiOrIfSmis(Register dst,
                    Register src1,
                    Register src2,
-                   LabelType* on_not_smis);
+                   Label* on_not_smis,
+                   Label::Distance near_jump = Label::kFar);
 
 
   // Simple comparison of smis.  Both sides must be known smis to use these,
@@ -389,42 +399,45 @@
   // above with a conditional jump.
 
   // Jump if the value cannot be represented by a smi.
-  template <typename LabelType>
-  void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
+  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
+                              Label::Distance near_jump = Label::kFar);
 
   // Jump if the unsigned integer value cannot be represented by a smi.
-  template <typename LabelType>
-  void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
+  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
+                                  Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value is a tagged smi.
-  template <typename LabelType>
-  void JumpIfSmi(Register src, LabelType* on_smi);
+  void JumpIfSmi(Register src,
+                 Label* on_smi,
+                 Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value is not a tagged smi.
-  template <typename LabelType>
-  void JumpIfNotSmi(Register src, LabelType* on_not_smi);
+  void JumpIfNotSmi(Register src,
+                    Label* on_not_smi,
+                    Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value is not a non-negative tagged smi.
-  template <typename LabelType>
-  void JumpUnlessNonNegativeSmi(Register src, LabelType* on_not_smi);
+  void JumpUnlessNonNegativeSmi(Register src,
+                                Label* on_not_smi,
+                                Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value, which must be a tagged smi, has value equal
   // to the constant.
-  template <typename LabelType>
   void JumpIfSmiEqualsConstant(Register src,
                                Smi* constant,
-                               LabelType* on_equals);
+                               Label* on_equals,
+                               Label::Distance near_jump = Label::kFar);
 
   // Jump if either or both register are not smi values.
-  template <typename LabelType>
   void JumpIfNotBothSmi(Register src1,
                         Register src2,
-                        LabelType* on_not_both_smi);
+                        Label* on_not_both_smi,
+                        Label::Distance near_jump = Label::kFar);
 
   // Jump if either or both register are not non-negative smi values.
-  template <typename LabelType>
   void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
-                                    LabelType* on_not_both_smi);
+                                    Label* on_not_both_smi,
+                                    Label::Distance near_jump = Label::kFar);
 
   // Operations on tagged smi values.
 
@@ -434,11 +447,11 @@
   // Optimistically adds an integer constant to a supposed smi.
   // If the src is not a smi, or the result is not a smi, jump to
   // the label.
-  template <typename LabelType>
   void SmiTryAddConstant(Register dst,
                          Register src,
                          Smi* constant,
-                         LabelType* on_not_smi_result);
+                         Label* on_not_smi_result,
+                         Label::Distance near_jump = Label::kFar);
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result.
   // No overflow testing on the result is done.
@@ -450,11 +463,11 @@
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result,
   // or jumping to a label if the result cannot be represented by a smi.
-  template <typename LabelType>
   void SmiAddConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      LabelType* on_not_smi_result);
+                      Label* on_not_smi_result,
+                      Label::Distance near_jump = Label::kFar);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result. No testing on the result is done. Sets the N and Z flags
@@ -463,32 +476,32 @@
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result, or jumping to a label if the result cannot be represented by a smi.
-  template <typename LabelType>
   void SmiSubConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      LabelType* on_not_smi_result);
+                      Label* on_not_smi_result,
+                      Label::Distance near_jump = Label::kFar);
 
   // Negating a smi can give a negative zero or too large positive value.
   // NOTICE: This operation jumps on success, not failure!
-  template <typename LabelType>
   void SmiNeg(Register dst,
               Register src,
-              LabelType* on_smi_result);
+              Label* on_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Adds smi values and return the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
-  template <typename LabelType>
   void SmiAdd(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
-  template <typename LabelType>
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
   void SmiAdd(Register dst,
               Register src1,
               const Operand& src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   void SmiAdd(Register dst,
               Register src1,
@@ -497,21 +510,21 @@
   // Subtracts smi values and return the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
-  template <typename LabelType>
   void SmiSub(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   void SmiSub(Register dst,
               Register src1,
               Register src2);
 
-  template <typename LabelType>
   void SmiSub(Register dst,
               Register src1,
               const Operand& src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   void SmiSub(Register dst,
               Register src1,
@@ -521,27 +534,27 @@
   // if possible.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
-  template <typename LabelType>
   void SmiMul(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Divides one smi by another and returns the quotient.
   // Clobbers rax and rdx registers.
-  template <typename LabelType>
   void SmiDiv(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Divides one smi by another and returns the remainder.
   // Clobbers rax and rdx registers.
-  template <typename LabelType>
   void SmiMod(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Bitwise operations.
   void SmiNot(Register dst, Register src);
@@ -555,11 +568,11 @@
   void SmiShiftLeftConstant(Register dst,
                             Register src,
                             int shift_value);
-  template <typename LabelType>
   void SmiShiftLogicalRightConstant(Register dst,
                                   Register src,
                                   int shift_value,
-                                  LabelType* on_not_smi_result);
+                                  Label* on_not_smi_result,
+                                  Label::Distance near_jump = Label::kFar);
   void SmiShiftArithmeticRightConstant(Register dst,
                                        Register src,
                                        int shift_value);
@@ -572,11 +585,11 @@
   // Shifts a smi value to the right, shifting in zero bits at the top, and
   // returns the unsigned interpretation of the result if that is a smi.
   // Uses and clobbers rcx, so dst may not be rcx.
-  template <typename LabelType>
   void SmiShiftLogicalRight(Register dst,
                             Register src1,
                             Register src2,
-                            LabelType* on_not_smi_result);
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump = Label::kFar);
   // Shifts a smi value to the right, sign extending the top, and
   // returns the signed interpretation of the result. That will always
   // be a valid smi value, since it's numerically smaller than the
@@ -590,11 +603,11 @@
 
   // Select the non-smi register of two registers where exactly one is a
   // smi. If neither are smis, jump to the failure label.
-  template <typename LabelType>
   void SelectNonSmi(Register dst,
                     Register src1,
                     Register src2,
-                    LabelType* on_not_smis);
+                    Label* on_not_smis,
+                    Label::Distance near_jump = Label::kFar);
 
   // Converts, if necessary, a smi to a combination of number and
   // multiplier to be used as a scaled index.
@@ -630,35 +643,36 @@
   // String macros.
 
   // If object is a string, its map is loaded into object_map.
-  template <typename LabelType>
   void JumpIfNotString(Register object,
                        Register object_map,
-                       LabelType* not_string);
+                       Label* not_string,
+                       Label::Distance near_jump = Label::kFar);
 
 
-  template <typename LabelType>
-  void JumpIfNotBothSequentialAsciiStrings(Register first_object,
-                                           Register second_object,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           LabelType* on_not_both_flat_ascii);
+  void JumpIfNotBothSequentialAsciiStrings(
+      Register first_object,
+      Register second_object,
+      Register scratch1,
+      Register scratch2,
+      Label* on_not_both_flat_ascii,
+      Label::Distance near_jump = Label::kFar);
 
   // Check whether the instance type represents a flat ascii string. Jump to the
   // label if not. If the instance type can be scratched specify same register
   // for both instance type and scratch.
-  template <typename LabelType>
   void JumpIfInstanceTypeIsNotSequentialAscii(
       Register instance_type,
       Register scratch,
-      LabelType *on_not_flat_ascii_string);
+      Label* on_not_flat_ascii_string,
+      Label::Distance near_jump = Label::kFar);
 
-  template <typename LabelType>
   void JumpIfBothInstanceTypesAreNotSequentialAscii(
       Register first_object_instance_type,
       Register second_object_instance_type,
       Register scratch1,
       Register scratch2,
-      LabelType* on_fail);
+      Label* on_fail,
+      Label::Distance near_jump = Label::kFar);
 
   // ---------------------------------------------------------------------------
   // Macro instructions.
@@ -692,7 +706,9 @@
 
   void Call(Address destination, RelocInfo::Mode rmode);
   void Call(ExternalReference ext);
-  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+  void Call(Handle<Code> code_object,
+            RelocInfo::Mode rmode,
+            unsigned ast_id = kNoASTId);
 
   // The size of the code generated for different call instructions.
   int CallSize(Address destination, RelocInfo::Mode rmode) {
@@ -738,13 +754,27 @@
   // Always use unsigned comparisons: above and below, not less and greater.
   void CmpInstanceType(Register map, InstanceType type);
 
+  // Check if a map for a JSObject indicates that the object has fast elements.
+  // Jump to the specified label if it does not.
+  void CheckFastElements(Register map,
+                         Label* fail,
+                         Label::Distance distance = Label::kFar);
+
   // Check if the map of an object is equal to a specified map and
   // branch to label if not. Skip the smi check if not required
   // (object is known to be a heap object)
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object)
+  void DispatchMap(Register obj,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
@@ -760,6 +790,15 @@
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
 
+  void ClampUint8(Register reg);
+
+  void ClampDoubleToUint8(XMMRegister input_reg,
+                          XMMRegister temp_xmm_reg,
+                          Register result_reg,
+                          Register temp_reg);
+
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+
   // Abort execution if argument is not a number. Used in debug code.
   void AbortIfNotNumber(Register object);
 
@@ -806,6 +845,16 @@
                               Register scratch,
                               Label* miss);
 
+  void GetNumberHash(Register r0, Register scratch);
+
+  void LoadFromNumberDictionary(Label* miss,
+                                Register elements,
+                                Register key,
+                                Register r0,
+                                Register r1,
+                                Register r2,
+                                Register result);
+
 
   // ---------------------------------------------------------------------------
   // Allocation support
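(Editorial note: LoadFromNumberDictionary, declared above, is consumed later in this same patch by KeyedLoadStubCompiler::GenerateLoadDictionaryElement in stub-cache-x64.cc. That call, annotated with the register roles implied by the declaration and the surrounding comments there:

    __ LoadFromNumberDictionary(&slow,  // bail out here if the inline lookup cannot finish
                                rcx,    // elements: the number-dictionary backing store
                                rax,    // key (a smi)
                                rbx,    // r0: the key as an untagged int32
                                r9,     // r1: presumably scratch
                                rdi,    // r2: presumably scratch
                                rax);   // result: receives the loaded value
)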
@@ -873,7 +922,7 @@
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
-  void AllocateConsString(Register result,
+  void AllocateTwoByteConsString(Register result,
                           Register scratch1,
                           Register scratch2,
                           Label* gc_required);
@@ -882,6 +931,17 @@
                                Register scratch2,
                                Label* gc_required);
 
+  // Allocate a raw sliced string object. Only the map field of the result is
+  // initialized.
+  void AllocateTwoByteSlicedString(Register result,
+                            Register scratch1,
+                            Register scratch2,
+                            Label* gc_required);
+  void AllocateAsciiSlicedString(Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -932,7 +992,7 @@
   // Runtime calls
 
   // Call a code stub.
-  void CallStub(CodeStub* stub);
+  void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
   // Call a code stub and return the code object called.  Try to generate
   // the code if necessary.  Do not perform a GC but instead return a retry
@@ -1102,6 +1162,7 @@
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
   static int kSafepointPushRegisterIndices[Register::kNumRegisters];
   static const int kNumSafepointSavedRegisters = 11;
+  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
   bool generating_stub_;
   bool allow_stub_calls_;
@@ -1118,14 +1179,15 @@
   Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
-  template <typename LabelType>
   void InvokePrologue(const ParameterCount& expected,
                       const ParameterCount& actual,
                       Handle<Code> code_constant,
                       Register code_register,
-                      LabelType* done,
+                      Label* done,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper);
+                      Label::Distance near_jump = Label::kFar,
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1190,21 +1252,6 @@
 };
 
 
-// Helper class for generating code or data associated with the code
-// right before or after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
-  CallWrapper() { }
-  virtual ~CallWrapper() { }
-  // Called just before emitting a call. Argument is the size of the generated
-  // call code.
-  virtual void BeforeCall(int call_size) = 0;
-  // Called just after emitting a call, i.e., at the return site for the call.
-  virtual void AfterCall() = 0;
-};
-
-
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
@@ -1266,751 +1313,6 @@
 #define ACCESS_MASM(masm) masm->
 #endif
 
-// -----------------------------------------------------------------------------
-// Template implementations.
-
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-
-template <typename LabelType>
-void MacroAssembler::SmiNeg(Register dst,
-                            Register src,
-                            LabelType* on_smi_result) {
-  if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    movq(kScratchRegister, src);
-    neg(dst);  // Low 32 bits are retained as zero by negation.
-    // Test if result is zero or Smi::kMinValue.
-    cmpq(dst, kScratchRegister);
-    j(not_equal, on_smi_result);
-    movq(src, kScratchRegister);
-  } else {
-    movq(dst, src);
-    neg(dst);
-    cmpq(dst, src);
-    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
-    j(not_equal, on_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            const Operand& src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    ASSERT(!src2.AddressUsesRegister(dst));
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    cmpq(dst, src2);
-    j(overflow, on_not_smi_result);
-    subq(dst, src2);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            const Operand& src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  if (dst.is(src1)) {
-    movq(kScratchRegister, src2);
-    cmpq(src1, kScratchRegister);
-    j(overflow, on_not_smi_result);
-    subq(src1, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMul(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-
-  if (dst.is(src1)) {
-    NearLabel failure, zero_correct_result;
-    movq(kScratchRegister, src1);  // Create backup for later testing.
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, &failure);
-
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    NearLabel correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-
-    movq(dst, kScratchRegister);
-    xor_(dst, src2);
-    j(positive, &zero_correct_result);  // Result was positive zero.
-
-    bind(&failure);  // Reused failure exit, restores src1.
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-
-    bind(&zero_correct_result);
-    Set(dst, 0);
-
-    bind(&correct_result);
-  } else {
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, on_not_smi_result);
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    NearLabel correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-    // One of src1 and src2 is zero, the check whether the other is
-    // negative.
-    movq(kScratchRegister, src1);
-    xor_(kScratchRegister, src2);
-    j(negative, on_not_smi_result);
-    bind(&correct_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiTryAddConstant(Register dst,
-                                       Register src,
-                                       Smi* constant,
-                                       LabelType* on_not_smi_result) {
-  // Does not assume that src is a smi.
-  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src.is(kScratchRegister));
-
-  JumpIfNotSmi(src, on_not_smi_result);
-  Register tmp = (dst.is(src) ? kScratchRegister : dst);
-  LoadSmiConstant(tmp, constant);
-  addq(tmp, src);
-  j(overflow, on_not_smi_result);
-  if (dst.is(src)) {
-    movq(dst, tmp);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    LabelType* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-
-    LoadSmiConstant(kScratchRegister, constant);
-    addq(kScratchRegister, src);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    LoadSmiConstant(dst, constant);
-    addq(dst, src);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    LabelType* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(kScratchRegister, constant);
-      subq(dst, kScratchRegister);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
-      addq(kScratchRegister, dst);
-      j(overflow, on_not_smi_result);
-      movq(dst, kScratchRegister);
-    }
-  } else {
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result, it only
-      // differs on the overflow bit, which we don't check here.
-      addq(dst, src);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
-      addq(dst, src);
-      j(overflow, on_not_smi_result);
-    }
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiDiv(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  // Check for 0 divisor (result is +/-Infinity).
-  NearLabel positive_divisor;
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(rax, src1);
-  // We need to rule out dividing Smi::kMinValue by -1, since that would
-  // overflow in idiv and raise an exception.
-  // We combine this with negative zero test (negative zero only happens
-  // when dividing zero by a negative number).
-
-  // We overshoot a little and go to slow case if we divide min-value
-  // by any negative value, not just -1.
-  NearLabel safe_div;
-  testl(rax, Immediate(0x7fffffff));
-  j(not_zero, &safe_div);
-  testq(src2, src2);
-  if (src1.is(rax)) {
-    j(positive, &safe_div);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-  } else {
-    j(negative, on_not_smi_result);
-  }
-  bind(&safe_div);
-
-  SmiToInteger32(src2, src2);
-  // Sign extend src1 into edx:eax.
-  cdq();
-  idivl(src2);
-  Integer32ToSmi(src2, src2);
-  // Check that the remainder is zero.
-  testl(rdx, rdx);
-  if (src1.is(rax)) {
-    NearLabel smi_result;
-    j(zero, &smi_result);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  } else {
-    j(not_zero, on_not_smi_result);
-  }
-  if (!dst.is(src1) && src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  Integer32ToSmi(dst, rax);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMod(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-  ASSERT(!src1.is(src2));
-
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(rax, src1);
-  SmiToInteger32(src2, src2);
-
-  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
-  NearLabel safe_div;
-  cmpl(rax, Immediate(Smi::kMinValue));
-  j(not_equal, &safe_div);
-  cmpl(src2, Immediate(-1));
-  j(not_equal, &safe_div);
-  // Retag inputs and go slow case.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  jmp(on_not_smi_result);
-  bind(&safe_div);
-
-  // Sign extend eax into edx:eax.
-  cdq();
-  idivl(src2);
-  // Restore smi tags on inputs.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  // Check for a negative zero result.  If the result is zero, and the
-  // dividend is negative, go slow to return a floating point negative zero.
-  NearLabel smi_result;
-  testl(rdx, rdx);
-  j(not_zero, &smi_result);
-  testq(src1, src1);
-  j(negative, on_not_smi_result);
-  bind(&smi_result);
-  Integer32ToSmi(dst, rdx);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRightConstant(
-    Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
-  // Logic right shift interprets its result as an *unsigned* number.
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movq(dst, src);
-    if (shift_value == 0) {
-      testq(dst, dst);
-      j(negative, on_not_smi_result);
-    }
-    shr(dst, Immediate(shift_value + kSmiShift));
-    shl(dst, Immediate(kSmiShift));
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
-                                          Register src1,
-                                          Register src2,
-                                          LabelType* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(rcx));
-  // dst and src1 can be the same, because the one case that bails out
-  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
-  NearLabel result_ok;
-  if (src1.is(rcx) || src2.is(rcx)) {
-    movq(kScratchRegister, rcx);
-  }
-  if (!dst.is(src1)) {
-    movq(dst, src1);
-  }
-  SmiToInteger32(rcx, src2);
-  orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
-  shl(dst, Immediate(kSmiShift));
-  testq(dst, dst);
-  if (src1.is(rcx) || src2.is(rcx)) {
-    NearLabel positive_result;
-    j(positive, &positive_result);
-    if (src1.is(rcx)) {
-      movq(src1, kScratchRegister);
-    } else {
-      movq(src2, kScratchRegister);
-    }
-    jmp(on_not_smi_result);
-    bind(&positive_result);
-  } else {
-    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SelectNonSmi(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  LabelType* on_not_smis) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(src1));
-  ASSERT(!dst.is(src2));
-  // Both operands must not be smis.
-#ifdef DEBUG
-  if (allow_stub_calls()) {  // Check contains a stub call.
-    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
-    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
-  }
-#endif
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(0, Smi::FromInt(0));
-  movl(kScratchRegister, Immediate(kSmiTagMask));
-  and_(kScratchRegister, src1);
-  testl(kScratchRegister, src2);
-  // If non-zero then both are smis.
-  j(not_zero, on_not_smis);
-
-  // Exactly one operand is a smi.
-  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
-  subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, then scratch register all 1s, else it is all 0s.
-  movq(dst, src1);
-  xor_(dst, src2);
-  and_(dst, kScratchRegister);
-  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
-  xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
-  ASSERT_EQ(0, kSmiTag);
-  Condition smi = CheckSmi(src);
-  j(smi, on_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
-  Condition smi = CheckSmi(src);
-  j(NegateCondition(smi), on_not_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessNonNegativeSmi(
-    Register src, LabelType* on_not_smi_or_negative) {
-  Condition non_negative_smi = CheckNonNegativeSmi(src);
-  j(NegateCondition(non_negative_smi), on_not_smi_or_negative);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
-                                             Smi* constant,
-                                             LabelType* on_equals) {
-  SmiCompare(src, constant);
-  j(equal, on_equals);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotValidSmiValue(Register src,
-                                            LabelType* on_invalid) {
-  Condition is_valid = CheckInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
-                                                LabelType* on_invalid) {
-  Condition is_valid = CheckUInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
-                                      Register src2,
-                                      LabelType* on_not_both_smi) {
-  Condition both_smi = CheckBothSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
-                                                  Register src2,
-                                                  LabelType* on_not_both_smi) {
-  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
-                                 LabelType* on_not_smis) {
-  if (dst.is(src1) || dst.is(src2)) {
-    ASSERT(!src1.is(kScratchRegister));
-    ASSERT(!src2.is(kScratchRegister));
-    movq(kScratchRegister, src1);
-    or_(kScratchRegister, src2);
-    JumpIfNotSmi(kScratchRegister, on_not_smis);
-    movq(dst, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    or_(dst, src2);
-    JumpIfNotSmi(dst, on_not_smis);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotString(Register object,
-                                     Register object_map,
-                                     LabelType* not_string) {
-  Condition is_smi = CheckSmi(object);
-  j(is_smi, not_string);
-  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
-  j(above_equal, not_string);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
-                                                         Register second_object,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         LabelType* on_fail) {
-  // Check that both objects are not smis.
-  Condition either_smi = CheckEitherSmi(first_object, second_object);
-  j(either_smi, on_fail);
-
-  // Load instance type for both strings.
-  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
-  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
-  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
-  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
-    Register instance_type,
-    Register scratch,
-    LabelType *failure) {
-  if (!scratch.is(instance_type)) {
-    movl(scratch, instance_type);
-  }
-
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
-  andl(scratch, Immediate(kFlatAsciiStringMask));
-  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-  j(not_equal, failure);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first_object_instance_type,
-    Register second_object_instance_type,
-    Register scratch1,
-    Register scratch2,
-    LabelType* on_fail) {
-  // Load instance type for both strings.
-  movq(scratch1, first_object_instance_type);
-  movq(scratch2, second_object_instance_type);
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                LabelType* branch) {
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    if (scratch.is(object)) {
-      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
-      and_(scratch, kScratchRegister);
-    } else {
-      movq(scratch, ExternalReference::new_space_mask(isolate()));
-      and_(scratch, object);
-    }
-    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
-    cmpq(scratch, kScratchRegister);
-    j(cc, branch);
-  } else {
-    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
-    intptr_t new_space_start =
-        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
-    if (scratch.is(object)) {
-      addq(scratch, kScratchRegister);
-    } else {
-      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
-    }
-    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
-                                    const ParameterCount& actual,
-                                    Handle<Code> code_constant,
-                                    Register code_register,
-                                    LabelType* done,
-                                    InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
-  bool definitely_matches = false;
-  NearLabel invoke;
-  if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
-    if (expected.immediate() == actual.immediate()) {
-      definitely_matches = true;
-    } else {
-      Set(rax, actual.immediate());
-      if (expected.immediate() ==
-              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
-        // Don't worry about adapting arguments for built-ins that
-        // don't want that done. Skip adaption code by making it look
-        // like we have a match between expected and actual number of
-        // arguments.
-        definitely_matches = true;
-      } else {
-        Set(rbx, expected.immediate());
-      }
-    }
-  } else {
-    if (actual.is_immediate()) {
-      // Expected is in register, actual is immediate. This is the
-      // case when we invoke function values without going through the
-      // IC mechanism.
-      cmpq(expected.reg(), Immediate(actual.immediate()));
-      j(equal, &invoke);
-      ASSERT(expected.reg().is(rbx));
-      Set(rax, actual.immediate());
-    } else if (!expected.reg().is(actual.reg())) {
-      // Both expected and actual are in (different) registers. This
-      // is the case when we invoke functions using call and apply.
-      cmpq(expected.reg(), actual.reg());
-      j(equal, &invoke);
-      ASSERT(actual.reg().is(rax));
-      ASSERT(expected.reg().is(rbx));
-    }
-  }
-
-  if (!definitely_matches) {
-    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
-    if (!code_constant.is_null()) {
-      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
-      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    } else if (!code_register.is(rdx)) {
-      movq(rdx, code_register);
-    }
-
-    if (flag == CALL_FUNCTION) {
-      if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(adaptor));
-      Call(adaptor, RelocInfo::CODE_TARGET);
-      if (call_wrapper != NULL) call_wrapper->AfterCall();
-      jmp(done);
-    } else {
-      Jump(adaptor, RelocInfo::CODE_TARGET);
-    }
-    bind(&invoke);
-  }
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
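(Editorial note: the several hundred lines of template implementations deleted above -- the NearLabel-parameterised SmiNeg/SmiAdd/SmiSub/SmiMul/SmiDiv/SmiMod, the JumpIf* family, SelectNonSmi, InNewSpace and InvokePrologue -- are not dropped from the project. The class presumably gains plain Label overloads with a Label::Distance parameter, with the bodies moved out of line into macro-assembler-x64.cc; the kSmiShift constant the templates relied on is re-added as a class-level static earlier in this diff, which fits that reading. One plausible shape of such a definition, reconstructed from the removed template body of SmiAdd and not itself part of this patch:

    void MacroAssembler::SmiAdd(Register dst,
                                Register src1,
                                Register src2,
                                Label* on_not_smi_result,
                                Label::Distance near_jump) {
      ASSERT_NOT_NULL(on_not_smi_result);
      ASSERT(!dst.is(src2));
      if (dst.is(src1)) {
        movq(kScratchRegister, src1);
        addq(kScratchRegister, src2);
        j(overflow, on_not_smi_result, near_jump);  // distance hint forwarded to the jump
        movq(dst, kScratchRegister);
      } else {
        movq(dst, src1);
        addq(dst, src2);
        j(overflow, on_not_smi_result, near_jump);
      }
    }
)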
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index c16da94..a782bd7 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -661,7 +661,6 @@
     }
     __ movq(rbx, ExternalReference::re_word_character_map());
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
-    ExternalReference word_map = ExternalReference::re_word_character_map();
     __ testb(Operand(rbx, current_character(), times_1, 0),
              current_character());
     BranchOrBacktrack(zero, on_no_match);
@@ -676,7 +675,6 @@
     }
     __ movq(rbx, ExternalReference::re_word_character_map());
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
-    ExternalReference word_map = ExternalReference::re_word_character_map();
     __ testb(Operand(rbx, current_character(), times_1, 0),
              current_character());
     BranchOrBacktrack(not_zero, on_no_match);
@@ -1065,9 +1063,9 @@
 
 
 void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
-  NearLabel after_position;
+  Label after_position;
   __ cmpq(rdi, Immediate(-by * char_size()));
-  __ j(greater_equal, &after_position);
+  __ j(greater_equal, &after_position, Label::kNear);
   __ movq(rdi, Immediate(-by * char_size()));
   // On RegExp code entry (where this operation is used), the character before
   // the current position is expected to be already loaded.
@@ -1172,12 +1170,13 @@
   }
 
   // Prepare for possible GC.
-  HandleScope handles;
+  HandleScope handles(isolate);
   Handle<Code> code_handle(re_code);
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1186,7 +1185,7 @@
   MaybeObject* result = Execution::HandleStackGuardInterrupt();
 
   if (*code_handle != re_code) {  // Return address no longer valid
-    intptr_t delta = *code_handle - re_code;
+    intptr_t delta = code_handle->address() - re_code->address();
     // Overwrite the return address on the stack.
     *return_address += delta;
   }
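(Editorial note on the delta fix above: *code_handle dereferences to a Code*, and subtracting two Code* pointers yields a count of Code-sized elements, not a byte offset, so the old delta could not be meaningfully added to a return address. Taking HeapObject::address() on both sides produces the raw byte distance between the original and the possibly moved code object:

    intptr_t delta = code_handle->address() - re_code->address();
    *return_address += delta;  // slide the saved return address into the moved code object
)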
@@ -1195,8 +1194,20 @@
     return EXCEPTION;
   }
 
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
   // String might have changed.
-  if (subject->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1207,8 +1218,8 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject).IsSequential() ||
-      StringShape(*subject).IsExternal());
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
   const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
@@ -1216,7 +1227,8 @@
   // Find the current start address of the same character at the current string
   // position.
   int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject, start_index);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
 
   if (start_address != new_address) {
     // If there is a difference, update the object pointer and start and end
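(Editorial note on the slice handling introduced above: a SlicedString is a (parent, offset, length) view of another string, so the character at logical index i of the slice lives at index offset + i of the parent. That is why the start address is re-derived from the unwrapped parent with the slice offset folded into the start index:

    const byte* new_address =
        StringCharacterPosition(*subject_tmp, start_index + slice_offset);
)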
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 02b510f..7102225 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -28,6 +28,12 @@
 #ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
 #define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
 
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#include "macro-assembler.h"
+#include "code.h"
+#include "x64/macro-assembler-x64.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index cfaa5b8..df8423a 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -55,7 +55,8 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
     return c_limit;
   }
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index c19d29d..76d2555 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -82,18 +82,18 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             String* name,
-                                             Register r0,
-                                             Register r1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register r0,
+    Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
   __ IncrementCounter(counters->negative_lookups_miss(), 1);
 
-  Label done;
   __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
 
   const int kInterceptorOrAccessCheckNeededMask =
@@ -105,7 +105,7 @@
   __ j(not_zero, miss_label);
 
   // Check that receiver is a JSObject.
-  __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+  __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
   __ j(below, miss_label);
 
   // Load properties array.
@@ -117,64 +117,20 @@
                  Heap::kHashTableMapRootIndex);
   __ j(not_equal, miss_label);
 
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kProbes; i++) {
-    // r0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = r1;
-    // Capacity is smi 2^n.
-    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
-    __ decl(index);
-    __ and_(index,
-            Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
-
-    Register entity_name = r1;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ movq(entity_name, Operand(properties, index, times_pointer_size,
-                                 kElementsStartOffset - kHeapObjectTag));
-    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
-    // __ jmp(miss_label);
-    if (i != kProbes - 1) {
-      __ j(equal, &done);
-
-      // Stop if found the property.
-      __ Cmp(entity_name, Handle<String>(name));
-      __ j(equal, miss_label);
-
-      // Check if the entry name is not a symbol.
-      __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-      __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-               Immediate(kIsSymbolMask));
-      __ j(zero, miss_label);
-    } else {
-      // Give up probing if still not found the undefined value.
-      __ j(not_equal, miss_label);
-    }
-  }
+  Label done;
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      properties,
+      name,
+      r1);
+  if (result->IsFailure()) return result;
 
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+  return result;
 }
 
 
@@ -302,7 +258,7 @@
   // Check that the object is a string.
   __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  ASSERT(kNotStringTag != 0);
+  STATIC_ASSERT(kNotStringTag != 0);
   __ testl(scratch, Immediate(kNotStringTag));
   __ j(not_zero, non_string_object);
 }
@@ -522,10 +478,12 @@
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name)
+                          Register name,
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name) {}
+        name_(name),
+        extra_ic_state_(extra_ic_state) {}
 
   MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
@@ -650,8 +608,11 @@
                                                 arguments_.immediate());
       if (result->IsFailure()) return result;
     } else {
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+          ? CALL_AS_FUNCTION
+          : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION);
+                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -730,6 +691,7 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
@@ -747,6 +709,14 @@
 }
 
 
+void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -907,12 +877,17 @@
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(),
-                                       miss,
-                                       reg,
-                                       name,
-                                       scratch1,
-                                       scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
       __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
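(Editorial note: GenerateDictionaryNegativeLookup now returns a MaybeObject*, presumably because building the shared StringDictionaryLookupStub can itself fail to allocate, so CheckPrototypes has to inspect the result and record the failure as the hunk above shows. The same propagation idiom recurs throughout this file, e.g. in GenerateMissBranch; a minimal sketch, with DoSomethingThatMayAllocate as a hypothetical stand-in:

    MaybeObject* maybe_result = DoSomethingThatMayAllocate();
    Object* result;
    if (!maybe_result->ToObject(&result)) return maybe_result;  // propagate the failure upward
    // ... only use 'result' on the success path ...
)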
@@ -1114,9 +1089,8 @@
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder,
-                      scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(object, receiver, holder,
+                  scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
   __ Move(rax, Handle<Object>(value));
@@ -1325,8 +1299,10 @@
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj = isolate()->stub_cache()->ComputeCallMiss(
-      arguments().immediate(), kind_);
+  MaybeObject* maybe_obj =
+      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+                                               kind_,
+                                               extra_ic_state_);
   Object* obj;
   if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1377,7 +1353,11 @@
   }
 
   // Invoke the function.
-  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
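(Editorial note: the three-line CallKind computation above recurs at every InvokeFunction/InvokeCode call site updated in this file. It decodes the contextual bit of the extra IC state and chooses CALL_AS_FUNCTION for contextual calls and CALL_AS_METHOD otherwise, a distinction that matters for how the callee's receiver is treated. The recurring idiom, as it appears in these hunks:

    CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
        ? CALL_AS_FUNCTION
        : CALL_AS_METHOD;
    __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
                      NullCallWrapper(), call_kind);
)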
@@ -1657,7 +1637,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1739,7 +1721,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1856,7 +1840,11 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   __ bind(&miss);
   // rcx: function name.
@@ -1944,7 +1932,7 @@
 
   // Check if the argument is a heap number and load its value.
   __ bind(&not_smi);
-  __ CheckMap(rax, factory()->heap_number_map(), &slow, true);
+  __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
   __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
 
   // Check the sign of the argument. If the argument is positive,
@@ -1969,7 +1957,11 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   __ bind(&miss);
   // rcx: function name.
@@ -1993,6 +1985,7 @@
   // repatch it to global receiver.
   if (object->IsGlobalObject()) return heap()->undefined_value();
   if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
             JSObject::cast(object), holder);
   if (depth == kInvalidProtoDepth) return heap()->undefined_value();
@@ -2162,7 +2155,11 @@
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2199,7 +2196,7 @@
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), rcx);
+  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
   MaybeObject* result = compiler.Compile(masm(),
                                          object,
                                          holder,
@@ -2229,7 +2226,11 @@
 
   // Invoke the function.
   __ movq(rdi, rax);
-  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle load cache miss.
   __ bind(&miss);
@@ -2290,16 +2291,21 @@
   __ IncrementCounter(counters->call_global_inline(), 1);
   ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
     // TODO(kasperl): For now, we always call indirectly through the
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION);
+    __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
     Handle<Code> code(function->code());
     __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   }
   // Handle call cache miss.
   __ bind(&miss);
@@ -2523,8 +2529,35 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -2532,51 +2565,26 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  // Check that the map matches.
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-         Handle<Map>(receiver->map()));
-  __ j(not_equal, &miss);
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &miss);
-
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
-         factory()->fixed_array_map());
-  __ j(not_equal, &miss);
-
-  // Check that the key is within bounds.
-  if (receiver->IsJSArray()) {
-    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
-    __ j(above_equal, &miss);
-  } else {
-    __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-    __ j(above_equal, &miss);
+  Register map_reg = rbx;
+  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    // Check map and tail call if there's a match
+    Handle<Map> map(receiver_maps->at(current));
+    __ Cmp(map_reg, map);
+    __ j(equal,
+         Handle<Code>(handler_ics->at(current)),
+         RelocInfo::CODE_TARGET);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ movq(rdx, rax);
-  __ SmiToInteger32(rcx, rcx);
-  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ RecordWrite(rdi, 0, rdx, rcx);
-
-  // Done.
-  __ ret(0);
-
-  // Handle store cache miss.
   __ bind(&miss);
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -2590,7 +2598,7 @@
   // -----------------------------------
   Label miss;
 
-  // Chech that receiver is not a smi.
+  // Check that receiver is not a smi.
   __ JumpIfSmi(rax, &miss);
 
   // Check the maps of the full prototype chain. Also check that
@@ -2981,49 +2989,57 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Code* stub;
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  // Check that the map matches.
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-         Handle<Map>(receiver->map()));
-  __ j(not_equal, &miss);
+  Register map_reg = rbx;
+  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    // Check map and tail call if there's a match
+    Handle<Map> map(receiver_maps->at(current));
+    __ Cmp(map_reg, map);
+    __ j(equal,
+         Handle<Code>(handler_ics->at(current)),
+         RelocInfo::CODE_TARGET);
+  }
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &miss);
-
-  // Get the elements array.
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ AssertFastElements(rcx);
-
-  // Check that the key is within bounds.
-  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss);
-
-  // Load the result and make sure it's not the hole.
-  SmiIndex index = masm()->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx, FieldOperand(rcx,
-                            index.reg,
-                            index.scale,
-                            FixedArray::kHeaderSize));
-  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &miss);
-  __ movq(rax, rbx);
-  __ ret(0);
-
-  __ bind(&miss);
+  __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -3054,7 +3070,7 @@
   // Load the initial map and verify that it is in fact a map.
   __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
   // Will both indicate a NULL and a Smi.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(rbx, &generic_stub_call);
   __ CmpObjectType(rbx, MAP_TYPE, rcx);
   __ j(not_equal, &generic_stub_call);
@@ -3160,30 +3176,77 @@
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+    MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label slow;
+  Label slow, miss_force_generic;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &slow);
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  __ JumpIfNotSmi(rax, &miss_force_generic);
+  __ SmiToInteger32(rbx, rax);
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+
+  // Check whether the elements is a number dictionary.
+  // rdx: receiver
+  // rax: key
+  // rbx: key as untagged int32
+  // rcx: elements
+  __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
+  __ ret(0);
+
+  __ bind(&slow);
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label slow, miss_force_generic;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &slow);
-
-  // Check that the map matches.
-  __ CheckMap(rdx, Handle<Map>(receiver->map()), &slow, false);
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ JumpIfNotSmi(rax, &miss_force_generic);
 
   // Check that the index is in range.
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   __ SmiToInteger32(rcx, rax);
-  __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
+  __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &slow);
+  __ j(above_equal, &miss_force_generic);
 
   // rax: index (as a smi)
   // rdx: receiver (JSObject)
@@ -3191,29 +3254,32 @@
   // rbx: elements array
   __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
   // rbx: base pointer of external storage
-  switch (array_type) {
-    case kExternalByteArray:
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
       __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
       break;
-    case kExternalPixelArray:
-    case kExternalUnsignedByteArray:
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
       __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
       break;
-    case kExternalShortArray:
+    case EXTERNAL_SHORT_ELEMENTS:
       __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
       break;
-    case kExternalUnsignedShortArray:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
       __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
       break;
-    case kExternalIntArray:
+    case EXTERNAL_INT_ELEMENTS:
       __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
       break;
-    case kExternalUnsignedIntArray:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       __ movl(rcx, Operand(rbx, rcx, times_4, 0));
       break;
-    case kExternalFloatArray:
+    case EXTERNAL_FLOAT_ELEMENTS:
       __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
       break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
+      break;
     default:
       UNREACHABLE();
       break;
@@ -3227,13 +3293,13 @@
   // xmm0: value as double.
 
   ASSERT(kSmiValueSize == 32);
-  if (array_type == kExternalUnsignedIntArray) {
+  if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
     // For the UnsignedInt array type, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
-    NearLabel box_int;
+    Label box_int;
 
-    __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+    __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
 
     __ Integer32ToSmi(rax, rcx);
     __ ret(0);
@@ -3251,7 +3317,8 @@
     __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
     __ movq(rax, rcx);
     __ ret(0);
-  } else if (array_type == kExternalFloatArray) {
+  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
     __ AllocateHeapNumber(rcx, rbx, &slow);
@@ -3266,7 +3333,7 @@
 
   // Slow case: Jump to runtime.
   __ bind(&slow);
-  Counters* counters = isolate()->counters();
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
 
   // ----------- S t a t e -------------
@@ -3275,44 +3342,46 @@
   //  -- rsp[0]  : return address
   // -----------------------------------
 
-  __ pop(rbx);
-  __ push(rdx);  // receiver
-  __ push(rax);  // name
-  __ push(rbx);  // return address
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+  // Miss case: Jump to runtime.
+  __ bind(&miss_force_generic);
 
-  // Return the generated code.
-  return GetCode(flags);
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rcx     : key
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow;
+  Label slow, miss_force_generic;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &slow);
-
-  // Check that the map matches.
-  __ CheckMap(rdx, Handle<Map>(receiver->map()), &slow, false);
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &slow);
+  __ JumpIfNotSmi(rcx, &miss_force_generic);
 
   // Check that the index is in range.
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   __ SmiToInteger32(rdi, rcx);  // Untag the index.
-  __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
+  __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &slow);
+  __ j(above_equal, &miss_force_generic);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
@@ -3321,54 +3390,62 @@
   // rdx: receiver (a JSObject)
   // rbx: elements array
   // rdi: untagged key
-  NearLabel check_heap_number;
-  if (array_type == kExternalPixelArray) {
+  Label check_heap_number;
+  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Float to pixel conversion is only implemented in the runtime for now.
     __ JumpIfNotSmi(rax, &slow);
   } else {
-    __ JumpIfNotSmi(rax, &check_heap_number);
+    __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
   }
   // No more branches to slow case on this path.  Key and receiver not needed.
   __ SmiToInteger32(rdx, rax);
   __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
   // rbx: base pointer of external storage
-  switch (array_type) {
-    case kExternalPixelArray:
+  switch (elements_kind) {
+    case EXTERNAL_PIXEL_ELEMENTS:
       {  // Clamp the value to [0..255].
-        NearLabel done;
+        Label done;
         __ testl(rdx, Immediate(0xFFFFFF00));
-        __ j(zero, &done);
+        __ j(zero, &done, Label::kNear);
         __ setcc(negative, rdx);  // 1 if negative, 0 if positive.
         __ decb(rdx);  // 0 if negative, 255 if positive.
         __ bind(&done);
       }
       __ movb(Operand(rbx, rdi, times_1, 0), rdx);
       break;
-    case kExternalByteArray:
-    case kExternalUnsignedByteArray:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
       __ movb(Operand(rbx, rdi, times_1, 0), rdx);
       break;
-    case kExternalShortArray:
-    case kExternalUnsignedShortArray:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
       __ movw(Operand(rbx, rdi, times_2, 0), rdx);
       break;
-    case kExternalIntArray:
-    case kExternalUnsignedIntArray:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       __ movl(Operand(rbx, rdi, times_4, 0), rdx);
       break;
-    case kExternalFloatArray:
+    case EXTERNAL_FLOAT_ELEMENTS:
       // Need to perform int-to-float conversion.
       __ cvtlsi2ss(xmm0, rdx);
       __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
       break;
-    default:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      // Need to perform int-to-double conversion.
+      __ cvtlsi2sd(xmm0, rdx);
+      __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+      break;
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
       break;
   }
   __ ret(0);
 
   // TODO(danno): handle heap number -> pixel array conversion
-  if (array_type != kExternalPixelArray) {
+  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     __ bind(&check_heap_number);
     // rax: value
     // rcx: key (a smi)
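For reference, the setcc/decb sequence in the pixel-store path above is a branch-light clamp to [0..255]: any value with bits outside the low byte is forced to 0 if it was negative and to 255 otherwise. A hypothetical C++ equivalent (ClampToUint8 is not a V8 helper, only an illustration):

#include <stdint.h>

// Values already in [0..255] pass through; negative values clamp to 0 and
// values above 255 clamp to 255, mirroring the setcc/decb trick: the
// "is negative" byte (1 or 0) is decremented, yielding 0x00 or 0xFF.
static inline uint8_t ClampToUint8(int32_t value) {
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);
  uint8_t is_negative = value < 0 ? 1 : 0;
  return static_cast<uint8_t>(is_negative - 1);
}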
@@ -3387,10 +3464,13 @@
     // rdi: untagged index
     // rbx: base pointer of external storage
     // top of FPU stack: value
-    if (array_type == kExternalFloatArray) {
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ cvtsd2ss(xmm0, xmm0);
       __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
       __ ret(0);
+    } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+      __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+      __ ret(0);
     } else {
       // Perform float-to-int conversion with truncation (round-to-zero)
       // behavior.
@@ -3401,26 +3481,31 @@
       // rdx: value (converted to an untagged integer)
       // rdi: untagged index
       // rbx: base pointer of external storage
-      switch (array_type) {
-        case kExternalByteArray:
-        case kExternalUnsignedByteArray:
+      switch (elements_kind) {
+        case EXTERNAL_BYTE_ELEMENTS:
+        case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
           __ cvttsd2si(rdx, xmm0);
           __ movb(Operand(rbx, rdi, times_1, 0), rdx);
           break;
-        case kExternalShortArray:
-        case kExternalUnsignedShortArray:
+        case EXTERNAL_SHORT_ELEMENTS:
+        case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
           __ cvttsd2si(rdx, xmm0);
           __ movw(Operand(rbx, rdi, times_2, 0), rdx);
           break;
-        case kExternalIntArray:
-        case kExternalUnsignedIntArray: {
+        case EXTERNAL_INT_ELEMENTS:
+        case EXTERNAL_UNSIGNED_INT_ELEMENTS:
           // Convert to int64, so that NaN and infinities become
           // 0x8000000000000000, which is zero mod 2^32.
           __ cvttsd2siq(rdx, xmm0);
           __ movl(Operand(rbx, rdi, times_4, 0), rdx);
           break;
-        }
-        default:
+        case EXTERNAL_PIXEL_ELEMENTS:
+        case EXTERNAL_FLOAT_ELEMENTS:
+        case EXTERNAL_DOUBLE_ELEMENTS:
+        case FAST_ELEMENTS:
+        case FAST_DOUBLE_ELEMENTS:
+        case DICTIONARY_ELEMENTS:
+        case NON_STRICT_ARGUMENTS_ELEMENTS:
           UNREACHABLE();
           break;
       }
@@ -3438,21 +3523,252 @@
   //  -- rsp[0]  : return address
   // -----------------------------------
 
-  __ pop(rbx);
-  __ push(rdx);  // receiver
-  __ push(rcx);  // key
-  __ push(rax);  // value
-  __ Push(Smi::FromInt(NONE));   // PropertyAttributes
-  __ Push(Smi::FromInt(
-      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode));
-  __ push(rbx);  // return address
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+  // Miss case: call runtime.
+  __ bind(&miss_force_generic);
 
-  return GetCode(flags);
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 }
 
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &miss_force_generic);
+
+  // Get the elements array.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rcx);
+
+  // Check that the key is within bounds.
+  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx, FieldOperand(rcx,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &miss_force_generic);
+  __ movq(rax, rbx);
+  __ ret(0);
+
+  __ bind(&miss_force_generic);
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &miss_force_generic);
+
+  // Get the elements array.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rcx);
+
+  // Check that the key is within bounds.
+  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Check for the hole
+  __ SmiToInteger32(kScratchRegister, rax);
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
+          Immediate(kHoleNanUpper32));
+  __ j(equal, &miss_force_generic);
+
+  // Always allocate a heap number for the result.
+  __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
+                              FixedDoubleArray::kHeaderSize));
+  __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+  // Set the value.
+  __ movq(rax, rcx);
+  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+  __ ret(0);
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rcx, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &miss_force_generic);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    __ j(above_equal, &miss_force_generic);
+  } else {
+    __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+    __ j(above_equal, &miss_force_generic);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register rax.
+  __ movq(rdx, rax);
+  __ SmiToInteger32(rcx, rcx);
+  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          rax);
+  __ RecordWrite(rdi, 0, rdx, rcx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic_force_generic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+    MacroAssembler* masm,
+    bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, smi_value, is_nan, maybe_nan;
+  Label have_double_value, not_nan;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rcx, &miss_force_generic);
+
+  // Get the elements array.
+  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rdi);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+  } else {
+    __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+  }
+  __ j(above_equal, &miss_force_generic);
+
+  // Handle smi values specially
+  __ JumpIfSmi(rax, &smi_value, Label::kNear);
+
+  __ CheckMap(rax,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  __ cmpl(FieldOperand(rax, offset),
+          Immediate(kNaNOrInfinityLowerBoundUpper32));
+  __ j(greater_equal, &maybe_nan, Label::kNear);
+
+  __ bind(&not_nan);
+  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ bind(&have_double_value);
+  __ SmiToInteger32(rcx, rcx);
+  __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
+           xmm0);
+  __ ret(0);
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  __ j(greater, &is_nan, Label::kNear);
+  __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
+  __ j(zero, &not_nan);
+  __ bind(&is_nan);
+  // Convert all NaNs to the same canonical NaN value when they are stored in
+  // the double array.
+  __ Set(kScratchRegister, BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  __ movq(xmm0, kScratchRegister);
+  __ jmp(&have_double_value, Label::kNear);
+
+  __ bind(&smi_value);
+  // Value is a smi. Convert to a double and store.
+  // Preserve original value.
+  __ SmiToInteger32(rdx, rax);
+  __ push(rdx);
+  __ fild_s(Operand(rsp, 0));
+  __ pop(rdx);
+  __ SmiToInteger32(rcx, rcx);
+  __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
+  __ ret(0);
+
+  // Handle store cache miss, replacing the ic with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic_force_generic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
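The fast-double-element stubs above rely on one convention: holes in a FixedDoubleArray are encoded as a specific NaN bit pattern, so stores must rewrite every incoming NaN to a different, canonical NaN, and loads that see the hole pattern bail out to the generic IC. A small illustrative sketch of that convention follows; the two bit patterns are stand-ins for kHoleNanUpper32/kHoleNanLower32 and FixedDoubleArray::canonical_not_the_hole_nan_as_double(), not the values V8 actually uses.

#include <stdint.h>
#include <string.h>

// Illustrative bit patterns only -- the real constants live in the V8 sources.
static const uint64_t kSketchHoleNanBits = 0x7FFFFFFFFFFFFFFFULL;
static const uint64_t kSketchCanonicalNanBits = 0x7FF8000000000000ULL;

static uint64_t DoubleBits(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return bits;
}

// Store path: any NaN is replaced by the canonical NaN so user values can
// never alias the hole marker.
static uint64_t BitsToStore(double value) {
  if (value != value) return kSketchCanonicalNanBits;  // value is a NaN
  return DoubleBits(value);
}

// Load path: seeing the hole pattern means the element is absent and the
// caller must take the slow/generic route.
static bool IsHoleBits(uint64_t stored_bits) {
  return stored_bits == kSketchHoleNanBits;
}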
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 17e83dc..4870105 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -55,7 +55,12 @@
 
   // Check if the requested size is available without expanding.
   Address result = position_;
-  if ((position_ += size) > limit_) result = NewExpand(size);
+
+  if (size > limit_ - position_) {
+    result = NewExpand(size);
+  } else {
+    position_ += size;
+  }
 
   // Check that the result has the proper alignment and return it.
   ASSERT(IsAddressAligned(result, kAlignment, 0));
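The rewritten check above avoids forming position_ + size, which can wrap past the end of the address space for a large request and make the old comparison succeed spuriously; comparing the request against the remaining room cannot overflow because position_ <= limit_ always holds. A minimal standalone sketch of the same idea (Address is assumed to be a plain byte pointer here, and NeedsExpansion is a hypothetical name, not a Zone method):

#include <stddef.h>

typedef char* Address;

// Overflow-safe form of the zone bounds check: instead of asking whether
// position + size runs past limit (which can wrap), ask whether the request
// exceeds the room that is left. Precondition mirrored from Zone:
// position <= limit.
static bool NeedsExpansion(Address position, Address limit, size_t size) {
  return size > static_cast<size_t>(limit - position);
}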
@@ -107,9 +112,20 @@
 }
 
 
-ZoneScope::ZoneScope(ZoneScopeMode mode)
-    : isolate_(Isolate::Current()),
-      mode_(mode) {
+template <typename T>
+void* ZoneList<T>::operator new(size_t size) {
+  return ZONE->New(static_cast<int>(size));
+}
+
+
+template <typename T>
+void* ZoneList<T>::operator new(size_t size, Zone* zone) {
+  return zone->New(static_cast<int>(size));
+}
+
+
+ZoneScope::ZoneScope(Isolate* isolate, ZoneScopeMode mode)
+    : isolate_(isolate), mode_(mode) {
   isolate_->zone()->scope_nesting_++;
 }
 
diff --git a/src/zone.cc b/src/zone.cc
index 42ce8c5..2d14d13 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -34,24 +34,6 @@
 namespace internal {
 
 
-Zone::Zone()
-    : zone_excess_limit_(256 * MB),
-      segment_bytes_allocated_(0),
-      position_(0),
-      limit_(0),
-      scope_nesting_(0),
-      segment_head_(NULL) {
-}
-unsigned Zone::allocation_size_ = 0;
-
-
-ZoneScope::~ZoneScope() {
-  ASSERT_EQ(Isolate::Current(), isolate_);
-  if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
-  isolate_->zone()->scope_nesting_--;
-}
-
-
 // Segments represent chunks of memory: They have starting address
 // (encoded in the this pointer) and a size in bytes. Segments are
 // chained together forming a LIFO structure with the newest segment
@@ -60,6 +42,11 @@
 
 class Segment {
  public:
+  void Initialize(Segment* next, int size) {
+    next_ = next;
+    size_ = size;
+  }
+
   Segment* next() const { return next_; }
   void clear_next() { next_ = NULL; }
 
@@ -77,19 +64,33 @@
 
   Segment* next_;
   int size_;
-
-  friend class Zone;
 };
 
 
+Zone::Zone()
+    : zone_excess_limit_(256 * MB),
+      segment_bytes_allocated_(0),
+      position_(0),
+      limit_(0),
+      scope_nesting_(0),
+      segment_head_(NULL) {
+}
+unsigned Zone::allocation_size_ = 0;
+
+ZoneScope::~ZoneScope() {
+  ASSERT_EQ(Isolate::Current(), isolate_);
+  if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
+  isolate_->zone()->scope_nesting_--;
+}
+
+
 // Creates a new segment, sets it size, and pushes it to the front
 // of the segment chain. Returns the new segment.
 Segment* Zone::NewSegment(int size) {
   Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
   adjust_segment_bytes_allocated(size);
   if (result != NULL) {
-    result->next_ = segment_head_;
-    result->size_ = size;
+    result->Initialize(segment_head_, size);
     segment_head_ = result;
   }
   return result;
@@ -155,11 +156,19 @@
 }
 
 
+void Zone::DeleteKeptSegment() {
+  if (segment_head_ != NULL) {
+    DeleteSegment(segment_head_, segment_head_->size());
+    segment_head_ = NULL;
+  }
+}
+
+
 Address Zone::NewExpand(int size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
   ASSERT(size == RoundDown(size, kAlignment));
-  ASSERT(position_ + size > limit_);
+  ASSERT(size > limit_ - position_);
 
   // Compute the new segment size. We use a 'high water mark'
   // strategy, where we increase the segment size every time we expand
@@ -168,7 +177,13 @@
   Segment* head = segment_head_;
   int old_size = (head == NULL) ? 0 : head->size();
   static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
-  int new_size = kSegmentOverhead + size + (old_size << 1);
+  int new_size_no_overhead = size + (old_size << 1);
+  int new_size = kSegmentOverhead + new_size_no_overhead;
+  // Guard against integer overflow.
+  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
+    V8::FatalProcessOutOfMemory("Zone");
+    return NULL;
+  }
   if (new_size < kMinimumSegmentSize) {
     new_size = kMinimumSegmentSize;
   } else if (new_size > kMaximumSegmentSize) {
@@ -187,6 +202,11 @@
   // Recompute 'top' and 'limit' based on the new segment.
   Address result = RoundUp(segment->start(), kAlignment);
   position_ = result + size;
+  // Check for address overflow.
+  if (position_ < result) {
+    V8::FatalProcessOutOfMemory("Zone");
+    return NULL;
+  }
   limit_ = segment->end();
   ASSERT(position_ <= limit_);
   return result;
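Taken together, the NewExpand changes above keep the high-water-mark growth policy (roughly twice the previous segment, plus the request and a fixed overhead) but bail out with a fatal OOM instead of letting the int arithmetic or the bumped position pointer wrap. A compressed sketch of the sizing step, with placeholder constants and a hypothetical NextSegmentSize helper standing in for the inline code:

// Placeholder bounds standing in for Zone's kMinimumSegmentSize,
// kMaximumSegmentSize and the per-segment overhead; the growth policy, not
// the numbers, is the point of the sketch.
static const int kSketchMinimumSegmentSize = 8 * 1024;
static const int kSketchMaximumSegmentSize = 1024 * 1024;
static const int kSketchSegmentOverhead = 64;

// Returns the size of the next segment, or -1 on integer overflow (the real
// code calls V8::FatalProcessOutOfMemory instead of returning).
static int NextSegmentSize(int request, int previous_segment_size) {
  int no_overhead = request + (previous_segment_size << 1);
  int new_size = kSketchSegmentOverhead + no_overhead;
  if (no_overhead < request || new_size < kSketchSegmentOverhead) {
    return -1;  // the doubling overflowed; give up rather than under-allocate
  }
  if (new_size < kSketchMinimumSegmentSize) {
    new_size = kSketchMinimumSegmentSize;
  } else if (new_size > kSketchMaximumSegmentSize) {
    // Oversized requests are served with just the request plus overhead
    // rather than being capped below what was asked for.
    new_size = request > kSketchMaximumSegmentSize - kSketchSegmentOverhead
                   ? request + kSketchSegmentOverhead
                   : kSketchMaximumSegmentSize;
  }
  return new_size;
}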
diff --git a/src/zone.h b/src/zone.h
index 9efe4f5..f60ac0d 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -28,6 +28,8 @@
 #ifndef V8_ZONE_H_
 #define V8_ZONE_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -63,15 +65,21 @@
   template <typename T>
   inline T* NewArray(int length);
 
-  // Delete all objects and free all memory allocated in the Zone.
+  // Deletes all objects and frees all memory allocated in the Zone. Keeps one
+  // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
   void DeleteAll();
 
+  // Deletes the last small segment kept around by DeleteAll().
+  void DeleteKeptSegment();
+
   // Returns true if more memory has been allocated in zones than
   // the limit allows.
   inline bool excess_allocation();
 
   inline void adjust_segment_bytes_allocated(int delta);
 
+  inline Isolate* isolate() { return isolate_; }
+
   static unsigned allocation_size_;
 
  private:
@@ -132,8 +140,8 @@
 class ZoneObject {
  public:
   // Allocate a new ZoneObject of 'size' bytes in the Zone.
-  inline void* operator new(size_t size);
-  inline void* operator new(size_t size, Zone* zone);
+  INLINE(void* operator new(size_t size));
+  INLINE(void* operator new(size_t size, Zone* zone));
 
   // Ideally, the delete operator should be private instead of
   // public, but unfortunately the compiler sometimes synthesizes
@@ -144,6 +152,7 @@
   // ZoneObjects should never be deleted individually; use
   // Zone::DeleteAll() to delete all zone objects in one go.
   void operator delete(void*, size_t) { UNREACHABLE(); }
+  void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
 
@@ -162,7 +171,7 @@
 class ZoneListAllocationPolicy {
  public:
   // Allocate 'size' bytes of memory in the zone.
-  static inline void* New(int size);
+  static void* New(int size);
 
   // De-allocation attempts are silently ignored.
   static void Delete(void* p) { }
@@ -176,6 +185,9 @@
 template<typename T>
 class ZoneList: public List<T, ZoneListAllocationPolicy> {
  public:
+  INLINE(void* operator new(size_t size));
+  INLINE(void* operator new(size_t size, Zone* zone));
+
   // Construct a new ZoneList with the given capacity; the length is
   // always zero. The capacity must be non-negative.
   explicit ZoneList(int capacity)
@@ -186,20 +198,18 @@
       : List<T, ZoneListAllocationPolicy>(other.length()) {
     AddAll(other);
   }
+
+  void operator delete(void* pointer) { UNREACHABLE(); }
+  void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
 
-// Introduce a convenience type for zone lists of map handles.
-typedef ZoneList<Handle<Map> > ZoneMapList;
-
-
 // ZoneScopes keep track of the current parsing and compilation
 // nesting and cleans up generated ASTs in the Zone when exiting the
 // outer-most scope.
 class ZoneScope BASE_EMBEDDED {
  public:
-  // TODO(isolates): pass isolate pointer here.
-  inline explicit ZoneScope(ZoneScopeMode mode);
+  INLINE(ZoneScope(Isolate* isolate, ZoneScopeMode mode));
 
   virtual ~ZoneScope();
 
diff --git a/test/benchmarks/testcfg.py b/test/benchmarks/testcfg.py
index 51d8520..ab9d40f 100644
--- a/test/benchmarks/testcfg.py
+++ b/test/benchmarks/testcfg.py
@@ -91,7 +91,7 @@
     return [test]
 
   def GetBuildRequirements(self):
-    return ['sample', 'sample=shell']
+    return ['d8']
 
   def GetTestStatus(self, sections, defs):
     pass
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index f4cb4a9..5c92671 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -29,7 +29,21 @@
 from os.path import join, dirname, abspath
 root_dir = dirname(File('SConstruct').rfile().abspath)
 sys.path.append(join(root_dir, 'tools'))
-Import('context object_files')
+import js2c
+Import('context object_files tools')
+
+
+# Needed for test-log. Paths are relative to the cctest dir.
+JS_FILES_FOR_TESTS = [
+  '../../../tools/splaytree.js',
+  '../../../tools/codemap.js',
+  '../../../tools/csvparser.js',
+  '../../../tools/consarray.js',
+  '../../../tools/profile.js',
+  '../../../tools/profile_view.js',
+  '../../../tools/logreader.js',
+  'log-eq-of-logging-and-traversal.js',
+]
 
 
 SOURCES = {
@@ -51,6 +65,7 @@
     'test-debug.cc',
     'test-decls.cc',
     'test-deoptimization.cc',
+    'test-dictionary.cc',
     'test-diy-fp.cc',
     'test-double.cc',
     'test-dtoa.cc',
@@ -58,13 +73,14 @@
     'test-fixed-dtoa.cc',
     'test-flags.cc',
     'test-func-name-inference.cc',
+    'test-hashing.cc',
     'test-hashmap.cc',
     'test-heap-profiler.cc',
     'test-heap.cc',
     'test-list.cc',
     'test-liveedit.cc',
     'test-lock.cc',
-    'test-log-utils.cc',
+    'test-lockers.cc',
     'test-log.cc',
     'test-mark-compact.cc',
     'test-parsing.cc',
@@ -79,10 +95,10 @@
     'test-strtod.cc',
     'test-thread-termination.cc',
     'test-threads.cc',
-    'test-type-info.cc',
     'test-unbound-queue.cc',
     'test-utils.cc',
-    'test-version.cc'
+    'test-version.cc',
+    'test-weakmaps.cc'
   ],
   'arch:arm':  [
     'test-assembler-arm.cc',
@@ -96,7 +112,8 @@
   'arch:x64': ['test-assembler-x64.cc',
                'test-macro-assembler-x64.cc',
                'test-log-stack-tracer.cc'],
-  'arch:mips': ['test-assembler-mips.cc'],
+  'arch:mips': ['test-assembler-mips.cc',
+                'test-disasm-mips.cc'],
   'os:linux':  ['test-platform-linux.cc'],
   'os:macos':  ['test-platform-macos.cc'],
   'os:nullos': ['test-platform-nullos.cc'],
@@ -106,12 +123,22 @@
 
 def Build():
   cctest_files = context.GetRelevantSources(SOURCES)
-  env = Environment()
+  env = Environment(tools=tools)
   env.Replace(**context.flags['cctest'])
   context.ApplyEnvOverrides(env)
+  env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
+
+  # Combine the JavaScript library files into a single C++ file and
+  # compile it.
+  js_files = [s for s in JS_FILES_FOR_TESTS]
+  js_files_src = env.JS2C(
+    ['js-files-for-cctest.cc'], js_files, **{'TYPE': 'TEST', 'COMPRESSION': 'off'})
+  js_files_obj = context.ConfigureObject(env, js_files_src, CPPPATH=['.'])
+
   # There seems to be a glitch in the way scons decides where to put
   # PDB files when compiling using MSVC so we specify it manually.
   # This should not affect any other platforms.
+  object_files.append(js_files_obj)
   return env.Program('cctest', ['cctest.cc', cctest_files, object_files],
       PDB='cctest.exe.pdb')
 
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index aa2b355..5d0cab3 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -26,42 +26,22 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
-  'target_defaults': {
-    'conditions': [
-      ['OS!="mac"', {
-        # TODO(sgjesse): This is currently copied from v8.gyp, should probably
-        # be refactored.
-        'conditions': [
-          ['v8_target_arch=="arm"', {
-            'defines': [
-              'V8_TARGET_ARCH_ARM',
-            ],
-          }],
-          ['v8_target_arch=="ia32"', {
-            'defines': [
-              'V8_TARGET_ARCH_IA32',
-            ],
-          }],
-          ['v8_target_arch=="x64"', {
-            'defines': [
-              'V8_TARGET_ARCH_X64',
-            ],
-          }],
-        ],
-      }],
-    ],
+  'includes': ['../../build/common.gypi'],
+  'variables': {
+    'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
   },
   'targets': [
     {
       'target_name': 'cctest',
       'type': 'executable',
       'dependencies': [
-        '../../tools/gyp/v8.gyp:v8',
+        'resources',
       ],
       'include_dirs': [
         '../../src',
       ],
       'sources': [
+        '<(generated_file)',
         'cctest.cc',
         'gay-fixed.cc',
         'gay-precision.cc',
@@ -80,6 +60,7 @@
         'test-debug.cc',
         'test-decls.cc',
         'test-deoptimization.cc',
+        'test-dictionary.cc',
         'test-diy-fp.cc',
         'test-double.cc',
         'test-dtoa.cc',
@@ -93,10 +74,11 @@
         'test-list.cc',
         'test-liveedit.cc',
         'test-lock.cc',
+        'test-lockers.cc',
         'test-log.cc',
-        'test-log-utils.cc',
         'test-mark-compact.cc',
         'test-parsing.cc',
+        'test-platform-tls.cc',
         'test-profile-generator.cc',
         'test-regexp.cc',
         'test-reloc-info.cc',
@@ -107,7 +89,6 @@
         'test-strtod.cc',
         'test-thread-termination.cc',
         'test-threads.cc',
-        'test-type-info.cc',
         'test-unbound-queue.cc',
         'test-utils.cc',
         'test-version.cc'
@@ -136,7 +117,7 @@
         ['v8_target_arch=="mips"', {
           'sources': [
             'test-assembler-mips.cc',
-            'test-mips.cc',
+            'test-disasm-mips.cc',
           ],
         }],
         [ 'OS=="linux"', {
@@ -154,6 +135,56 @@
             'test-platform-win32.cc',
           ],
         }],
+        ['component=="shared_library"', {
+          # cctest can't be built against a shared library, so we need to
+          # depend on the underlying static target in that case.
+          'conditions': [
+            ['v8_use_snapshot=="true"', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
+            },
+            {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_nosnapshot'],
+            }],
+          ],
+        }, {
+          'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+        }],
+      ],
+    },
+    {
+      'target_name': 'resources',
+      'type': 'none',
+      'variables': {
+        'file_list': [
+           '../../tools/splaytree.js',
+           '../../tools/codemap.js',
+           '../../tools/csvparser.js',
+           '../../tools/consarray.js',
+           '../../tools/profile.js',
+           '../../tools/profile_view.js',
+           '../../tools/logreader.js',
+           'log-eq-of-logging-and-traversal.js',
+        ],
+      },
+      'actions': [
+        {
+          'action_name': 'js2c',
+          'inputs': [
+            '../../tools/js2c.py',
+            '<@(file_list)',
+          ],
+          'outputs': [
+            '<(generated_file)',
+          ],
+          'action': [
+            'python',
+            '../../tools/js2c.py',
+            '<@(_outputs)',
+            'TEST',  # type
+            'off',  # compression
+            '<@(file_list)',
+          ],
+        }
       ],
     },
   ],
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index 277593c..c04d893 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -87,8 +87,8 @@
 class ApiTestFuzzer: public v8::internal::Thread {
  public:
   void CallTest();
-  explicit ApiTestFuzzer(v8::internal::Isolate* isolate, int num)
-      : Thread(isolate, "ApiTestFuzzer"),
+  explicit ApiTestFuzzer(int num)
+      : Thread("ApiTestFuzzer"),
         test_number_(num),
         gate_(v8::internal::OS::CreateSemaphore(0)),
         active_(true) {
@@ -98,7 +98,11 @@
   // The ApiTestFuzzer is also a Thread, so it has a Run method.
   virtual void Run();
 
-  enum PartOfTest { FIRST_PART, SECOND_PART };
+  enum PartOfTest { FIRST_PART,
+                    SECOND_PART,
+                    THIRD_PART,
+                    FOURTH_PART,
+                    LAST_PART = FOURTH_PART };
 
   static void Setup(PartOfTest part);
   static void RunAllTests();
@@ -106,6 +110,7 @@
   // This method switches threads if we are running the Threading test.
   // Otherwise it does nothing.
   static void Fuzz();
+
  private:
   static bool fuzzing_;
   static int tests_being_run_;
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 3bb9998..5122da5 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -30,9 +30,6 @@
 test-api/Bug*: FAIL
 
 ##############################################################################
-# BUG(281): This test fails on some Linuxes.
-test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
-
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
@@ -41,6 +38,9 @@
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
+# We do not yet shrink weak maps after they have been emptied by the GC
+test-weakmaps/Shrinking: FAIL
+
 ##############################################################################
 [ $arch == arm ]
 
@@ -54,14 +54,15 @@
 test-log/ProfLazyMode: SKIP
 
 # BUG(945): Socket connect fails on ARM
+test-debug/DebuggerAgent: SKIP
 test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
 test-sockets/Socket: SKIP
 
 # BUG(1075): Unresolved crashes.
-cctest/test-serialize/Deserialize: PASS || FAIL
-cctest/test-serialize/DeserializeFromSecondSerializationAndRunScript2: PASS || FAIL
-cctest/test-serialize/DeserializeAndRunScript2: PASS || FAIL
-cctest/test-serialize/DeserializeFromSecondSerialization: PASS || FAIL
+test-serialize/Deserialize: SKIP
+test-serialize/DeserializeFromSecondSerializationAndRunScript2: SKIP
+test-serialize/DeserializeAndRunScript2: SKIP
+test-serialize/DeserializeFromSecondSerialization: SKIP
 
 ##############################################################################
 [ $arch == arm && $crankshaft ]
@@ -73,28 +74,9 @@
 
 ##############################################################################
 [ $arch == mips ]
-test-accessors: SKIP
-test-alloc: SKIP
-test-api: SKIP
-test-compiler: SKIP
-test-cpu-profiler: SKIP
-test-debug: SKIP
-test-decls: SKIP
 test-deoptimization: SKIP
-test-func-name-inference: SKIP
-test-heap: SKIP
-test-heap-profiler: SKIP
-test-log: SKIP
-test-log-utils: SKIP
-test-mark-compact: SKIP
-test-parsing: SKIP
-test-profile-generator: SKIP
-test-regexp: SKIP
 test-serialize: SKIP
-test-sockets: SKIP
-test-strings: SKIP
-test-threads: SKIP
-test-thread-termination: SKIP
 
-##############################################################################
-# Tests that time out with Isolates
+# Tests that may time out.
+test-api/ExternalArrays: PASS || TIMEOUT
+test-api/Threading: PASS || TIMEOUT
diff --git a/test/cctest/log-eq-of-logging-and-traversal.js b/test/cctest/log-eq-of-logging-and-traversal.js
new file mode 100644
index 0000000..05643bf
--- /dev/null
+++ b/test/cctest/log-eq-of-logging-and-traversal.js
@@ -0,0 +1,185 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is a supplementary file for test-log/EquivalenceOfLoggingAndTraversal.
+
+function parseState(s) {
+  switch (s) {
+  case "": return Profile.CodeState.COMPILED;
+  case "~": return Profile.CodeState.OPTIMIZABLE;
+  case "*": return Profile.CodeState.OPTIMIZED;
+  }
+  throw new Error("unknown code state: " + s);
+}
+
+function LogProcessor() {
+  LogReader.call(this, {
+      'code-creation': {
+          parsers: [null, parseInt, parseInt, null, 'var-args'],
+          processor: this.processCodeCreation },
+      'code-move': { parsers: [parseInt, parseInt],
+          processor: this.processCodeMove },
+      'code-delete': null,
+      'sfi-move': { parsers: [parseInt, parseInt],
+          processor: this.processFunctionMove },
+      'shared-library': null,
+      'profiler': null,
+      'tick': null });
+  this.profile = new Profile();
+
+}
+LogProcessor.prototype.__proto__ = LogReader.prototype;
+
+LogProcessor.prototype.processCodeCreation = function(
+    type, start, size, name, maybe_func) {
+  if (type != "LazyCompile" && type != "Script" && type != "Function") return;
+  // Discard types to avoid discrepancies in "LazyCompile" vs. "Function".
+  type = "";
+  if (maybe_func.length) {
+    var funcAddr = parseInt(maybe_func[0]);
+    var state = parseState(maybe_func[1]);
+    this.profile.addFuncCode(type, name, start, size, funcAddr, state);
+  } else {
+    this.profile.addCode(type, name, start, size);
+  }
+};
+
+LogProcessor.prototype.processCodeMove = function(from, to) {
+  this.profile.moveCode(from, to);
+};
+
+LogProcessor.prototype.processFunctionMove = function(from, to) {
+  this.profile.moveFunc(from, to);
+};
+
+function RunTest() {
+  // _log must be provided externally.
+  var log_lines = _log.split("\n");
+  var line, pos = 0, log_lines_length = log_lines.length;
+  if (log_lines_length < 2)
+    return "log_lines_length < 2";
+  var logging_processor = new LogProcessor();
+  for ( ; pos < log_lines_length; ++pos) {
+    line = log_lines[pos];
+    if (line === "test-logging-done,\"\"") {
+      ++pos;
+      break;
+    }
+    logging_processor.processLogLine(line);
+  }
+  logging_processor.profile.cleanUpFuncEntries();
+  var logging_entries =
+    logging_processor.profile.codeMap_.getAllDynamicEntriesWithAddresses();
+  if (logging_entries.length === 0)
+    return "logging_entries.length === 0";
+  var traversal_processor = new LogProcessor();
+  for ( ; pos < log_lines_length; ++pos) {
+    line = log_lines[pos];
+    if (line === "test-traversal-done,\"\"") break;
+    traversal_processor.processLogLine(line);
+  }
+  var traversal_entries =
+    traversal_processor.profile.codeMap_.getAllDynamicEntriesWithAddresses();
+  if (traversal_entries.length === 0)
+    return "traversal_entries.length === 0";
+
+  function addressComparator(entryA, entryB) {
+    return entryA[0] < entryB[0] ? -1 : (entryA[0] > entryB[0] ? 1 : 0);
+  }
+
+  logging_entries.sort(addressComparator);
+  traversal_entries.sort(addressComparator);
+
+  function entityNamesEqual(entityA, entityB) {
+    if ("getRawName" in entityB &&
+        entityNamesEqual.builtins.indexOf(entityB.getRawName()) !== -1) {
+      return true;
+    }
+    if (entityNamesEqual.builtins.indexOf(entityB.getName()) !== -1) return true;
+    return entityA.getName() === entityB.getName();
+  }
+  entityNamesEqual.builtins =
+    ["Boolean", "Function", "Number", "Object",
+     "Script", "String", "RegExp", "Date", "Error"];
+
+  function entitiesEqual(entityA, entityB) {
+    if ((entityA === null && entityB !== null) ||
+      (entityA !== null && entityB === null)) return true;
+    return entityA.size === entityB.size && entityNamesEqual(entityA, entityB);
+  }
+
+  var l_pos = 0, t_pos = 0;
+  var l_len = logging_entries.length, t_len = traversal_entries.length;
+  var comparison = [];
+  var equal = true;
+  // Do a merge-like comparison of entries. At the same address we expect to
+  // find the same entries. We skip builtins during log parsing, but the
+  // compiled-functions traversal may erroneously recognize them as functions,
+  // so we expect to see more functions in traversal than in logging.
+  // Since we don't track code deletions, logging can also report more entries
+  // than traversal.
+  while (l_pos < l_len && t_pos < t_len) {
+    var entryA = logging_entries[l_pos];
+    var entryB = traversal_entries[t_pos];
+    var cmp = addressComparator(entryA, entryB);
+    var entityA = entryA[1], entityB = entryB[1];
+    var address = entryA[0];
+    if (cmp < 0) {
+      ++l_pos;
+      entityB = null;
+    } else if (cmp > 0) {
+      ++t_pos;
+      entityA = null;
+      address = entryB[0];
+    } else {
+      ++l_pos;
+      ++t_pos;
+    }
+    var entities_equal = entitiesEqual(entityA, entityB);
+    if (!entities_equal) equal = false;
+    comparison.push([entities_equal, address, entityA, entityB]);
+  }
+  return [equal, comparison];
+}
+
+var result = RunTest();
+if (typeof result !== "string") {
+  var out = [];
+  if (!result[0]) {
+    var comparison = result[1];
+    for (var i = 0, l = comparison.length; i < l; ++i) {
+      var c = comparison[i];
+      out.push((c[0] ? "  " : "* ") +
+               c[1].toString(16) + " " +
+               (c[2] ? c[2] : "---") + " " +
+               (c[3] ? c[3] : "---"));
+    }
+  }
+  result[0] ? true : out.join("\n");
+} else {
+  result;
+}
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index 028f82f..d95536d 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -44,8 +44,6 @@
 using ::v8::AccessorInfo;
 using ::v8::Extension;
 
-namespace i = ::v8::internal;
-
 static v8::Handle<Value> handle_property(Local<String> name,
                                          const AccessorInfo&) {
   ApiTestFuzzer::Fuzz();
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 83ab1a9..9767192 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -139,11 +139,11 @@
   // Patch the map to have an accessor for "get".
   Handle<Map> map(function->initial_map());
   Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
-  Handle<Proxy> proxy = FACTORY->NewProxy(&kDescriptor);
-  instance_descriptors = FACTORY->CopyAppendProxyDescriptor(
+  Handle<Foreign> foreign = FACTORY->NewForeign(&kDescriptor);
+  instance_descriptors = FACTORY->CopyAppendForeignDescriptor(
       instance_descriptors,
       FACTORY->NewStringFromAscii(Vector<const char>("get", 3)),
-      proxy,
+      foreign,
       static_cast<PropertyAttributes>(0));
   map->set_instance_descriptors(*instance_descriptors);
   // Add the Foo constructor the global object.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index d7621d1..c1c8aae 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -30,6 +30,7 @@
 #include "v8.h"
 
 #include "api.h"
+#include "isolate.h"
 #include "compilation-cache.h"
 #include "execution.h"
 #include "snapshot.h"
@@ -39,7 +40,7 @@
 #include "parser.h"
 #include "unicode-inl.h"
 
-static const bool kLogThreading = true;
+static const bool kLogThreading = false;
 
 static bool IsNaN(double x) {
 #ifdef WIN32
@@ -71,8 +72,6 @@
 using ::v8::V8;
 using ::v8::Value;
 
-namespace i = ::i;
-
 
 static void ExpectString(const char* code, const char* expected) {
   Local<Value> result = CompileRun(code);
@@ -202,8 +201,6 @@
 }
 
 
-
-
 THREADED_TEST(ArgumentSignature) {
   v8::HandleScope scope;
   LocalContext env;
@@ -332,16 +329,14 @@
 
 class TestResource: public String::ExternalStringResource {
  public:
-  static int dispose_count;
-
-  explicit TestResource(uint16_t* data)
-      : data_(data), length_(0) {
+  explicit TestResource(uint16_t* data, int* counter = NULL)
+    : data_(data), length_(0), counter_(counter) {
     while (data[length_]) ++length_;
   }
 
   ~TestResource() {
     i::DeleteArray(data_);
-    ++dispose_count;
+    if (counter_ != NULL) ++*counter_;
   }
 
   const uint16_t* data() const {
@@ -354,23 +349,18 @@
  private:
   uint16_t* data_;
   size_t length_;
+  int* counter_;
 };
 
 
-int TestResource::dispose_count = 0;
-
-
 class TestAsciiResource: public String::ExternalAsciiStringResource {
  public:
-  static int dispose_count;
-
-  explicit TestAsciiResource(const char* data)
-      : data_(data),
-        length_(strlen(data)) { }
+  explicit TestAsciiResource(const char* data, int* counter = NULL)
+    : data_(data), length_(strlen(data)), counter_(counter) { }
 
   ~TestAsciiResource() {
     i::DeleteArray(data_);
-    ++dispose_count;
+    if (counter_ != NULL) ++*counter_;
   }
 
   const char* data() const {
@@ -383,20 +373,18 @@
  private:
   const char* data_;
   size_t length_;
+  int* counter_;
 };
 
 
-int TestAsciiResource::dispose_count = 0;
-
-
 THREADED_TEST(ScriptUsingStringResource) {
-  TestResource::dispose_count = 0;
+  int dispose_count = 0;
   const char* c_source = "1 + 2 * 3";
   uint16_t* two_byte_source = AsciiToTwoByteString(c_source);
   {
     v8::HandleScope scope;
     LocalContext env;
-    TestResource* resource = new TestResource(two_byte_source);
+    TestResource* resource = new TestResource(two_byte_source, &dispose_count);
     Local<String> source = String::NewExternal(resource);
     Local<Script> script = Script::Compile(source);
     Local<Value> value = script->Run();
@@ -406,37 +394,38 @@
     CHECK_EQ(resource,
              static_cast<TestResource*>(source->GetExternalStringResource()));
     HEAP->CollectAllGarbage(false);
-    CHECK_EQ(0, TestResource::dispose_count);
+    CHECK_EQ(0, dispose_count);
   }
   v8::internal::Isolate::Current()->compilation_cache()->Clear();
   HEAP->CollectAllGarbage(false);
-  CHECK_EQ(1, TestResource::dispose_count);
+  CHECK_EQ(1, dispose_count);
 }
 
 
 THREADED_TEST(ScriptUsingAsciiStringResource) {
-  TestAsciiResource::dispose_count = 0;
+  int dispose_count = 0;
   const char* c_source = "1 + 2 * 3";
   {
     v8::HandleScope scope;
     LocalContext env;
     Local<String> source =
-        String::NewExternal(new TestAsciiResource(i::StrDup(c_source)));
+        String::NewExternal(new TestAsciiResource(i::StrDup(c_source),
+                                                  &dispose_count));
     Local<Script> script = Script::Compile(source);
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
     HEAP->CollectAllGarbage(false);
-    CHECK_EQ(0, TestAsciiResource::dispose_count);
+    CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
   HEAP->CollectAllGarbage(false);
-  CHECK_EQ(1, TestAsciiResource::dispose_count);
+  CHECK_EQ(1, dispose_count);
 }
 
 
 THREADED_TEST(ScriptMakingExternalString) {
-  TestResource::dispose_count = 0;
+  int dispose_count = 0;
   uint16_t* two_byte_source = AsciiToTwoByteString("1 + 2 * 3");
   {
     v8::HandleScope scope;
@@ -445,23 +434,24 @@
     // Trigger GCs so that the newly allocated string moves to old gen.
     HEAP->CollectGarbage(i::NEW_SPACE);  // in survivor space now
     HEAP->CollectGarbage(i::NEW_SPACE);  // in old gen now
-    bool success = source->MakeExternal(new TestResource(two_byte_source));
+    bool success = source->MakeExternal(new TestResource(two_byte_source,
+                                                         &dispose_count));
     CHECK(success);
     Local<Script> script = Script::Compile(source);
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
     HEAP->CollectAllGarbage(false);
-    CHECK_EQ(0, TestResource::dispose_count);
+    CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
   HEAP->CollectAllGarbage(false);
-  CHECK_EQ(1, TestResource::dispose_count);
+  CHECK_EQ(1, dispose_count);
 }
 
 
 THREADED_TEST(ScriptMakingExternalAsciiString) {
-  TestAsciiResource::dispose_count = 0;
+  int dispose_count = 0;
   const char* c_source = "1 + 2 * 3";
   {
     v8::HandleScope scope;
@@ -471,18 +461,18 @@
     HEAP->CollectGarbage(i::NEW_SPACE);  // in survivor space now
     HEAP->CollectGarbage(i::NEW_SPACE);  // in old gen now
     bool success = source->MakeExternal(
-        new TestAsciiResource(i::StrDup(c_source)));
+        new TestAsciiResource(i::StrDup(c_source), &dispose_count));
     CHECK(success);
     Local<Script> script = Script::Compile(source);
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
     HEAP->CollectAllGarbage(false);
-    CHECK_EQ(0, TestAsciiResource::dispose_count);
+    CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
   HEAP->CollectAllGarbage(false);
-  CHECK_EQ(1, TestAsciiResource::dispose_count);
+  CHECK_EQ(1, dispose_count);
 }
 
 
@@ -606,49 +596,52 @@
 
 
 THREADED_TEST(ScavengeExternalString) {
-  TestResource::dispose_count = 0;
+  int dispose_count = 0;
   bool in_new_space = false;
   {
     v8::HandleScope scope;
     uint16_t* two_byte_string = AsciiToTwoByteString("test string");
     Local<String> string =
-        String::NewExternal(new TestResource(two_byte_string));
+      String::NewExternal(new TestResource(two_byte_string,
+                                           &dispose_count));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     HEAP->CollectGarbage(i::NEW_SPACE);
     in_new_space = HEAP->InNewSpace(*istring);
     CHECK(in_new_space || HEAP->old_data_space()->Contains(*istring));
-    CHECK_EQ(0, TestResource::dispose_count);
+    CHECK_EQ(0, dispose_count);
   }
   HEAP->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
-  CHECK_EQ(1, TestResource::dispose_count);
+  CHECK_EQ(1, dispose_count);
 }
 
 
 THREADED_TEST(ScavengeExternalAsciiString) {
-  TestAsciiResource::dispose_count = 0;
+  int dispose_count = 0;
   bool in_new_space = false;
   {
     v8::HandleScope scope;
     const char* one_byte_string = "test string";
     Local<String> string = String::NewExternal(
-        new TestAsciiResource(i::StrDup(one_byte_string)));
+        new TestAsciiResource(i::StrDup(one_byte_string), &dispose_count));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     HEAP->CollectGarbage(i::NEW_SPACE);
     in_new_space = HEAP->InNewSpace(*istring);
     CHECK(in_new_space || HEAP->old_data_space()->Contains(*istring));
-    CHECK_EQ(0, TestAsciiResource::dispose_count);
+    CHECK_EQ(0, dispose_count);
   }
   HEAP->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
-  CHECK_EQ(1, TestAsciiResource::dispose_count);
+  CHECK_EQ(1, dispose_count);
 }
 
 
 class TestAsciiResourceWithDisposeControl: public TestAsciiResource {
  public:
+  // Only used by non-threaded tests, so it can use static fields.
   static int dispose_calls;
+  static int dispose_count;
 
   TestAsciiResourceWithDisposeControl(const char* data, bool dispose)
-      : TestAsciiResource(data),
+      : TestAsciiResource(data, &dispose_count),
         dispose_(dispose) { }
 
   void Dispose() {
@@ -660,6 +653,7 @@
 };
 
 
+int TestAsciiResourceWithDisposeControl::dispose_count = 0;
 int TestAsciiResourceWithDisposeControl::dispose_calls = 0;
 
 
@@ -667,7 +661,7 @@
   const char* c_source = "1 + 2 * 3";
 
   // Use a stack allocated external string resource allocated object.
-  TestAsciiResource::dispose_count = 0;
+  TestAsciiResourceWithDisposeControl::dispose_count = 0;
   TestAsciiResourceWithDisposeControl::dispose_calls = 0;
   TestAsciiResourceWithDisposeControl res_stack(i::StrDup(c_source), false);
   {
@@ -679,15 +673,15 @@
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
     HEAP->CollectAllGarbage(false);
-    CHECK_EQ(0, TestAsciiResource::dispose_count);
+    CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
-  CHECK_EQ(0, TestAsciiResource::dispose_count);
+  CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
 
   // Use a heap allocated external string resource allocated object.
-  TestAsciiResource::dispose_count = 0;
+  TestAsciiResourceWithDisposeControl::dispose_count = 0;
   TestAsciiResourceWithDisposeControl::dispose_calls = 0;
   TestAsciiResource* res_heap =
       new TestAsciiResourceWithDisposeControl(i::StrDup(c_source), true);
@@ -700,12 +694,12 @@
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
     HEAP->CollectAllGarbage(false);
-    CHECK_EQ(0, TestAsciiResource::dispose_count);
+    CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
-  CHECK_EQ(1, TestAsciiResource::dispose_count);
+  CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
 }
 
 
@@ -829,7 +823,7 @@
 static v8::Handle<v8::Value> callback(const v8::Arguments& args) {
   void* ptr = v8::External::Unwrap(args.Data());
   CHECK_EQ(expected_ptr, ptr);
-  return v8::Boolean::New(true);
+  return v8::True();
 }
 
 
@@ -1027,6 +1021,90 @@
 }
 
 
+THREADED_TEST(IsNativeError) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::Handle<Value> syntax_error = CompileRun(
+      "var out = 0; try { eval(\"#\"); } catch(x) { out = x; } out; ");
+  CHECK(syntax_error->IsNativeError());
+  v8::Handle<Value> not_error = CompileRun("{a:42}");
+  CHECK(!not_error->IsNativeError());
+  v8::Handle<Value> not_object = CompileRun("42");
+  CHECK(!not_object->IsNativeError());
+}
+
+
+THREADED_TEST(StringObject) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::Handle<Value> boxed_string = CompileRun("new String(\"test\")");
+  CHECK(boxed_string->IsStringObject());
+  v8::Handle<Value> unboxed_string = CompileRun("\"test\"");
+  CHECK(!unboxed_string->IsStringObject());
+  v8::Handle<Value> boxed_not_string = CompileRun("new Number(42)");
+  CHECK(!boxed_not_string->IsStringObject());
+  v8::Handle<Value> not_object = CompileRun("0");
+  CHECK(!not_object->IsStringObject());
+  v8::Handle<v8::StringObject> as_boxed = boxed_string.As<v8::StringObject>();
+  CHECK(!as_boxed.IsEmpty());
+  Local<v8::String> the_string = as_boxed->StringValue();
+  CHECK(!the_string.IsEmpty());
+  ExpectObject("\"test\"", the_string);
+  v8::Handle<v8::Value> new_boxed_string = v8::StringObject::New(the_string);
+  CHECK(new_boxed_string->IsStringObject());
+  as_boxed = new_boxed_string.As<v8::StringObject>();
+  the_string = as_boxed->StringValue();
+  CHECK(!the_string.IsEmpty());
+  ExpectObject("\"test\"", the_string);
+}
+
+
+THREADED_TEST(NumberObject) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::Handle<Value> boxed_number = CompileRun("new Number(42)");
+  CHECK(boxed_number->IsNumberObject());
+  v8::Handle<Value> unboxed_number = CompileRun("42");
+  CHECK(!unboxed_number->IsNumberObject());
+  v8::Handle<Value> boxed_not_number = CompileRun("new Boolean(false)");
+  CHECK(!boxed_not_number->IsNumberObject());
+  v8::Handle<v8::NumberObject> as_boxed = boxed_number.As<v8::NumberObject>();
+  CHECK(!as_boxed.IsEmpty());
+  double the_number = as_boxed->NumberValue();
+  CHECK_EQ(42.0, the_number);
+  v8::Handle<v8::Value> new_boxed_number = v8::NumberObject::New(43);
+  CHECK(new_boxed_number->IsNumberObject());
+  as_boxed = new_boxed_number.As<v8::NumberObject>();
+  the_number = as_boxed->NumberValue();
+  CHECK_EQ(43.0, the_number);
+}
+
+
+THREADED_TEST(BooleanObject) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::Handle<Value> boxed_boolean = CompileRun("new Boolean(true)");
+  CHECK(boxed_boolean->IsBooleanObject());
+  v8::Handle<Value> unboxed_boolean = CompileRun("true");
+  CHECK(!unboxed_boolean->IsBooleanObject());
+  v8::Handle<Value> boxed_not_boolean = CompileRun("new Number(42)");
+  CHECK(!boxed_not_boolean->IsBooleanObject());
+  v8::Handle<v8::BooleanObject> as_boxed =
+      boxed_boolean.As<v8::BooleanObject>();
+  CHECK(!as_boxed.IsEmpty());
+  bool the_boolean = as_boxed->BooleanValue();
+  CHECK_EQ(true, the_boolean);
+  v8::Handle<v8::Value> boxed_true = v8::BooleanObject::New(true);
+  v8::Handle<v8::Value> boxed_false = v8::BooleanObject::New(false);
+  CHECK(boxed_true->IsBooleanObject());
+  CHECK(boxed_false->IsBooleanObject());
+  as_boxed = boxed_true.As<v8::BooleanObject>();
+  CHECK_EQ(true, as_boxed->BooleanValue());
+  as_boxed = boxed_false.As<v8::BooleanObject>();
+  CHECK_EQ(false, as_boxed->BooleanValue());
+}
+
+
 THREADED_TEST(Number) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1052,8 +1130,10 @@
   v8::HandleScope scope;
   LocalContext env;
   double PI = 3.1415926;
-  Local<Value> date_obj = v8::Date::New(PI);
-  CHECK_EQ(3.0, date_obj->NumberValue());
+  Local<Value> date = v8::Date::New(PI);
+  CHECK_EQ(3.0, date->NumberValue());
+  date.As<v8::Date>()->Set(v8_str("property"), v8::Integer::New(42));
+  CHECK_EQ(42, date.As<v8::Date>()->Get(v8_str("property"))->Int32Value());
 }
 
 
@@ -1728,6 +1808,34 @@
 }
 
 
+THREADED_TEST(Regress97784) {
+  // Regression test for crbug.com/97784
+  // Messing with Object.prototype should not have an effect on hidden
+  // properties.
+  v8::HandleScope scope;
+  LocalContext env;
+
+  v8::Local<v8::Object> obj = v8::Object::New();
+  v8::Local<v8::String> key = v8_str("hidden");
+
+  CompileRun(
+      "set_called = false;"
+      "Object.defineProperty("
+      "    Object.prototype,"
+      "    'hidden',"
+      "    {get: function() { return 45; },"
+      "     set: function() { set_called = true; }})");
+
+  CHECK(obj->GetHiddenValue(key).IsEmpty());
+  // Make sure that the getter and setter from Object.prototype are not
+  // invoked.  If they were, we would have full access to the hidden
+  // properties in the accessor.
+  CHECK(obj->SetHiddenValue(key, v8::Integer::New(42)));
+  ExpectFalse("set_called");
+  CHECK_EQ(42, obj->GetHiddenValue(key)->Int32Value());
+}
+
+
 static bool interceptor_for_hidden_properties_called;
 static v8::Handle<Value> InterceptorForHiddenProperties(
     Local<String> name, const AccessorInfo& info) {
@@ -2047,10 +2155,15 @@
 THREADED_TEST(PropertyAttributes) {
   v8::HandleScope scope;
   LocalContext context;
+  // none
+  Local<String> prop = v8_str("none");
+  context->Global()->Set(prop, v8_num(7));
+  CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(prop));
   // read-only
-  Local<String> prop = v8_str("read_only");
+  prop = v8_str("read_only");
   context->Global()->Set(prop, v8_num(7), v8::ReadOnly);
   CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
+  CHECK_EQ(v8::ReadOnly, context->Global()->GetPropertyAttributes(prop));
   Script::Compile(v8_str("read_only = 9"))->Run();
   CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
   context->Global()->Set(prop, v8_num(10));
@@ -2061,6 +2174,25 @@
   CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
   Script::Compile(v8_str("delete dont_delete"))->Run();
   CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
+  CHECK_EQ(v8::DontDelete, context->Global()->GetPropertyAttributes(prop));
+  // dont-enum
+  prop = v8_str("dont_enum");
+  context->Global()->Set(prop, v8_num(28), v8::DontEnum);
+  CHECK_EQ(v8::DontEnum, context->Global()->GetPropertyAttributes(prop));
+  // absent
+  prop = v8_str("absent");
+  CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(prop));
+  Local<Value> fake_prop = v8_num(1);
+  CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(fake_prop));
+  // exception
+  TryCatch try_catch;
+  Local<Value> exception =
+      CompileRun("({ toString: function() { throw 'exception';} })");
+  CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(exception));
+  CHECK(try_catch.HasCaught());
+  String::AsciiValue exception_value(try_catch.Exception());
+  CHECK_EQ("exception", *exception_value);
+  try_catch.Reset();
 }
 
 
@@ -2505,7 +2637,7 @@
 
 
 v8::Handle<Value> CCatcher(const v8::Arguments& args) {
-  if (args.Length() < 1) return v8::Boolean::New(false);
+  if (args.Length() < 1) return v8::False();
   v8::HandleScope scope;
   v8::TryCatch try_catch;
   Local<Value> result = v8::Script::Compile(args[0]->ToString())->Run();
@@ -3038,8 +3170,7 @@
   result = script_define->Run();
   CHECK(try_catch.HasCaught());
   String::AsciiValue exception_value(try_catch.Exception());
-  CHECK_EQ(*exception_value,
-           "TypeError: Cannot redefine property: defineProperty");
+  CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
 }
 
 THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
@@ -3084,8 +3215,7 @@
   result = script_define->Run();
   CHECK(try_catch.HasCaught());
   String::AsciiValue exception_value(try_catch.Exception());
-  CHECK_EQ(*exception_value,
-           "TypeError: Cannot redefine property: defineProperty");
+  CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
 }
 
 
@@ -3203,8 +3333,7 @@
         "{get: function() { return 'func'; }})");
     CHECK(try_catch.HasCaught());
     String::AsciiValue exception_value(try_catch.Exception());
-    CHECK_EQ(*exception_value,
-            "TypeError: Cannot redefine property: defineProperty");
+    CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
   }
   {
     v8::TryCatch try_catch;
@@ -3212,8 +3341,7 @@
         "{get: function() { return 'func'; }})");
     CHECK(try_catch.HasCaught());
     String::AsciiValue exception_value(try_catch.Exception());
-    CHECK_EQ(*exception_value,
-            "TypeError: Cannot redefine property: defineProperty");
+    CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
   }
 }
 
@@ -3455,6 +3583,114 @@
 }
 
 
+static v8::Handle<Value> UnboxedDoubleIndexedPropertyGetter(
+    uint32_t index,
+    const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (index < 25) {
+    return v8::Handle<Value>(v8_num(index));
+  }
+  return v8::Handle<Value>();
+}
+
+
+static v8::Handle<Value> UnboxedDoubleIndexedPropertySetter(
+    uint32_t index,
+    Local<Value> value,
+    const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (index < 25) {
+    return v8::Handle<Value>(v8_num(index));
+  }
+  return v8::Handle<Value>();
+}
+
+
+Handle<v8::Array> UnboxedDoubleIndexedPropertyEnumerator(
+    const AccessorInfo& info) {
+  // Force the list of returned keys to be stored in a FastDoubleArray.
+  Local<Script> indexed_property_names_script = Script::Compile(v8_str(
+      "keys = new Array(); keys[125000] = 1;"
+      "for(i = 0; i < 80000; i++) { keys[i] = i; };"
+      "keys.length = 25; keys;"));
+  Local<Value> result = indexed_property_names_script->Run();
+  return Local<v8::Array>(::v8::Array::Cast(*result));
+}
+
+
+// Make sure that the interceptor code in the runtime properly handles
+// merging property name lists for double-array-backed arrays.
+THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetIndexedPropertyHandler(UnboxedDoubleIndexedPropertyGetter,
+                                   UnboxedDoubleIndexedPropertySetter,
+                                   0,
+                                   0,
+                                   UnboxedDoubleIndexedPropertyEnumerator);
+  LocalContext context;
+  context->Global()->Set(v8_str("obj"), templ->NewInstance());
+  // When obj is created, force it to be stored in a FastDoubleArray.
+  Local<Script> create_unboxed_double_script = Script::Compile(v8_str(
+      "obj[125000] = 1; for(i = 0; i < 80000; i+=2) { obj[i] = i; } "
+      "key_count = 0; "
+      "for (x in obj) {key_count++;};"
+      "obj;"));
+  Local<Value> result = create_unboxed_double_script->Run();
+  CHECK(result->ToObject()->HasRealIndexedProperty(2000));
+  Local<Script> key_count_check = Script::Compile(v8_str(
+      "key_count;"));
+  result = key_count_check->Run();
+  CHECK_EQ(v8_num(40013), result);
+}
+
+
+Handle<v8::Array> NonStrictArgsIndexedPropertyEnumerator(
+    const AccessorInfo& info) {
+  // Force the list of returned keys to be stored in an Arguments object.
+  Local<Script> indexed_property_names_script = Script::Compile(v8_str(
+      "function f(w,x) {"
+      " return arguments;"
+      "}"
+      "keys = f(0, 1, 2, 3);"
+      "keys;"));
+  Local<Value> result = indexed_property_names_script->Run();
+  return Local<v8::Array>(static_cast<v8::Array*>(::v8::Object::Cast(*result)));
+}
+
+
+static v8::Handle<Value> NonStrictIndexedPropertyGetter(
+    uint32_t index,
+    const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (index < 4) {
+    return v8::Handle<Value>(v8_num(index));
+  }
+  return v8::Handle<Value>();
+}
+
+
+// Make sure that the interceptor code in the runtime properly handles
+// merging property name lists for non-string arguments arrays.
+THREADED_TEST(IndexedInterceptorNonStrictArgsWithIndexedAccessor) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetIndexedPropertyHandler(NonStrictIndexedPropertyGetter,
+                                   0,
+                                   0,
+                                   0,
+                                   NonStrictArgsIndexedPropertyEnumerator);
+  LocalContext context;
+  context->Global()->Set(v8_str("obj"), templ->NewInstance());
+  Local<Script> create_args_script =
+      Script::Compile(v8_str(
+          "var key_count = 0;"
+          "for (x in obj) {key_count++;} key_count;"));
+  Local<Value> result = create_args_script->Run();
+  CHECK_EQ(v8_num(4), result);
+}
+
+
 static v8::Handle<Value> IdentityIndexedPropertyGetter(
     uint32_t index,
     const AccessorInfo& info) {
@@ -3865,6 +4101,49 @@
 }
 
 
+THREADED_TEST(VoidLiteral) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  Local<v8::FunctionTemplate> desc =
+      v8::FunctionTemplate::New(0, v8::Handle<Value>());
+  desc->InstanceTemplate()->MarkAsUndetectable();  // undetectable
+
+  Local<v8::Object> obj = desc->GetFunction()->NewInstance();
+  env->Global()->Set(v8_str("undetectable"), obj);
+
+  ExpectBoolean("undefined == void 0", true);
+  ExpectBoolean("undetectable == void 0", true);
+  ExpectBoolean("null == void 0", true);
+  ExpectBoolean("undefined === void 0", true);
+  ExpectBoolean("undetectable === void 0", false);
+  ExpectBoolean("null === void 0", false);
+
+  ExpectBoolean("void 0 == undefined", true);
+  ExpectBoolean("void 0 == undetectable", true);
+  ExpectBoolean("void 0 == null", true);
+  ExpectBoolean("void 0 === undefined", true);
+  ExpectBoolean("void 0 === undetectable", false);
+  ExpectBoolean("void 0 === null", false);
+
+  ExpectString("(function() {"
+               "  try {"
+               "    return x === void 0;"
+               "  } catch(e) {"
+               "    return e.toString();"
+               "  }"
+               "})()",
+               "ReferenceError: x is not defined");
+  ExpectString("(function() {"
+               "  try {"
+               "    return void 0 === x;"
+               "  } catch(e) {"
+               "    return e.toString();"
+               "  }"
+               "})()",
+               "ReferenceError: x is not defined");
+}
+
 
 THREADED_TEST(ExtensibleOnUndetectable) {
   v8::HandleScope scope;
@@ -3939,11 +4218,43 @@
 }
 
 
+TEST(UndetectableOptimized) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope scope;
+  LocalContext env;
+
+  Local<String> obj = String::NewUndetectable("foo");
+  env->Global()->Set(v8_str("undetectable"), obj);
+  env->Global()->Set(v8_str("detectable"), v8_str("bar"));
+
+  ExpectString(
+      "function testBranch() {"
+      "  if (!%_IsUndetectableObject(undetectable)) throw 1;"
+      "  if (%_IsUndetectableObject(detectable)) throw 2;"
+      "}\n"
+      "function testBool() {"
+      "  var b1 = !%_IsUndetectableObject(undetectable);"
+      "  var b2 = %_IsUndetectableObject(detectable);"
+      "  if (b1) throw 3;"
+      "  if (b2) throw 4;"
+      "  return b1 == b2;"
+      "}\n"
+      "%OptimizeFunctionOnNextCall(testBranch);"
+      "%OptimizeFunctionOnNextCall(testBool);"
+      "for (var i = 0; i < 10; i++) {"
+      "  testBranch();"
+      "  testBool();"
+      "}\n"
+      "\"PASS\"",
+      "PASS");
+}
+
+
 template <typename T> static void USE(T) { }
 
 
 // This test is not intended to be run, just type checked.
-static void PersistentHandles() {
+static inline void PersistentHandles() {
   USE(PersistentHandles);
   Local<String> str = v8_str("foo");
   v8::Persistent<String> p_str = v8::Persistent<String>::New(str);
@@ -4128,6 +4439,69 @@
 }
 
 
+class NativeFunctionExtension : public Extension {
+ public:
+  NativeFunctionExtension(const char* name,
+                          const char* source,
+                          v8::InvocationCallback fun = &Echo)
+      : Extension(name, source),
+        function_(fun) { }
+
+  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+      v8::Handle<v8::String> name) {
+    return v8::FunctionTemplate::New(function_);
+  }
+
+  static v8::Handle<v8::Value> Echo(const v8::Arguments& args) {
+    if (args.Length() >= 1) return (args[0]);
+    return v8::Undefined();
+  }
+ private:
+  v8::InvocationCallback function_;
+};
+
+
+THREADED_TEST(NativeFunctionDeclaration) {
+  v8::HandleScope handle_scope;
+  const char* name = "nativedecl";
+  v8::RegisterExtension(new NativeFunctionExtension(name,
+                                                    "native function foo();"));
+  const char* extension_names[] = { name };
+  v8::ExtensionConfiguration extensions(1, extension_names);
+  v8::Handle<Context> context = Context::New(&extensions);
+  Context::Scope lock(context);
+  v8::Handle<Value> result = Script::Compile(v8_str("foo(42);"))->Run();
+  CHECK_EQ(result, v8::Integer::New(42));
+}
+
+
+THREADED_TEST(NativeFunctionDeclarationError) {
+  v8::HandleScope handle_scope;
+  const char* name = "nativedeclerr";
+  // Syntax error in extension code.
+  v8::RegisterExtension(new NativeFunctionExtension(name,
+                                                    "native\nfunction foo();"));
+  const char* extension_names[] = { name };
+  v8::ExtensionConfiguration extensions(1, extension_names);
+  v8::Handle<Context> context = Context::New(&extensions);
+  ASSERT(context.IsEmpty());
+}
+
+THREADED_TEST(NativeFunctionDeclarationErrorEscape) {
+  v8::HandleScope handle_scope;
+  const char* name = "nativedeclerresc";
+  // Syntax error in extension code - an escape code in "native" means that
+  // it's not treated as a keyword.
+  v8::RegisterExtension(new NativeFunctionExtension(
+      name,
+      "nativ\\u0065 function foo();"));
+  const char* extension_names[] = { name };
+  v8::ExtensionConfiguration extensions(1, extension_names);
+  v8::Handle<Context> context = Context::New(&extensions);
+  ASSERT(context.IsEmpty());
+}
+
+
 static void CheckDependencies(const char* name, const char* expected) {
   v8::HandleScope handle_scope;
   v8::ExtensionConfiguration config(1, &name);
@@ -4399,55 +4773,116 @@
 }
 
 
-static bool in_scavenge = false;
-static int last = -1;
-
-static void ForceScavenge(v8::Persistent<v8::Value> obj, void* data) {
-  CHECK_EQ(-1, last);
-  last = 0;
+static void DisposeAndSetFlag(v8::Persistent<v8::Value> obj, void* data) {
   obj.Dispose();
   obj.Clear();
-  in_scavenge = true;
-  HEAP->PerformScavenge();
-  in_scavenge = false;
   *(reinterpret_cast<bool*>(data)) = true;
 }
 
-static void CheckIsNotInvokedInScavenge(v8::Persistent<v8::Value> obj,
-                                        void* data) {
-  CHECK_EQ(0, last);
-  last = 1;
-  *(reinterpret_cast<bool*>(data)) = in_scavenge;
-  obj.Dispose();
-  obj.Clear();
-}
 
-THREADED_TEST(NoWeakRefCallbacksInScavenge) {
-  // Test verifies that scavenge cannot invoke WeakReferenceCallbacks.
-  // Calling callbacks from scavenges is unsafe as objects held by those
-  // handlers might have become strongly reachable, but scavenge doesn't
-  // check that.
+THREADED_TEST(IndependentWeakHandle) {
   v8::Persistent<Context> context = Context::New();
   Context::Scope context_scope(context);
 
   v8::Persistent<v8::Object> object_a;
-  v8::Persistent<v8::Object> object_b;
 
   {
     v8::HandleScope handle_scope;
-    object_b = v8::Persistent<v8::Object>::New(v8::Object::New());
     object_a = v8::Persistent<v8::Object>::New(v8::Object::New());
   }
 
   bool object_a_disposed = false;
-  object_a.MakeWeak(&object_a_disposed, &ForceScavenge);
-  bool released_in_scavenge = false;
-  object_b.MakeWeak(&released_in_scavenge, &CheckIsNotInvokedInScavenge);
+  object_a.MakeWeak(&object_a_disposed, &DisposeAndSetFlag);
+  object_a.MarkIndependent();
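+  // The handle is independent, so the scavenge below may run the weak
+  // callback, which disposes the handle and sets the flag checked next.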
+  HEAP->PerformScavenge();
+  CHECK(object_a_disposed);
+}
 
-  while (!object_a_disposed) {
-    HEAP->CollectAllGarbage(false);
+
+static void InvokeScavenge() {
+  HEAP->PerformScavenge();
+}
+
+
+static void InvokeMarkSweep() {
+  HEAP->CollectAllGarbage(false);
+}
+
+
+static void ForceScavenge(v8::Persistent<v8::Value> obj, void* data) {
+  obj.Dispose();
+  obj.Clear();
+  *(reinterpret_cast<bool*>(data)) = true;
+  InvokeScavenge();
+}
+
+
+static void ForceMarkSweep(v8::Persistent<v8::Value> obj, void* data) {
+  obj.Dispose();
+  obj.Clear();
+  *(reinterpret_cast<bool*>(data)) = true;
+  InvokeMarkSweep();
+}
+
+
+THREADED_TEST(GCFromWeakCallbacks) {
+  v8::Persistent<Context> context = Context::New();
+  Context::Scope context_scope(context);
+
+  static const int kNumberOfGCTypes = 2;
+  v8::WeakReferenceCallback gc_forcing_callback[kNumberOfGCTypes] =
+      {&ForceScavenge, &ForceMarkSweep};
+
+  typedef void (*GCInvoker)();
+  GCInvoker invoke_gc[kNumberOfGCTypes] = {&InvokeScavenge, &InvokeMarkSweep};
+
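+  // Exercise every combination of the GC that triggers the weak callback
+  // (outer) and the GC invoked from inside the callback (inner).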
+  for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
+    for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
+      v8::Persistent<v8::Object> object;
+      {
+        v8::HandleScope handle_scope;
+        object = v8::Persistent<v8::Object>::New(v8::Object::New());
+      }
+      bool disposed = false;
+      object.MakeWeak(&disposed, gc_forcing_callback[inner_gc]);
+      object.MarkIndependent();
+      invoke_gc[outer_gc]();
+      CHECK(disposed);
+    }
   }
-  CHECK(!released_in_scavenge);
+}
+
+
+static void RevivingCallback(v8::Persistent<v8::Value> obj, void* data) {
+  obj.ClearWeak();
+  *(reinterpret_cast<bool*>(data)) = true;
+}
+
+
+THREADED_TEST(IndependentHandleRevival) {
+  v8::Persistent<Context> context = Context::New();
+  Context::Scope context_scope(context);
+
+  v8::Persistent<v8::Object> object;
+  {
+    v8::HandleScope handle_scope;
+    object = v8::Persistent<v8::Object>::New(v8::Object::New());
+    object->Set(v8_str("x"), v8::Integer::New(1));
+    v8::Local<String> y_str = v8_str("y");
+    object->Set(y_str, y_str);
+  }
+  bool revived = false;
+  object.MakeWeak(&revived, &RevivingCallback);
+  object.MarkIndependent();
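+  // RevivingCallback clears weakness instead of disposing, so the object
+  // should survive both the scavenge and the full GC with its properties
+  // intact.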
+  HEAP->PerformScavenge();
+  CHECK(revived);
+  HEAP->CollectAllGarbage(true);
+  {
+    v8::HandleScope handle_scope;
+    v8::Local<String> y_str = v8_str("y");
+    CHECK_EQ(v8::Integer::New(1), object->Get(v8_str("x")));
+    CHECK(object->Get(y_str)->Equals(y_str));
+  }
 }
 
 
@@ -4889,6 +5324,40 @@
   CHECK_EQ(0, strncmp("d\1", buf, 2));
   uint16_t answer7[] = {'d', 0x101};
   CHECK_EQ(0, StrNCmp16(answer7, wbuf, 2));
+
+  memset(wbuf, 0x1, sizeof(wbuf));
+  wbuf[5] = 'X';
+  len = str->Write(wbuf, 0, 6, String::NO_NULL_TERMINATION);
+  CHECK_EQ(5, len);
+  CHECK_EQ('X', wbuf[5]);
+  uint16_t answer8a[] = {'a', 'b', 'c', 'd', 'e'};
+  uint16_t answer8b[] = {'a', 'b', 'c', 'd', 'e', '\0'};
+  CHECK_EQ(0, StrNCmp16(answer8a, wbuf, 5));
+  CHECK_NE(0, StrCmp16(answer8b, wbuf));
+  wbuf[5] = '\0';
+  CHECK_EQ(0, StrCmp16(answer8b, wbuf));
+
+  memset(buf, 0x1, sizeof(buf));
+  buf[5] = 'X';
+  len = str->WriteAscii(buf, 0, 6, String::NO_NULL_TERMINATION);
+  CHECK_EQ(5, len);
+  CHECK_EQ('X', buf[5]);
+  CHECK_EQ(0, strncmp("abcde", buf, 5));
+  CHECK_NE(0, strcmp("abcde", buf));
+  buf[5] = '\0';
+  CHECK_EQ(0, strcmp("abcde", buf));
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  utf8buf[8] = 'X';
+  len = str2->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen,
+                        String::NO_NULL_TERMINATION);
+  CHECK_EQ(8, len);
+  CHECK_EQ('X', utf8buf[8]);
+  CHECK_EQ(5, charlen);
+  CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\342\230\203", 8));
+  CHECK_NE(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
+  utf8buf[8] = '\0';
+  CHECK_EQ(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
 }
 
 
@@ -6570,7 +7039,7 @@
   context->Global()->Set(v8_str("__proto__"), o);
 
   Local<Value> value =
-      Script::Compile(v8_str("propertyIsEnumerable(0)"))->Run();
+      Script::Compile(v8_str("this.propertyIsEnumerable(0)"))->Run();
   CHECK(value->IsBoolean());
   CHECK(!value->BooleanValue());
 
@@ -6689,6 +7158,37 @@
 }
 
 
+THREADED_TEST(FunctionReadOnlyPrototype) {
+  v8::HandleScope handle_scope;
+  LocalContext context;
+
+  Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+  t1->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(42));
+  t1->ReadOnlyPrototype();
+  context->Global()->Set(v8_str("func1"), t1->GetFunction());
+  // Configured value of ReadOnly flag.
+  CHECK(CompileRun(
+      "(function() {"
+      "  descriptor = Object.getOwnPropertyDescriptor(func1, 'prototype');"
+      "  return (descriptor['writable'] == false);"
+      "})()")->BooleanValue());
+  CHECK_EQ(42, CompileRun("func1.prototype.x")->Int32Value());
+  CHECK_EQ(42,
+           CompileRun("func1.prototype = {}; func1.prototype.x")->Int32Value());
+
+  Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+  t2->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(42));
+  context->Global()->Set(v8_str("func2"), t2->GetFunction());
+  // Default value of ReadOnly flag.
+  CHECK(CompileRun(
+      "(function() {"
+      "  descriptor = Object.getOwnPropertyDescriptor(func2, 'prototype');"
+      "  return (descriptor['writable'] == true);"
+      "})()")->BooleanValue());
+  CHECK_EQ(42, CompileRun("func2.prototype.x")->Int32Value());
+}
+
+
 THREADED_TEST(SetPrototypeThrows) {
   v8::HandleScope handle_scope;
   LocalContext context;
@@ -6746,6 +7246,200 @@
   CHECK(value->BooleanValue());
 }
 
+
+static Handle<Value> ConstructorCallback(const Arguments& args) {
+  ApiTestFuzzer::Fuzz();
+  Local<Object> This;
+
+  if (args.IsConstructCall()) {
+    Local<Object> Holder = args.Holder();
+    This = Object::New();
+    Local<Value> proto = Holder->GetPrototype();
+    if (proto->IsObject()) {
+      This->SetPrototype(proto);
+    }
+  } else {
+    This = args.This();
+  }
+
+  This->Set(v8_str("a"), args[0]);
+  return This;
+}
+
+
+static Handle<Value> FakeConstructorCallback(const Arguments& args) {
+  ApiTestFuzzer::Fuzz();
+  return args[0];
+}
+
+
+THREADED_TEST(ConstructorForObject) {
+  v8::HandleScope handle_scope;
+  LocalContext context;
+
+  { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+    instance_template->SetCallAsFunctionHandler(ConstructorCallback);
+    Local<Object> instance = instance_template->NewInstance();
+    context->Global()->Set(v8_str("obj"), instance);
+    v8::TryCatch try_catch;
+    Local<Value> value;
+    CHECK(!try_catch.HasCaught());
+
+    // Call the Object's constructor with a 32-bit signed integer.
+    value = CompileRun("(function() { var o = new obj(28); return o.a; })()");
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsInt32());
+    CHECK_EQ(28, value->Int32Value());
+
+    Local<Value> args1[] = { v8_num(28) };
+    Local<Value> value_obj1 = instance->CallAsConstructor(1, args1);
+    CHECK(value_obj1->IsObject());
+    Local<Object> object1 = Local<Object>::Cast(value_obj1);
+    value = object1->Get(v8_str("a"));
+    CHECK(value->IsInt32());
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(28, value->Int32Value());
+
+    // Call the Object's constructor with a String.
+    value = CompileRun(
+        "(function() { var o = new obj('tipli'); return o.a; })()");
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsString());
+    String::AsciiValue string_value1(value->ToString());
+    CHECK_EQ("tipli", *string_value1);
+
+    Local<Value> args2[] = { v8_str("tipli") };
+    Local<Value> value_obj2 = instance->CallAsConstructor(1, args2);
+    CHECK(value_obj2->IsObject());
+    Local<Object> object2 = Local<Object>::Cast(value_obj2);
+    value = object2->Get(v8_str("a"));
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsString());
+    String::AsciiValue string_value2(value->ToString());
+    CHECK_EQ("tipli", *string_value2);
+
+    // Call the Object's constructor with a Boolean.
+    value = CompileRun("(function() { var o = new obj(true); return o.a; })()");
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsBoolean());
+    CHECK_EQ(true, value->BooleanValue());
+
+    Handle<Value> args3[] = { v8::True() };
+    Local<Value> value_obj3 = instance->CallAsConstructor(1, args3);
+    CHECK(value_obj3->IsObject());
+    Local<Object> object3 = Local<Object>::Cast(value_obj3);
+    value = object3->Get(v8_str("a"));
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsBoolean());
+    CHECK_EQ(true, value->BooleanValue());
+
+    // Call the Object's constructor with undefined.
+    Handle<Value> args4[] = { v8::Undefined() };
+    Local<Value> value_obj4 = instance->CallAsConstructor(1, args4);
+    CHECK(value_obj4->IsObject());
+    Local<Object> object4 = Local<Object>::Cast(value_obj4);
+    value = object4->Get(v8_str("a"));
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsUndefined());
+
+    // Call the Object's constructor with null.
+    Handle<Value> args5[] = { v8::Null() };
+    Local<Value> value_obj5 = instance->CallAsConstructor(1, args5);
+    CHECK(value_obj5->IsObject());
+    Local<Object> object5 = Local<Object>::Cast(value_obj5);
+    value = object5->Get(v8_str("a"));
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsNull());
+  }
+
+  // Check exception handling when there is no constructor set for the Object.
+  { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+    Local<Object> instance = instance_template->NewInstance();
+    context->Global()->Set(v8_str("obj2"), instance);
+    v8::TryCatch try_catch;
+    Local<Value> value;
+    CHECK(!try_catch.HasCaught());
+
+    value = CompileRun("new obj2(28)");
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value1(try_catch.Exception());
+    CHECK_EQ("TypeError: object is not a function", *exception_value1);
+    try_catch.Reset();
+
+    Local<Value> args[] = { v8_num(29) };
+    value = instance->CallAsConstructor(1, args);
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value2(try_catch.Exception());
+    CHECK_EQ("TypeError: #<Object> is not a function", *exception_value2);
+    try_catch.Reset();
+  }
+
+  // Check the case when the constructor throws an exception.
+  { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+    instance_template->SetCallAsFunctionHandler(ThrowValue);
+    Local<Object> instance = instance_template->NewInstance();
+    context->Global()->Set(v8_str("obj3"), instance);
+    v8::TryCatch try_catch;
+    Local<Value> value;
+    CHECK(!try_catch.HasCaught());
+
+    value = CompileRun("new obj3(22)");
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value1(try_catch.Exception());
+    CHECK_EQ("22", *exception_value1);
+    try_catch.Reset();
+
+    Local<Value> args[] = { v8_num(23) };
+    value = instance->CallAsConstructor(1, args);
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value2(try_catch.Exception());
+    CHECK_EQ("23", *exception_value2);
+    try_catch.Reset();
+  }
+
+  // Check whether the constructor returns an object or a non-object.
+  { Local<FunctionTemplate> function_template =
+        FunctionTemplate::New(FakeConstructorCallback);
+    Local<Function> function = function_template->GetFunction();
+    Local<Object> instance1 = function;
+    context->Global()->Set(v8_str("obj4"), instance1);
+    v8::TryCatch try_catch;
+    Local<Value> value;
+    CHECK(!try_catch.HasCaught());
+
+    CHECK(instance1->IsObject());
+    CHECK(instance1->IsFunction());
+
+    value = CompileRun("new obj4(28)");
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsObject());
+
+    Local<Value> args1[] = { v8_num(28) };
+    value = instance1->CallAsConstructor(1, args1);
+    CHECK(!try_catch.HasCaught());
+    CHECK(value->IsObject());
+
+    Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+    instance_template->SetCallAsFunctionHandler(FakeConstructorCallback);
+    Local<Object> instance2 = instance_template->NewInstance();
+    context->Global()->Set(v8_str("obj5"), instance2);
+    CHECK(!try_catch.HasCaught());
+
+    CHECK(instance2->IsObject());
+    CHECK(!instance2->IsFunction());
+
+    value = CompileRun("new obj5(28)");
+    CHECK(!try_catch.HasCaught());
+    CHECK(!value->IsObject());
+
+    Local<Value> args2[] = { v8_num(28) };
+    value = instance2->CallAsConstructor(1, args2);
+    CHECK(!try_catch.HasCaught());
+    CHECK(!value->IsObject());
+  }
+}
+
+
 THREADED_TEST(FunctionDescriptorException) {
   v8::HandleScope handle_scope;
   LocalContext context;
@@ -6962,50 +7656,153 @@
   v8::HandleScope scope;
   LocalContext context;
 
-  Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
-  Local<ObjectTemplate> instance_template = t->InstanceTemplate();
-  instance_template->SetCallAsFunctionHandler(call_as_function);
-  Local<v8::Object> instance = t->GetFunction()->NewInstance();
-  context->Global()->Set(v8_str("obj"), instance);
-  v8::TryCatch try_catch;
-  Local<Value> value;
-  CHECK(!try_catch.HasCaught());
+  { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+    Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+    instance_template->SetCallAsFunctionHandler(call_as_function);
+    Local<v8::Object> instance = t->GetFunction()->NewInstance();
+    context->Global()->Set(v8_str("obj"), instance);
+    v8::TryCatch try_catch;
+    Local<Value> value;
+    CHECK(!try_catch.HasCaught());
 
-  value = CompileRun("obj(42)");
-  CHECK(!try_catch.HasCaught());
-  CHECK_EQ(42, value->Int32Value());
+    value = CompileRun("obj(42)");
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(42, value->Int32Value());
 
-  value = CompileRun("(function(o){return o(49)})(obj)");
-  CHECK(!try_catch.HasCaught());
-  CHECK_EQ(49, value->Int32Value());
+    value = CompileRun("(function(o){return o(49)})(obj)");
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(49, value->Int32Value());
 
-  // test special case of call as function
-  value = CompileRun("[obj]['0'](45)");
-  CHECK(!try_catch.HasCaught());
-  CHECK_EQ(45, value->Int32Value());
+    // test special case of call as function
+    value = CompileRun("[obj]['0'](45)");
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(45, value->Int32Value());
 
-  value = CompileRun("obj.call = Function.prototype.call;"
-                     "obj.call(null, 87)");
-  CHECK(!try_catch.HasCaught());
-  CHECK_EQ(87, value->Int32Value());
+    value = CompileRun("obj.call = Function.prototype.call;"
+                       "obj.call(null, 87)");
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(87, value->Int32Value());
 
-  // Regression tests for bug #1116356: Calling call through call/apply
-  // must work for non-function receivers.
-  const char* apply_99 = "Function.prototype.call.apply(obj, [this, 99])";
-  value = CompileRun(apply_99);
-  CHECK(!try_catch.HasCaught());
-  CHECK_EQ(99, value->Int32Value());
+    // Regression tests for bug #1116356: Calling call through call/apply
+    // must work for non-function receivers.
+    const char* apply_99 = "Function.prototype.call.apply(obj, [this, 99])";
+    value = CompileRun(apply_99);
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(99, value->Int32Value());
 
-  const char* call_17 = "Function.prototype.call.call(obj, this, 17)";
-  value = CompileRun(call_17);
-  CHECK(!try_catch.HasCaught());
-  CHECK_EQ(17, value->Int32Value());
+    const char* call_17 = "Function.prototype.call.call(obj, this, 17)";
+    value = CompileRun(call_17);
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(17, value->Int32Value());
 
-  // Check that the call-as-function handler can be called through
-  // new.
-  value = CompileRun("new obj(43)");
-  CHECK(!try_catch.HasCaught());
-  CHECK_EQ(-43, value->Int32Value());
+    // Check that the call-as-function handler can be called through
+    // new.
+    value = CompileRun("new obj(43)");
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(-43, value->Int32Value());
+
+    // Check that the call-as-function handler can be called through
+    // the API.
+    v8::Handle<Value> args[] = { v8_num(28) };
+    value = instance->CallAsFunction(instance, 1, args);
+    CHECK(!try_catch.HasCaught());
+    CHECK_EQ(28, value->Int32Value());
+  }
+
+  { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+    Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+    Local<v8::Object> instance = t->GetFunction()->NewInstance();
+    context->Global()->Set(v8_str("obj2"), instance);
+    v8::TryCatch try_catch;
+    Local<Value> value;
+    CHECK(!try_catch.HasCaught());
+
+    // Call an object without a call-as-function handler through JS.
+    value = CompileRun("obj2(28)");
+    CHECK(value.IsEmpty());
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value1(try_catch.Exception());
+    CHECK_EQ("TypeError: Property 'obj2' of object #<Object> is not a function",
+             *exception_value1);
+    try_catch.Reset();
+
+    // Call an object without a call-as-function handler through the API.
+    value = CompileRun("obj2(28)");
+    v8::Handle<Value> args[] = { v8_num(28) };
+    value = instance->CallAsFunction(instance, 1, args);
+    CHECK(value.IsEmpty());
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value2(try_catch.Exception());
+    CHECK_EQ("TypeError: [object Object] is not a function", *exception_value2);
+    try_catch.Reset();
+  }
+
+  { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+    Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+    instance_template->SetCallAsFunctionHandler(ThrowValue);
+    Local<v8::Object> instance = t->GetFunction()->NewInstance();
+    context->Global()->Set(v8_str("obj3"), instance);
+    v8::TryCatch try_catch;
+    Local<Value> value;
+    CHECK(!try_catch.HasCaught());
+
+    // Catch the exception thrown by the call-as-function handler.
+    value = CompileRun("obj3(22)");
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value1(try_catch.Exception());
+    CHECK_EQ("22", *exception_value1);
+    try_catch.Reset();
+
+    v8::Handle<Value> args[] = { v8_num(23) };
+    value = instance->CallAsFunction(instance, 1, args);
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value2(try_catch.Exception());
+    CHECK_EQ("23", *exception_value2);
+    try_catch.Reset();
+  }
+}
+
+
+// Check whether a non-function object is callable.
+THREADED_TEST(CallableObject) {
+  v8::HandleScope scope;
+  LocalContext context;
+
+  { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+    instance_template->SetCallAsFunctionHandler(call_as_function);
+    Local<Object> instance = instance_template->NewInstance();
+    v8::TryCatch try_catch;
+
+    CHECK(instance->IsCallable());
+    CHECK(!try_catch.HasCaught());
+  }
+
+  { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+    Local<Object> instance = instance_template->NewInstance();
+    v8::TryCatch try_catch;
+
+    CHECK(!instance->IsCallable());
+    CHECK(!try_catch.HasCaught());
+  }
+
+  { Local<FunctionTemplate> function_template =
+        FunctionTemplate::New(call_as_function);
+    Local<Function> function = function_template->GetFunction();
+    Local<Object> instance = function;
+    v8::TryCatch try_catch;
+
+    CHECK(instance->IsCallable());
+    CHECK(!try_catch.HasCaught());
+  }
+
+  { Local<FunctionTemplate> function_template = FunctionTemplate::New();
+    Local<Function> function = function_template->GetFunction();
+    Local<Object> instance = function;
+    v8::TryCatch try_catch;
+
+    CHECK(instance->IsCallable());
+    CHECK(!try_catch.HasCaught());
+  }
 }
 
 
@@ -8711,6 +9508,8 @@
     if (callback != NULL) {
       V8::AddMessageListener(callback);
     }
+    // A small number to control how many times the message handler should
+    // throw an exception.
     call_depth = 5;
     ExpectFalse(
         "var thrown = false;\n"
@@ -8796,10 +9595,7 @@
 
 static v8::Handle<Value> IsConstructHandler(const v8::Arguments& args) {
   ApiTestFuzzer::Fuzz();
-  if (args.IsConstructCall()) {
-    return v8::Boolean::New(true);
-  }
-  return v8::Boolean::New(false);
+  return v8::Boolean::New(args.IsConstructCall());
 }
 
 
@@ -8944,14 +9740,12 @@
 void ApiTestFuzzer::Setup(PartOfTest part) {
   linear_congruential_generator = i::FLAG_testing_prng_seed;
   fuzzing_ = true;
-  int start = (part == FIRST_PART) ? 0 : (RegisterThreadedTest::count() >> 1);
-  int end = (part == FIRST_PART)
-      ? (RegisterThreadedTest::count() >> 1)
-      : RegisterThreadedTest::count();
-  active_tests_ = tests_being_run_ = end - start;
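+  // Split the registered threaded tests into (LAST_PART + 1) consecutive
+  // ranges; [start, end] is the inclusive range run by this part.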
+  int count = RegisterThreadedTest::count();
+  int start = count * part / (LAST_PART + 1);
+  int end = (count * (part + 1) / (LAST_PART + 1)) - 1;
+  active_tests_ = tests_being_run_ = end - start + 1;
   for (int i = 0; i < tests_being_run_; i++) {
-    RegisterThreadedTest::nth(i)->fuzzer_ = new ApiTestFuzzer(
-        i::Isolate::Current(), i + start);
+    RegisterThreadedTest::nth(i)->fuzzer_ = new ApiTestFuzzer(i + start);
   }
   for (int i = 0; i < active_tests_; i++) {
     RegisterThreadedTest::nth(i)->fuzzer_->Start();
@@ -9018,6 +9812,17 @@
   ApiTestFuzzer::TearDown();
 }
 
+TEST(Threading3) {
+  ApiTestFuzzer::Setup(ApiTestFuzzer::THIRD_PART);
+  ApiTestFuzzer::RunAllTests();
+  ApiTestFuzzer::TearDown();
+}
+
+TEST(Threading4) {
+  ApiTestFuzzer::Setup(ApiTestFuzzer::FOURTH_PART);
+  ApiTestFuzzer::RunAllTests();
+  ApiTestFuzzer::TearDown();
+}
 
 void ApiTestFuzzer::CallTest() {
   if (kLogThreading)
@@ -9173,6 +9978,7 @@
   // the first garbage collection but some of the maps have already
   // been marked at that point.  Therefore some of the maps are not
   // collected until the second garbage collection.
+  HEAP->global_context_map();
   HEAP->CollectAllGarbage(false);
   HEAP->CollectAllGarbage(false);
   int count = GetGlobalObjectsCount();
@@ -9511,6 +10317,19 @@
 }
 
 
+void CheckOwnProperties(v8::Handle<v8::Value> val,
+                        int elmc,
+                        const char* elmv[]) {
+  v8::Handle<v8::Object> obj = val.As<v8::Object>();
+  v8::Handle<v8::Array> props = obj->GetOwnPropertyNames();
+  CHECK_EQ(elmc, props->Length());
+  for (int i = 0; i < elmc; i++) {
+    v8::String::Utf8Value elm(props->Get(v8::Integer::New(i)));
+    CHECK_EQ(elmv[i], *elm);
+  }
+}
+
+
 THREADED_TEST(PropertyEnumeration) {
   v8::HandleScope scope;
   LocalContext context;
@@ -9528,15 +10347,21 @@
   int elmc0 = 0;
   const char** elmv0 = NULL;
   CheckProperties(elms->Get(v8::Integer::New(0)), elmc0, elmv0);
+  CheckOwnProperties(elms->Get(v8::Integer::New(0)), elmc0, elmv0);
   int elmc1 = 2;
   const char* elmv1[] = {"a", "b"};
   CheckProperties(elms->Get(v8::Integer::New(1)), elmc1, elmv1);
+  CheckOwnProperties(elms->Get(v8::Integer::New(1)), elmc1, elmv1);
   int elmc2 = 3;
   const char* elmv2[] = {"0", "1", "2"};
   CheckProperties(elms->Get(v8::Integer::New(2)), elmc2, elmv2);
+  CheckOwnProperties(elms->Get(v8::Integer::New(2)), elmc2, elmv2);
   int elmc3 = 4;
   const char* elmv3[] = {"w", "z", "x", "y"};
   CheckProperties(elms->Get(v8::Integer::New(3)), elmc3, elmv3);
+  int elmc4 = 2;
+  const char* elmv4[] = {"w", "z"};
+  CheckOwnProperties(elms->Get(v8::Integer::New(3)), elmc4, elmv4);
 }
 
 THREADED_TEST(PropertyEnumeration2) {
@@ -9938,17 +10763,16 @@
            *exception_value);
 
   try_catch.Reset();
+
   // Overwrite function bar's start position with 200.  The function entry
-  // will not be found when searching for it by position.
+  // will not be found when searching for it by position and we should fall
+  // back on eager compilation.
   sd = v8::ScriptData::PreCompile(script, i::StrLength(script));
   sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
   sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
       200;
   compiled_script = Script::New(source, NULL, sd);
-  CHECK(try_catch.HasCaught());
-  String::AsciiValue second_exception_value(try_catch.Message()->Get());
-  CHECK_EQ("Uncaught SyntaxError: Invalid preparser data for function bar",
-           *second_exception_value);
+  CHECK(!try_catch.HasCaught());
 
   delete sd;
 }
@@ -10056,7 +10880,7 @@
     gc_during_regexp_ = 0;
     regexp_success_ = false;
     gc_success_ = false;
-    GCThread gc_thread(i::Isolate::Current(), this);
+    GCThread gc_thread(this);
     gc_thread.Start();
     v8::Locker::StartPreemption(1);
 
@@ -10069,14 +10893,15 @@
     CHECK(regexp_success_);
     CHECK(gc_success_);
   }
+
  private:
   // Number of garbage collections required.
   static const int kRequiredGCs = 5;
 
   class GCThread : public i::Thread {
    public:
-    explicit GCThread(i::Isolate* isolate, RegExpInterruptTest* test)
-        : Thread(isolate, "GCThread"), test_(test) {}
+    explicit GCThread(RegExpInterruptTest* test)
+        : Thread("GCThread"), test_(test) {}
     virtual void Run() {
       test_->CollectGarbage();
     }
@@ -10178,7 +11003,7 @@
     gc_during_apply_ = 0;
     apply_success_ = false;
     gc_success_ = false;
-    GCThread gc_thread(i::Isolate::Current(), this);
+    GCThread gc_thread(this);
     gc_thread.Start();
     v8::Locker::StartPreemption(1);
 
@@ -10191,14 +11016,15 @@
     CHECK(apply_success_);
     CHECK(gc_success_);
   }
+
  private:
   // Number of garbage collections required.
   static const int kRequiredGCs = 2;
 
   class GCThread : public i::Thread {
    public:
-    explicit GCThread(i::Isolate* isolate, ApplyInterruptTest* test)
-        : Thread(isolate, "GCThread"), test_(test) {}
+    explicit GCThread(ApplyInterruptTest* test)
+        : Thread("GCThread"), test_(test) {}
     virtual void Run() {
       test_->CollectGarbage();
     }
@@ -10472,7 +11298,7 @@
         NONE,
         i::kNonStrictMode)->ToObjectChecked();
 
-    MorphThread morph_thread(i::Isolate::Current(), this);
+    MorphThread morph_thread(this);
     morph_thread.Start();
     v8::Locker::StartPreemption(1);
     LongRunningRegExp();
@@ -10484,17 +11310,16 @@
     CHECK(regexp_success_);
     CHECK(morph_success_);
   }
- private:
 
+ private:
   // Number of string modifications required.
   static const int kRequiredModifications = 5;
   static const int kMaxModifications = 100;
 
   class MorphThread : public i::Thread {
    public:
-    explicit MorphThread(i::Isolate* isolate,
-                         RegExpStringModificationTest* test)
-        : Thread(isolate, "MorphThread"), test_(test) {}
+    explicit MorphThread(RegExpStringModificationTest* test)
+        : Thread("MorphThread"), test_(test) {}
     virtual void Run() {
       test_->MorphString();
     }
@@ -10936,7 +11761,7 @@
   }
   HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
   for (int i = 0; i < kElementCount; i++) {
-    CHECK_EQ(i % 256, pixels->get(i));
+    CHECK_EQ(i % 256, pixels->get_scalar(i));
     CHECK_EQ(i % 256, pixel_data[i]);
   }
 
@@ -10996,14 +11821,21 @@
   CHECK_EQ(28, result->Int32Value());
 
   i::Handle<i::Smi> value(i::Smi::FromInt(2));
-  i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  i::Handle<i::Object> no_failure;
+  no_failure = i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  ASSERT(!no_failure.is_null());
+  i::USE(no_failure);
   CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
   *value.location() = i::Smi::FromInt(256);
-  i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  no_failure = i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  ASSERT(!no_failure.is_null());
+  i::USE(no_failure);
   CHECK_EQ(255,
            i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
   *value.location() = i::Smi::FromInt(-1);
-  i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  no_failure = i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  ASSERT(!no_failure.is_null());
+  i::USE(no_failure);
   CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
 
   result = CompileRun("for (var i = 0; i < 8; i++) {"
@@ -11378,6 +12210,9 @@
     case v8::kExternalFloatArray:
       return 4;
       break;
+    case v8::kExternalDoubleArray:
+      return 8;
+      break;
     default:
       UNREACHABLE();
       return -1;
@@ -11406,7 +12241,8 @@
   }
   HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
   for (int i = 0; i < kElementCount; i++) {
-    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array->get(i)));
+    CHECK_EQ(static_cast<int64_t>(i),
+             static_cast<int64_t>(array->get_scalar(i)));
     CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array_data[i]));
   }
 
@@ -11567,7 +12403,8 @@
   CHECK_EQ(
       2, static_cast<int>(jsobj->GetElement(6)->ToObjectChecked()->Number()));
 
-  if (array_type != v8::kExternalFloatArray) {
+  if (array_type != v8::kExternalFloatArray &&
+      array_type != v8::kExternalDoubleArray) {
     // Though the specification doesn't state it, be explicit about
     // converting NaNs and +/-Infinity to zero.
     result = CompileRun("for (var i = 0; i < 8; i++) {"
@@ -11639,35 +12476,6 @@
     CHECK_EQ(true, result->BooleanValue());
   }
 
-  // Test crankshaft external array loads
-  for (int i = 0; i < kElementCount; i++) {
-    array->set(i, static_cast<ElementType>(i));
-  }
-  result = CompileRun("function ee_load_test_func(sum) {"
-                      " for (var i = 0; i < 40; ++i)"
-                      "   sum += ext_array[i];"
-                      " return sum;"
-                      "}"
-                      "sum=0;"
-                      "for (var i=0;i<10000;++i) {"
-                      "  sum=ee_load_test_func(sum);"
-                      "}"
-                      "sum;");
-  CHECK_EQ(7800000, result->Int32Value());
-
-  // Test crankshaft external array stores
-  result = CompileRun("function ee_store_test_func(sum) {"
-                      " for (var i = 0; i < 40; ++i)"
-                      "   sum += ext_array[i] = i;"
-                      " return sum;"
-                      "}"
-                      "sum=0;"
-                      "for (var i=0;i<10000;++i) {"
-                      "  sum=ee_store_test_func(sum);"
-                      "}"
-                      "sum;");
-  CHECK_EQ(7800000, result->Int32Value());
-
   for (int i = 0; i < kElementCount; i++) {
     array->set(i, static_cast<ElementType>(i));
   }
@@ -11964,6 +12772,14 @@
 }
 
 
+THREADED_TEST(ExternalDoubleArray) {
+  ExternalArrayTestHelper<i::ExternalDoubleArray, double>(
+      v8::kExternalDoubleArray,
+      -500,
+      500);
+}
+
+
 THREADED_TEST(ExternalArrays) {
   TestExternalByteArray();
   TestExternalUnsignedByteArray();
@@ -12001,6 +12817,7 @@
   ExternalArrayInfoTestHelper(v8::kExternalIntArray);
   ExternalArrayInfoTestHelper(v8::kExternalUnsignedIntArray);
   ExternalArrayInfoTestHelper(v8::kExternalFloatArray);
+  ExternalArrayInfoTestHelper(v8::kExternalDoubleArray);
   ExternalArrayInfoTestHelper(v8::kExternalPixelArray);
 }
 
@@ -12074,9 +12891,10 @@
                     stackTrace->GetFrame(0));
     checkStackFrame(origin, "foo", 6, 3, false, false,
                     stackTrace->GetFrame(1));
-    checkStackFrame(NULL, "", 1, 1, false, false,
+    // This is the source string inside the eval which has the call to foo.
+    checkStackFrame(NULL, "", 1, 5, false, false,
                     stackTrace->GetFrame(2));
-    // The last frame is an anonymous function that has the initial call.
+    // The last frame is an anonymous function which has the initial eval call.
     checkStackFrame(origin, "", 8, 7, false, false,
                     stackTrace->GetFrame(3));
 
@@ -12095,9 +12913,10 @@
     bool is_eval = false;
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
-    checkStackFrame(NULL, "", 1, 1, is_eval, false,
+    // This is the source string inside the eval which has the call to baz.
+    checkStackFrame(NULL, "", 1, 5, is_eval, false,
                     stackTrace->GetFrame(2));
-    // The last frame is an anonymous function that has the initial call to foo.
+    // The last frame is an anonymous function which has the initial eval call.
     checkStackFrame(origin, "", 10, 1, false, false,
                     stackTrace->GetFrame(3));
 
@@ -12940,6 +13759,9 @@
       "str2;";
   Local<Value> result = CompileRun(init_code);
 
+  Local<Value> indexof = CompileRun("str2.indexOf('els')");
+  Local<Value> lastindexof = CompileRun("str2.lastIndexOf('dab')");
+
   CHECK(result->IsString());
   i::Handle<i::String> string = v8::Utils::OpenHandle(String::Cast(*result));
   int length = string->length();
@@ -13005,6 +13827,10 @@
 
   ExpectString("str2.charAt(2);", "e");
 
+  ExpectObject("str2.indexOf('els');", indexof);
+
+  ExpectObject("str2.lastIndexOf('dab');", lastindexof);
+
   reresult = CompileRun("str2.charCodeAt(2);");
   CHECK_EQ(static_cast<int32_t>('e'), reresult->Int32Value());
 }
@@ -13327,8 +14153,8 @@
 
 class IsolateThread : public v8::internal::Thread {
  public:
-  explicit IsolateThread(v8::Isolate* isolate, int fib_limit)
-      : Thread(NULL, "IsolateThread"),
+  IsolateThread(v8::Isolate* isolate, int fib_limit)
+      : Thread("IsolateThread"),
         isolate_(isolate),
         fib_limit_(fib_limit),
         result_(0) { }
@@ -13373,6 +14199,28 @@
   isolate2->Dispose();
 }
 
+TEST(IsolateDifferentContexts) {
+  v8::Isolate* isolate = v8::Isolate::New();
+  Persistent<v8::Context> context;
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope;
+    context = v8::Context::New();
+    v8::Context::Scope context_scope(context);
+    Local<Value> v = CompileRun("2");
+    CHECK(v->IsNumber());
+    CHECK_EQ(2, static_cast<int>(v->NumberValue()));
+  }
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope;
+    context = v8::Context::New();
+    v8::Context::Scope context_scope(context);
+    Local<Value> v = CompileRun("22");
+    CHECK(v->IsNumber());
+    CHECK_EQ(22, static_cast<int>(v->NumberValue()));
+  }
+}
 
 class InitDefaultIsolateThread : public v8::internal::Thread {
  public:
@@ -13386,7 +14234,7 @@
   };
 
   explicit InitDefaultIsolateThread(TestCase testCase)
-      : Thread(NULL, "InitDefaultIsolateThread"),
+      : Thread("InitDefaultIsolateThread"),
         testCase_(testCase),
         result_(false) { }
 
@@ -13629,48 +14477,6 @@
 }
 
 
-TEST(GlobalLoadICGC) {
-  const char* function_code =
-      "function readCell() { while (true) { return cell; } }";
-
-  // Check inline load code for a don't delete cell is cleared during
-  // GC.
-  {
-    v8::HandleScope scope;
-    LocalContext context;
-    CompileRun("var cell = \"value\";");
-    ExpectBoolean("delete cell", false);
-    CompileRun(function_code);
-    ExpectString("readCell()", "value");
-    ExpectString("readCell()", "value");
-  }
-  {
-    v8::HandleScope scope;
-    LocalContext context2;
-    // Hold the code object in the second context.
-    CompileRun(function_code);
-    CheckSurvivingGlobalObjectsCount(1);
-  }
-
-  // Check inline load code for a deletable cell is cleared during GC.
-  {
-    v8::HandleScope scope;
-    LocalContext context;
-    CompileRun("cell = \"value\";");
-    CompileRun(function_code);
-    ExpectString("readCell()", "value");
-    ExpectString("readCell()", "value");
-  }
-  {
-    v8::HandleScope scope;
-    LocalContext context2;
-    // Hold the code object in the second context.
-    CompileRun(function_code);
-    CheckSurvivingGlobalObjectsCount(1);
-  }
-}
-
-
 TEST(RegExp) {
   v8::HandleScope scope;
   LocalContext context;
@@ -13678,34 +14484,34 @@
   v8::Handle<v8::RegExp> re = v8::RegExp::New(v8_str("foo"), v8::RegExp::kNone);
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("foo")));
-  CHECK_EQ(re->GetFlags(), v8::RegExp::kNone);
+  CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
 
   re = v8::RegExp::New(v8_str("bar"),
                        static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
                                                       v8::RegExp::kGlobal));
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("bar")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kIgnoreCase | v8::RegExp::kGlobal);
+  CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kGlobal,
+           static_cast<int>(re->GetFlags()));
 
   re = v8::RegExp::New(v8_str("baz"),
                        static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
                                                       v8::RegExp::kMultiline));
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("baz")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline);
+  CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline,
+           static_cast<int>(re->GetFlags()));
 
   re = CompileRun("/quux/").As<v8::RegExp>();
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("quux")));
-  CHECK_EQ(re->GetFlags(), v8::RegExp::kNone);
+  CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
 
   re = CompileRun("/quux/gm").As<v8::RegExp>();
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("quux")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kGlobal | v8::RegExp::kMultiline);
+  CHECK_EQ(v8::RegExp::kGlobal | v8::RegExp::kMultiline,
+           static_cast<int>(re->GetFlags()));
 
   // Override the RegExp constructor and check the API constructor
   // still works.
@@ -13714,19 +14520,24 @@
   re = v8::RegExp::New(v8_str("foobar"), v8::RegExp::kNone);
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("foobar")));
-  CHECK_EQ(re->GetFlags(), v8::RegExp::kNone);
+  CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
 
   re = v8::RegExp::New(v8_str("foobarbaz"),
                        static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
                                                       v8::RegExp::kMultiline));
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("foobarbaz")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline);
+  CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline,
+           static_cast<int>(re->GetFlags()));
 
   context->Global()->Set(v8_str("re"), re);
   ExpectTrue("re.test('FoobarbaZ')");
 
+  // RegExps are objects on which you can set properties.
+  re->Set(v8_str("property"), v8::Integer::New(32));
+  v8::Handle<v8::Value> value = CompileRun("re.property");
+  ASSERT_EQ(32, value->Int32Value());
+
   v8::TryCatch try_catch;
   re = v8::RegExp::New(v8_str("foo["), v8::RegExp::kNone);
   CHECK(re.IsEmpty());
@@ -13896,3 +14707,478 @@
   context2.Dispose();
   context3.Dispose();
 }
+
+
+THREADED_TEST(CreationContextOfJsFunction) {
+  HandleScope handle_scope;
+  Persistent<Context> context = Context::New();
+  InstallContextId(context, 1);
+
+  Local<Object> function;
+  {
+    Context::Scope scope(context);
+    function = CompileRun("function foo() {}; foo").As<Object>();
+  }
+
+  CHECK(function->CreationContext() == context);
+  CheckContextId(function, 1);
+
+  context.Dispose();
+}
+
+
+Handle<Value> HasOwnPropertyIndexedPropertyGetter(uint32_t index,
+                                                  const AccessorInfo& info) {
+  if (index == 42) return v8_str("yes");
+  return Handle<v8::Integer>();
+}
+
+
+Handle<Value> HasOwnPropertyNamedPropertyGetter(Local<String> property,
+                                                const AccessorInfo& info) {
+  if (property->Equals(v8_str("foo"))) return v8_str("yes");
+  return Handle<Value>();
+}
+
+
+Handle<v8::Integer> HasOwnPropertyIndexedPropertyQuery(
+    uint32_t index, const AccessorInfo& info) {
+  if (index == 42) return v8_num(1).As<v8::Integer>();
+  return Handle<v8::Integer>();
+}
+
+
+Handle<v8::Integer> HasOwnPropertyNamedPropertyQuery(
+    Local<String> property, const AccessorInfo& info) {
+  if (property->Equals(v8_str("foo"))) return v8_num(1).As<v8::Integer>();
+  return Handle<v8::Integer>();
+}
+
+
+Handle<v8::Integer> HasOwnPropertyNamedPropertyQuery2(
+    Local<String> property, const AccessorInfo& info) {
+  if (property->Equals(v8_str("bar"))) return v8_num(1).As<v8::Integer>();
+  return Handle<v8::Integer>();
+}
+
+
+Handle<Value> HasOwnPropertyAccessorGetter(Local<String> property,
+                                           const AccessorInfo& info) {
+  return v8_str("yes");
+}
+
+
+TEST(HasOwnProperty) {
+  v8::HandleScope scope;
+  LocalContext env;
+  { // Check normal properties and defined getters.
+    Handle<Value> value = CompileRun(
+        "function Foo() {"
+        "    this.foo = 11;"
+        "    this.__defineGetter__('baz', function() { return 1; });"
+        "};"
+        "function Bar() { "
+        "    this.bar = 13;"
+        "    this.__defineGetter__('bla', function() { return 2; });"
+        "};"
+        "Bar.prototype = new Foo();"
+        "new Bar();");
+    CHECK(value->IsObject());
+    Handle<Object> object = value->ToObject();
+    CHECK(object->Has(v8_str("foo")));
+    CHECK(!object->HasOwnProperty(v8_str("foo")));
+    CHECK(object->HasOwnProperty(v8_str("bar")));
+    CHECK(object->Has(v8_str("baz")));
+    CHECK(!object->HasOwnProperty(v8_str("baz")));
+    CHECK(object->HasOwnProperty(v8_str("bla")));
+  }
+  { // Check named getter interceptors.
+    Handle<ObjectTemplate> templ = ObjectTemplate::New();
+    templ->SetNamedPropertyHandler(HasOwnPropertyNamedPropertyGetter);
+    Handle<Object> instance = templ->NewInstance();
+    CHECK(!instance->HasOwnProperty(v8_str("42")));
+    CHECK(instance->HasOwnProperty(v8_str("foo")));
+    CHECK(!instance->HasOwnProperty(v8_str("bar")));
+  }
+  { // Check indexed getter interceptors.
+    Handle<ObjectTemplate> templ = ObjectTemplate::New();
+    templ->SetIndexedPropertyHandler(HasOwnPropertyIndexedPropertyGetter);
+    Handle<Object> instance = templ->NewInstance();
+    CHECK(instance->HasOwnProperty(v8_str("42")));
+    CHECK(!instance->HasOwnProperty(v8_str("43")));
+    CHECK(!instance->HasOwnProperty(v8_str("foo")));
+  }
+  { // Check named query interceptors.
+    Handle<ObjectTemplate> templ = ObjectTemplate::New();
+    templ->SetNamedPropertyHandler(0, 0, HasOwnPropertyNamedPropertyQuery);
+    Handle<Object> instance = templ->NewInstance();
+    CHECK(instance->HasOwnProperty(v8_str("foo")));
+    CHECK(!instance->HasOwnProperty(v8_str("bar")));
+  }
+  { // Check indexed query interceptors.
+    Handle<ObjectTemplate> templ = ObjectTemplate::New();
+    templ->SetIndexedPropertyHandler(0, 0, HasOwnPropertyIndexedPropertyQuery);
+    Handle<Object> instance = templ->NewInstance();
+    CHECK(instance->HasOwnProperty(v8_str("42")));
+    CHECK(!instance->HasOwnProperty(v8_str("41")));
+  }
+  { // Check callbacks.
+    Handle<ObjectTemplate> templ = ObjectTemplate::New();
+    templ->SetAccessor(v8_str("foo"), HasOwnPropertyAccessorGetter);
+    Handle<Object> instance = templ->NewInstance();
+    CHECK(instance->HasOwnProperty(v8_str("foo")));
+    CHECK(!instance->HasOwnProperty(v8_str("bar")));
+  }
+  { // Check that query wins on disagreement.
+    Handle<ObjectTemplate> templ = ObjectTemplate::New();
+    templ->SetNamedPropertyHandler(HasOwnPropertyNamedPropertyGetter,
+                                   0,
+                                   HasOwnPropertyNamedPropertyQuery2);
+    Handle<Object> instance = templ->NewInstance();
+    CHECK(!instance->HasOwnProperty(v8_str("foo")));
+    CHECK(instance->HasOwnProperty(v8_str("bar")));
+  }
+}
+
+
+void CheckCodeGenerationAllowed() {
+  Handle<Value> result = CompileRun("eval('42')");
+  CHECK_EQ(42, result->Int32Value());
+  result = CompileRun("(function(e) { return e('42'); })(eval)");
+  CHECK_EQ(42, result->Int32Value());
+  result = CompileRun("var f = new Function('return 42'); f()");
+  CHECK_EQ(42, result->Int32Value());
+}
+
+
+void CheckCodeGenerationDisallowed() {
+  TryCatch try_catch;
+
+  Handle<Value> result = CompileRun("eval('42')");
+  CHECK(result.IsEmpty());
+  CHECK(try_catch.HasCaught());
+  try_catch.Reset();
+
+  result = CompileRun("(function(e) { return e('42'); })(eval)");
+  CHECK(result.IsEmpty());
+  CHECK(try_catch.HasCaught());
+  try_catch.Reset();
+
+  result = CompileRun("var f = new Function('return 42'); f()");
+  CHECK(result.IsEmpty());
+  CHECK(try_catch.HasCaught());
+}
+
+
+bool CodeGenerationAllowed(Local<Context> context) {
+  ApiTestFuzzer::Fuzz();
+  return true;
+}
+
+
+bool CodeGenerationDisallowed(Local<Context> context) {
+  ApiTestFuzzer::Fuzz();
+  return false;
+}
+
+
+THREADED_TEST(AllowCodeGenFromStrings) {
+  v8::HandleScope scope;
+  LocalContext context;
+
+  // eval and the Function constructor are allowed by default.
+  CheckCodeGenerationAllowed();
+
+  // Disallow eval and the Function constructor.
+  context->AllowCodeGenerationFromStrings(false);
+  CheckCodeGenerationDisallowed();
+
+  // Allow again.
+  context->AllowCodeGenerationFromStrings(true);
+  CheckCodeGenerationAllowed();
+
+  // Disallow, but set a global callback that will allow the calls.
+  context->AllowCodeGenerationFromStrings(false);
+  V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationAllowed);
+  CheckCodeGenerationAllowed();
+
+  // Set a callback that disallows the code generation.
+  V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationDisallowed);
+  CheckCodeGenerationDisallowed();
+}
+
+
+static v8::Handle<Value> NonObjectThis(const v8::Arguments& args) {
+  return v8::Undefined();
+}
+
+
+THREADED_TEST(CallAPIFunctionOnNonObject) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Handle<FunctionTemplate> templ = v8::FunctionTemplate::New(NonObjectThis);
+  Handle<Function> function = templ->GetFunction();
+  context->Global()->Set(v8_str("f"), function);
+  TryCatch try_catch;
+  CompileRun("f.call(2)");
+}
+
+
+// Regression test for issue 1470.
+THREADED_TEST(ReadOnlyIndexedProperties) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+
+  LocalContext context;
+  Local<v8::Object> obj = templ->NewInstance();
+  context->Global()->Set(v8_str("obj"), obj);
+  obj->Set(v8_str("1"), v8_str("DONT_CHANGE"), v8::ReadOnly);
+  obj->Set(v8_str("1"), v8_str("foobar"));
+  CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_str("1")));
+  obj->Set(v8_num(2), v8_str("DONT_CHANGE"), v8::ReadOnly);
+  obj->Set(v8_num(2), v8_str("foobar"));
+  CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_num(2)));
+
+  // Test non-smi case.
+  obj->Set(v8_str("2000000000"), v8_str("DONT_CHANGE"), v8::ReadOnly);
+  obj->Set(v8_str("2000000000"), v8_str("foobar"));
+  CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_str("2000000000")));
+}
+
+
+THREADED_TEST(Regress1516) {
+  v8::HandleScope scope;
+
+  LocalContext context;
+  { v8::HandleScope temp_scope;
+    CompileRun("({'a': 0})");
+  }
+
+  int elements;
+  { i::MapCache* map_cache =
+        i::MapCache::cast(i::Isolate::Current()->context()->map_cache());
+    elements = map_cache->NumberOfElements();
+    CHECK_LE(1, elements);
+  }
+
+  i::Isolate::Current()->heap()->CollectAllGarbage(true);
+  { i::Object* raw_map_cache = i::Isolate::Current()->context()->map_cache();
+    if (raw_map_cache != i::Isolate::Current()->heap()->undefined_value()) {
+      i::MapCache* map_cache = i::MapCache::cast(raw_map_cache);
+      CHECK_GT(elements, map_cache->NumberOfElements());
+    }
+  }
+}
+
+
+static bool BlockProtoNamedSecurityTestCallback(Local<v8::Object> global,
+                                                Local<Value> name,
+                                                v8::AccessType type,
+                                                Local<Value> data) {
+  // Only block read access to __proto__.
+  if (type == v8::ACCESS_GET &&
+      name->IsString() &&
+      name->ToString()->Length() == 9 &&
+      name->ToString()->Utf8Length() == 9) {
+    char buffer[10];
+    CHECK_EQ(10, name->ToString()->WriteUtf8(buffer));
+    return strncmp(buffer, "__proto__", 9) != 0;
+  }
+
+  return true;
+}
+
+
+THREADED_TEST(Regress93759) {
+  HandleScope scope;
+
+  // Template for object with security check.
+  Local<ObjectTemplate> no_proto_template = v8::ObjectTemplate::New();
+  // We don't do indexing, so any callback can be used for that.
+  no_proto_template->SetAccessCheckCallbacks(
+      BlockProtoNamedSecurityTestCallback,
+      IndexedSecurityTestCallback);
+
+  // Templates for objects with hidden prototypes and possibly security check.
+  Local<FunctionTemplate> hidden_proto_template = v8::FunctionTemplate::New();
+  hidden_proto_template->SetHiddenPrototype(true);
+
+  Local<FunctionTemplate> protected_hidden_proto_template =
+      v8::FunctionTemplate::New();
+  protected_hidden_proto_template->InstanceTemplate()->SetAccessCheckCallbacks(
+      BlockProtoNamedSecurityTestCallback,
+      IndexedSecurityTestCallback);
+  protected_hidden_proto_template->SetHiddenPrototype(true);
+
+  // Context for "foreign" objects used in test.
+  Persistent<Context> context = v8::Context::New();
+  context->Enter();
+
+  // Plain object, no security check.
+  Local<Object> simple_object = Object::New();
+
+  // Object with explicit security check.
+  Local<Object> protected_object =
+      no_proto_template->NewInstance();
+
+  // JSGlobalProxy object, which always has a security check.
+  Local<Object> proxy_object =
+      context->Global();
+
+  // Global object, the prototype of proxy_object. No security checks.
+  Local<Object> global_object =
+      proxy_object->GetPrototype()->ToObject();
+
+  // Hidden prototype without security check.
+  Local<Object> hidden_prototype =
+      hidden_proto_template->GetFunction()->NewInstance();
+  Local<Object> object_with_hidden =
+    Object::New();
+  object_with_hidden->SetPrototype(hidden_prototype);
+
+  // Hidden prototype with security check on the hidden prototype.
+  Local<Object> protected_hidden_prototype =
+      protected_hidden_proto_template->GetFunction()->NewInstance();
+  Local<Object> object_with_protected_hidden =
+    Object::New();
+  object_with_protected_hidden->SetPrototype(protected_hidden_prototype);
+
+  context->Exit();
+
+  // Template for the global object of the second context. Values to test
+  // are put on it as properties.
+  Local<ObjectTemplate> global_template = ObjectTemplate::New();
+  global_template->Set(v8_str("simple"), simple_object);
+  global_template->Set(v8_str("protected"), protected_object);
+  global_template->Set(v8_str("global"), global_object);
+  global_template->Set(v8_str("proxy"), proxy_object);
+  global_template->Set(v8_str("hidden"), object_with_hidden);
+  global_template->Set(v8_str("phidden"), object_with_protected_hidden);
+
+  LocalContext context2(NULL, global_template);
+
+  Local<Value> result1 = CompileRun("Object.getPrototypeOf(simple)");
+  CHECK(result1->Equals(simple_object->GetPrototype()));
+
+  Local<Value> result2 = CompileRun("Object.getPrototypeOf(protected)");
+  CHECK(result2->Equals(Undefined()));
+
+  Local<Value> result3 = CompileRun("Object.getPrototypeOf(global)");
+  CHECK(result3->Equals(global_object->GetPrototype()));
+
+  Local<Value> result4 = CompileRun("Object.getPrototypeOf(proxy)");
+  CHECK(result4->Equals(Undefined()));
+
+  Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
+  CHECK(result5->Equals(
+      object_with_hidden->GetPrototype()->ToObject()->GetPrototype()));
+
+  Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
+  CHECK(result6->Equals(Undefined()));
+
+  context.Dispose();
+}
+
+
+static void TestReceiver(Local<Value> expected_result,
+                         Local<Value> expected_receiver,
+                         const char* code) {
+  Local<Value> result = CompileRun(code);
+  CHECK(result->IsObject());
+  CHECK(expected_receiver->Equals(result->ToObject()->Get(1)));
+  CHECK(expected_result->Equals(result->ToObject()->Get(0)));
+}
+
+
+THREADED_TEST(ForeignFunctionReceiver) {
+  HandleScope scope;
+
+  // Create two contexts with different "id" properties ('i' and 'o').
+  // Call a function both from its own context and from the foreign
+  // context, and see what "this" is bound to (returning both "this"
+  // and "this.id" for comparison).
+
+  Persistent<Context> foreign_context = v8::Context::New();
+  foreign_context->Enter();
+  Local<Value> foreign_function =
+    CompileRun("function func() { return { 0: this.id, "
+               "                           1: this, "
+               "                           toString: function() { "
+               "                               return this[0];"
+               "                           }"
+               "                         };"
+               "}"
+               "var id = 'i';"
+               "func;");
+  CHECK(foreign_function->IsFunction());
+  foreign_context->Exit();
+
+  LocalContext context;
+
+  Local<String> password = v8_str("Password");
+  // Don't get hit by security checks when accessing foreign_context's
+  // global receiver (aka. global proxy).
+  context->SetSecurityToken(password);
+  foreign_context->SetSecurityToken(password);
+
+  Local<String> i = v8_str("i");
+  Local<String> o = v8_str("o");
+  Local<String> id = v8_str("id");
+
+  CompileRun("function ownfunc() { return { 0: this.id, "
+             "                              1: this, "
+             "                              toString: function() { "
+             "                                  return this[0];"
+             "                              }"
+             "                             };"
+             "}"
+             "var id = 'o';"
+             "ownfunc");
+  context->Global()->Set(v8_str("func"), foreign_function);
+
+  // Sanity check the contexts.
+  CHECK(i->Equals(foreign_context->Global()->Get(id)));
+  CHECK(o->Equals(context->Global()->Get(id)));
+
+  // Checking local function's receiver.
+  // Calling function using its call/apply methods.
+  TestReceiver(o, context->Global(), "ownfunc.call()");
+  TestReceiver(o, context->Global(), "ownfunc.apply()");
+  // Making calls through built-in functions.
+  TestReceiver(o, context->Global(), "[1].map(ownfunc)[0]");
+  CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/,ownfunc)[1]")));
+  CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/g,ownfunc)[1]")));
+  CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/g,ownfunc)[3]")));
+  // Calling with environment record as base.
+  TestReceiver(o, context->Global(), "ownfunc()");
+  // Calling with no base.
+  TestReceiver(o, context->Global(), "(1,ownfunc)()");
+
+  // Checking foreign function return value.
+  // Calling function using its call/apply methods.
+  TestReceiver(i, foreign_context->Global(), "func.call()");
+  TestReceiver(i, foreign_context->Global(), "func.apply()");
+  // Calling function using another context's call/apply methods.
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.call.call(func)");
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.call.apply(func)");
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.apply.call(func)");
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.apply.apply(func)");
+  // Making calls through built-in functions.
+  TestReceiver(i, foreign_context->Global(), "[1].map(func)[0]");
+  // ToString(func()) is func()[0], i.e., the returned this.id.
+  CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/,func)[1]")));
+  CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[1]")));
+  CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[3]")));
+
+  // TODO(1547): Make the following also return "i".
+  // Calling with environment record as base.
+  TestReceiver(o, context->Global(), "func()");
+  // Calling with no base.
+  TestReceiver(o, context->Global(), "(1,func)()");
+
+  foreign_context.Dispose();
+}
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 89153c7..ecbf956 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -945,4 +945,83 @@
   }
 }
 
+
+TEST(11) {
+  // Test instructions using the carry flag.
+  InitializeVM();
+  v8::HandleScope scope;
+
+  typedef struct {
+    int32_t a;
+    int32_t b;
+    int32_t c;
+    int32_t d;
+  } I;
+  I i;
+
+  i.a = 0xabcd0001;
+  i.b = 0xabcd0000;
+
+  Assembler assm(Isolate::Current(), NULL, 0);
+
+  // Test HeapObject untagging.
+  __ ldr(r1, MemOperand(r0, OFFSET_OF(I, a)));
+  __ mov(r1, Operand(r1, ASR, 1), SetCC);
+  __ adc(r1, r1, Operand(r1), LeaveCC, cs);
+  __ str(r1, MemOperand(r0, OFFSET_OF(I, a)));
+
+  __ ldr(r2, MemOperand(r0, OFFSET_OF(I, b)));
+  __ mov(r2, Operand(r2, ASR, 1), SetCC);
+  __ adc(r2, r2, Operand(r2), LeaveCC, cs);
+  __ str(r2, MemOperand(r0, OFFSET_OF(I, b)));
+
+  // Test corner cases.
+  __ mov(r1, Operand(0xffffffff));
+  __ mov(r2, Operand(0));
+  __ mov(r3, Operand(r1, ASR, 1), SetCC);  // Set the carry.
+  __ adc(r3, r1, Operand(r2));
+  __ str(r3, MemOperand(r0, OFFSET_OF(I, c)));
+
+  __ mov(r1, Operand(0xffffffff));
+  __ mov(r2, Operand(0));
+  __ mov(r3, Operand(r2, ASR, 1), SetCC);  // Unset the carry.
+  __ adc(r3, r1, Operand(r2));
+  __ str(r3, MemOperand(r0, OFFSET_OF(I, d)));
+
+  __ mov(pc, Operand(lr));
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = HEAP->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+  CHECK(code->IsCode());
+#ifdef DEBUG
+  Code::cast(code)->Print();
+#endif
+  F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+  Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
+  USE(dummy);
+
+  CHECK_EQ(0xabcd0001, i.a);
+  CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
+  CHECK_EQ(0x00000000, i.c);
+  CHECK_EQ(0xffffffff, i.d);
+}
+
+
+TEST(12) {
+  // Test chaining of label usages within instructions (issue 1644).
+  InitializeVM();
+  v8::HandleScope scope;
+  Assembler assm(Isolate::Current(), NULL, 0);
+
+  Label target;
+  __ b(eq, &target);
+  __ b(ne, &target);
+  __ bind(&target);
+  __ nop();
+}
+
 #undef __
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 576739b..839b7f5 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -102,7 +102,7 @@
 
   __ bind(&C);
   __ test(edx, Operand(edx));
-  __ j(not_zero, &L, taken);
+  __ j(not_zero, &L);
   __ ret(0);
 
   CodeDesc desc;
@@ -140,7 +140,7 @@
 
   __ bind(&C);
   __ test(edx, Operand(edx));
-  __ j(not_zero, &L, taken);
+  __ j(not_zero, &L);
   __ ret(0);
 
   // some relocated stuff here, not executed
@@ -351,10 +351,10 @@
   __ fld_d(Operand(esp, 3 * kPointerSize));
   __ fld_d(Operand(esp, 1 * kPointerSize));
   __ FCmp();
-  __ j(parity_even, &nan_l, taken);
-  __ j(equal, &equal_l, taken);
-  __ j(below, &less_l, taken);
-  __ j(above, &greater_l, taken);
+  __ j(parity_even, &nan_l);
+  __ j(equal, &equal_l);
+  __ j(below, &less_l);
+  __ j(above, &greater_l);
 
   __ mov(eax, kUndefined);
   __ ret(0);
@@ -394,4 +394,18 @@
   CHECK_EQ(kNaN, f(OS::nan_value(), 1.1));
 }
 
+
+TEST(AssemblerIa3210) {
+  // Test chaining of label usages within instructions (issue 1644).
+  InitializeVM();
+  v8::HandleScope scope;
+  Assembler assm(Isolate::Current(), NULL, 0);
+
+  Label target;
+  __ j(equal, &target);
+  __ j(not_equal, &target);
+  __ bind(&target);
+  __ nop();
+}
+
 #undef __
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc
index 8cd56f7..a6c76f0 100644
--- a/test/cctest/test-assembler-mips.cc
+++ b/test/cctest/test-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -64,7 +64,7 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
 
   // Addition.
   __ addu(v0, a0, a1);
@@ -89,7 +89,7 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
   __ mov(a1, a0);
@@ -127,7 +127,7 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
 
   Label exit, error;
 
@@ -281,10 +281,10 @@
 
   // Create a function that accepts &t, and loads, manipulates, and stores
   // the doubles t.a ... t.f.
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
 
     __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
@@ -354,10 +354,10 @@
   } T;
   T t;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
 
     __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
@@ -415,10 +415,10 @@
   } T;
   T t;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
 
     // Load all structure elements to registers.
@@ -490,7 +490,7 @@
   } T;
   T t;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
   // Basic word load/store.
@@ -563,10 +563,10 @@
 
   // Create a function that accepts &t, and loads, manipulates, and stores
   // the doubles t.a ... t.f.
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
   Label neither_is_nan, less_than, outa_here;
 
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
 
     __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
@@ -645,7 +645,7 @@
   } T;
   T t;
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
 
   // Basic word load.
   __ lw(t0, MemOperand(a0, OFFSET_OF(T, input)) );
@@ -730,7 +730,7 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
   Label exit, exit2, exit3;
 
   __ Branch(&exit, ge, a0, Operand(0x00000000));
@@ -771,10 +771,10 @@
   } T;
   T t;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU) && mips32r2) {
     CpuFeatures::Scope scope(FPU);
 
     // Load all structure elements to registers.
@@ -855,7 +855,7 @@
   } T;
   T t;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
 
   // Test all combinations of LWL and vAddr.
   __ lw(t0, MemOperand(a0, OFFSET_OF(T, reg_init)) );
@@ -986,7 +986,7 @@
   } T;
   T t;
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
 
   __ mov(t6, fp);  // Save frame pointer.
   __ mov(fp, a0);  // Access struct T by fp.
@@ -996,18 +996,18 @@
   __ addu(t1, t0, t3);
   __ subu(t4, t0, t3);
   __ nop();
-  __ Push(t0);  // These instructions disappear after opt.
+  __ push(t0);  // These instructions disappear after opt.
   __ Pop();
   __ addu(t0, t0, t0);
   __ nop();
   __ Pop();     // These instructions disappear after opt.
-  __ Push(t3);
+  __ push(t3);
   __ nop();
-  __ Push(t3);  // These instructions disappear after opt.
-  __ Pop(t3);
+  __ push(t3);  // These instructions disappear after opt.
+  __ pop(t3);
   __ nop();
-  __ Push(t3);
-  __ Pop(t4);
+  __ push(t3);
+  __ pop(t4);
   __ nop();
   __ sw(t0, MemOperand(fp, OFFSET_OF(T, y)) );
   __ lw(t0, MemOperand(fp, OFFSET_OF(T, y)) );
@@ -1015,25 +1015,25 @@
   __ sw(t0, MemOperand(fp, OFFSET_OF(T, y)) );
   __ lw(t1, MemOperand(fp, OFFSET_OF(T, y)) );
   __ nop();
-  __ Push(t1);
+  __ push(t1);
   __ lw(t1, MemOperand(fp, OFFSET_OF(T, y)) );
-  __ Pop(t1);
+  __ pop(t1);
   __ nop();
-  __ Push(t1);
+  __ push(t1);
   __ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
-  __ Pop(t1);
+  __ pop(t1);
   __ nop();
-  __ Push(t1);
+  __ push(t1);
   __ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
-  __ Pop(t2);
+  __ pop(t2);
   __ nop();
-  __ Push(t2);
+  __ push(t2);
   __ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
-  __ Pop(t1);
+  __ pop(t1);
   __ nop();
-  __ Push(t1);
+  __ push(t1);
   __ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
-  __ Pop(t3);
+  __ pop(t3);
   __ nop();
 
   __ mov(fp, t6);
@@ -1077,23 +1077,23 @@
   } T;
   T t;
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
 
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
 
     __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
-    __ Cvt_d_uw(f10, t0);
+    __ Cvt_d_uw(f10, t0, f22);
     __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
 
-    __ Trunc_uw_d(f10, f10);
+    __ Trunc_uw_d(f10, f10, f22);
     __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
 
     __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
-    __ Cvt_d_uw(f8, t0);
+    __ Cvt_d_uw(f8, t0, f22);
     __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
 
-    __ Trunc_uw_d(f8, f8);
+    __ Trunc_uw_d(f8, f8, f22);
     __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
 
     __ jr(ra);
@@ -1134,10 +1134,10 @@
   int32_t x##_down_out; \
   int32_t neg_##x##_up_out; \
   int32_t neg_##x##_down_out; \
-  int32_t x##_err1_out; \
-  int32_t x##_err2_out; \
-  int32_t x##_err3_out; \
-  int32_t x##_err4_out; \
+  uint32_t x##_err1_out; \
+  uint32_t x##_err2_out; \
+  uint32_t x##_err3_out; \
+  uint32_t x##_err4_out; \
   int32_t x##_invalid_result;
 
   typedef struct {
@@ -1160,9 +1160,9 @@
 
 #undef ROUND_STRUCT_ELEMENT
 
-  MacroAssembler assm(NULL, 0);
+  MacroAssembler assm(Isolate::Current(), NULL, 0);
 
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
 
     // Save FCSR.
@@ -1220,8 +1220,6 @@
     // Restore FCSR.
     __ ctc1(a1, FCSR);
 
-#undef RUN_ROUND_TEST
-
     __ jr(ra);
     __ nop();
 
@@ -1246,66 +1244,35 @@
     Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
     USE(dummy);
 
-#define GET_FPU_ERR(x) ((x >> 2) & (32 - 1))
+#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
+#define CHECK_ROUND_RESULT(type) \
+  CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
+  CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
+  CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
+  CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
+  CHECK_EQ(kFPUInvalidResult, t.type##_invalid_result);
 
-    CHECK_EQ(124, t.round_up_out);
-    CHECK_EQ(123, t.round_down_out);
-    CHECK_EQ(-124, t.neg_round_up_out);
-    CHECK_EQ(-123, t.neg_round_down_out);
-
-    // Inaccurate.
-    CHECK_EQ(1, GET_FPU_ERR(t.round_err1_out));
-    // No error.
-    CHECK_EQ(0, GET_FPU_ERR(t.round_err2_out));
-    // Invalid operation.
-    CHECK_EQ(16, GET_FPU_ERR(t.round_err3_out));
-    CHECK_EQ(16, GET_FPU_ERR(t.round_err4_out));
-    CHECK_EQ(kFPUInvalidResult, t.round_invalid_result);
-
-    CHECK_EQ(123, t.floor_up_out);
-    CHECK_EQ(123, t.floor_down_out);
-    CHECK_EQ(-124, t.neg_floor_up_out);
-    CHECK_EQ(-124, t.neg_floor_down_out);
-
-    // Inaccurate.
-    CHECK_EQ(1, GET_FPU_ERR(t.floor_err1_out));
-    // No error.
-    CHECK_EQ(0, GET_FPU_ERR(t.floor_err2_out));
-    // Invalid operation.
-    CHECK_EQ(16, GET_FPU_ERR(t.floor_err3_out));
-    CHECK_EQ(16, GET_FPU_ERR(t.floor_err4_out));
-    CHECK_EQ(kFPUInvalidResult, t.floor_invalid_result);
-
-    CHECK_EQ(124, t.ceil_up_out);
-    CHECK_EQ(124, t.ceil_down_out);
-    CHECK_EQ(-123, t.neg_ceil_up_out);
-    CHECK_EQ(-123, t.neg_ceil_down_out);
-
-    // Inaccurate.
-    CHECK_EQ(1, GET_FPU_ERR(t.ceil_err1_out));
-    // No error.
-    CHECK_EQ(0, GET_FPU_ERR(t.ceil_err2_out));
-    // Invalid operation.
-    CHECK_EQ(16, GET_FPU_ERR(t.ceil_err3_out));
-    CHECK_EQ(16, GET_FPU_ERR(t.ceil_err4_out));
-    CHECK_EQ(kFPUInvalidResult, t.ceil_invalid_result);
-
-    // In rounding mode 0 cvt should behave like round.
-    CHECK_EQ(t.round_up_out, t.cvt_up_out);
-    CHECK_EQ(t.round_down_out, t.cvt_down_out);
-    CHECK_EQ(t.neg_round_up_out, t.neg_cvt_up_out);
-    CHECK_EQ(t.neg_round_down_out, t.neg_cvt_down_out);
-
-    // Inaccurate.
-    CHECK_EQ(1, GET_FPU_ERR(t.cvt_err1_out));
-    // No error.
-    CHECK_EQ(0, GET_FPU_ERR(t.cvt_err2_out));
-    // Invalid operation.
-    CHECK_EQ(16, GET_FPU_ERR(t.cvt_err3_out));
-    CHECK_EQ(16, GET_FPU_ERR(t.cvt_err4_out));
-    CHECK_EQ(kFPUInvalidResult, t.cvt_invalid_result);
+    CHECK_ROUND_RESULT(round);
+    CHECK_ROUND_RESULT(floor);
+    CHECK_ROUND_RESULT(ceil);
+    CHECK_ROUND_RESULT(cvt);
   }
 }
 
 
+TEST(MIPS15) {
+  // Test chaining of label usages within instructions (issue 1644).
+  InitializeVM();
+  v8::HandleScope scope;
+  Assembler assm(Isolate::Current(), NULL, 0);
+
+  Label target;
+  __ beq(v0, v1, &target);
+  __ nop();
+  __ bne(v0, v1, &target);
+  __ nop();
+  __ bind(&target);
+  __ nop();
+}
+
 #undef __
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index ea70f54..28f7c9b 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -46,6 +46,7 @@
 using v8::internal::byte;
 using v8::internal::greater;
 using v8::internal::less_equal;
+using v8::internal::equal;
 using v8::internal::not_equal;
 using v8::internal::r13;
 using v8::internal::r15;
@@ -345,4 +346,17 @@
   }
 }
 
+
+TEST(AssemblerX64LabelChaining) {
+  // Test chaining of label usages within instructions (issue 1644).
+  v8::HandleScope scope;
+  Assembler assm(Isolate::Current(), NULL, 0);
+
+  Label target;
+  __ j(equal, &target);
+  __ j(not_equal, &target);
+  __ bind(&target);
+  __ nop();
+}
+
 #undef __
diff --git a/test/cctest/test-ast.cc b/test/cctest/test-ast.cc
index 6183357..2aa7207 100644
--- a/test/cctest/test-ast.cc
+++ b/test/cctest/test-ast.cc
@@ -39,8 +39,8 @@
   List<AstNode*>* list = new List<AstNode*>(0);
   CHECK_EQ(0, list->length());
 
-  ZoneScope zone_scope(DELETE_ON_EXIT);
-  AstNode* node = new EmptyStatement();
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+  AstNode* node = new(ZONE) EmptyStatement();
   list->Add(node);
   CHECK_EQ(1, list->length());
   CHECK_EQ(node, list->at(0));
@@ -56,14 +56,3 @@
   CHECK_EQ(0, list->length());
   delete list;
 }
-
-
-TEST(DeleteEmpty) {
-  {
-    List<int>* list = new List<int>(0);
-    delete list;
-  }
-  {
-    List<int> list(0);
-  }
-}
diff --git a/test/cctest/test-circular-queue.cc b/test/cctest/test-circular-queue.cc
index 9dd4981..2861b1f 100644
--- a/test/cctest/test-circular-queue.cc
+++ b/test/cctest/test-circular-queue.cc
@@ -6,8 +6,6 @@
 #include "circular-queue-inl.h"
 #include "cctest.h"
 
-namespace i = v8::internal;
-
 using i::SamplingCircularQueue;
 
 
@@ -84,12 +82,11 @@
  public:
   typedef SamplingCircularQueue::Cell Record;
 
-  ProducerThread(i::Isolate* isolate,
-                 SamplingCircularQueue* scq,
+  ProducerThread(SamplingCircularQueue* scq,
                  int records_per_chunk,
                  Record value,
                  i::Semaphore* finished)
-      : Thread(isolate, "producer"),
+      : Thread("producer"),
         scq_(scq),
         records_per_chunk_(records_per_chunk),
         value_(value),
@@ -133,10 +130,9 @@
   // Check that we are using non-reserved values.
   CHECK_NE(SamplingCircularQueue::kClear, 1);
   CHECK_NE(SamplingCircularQueue::kEnd, 1);
-  i::Isolate* isolate = i::Isolate::Current();
-  ProducerThread producer1(isolate, &scq, kRecordsPerChunk, 1, semaphore);
-  ProducerThread producer2(isolate, &scq, kRecordsPerChunk, 10, semaphore);
-  ProducerThread producer3(isolate, &scq, kRecordsPerChunk, 20, semaphore);
+  ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore);
+  ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore);
+  ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore);
 
   CHECK_EQ(NULL, scq.StartDequeue());
   producer1.Start();
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 8f226f6..2d9b012 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdlib.h>
-#include <wchar.h>  // wint_t
+#include <wchar.h>
 
 #include "v8.h"
 
@@ -75,7 +75,7 @@
     uint16_t* string = NewArray<uint16_t>(length + 1);
     string_obj->Write(string);
     for (int j = 0; j < length; j++)
-      printf("%lc", static_cast<wint_t>(string[j]));
+      printf("%lc", static_cast<wchar_t>(string[j]));
     DeleteArray(string);
   }
   printf("\n");
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 17611ac..f567a0f 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -2,15 +2,11 @@
 //
 // Tests of profiles generator and utilities.
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #include "v8.h"
 #include "cpu-profiler-inl.h"
 #include "cctest.h"
 #include "../include/v8-profiler.h"
 
-namespace i = v8::internal;
-
 using i::CodeEntry;
 using i::CpuProfile;
 using i::CpuProfiler;
@@ -24,11 +20,8 @@
 TEST(StartStop) {
   CpuProfilesCollection profiles;
   ProfileGenerator generator(&profiles);
-  ProfilerEventsProcessor processor(i::Isolate::Current(), &generator);
+  ProfilerEventsProcessor processor(&generator);
   processor.Start();
-  while (!processor.running()) {
-    i::Thread::YieldCPU();
-  }
   processor.Stop();
   processor.Join();
 }
@@ -88,11 +81,8 @@
   CpuProfilesCollection profiles;
   profiles.StartProfiling("", 1);
   ProfileGenerator generator(&profiles);
-  ProfilerEventsProcessor processor(i::Isolate::Current(), &generator);
+  ProfilerEventsProcessor processor(&generator);
   processor.Start();
-  while (!processor.running()) {
-    i::Thread::YieldCPU();
-  }
 
   // Enqueue code creation events.
   i::HandleScope scope;
@@ -117,7 +107,7 @@
                             0x80);
   processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
   processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
-  processor.CodeDeleteEvent(ToAddress(0x1600));
+  processor.CodeCreateEvent(i::Logger::STUB_TAG, 4, ToAddress(0x1605), 0x10);
   // Enqueue a tick event to enable code events processing.
   EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
 
@@ -152,11 +142,8 @@
   CpuProfilesCollection profiles;
   profiles.StartProfiling("", 1);
   ProfileGenerator generator(&profiles);
-  ProfilerEventsProcessor processor(i::Isolate::Current(), &generator);
+  ProfilerEventsProcessor processor(&generator);
   processor.Start();
-  while (!processor.running()) {
-    i::Thread::YieldCPU();
-  }
 
   processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
                             "bbb",
@@ -245,11 +232,8 @@
   CpuProfilesCollection profiles;
   profiles.StartProfiling("", 1);
   ProfileGenerator generator(&profiles);
-  ProfilerEventsProcessor processor(i::Isolate::Current(), &generator);
+  ProfilerEventsProcessor processor(&generator);
   processor.Start();
-  while (!processor.running()) {
-    i::Thread::YieldCPU();
-  }
 
   processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
                             "bbb",
@@ -413,5 +397,3 @@
   CHECK_EQ(0, CpuProfiler::GetProfilesCount());
   CHECK_EQ(NULL, v8::CpuProfiler::FindProfile(uid3));
 }
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-dataflow.cc b/test/cctest/test-dataflow.cc
index feae0b0..ad48f55 100644
--- a/test/cctest/test-dataflow.cc
+++ b/test/cctest/test-dataflow.cc
@@ -36,7 +36,7 @@
 
 TEST(BitVector) {
   v8::internal::V8::Initialize(NULL);
-  ZoneScope zone(DELETE_ON_EXIT);
+  ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT);
   {
     BitVector v(15);
     v.Add(1);
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 7f506db..45da6dc 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -157,6 +157,7 @@
         Handle<Object>(debug->debug_context()->global_proxy()), DONT_ENUM,
         ::v8::internal::kNonStrictMode);
   }
+
  private:
   v8::Persistent<v8::Context> context_;
 };
@@ -501,7 +502,11 @@
   CHECK(Debug::HasDebugInfo(shared));
   TestBreakLocationIterator it1(Debug::GetDebugInfo(shared));
   it1.FindBreakLocationFromPosition(position);
-  CHECK_EQ(mode, it1.it()->rinfo()->rmode());
+  v8::internal::RelocInfo::Mode actual_mode = it1.it()->rinfo()->rmode();
+  if (actual_mode == v8::internal::RelocInfo::CODE_TARGET_WITH_ID) {
+    actual_mode = v8::internal::RelocInfo::CODE_TARGET;
+  }
+  CHECK_EQ(mode, actual_mode);
   if (mode != v8::internal::RelocInfo::JS_RETURN) {
     CHECK_EQ(debug_break,
         Code::GetCodeFromTargetAddress(it1.it()->rinfo()->target_address()));
@@ -516,7 +521,11 @@
   CHECK(debug->EnsureDebugInfo(shared));
   TestBreakLocationIterator it2(Debug::GetDebugInfo(shared));
   it2.FindBreakLocationFromPosition(position);
-  CHECK_EQ(mode, it2.it()->rinfo()->rmode());
+  actual_mode = it2.it()->rinfo()->rmode();
+  if (actual_mode == v8::internal::RelocInfo::CODE_TARGET_WITH_ID) {
+    actual_mode = v8::internal::RelocInfo::CODE_TARGET;
+  }
+  CHECK_EQ(mode, actual_mode);
   if (mode == v8::internal::RelocInfo::JS_RETURN) {
     CHECK(!Debug::IsDebugBreakAtReturn(it2.it()->rinfo()));
   }
@@ -641,6 +650,7 @@
 
 // Debug event handler which counts the break points which have been hit.
 int break_point_hit_count = 0;
+int break_point_hit_count_deoptimize = 0;
 static void DebugEventBreakPointHitCount(v8::DebugEvent event,
                                          v8::Handle<v8::Object> exec_state,
                                          v8::Handle<v8::Object> event_data,
@@ -717,6 +727,12 @@
         script_data->WriteAscii(last_script_data_hit);
       }
     }
+
+    // Perform a full deoptimization when the specified number of
+    // breaks have been hit.
+    if (break_point_hit_count == break_point_hit_count_deoptimize) {
+      i::Deoptimizer::DeoptimizeAll();
+    }
   } else if (event == v8::AfterCompile && !compiled_script_data.IsEmpty()) {
     const int argc = 1;
     v8::Handle<v8::Value> argv[argc] = { event_data };
@@ -975,12 +991,30 @@
       // Count the number of breaks.
       break_point_hit_count++;
 
+      // Collect the JavaScript stack height if the function frame_count is
+      // compiled.
+      if (!frame_count.IsEmpty()) {
+        static const int kArgc = 1;
+        v8::Handle<v8::Value> argv[kArgc] = { exec_state };
+        // Using exec_state as receiver is just to have a receiver.
+        v8::Handle<v8::Value> result =
+            frame_count->Call(exec_state, kArgc, argv);
+        last_js_stack_height = result->Int32Value();
+      }
+
       // Set the break flag again to come back here as soon as possible.
       v8::Debug::DebugBreak();
+
     } else if (terminate_after_max_break_point_hit) {
       // Terminate execution after the last break if requested.
       v8::V8::TerminateExecution();
     }
+
+    // Perform a full deoptimization when the specified number of
+    // breaks have been hit.
+    if (break_point_hit_count == break_point_hit_count_deoptimize) {
+      i::Deoptimizer::DeoptimizeAll();
+    }
   }
 }
 
@@ -2141,7 +2175,7 @@
   f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
   g = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
 
-  // Chesk that a break point was hit when the script was run.
+  // Check that a break point was hit when the script was run.
   CHECK_EQ(1, break_point_hit_count);
   CHECK_EQ(0, StrLength(last_function_hit));
 
@@ -4256,9 +4290,9 @@
                  "named_values[%d] instanceof debug.PropertyMirror", i);
     CHECK(CompileRun(buffer.start())->BooleanValue());
 
-    // 4 is PropertyType.Interceptor
+    // 5 is PropertyType.Interceptor
     OS::SNPrintF(buffer, "named_values[%d].propertyType()", i);
-    CHECK_EQ(4, CompileRun(buffer.start())->Int32Value());
+    CHECK_EQ(5, CompileRun(buffer.start())->Int32Value());
 
     OS::SNPrintF(buffer, "named_values[%d].isNative()", i);
     CHECK(CompileRun(buffer.start())->BooleanValue());
@@ -4720,8 +4754,8 @@
 // placing JSON debugger commands in the queue.
 class MessageQueueDebuggerThread : public v8::internal::Thread {
  public:
-  explicit MessageQueueDebuggerThread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "MessageQueueDebuggerThread") { }
+  MessageQueueDebuggerThread()
+      : Thread("MessageQueueDebuggerThread") { }
   void Run();
 };
 
@@ -4824,8 +4858,7 @@
 
 // This thread runs the v8 engine.
 TEST(MessageQueues) {
-  MessageQueueDebuggerThread message_queue_debugger_thread(
-      i::Isolate::Current());
+  MessageQueueDebuggerThread message_queue_debugger_thread;
 
   // Create a V8 environment
   v8::HandleScope scope;
@@ -4972,15 +5005,13 @@
 
 class V8Thread : public v8::internal::Thread {
  public:
-  explicit V8Thread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "V8Thread") { }
+  V8Thread() : Thread("V8Thread") { }
   void Run();
 };
 
 class DebuggerThread : public v8::internal::Thread {
  public:
-  explicit DebuggerThread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "DebuggerThread") { }
+  DebuggerThread() : Thread("DebuggerThread") { }
   void Run();
 };
 
@@ -5024,6 +5055,7 @@
       "\n"
       "foo();\n";
 
+  v8::V8::Initialize();
   v8::HandleScope scope;
   DebugLocalContext env;
   v8::Debug::SetMessageHandler2(&ThreadedMessageHandler);
@@ -5057,8 +5089,8 @@
 
 
 TEST(ThreadedDebugging) {
-  DebuggerThread debugger_thread(i::Isolate::Current());
-  V8Thread v8_thread(i::Isolate::Current());
+  DebuggerThread debugger_thread;
+  V8Thread v8_thread;
 
   // Create a V8 environment
   threaded_debugging_barriers.Initialize();
@@ -5079,16 +5111,14 @@
 
 class BreakpointsV8Thread : public v8::internal::Thread {
  public:
-  explicit BreakpointsV8Thread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "BreakpointsV8Thread") { }
+  BreakpointsV8Thread() : Thread("BreakpointsV8Thread") { }
   void Run();
 };
 
 class BreakpointsDebuggerThread : public v8::internal::Thread {
  public:
-  explicit BreakpointsDebuggerThread(v8::internal::Isolate* isolate,
-                                     bool global_evaluate)
-      : Thread(isolate, "BreakpointsDebuggerThread"),
+  explicit BreakpointsDebuggerThread(bool global_evaluate)
+      : Thread("BreakpointsDebuggerThread"),
         global_evaluate_(global_evaluate) {}
   void Run();
 
@@ -5138,6 +5168,7 @@
   const char* source_2 = "cat(17);\n"
     "cat(19);\n";
 
+  v8::V8::Initialize();
   v8::HandleScope scope;
   DebugLocalContext env;
   v8::Debug::SetMessageHandler2(&BreakpointsMessageHandler);
@@ -5265,9 +5296,8 @@
 void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
   i::FLAG_debugger_auto_break = true;
 
-  BreakpointsDebuggerThread breakpoints_debugger_thread(i::Isolate::Current(),
-      global_evaluate);
-  BreakpointsV8Thread breakpoints_v8_thread(i::Isolate::Current());
+  BreakpointsDebuggerThread breakpoints_debugger_thread(global_evaluate);
+  BreakpointsV8Thread breakpoints_v8_thread;
 
   // Create a V8 environment
   Barriers stack_allocated_breakpoints_barriers;
@@ -5649,15 +5679,13 @@
 
 class HostDispatchV8Thread : public v8::internal::Thread {
  public:
-  explicit HostDispatchV8Thread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "HostDispatchV8Thread") { }
+  HostDispatchV8Thread() : Thread("HostDispatchV8Thread") { }
   void Run();
 };
 
 class HostDispatchDebuggerThread : public v8::internal::Thread {
  public:
-  explicit HostDispatchDebuggerThread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "HostDispatchDebuggerThread") { }
+  HostDispatchDebuggerThread() : Thread("HostDispatchDebuggerThread") { }
   void Run();
 };
 
@@ -5687,6 +5715,7 @@
     "\n";
   const char* source_2 = "cat(17);\n";
 
+  v8::V8::Initialize();
   v8::HandleScope scope;
   DebugLocalContext env;
 
@@ -5729,9 +5758,8 @@
 
 
 TEST(DebuggerHostDispatch) {
-  HostDispatchDebuggerThread host_dispatch_debugger_thread(
-      i::Isolate::Current());
-  HostDispatchV8Thread host_dispatch_v8_thread(i::Isolate::Current());
+  HostDispatchDebuggerThread host_dispatch_debugger_thread;
+  HostDispatchV8Thread host_dispatch_v8_thread;
   i::FLAG_debugger_auto_break = true;
 
   // Create a V8 environment
@@ -5755,15 +5783,14 @@
 
 class DebugMessageDispatchV8Thread : public v8::internal::Thread {
  public:
-  explicit DebugMessageDispatchV8Thread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "DebugMessageDispatchV8Thread") { }
+  DebugMessageDispatchV8Thread() : Thread("DebugMessageDispatchV8Thread") { }
   void Run();
 };
 
 class DebugMessageDispatchDebuggerThread : public v8::internal::Thread {
  public:
-  explicit DebugMessageDispatchDebuggerThread(v8::internal::Isolate* isolate)
-      : Thread(isolate, "DebugMessageDispatchDebuggerThread") { }
+  DebugMessageDispatchDebuggerThread()
+      : Thread("DebugMessageDispatchDebuggerThread") { }
   void Run();
 };
 
@@ -5776,6 +5803,7 @@
 
 
 void DebugMessageDispatchV8Thread::Run() {
+  v8::V8::Initialize();
   v8::HandleScope scope;
   DebugLocalContext env;
 
@@ -5797,10 +5825,8 @@
 
 
 TEST(DebuggerDebugMessageDispatch) {
-  DebugMessageDispatchDebuggerThread debug_message_dispatch_debugger_thread(
-      i::Isolate::Current());
-  DebugMessageDispatchV8Thread debug_message_dispatch_v8_thread(
-      i::Isolate::Current());
+  DebugMessageDispatchDebuggerThread debug_message_dispatch_debugger_thread;
+  DebugMessageDispatchV8Thread debug_message_dispatch_v8_thread;
 
   i::FLAG_debugger_auto_break = true;
 
@@ -5840,7 +5866,6 @@
   // Test starting and stopping the agent without any client connection.
   debugger->StartAgent("test", kPort1);
   debugger->StopAgent();
-
   // Test starting the agent, connecting a client and shutting down the agent
   // with the client connected.
   ok = debugger->StartAgent("test", kPort2);
@@ -5849,6 +5874,12 @@
   i::Socket* client = i::OS::CreateSocket();
   ok = client->Connect("localhost", port2_str);
   CHECK(ok);
+  // It is important to wait for a message from the agent. Otherwise, we can
+  // close the server socket during the "accept" syscall, making it fail (at
+  // least on Linux), and the test will behave incorrectly.
+  char buf;
+  ok = client->Receive(&buf, 1) == 1;
+  CHECK(ok);
   debugger->StopAgent();
   delete client;
 
@@ -5866,8 +5897,8 @@
 
 class DebuggerAgentProtocolServerThread : public i::Thread {
  public:
-  explicit DebuggerAgentProtocolServerThread(i::Isolate* isolate, int port)
-      : Thread(isolate, "DebuggerAgentProtocolServerThread"),
+  explicit DebuggerAgentProtocolServerThread(int port)
+      : Thread("DebuggerAgentProtocolServerThread"),
         port_(port),
         server_(NULL),
         client_(NULL),
@@ -5886,7 +5917,7 @@
 
  private:
   int port_;
-  i::SmartPointer<char> body_;
+  i::SmartArrayPointer<char> body_;
   i::Socket* server_;  // Server socket used for bind/accept.
   i::Socket* client_;  // Single client connection used by the test.
   i::Semaphore* listening_;  // Signalled when the server is in listen mode.
@@ -5932,7 +5963,7 @@
 
   // Create a socket server to receive a debugger agent message.
   DebuggerAgentProtocolServerThread* server =
-      new DebuggerAgentProtocolServerThread(i::Isolate::Current(), kPort);
+      new DebuggerAgentProtocolServerThread(kPort);
   server->Start();
   server->WaitForListening();
 
@@ -6306,8 +6337,7 @@
   v8::Persistent<v8::Context> context_1;
   v8::Handle<v8::ObjectTemplate> global_template =
       v8::Handle<v8::ObjectTemplate>();
-  v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>();
-  context_1 = v8::Context::New(NULL, global_template, global_object);
+  context_1 = v8::Context::New(NULL, global_template);
 
   // Default data value is undefined.
   CHECK(context_1->GetData()->IsUndefined());
@@ -6372,11 +6402,11 @@
       const int kBufferSize = 1000;
       uint16_t buffer[kBufferSize];
       const char* eval_command =
-        "{\"seq\":0,"
-         "\"type\":\"request\","
-         "\"command\":\"evaluate\","
-         "arguments:{\"expression\":\"debugger;\","
-         "\"global\":true,\"disable_break\":false}}";
+          "{\"seq\":0,"
+          "\"type\":\"request\","
+          "\"command\":\"evaluate\","
+          "\"arguments\":{\"expression\":\"debugger;\","
+          "\"global\":true,\"disable_break\":false}}";
 
       // Send evaluate command.
       v8::Debug::SendCommand(buffer, AsciiToUtf16(eval_command, buffer));
@@ -7192,29 +7222,38 @@
                                  const char** loop_bodies,
                                  const char* loop_tail) {
   // Receive 100 breaks for each test and then terminate JavaScript execution.
-  static int count = 0;
+  static const int kBreaksPerTest = 100;
 
-  for (int i = 0; loop_bodies[i] != NULL; i++) {
-    count++;
-    max_break_point_hit_count = count * 100;
-    terminate_after_max_break_point_hit = true;
+  for (int i = 0; i < 1 && loop_bodies[i] != NULL; i++) {
+    // Perform a lazy deoptimization after various numbers of breaks
+    // have been hit.
+    for (int j = 0; j < 10; j++) {
+      break_point_hit_count_deoptimize = j;
+      if (j == 10) {
+        break_point_hit_count_deoptimize = kBreaksPerTest;
+      }
 
-    EmbeddedVector<char, 1024> buffer;
-    OS::SNPrintF(buffer,
-                 "function f() {%s%s%s}",
-                 loop_head, loop_bodies[i], loop_tail);
+      break_point_hit_count = 0;
+      max_break_point_hit_count = kBreaksPerTest;
+      terminate_after_max_break_point_hit = true;
 
-    // Function with infinite loop.
-    CompileRun(buffer.start());
+      EmbeddedVector<char, 1024> buffer;
+      OS::SNPrintF(buffer,
+                   "function f() {%s%s%s}",
+                   loop_head, loop_bodies[i], loop_tail);
 
-    // Set the debug break to enter the debugger as soon as possible.
-    v8::Debug::DebugBreak();
+      // Function with infinite loop.
+      CompileRun(buffer.start());
 
-    // Call function with infinite loop.
-    CompileRun("f();");
-    CHECK_EQ(count * 100, break_point_hit_count);
+      // Set the debug break to enter the debugger as soon as possible.
+      v8::Debug::DebugBreak();
 
-    CHECK(!v8::V8::IsExecutionTerminating());
+      // Call function with infinite loop.
+      CompileRun("f();");
+      CHECK_EQ(kBreaksPerTest, break_point_hit_count);
+
+      CHECK(!v8::V8::IsExecutionTerminating());
+    }
   }
 }
 
@@ -7226,6 +7265,9 @@
   // Register a debug event listener which sets the break flag and counts.
   v8::Debug::SetDebugEventListener(DebugEventBreakMax);
 
+  // Create a function for getting the frame count when hitting the break.
+  frame_count = CompileFunction(&env, frame_count_source, "frame_count");
+
   CompileRun("var a = 1;");
   CompileRun("function g() { }");
   CompileRun("function h() { }");
diff --git a/test/cctest/test-dictionary.cc b/test/cctest/test-dictionary.cc
new file mode 100644
index 0000000..15a854b
--- /dev/null
+++ b/test/cctest/test-dictionary.cc
@@ -0,0 +1,85 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "debug.h"
+#include "execution.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "objects.h"
+#include "global-handles.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+TEST(ObjectHashTable) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Handle<ObjectHashTable> table = FACTORY->NewObjectHashTable(23);
+  Handle<JSObject> a = FACTORY->NewJSArray(7);
+  Handle<JSObject> b = FACTORY->NewJSArray(11);
+  table = PutIntoObjectHashTable(table, a, b);
+  CHECK_EQ(table->NumberOfElements(), 1);
+  CHECK_EQ(table->Lookup(*a), *b);
+  CHECK_EQ(table->Lookup(*b), HEAP->undefined_value());
+
+  // Keys still have to be valid after objects were moved.
+  HEAP->CollectGarbage(NEW_SPACE);
+  CHECK_EQ(table->NumberOfElements(), 1);
+  CHECK_EQ(table->Lookup(*a), *b);
+  CHECK_EQ(table->Lookup(*b), HEAP->undefined_value());
+
+  // Keys that are overwritten should not change the number of elements.
+  table = PutIntoObjectHashTable(table, a, FACTORY->NewJSArray(13));
+  CHECK_EQ(table->NumberOfElements(), 1);
+  CHECK_NE(table->Lookup(*a), *b);
+
+  // Keys mapped to undefined should be removed permanently.
+  table = PutIntoObjectHashTable(table, a, FACTORY->undefined_value());
+  CHECK_EQ(table->NumberOfElements(), 0);
+  CHECK_EQ(table->NumberOfDeletedElements(), 1);
+  CHECK_EQ(table->Lookup(*a), HEAP->undefined_value());
+
+  // Keys should map back to their respective values.
+  for (int i = 0; i < 100; i++) {
+    Handle<JSObject> key = FACTORY->NewJSArray(7);
+    Handle<JSObject> value = FACTORY->NewJSArray(11);
+    table = PutIntoObjectHashTable(table, key, value);
+    CHECK_EQ(table->NumberOfElements(), i + 1);
+    CHECK_NE(table->FindEntry(*key), ObjectHashTable::kNotFound);
+    CHECK_EQ(table->Lookup(*key), *value);
+  }
+
+  // Keys never added to the map should not be found.
+  for (int i = 0; i < 1000; i++) {
+    Handle<JSObject> o = FACTORY->NewJSArray(100);
+    CHECK_EQ(table->FindEntry(*o), ObjectHashTable::kNotFound);
+    CHECK_EQ(table->Lookup(*o), HEAP->undefined_value());
+  }
+}
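
The assertions above pin down identity-based hash table semantics: keys are
compared by object identity, overwriting an existing key leaves the element
count unchanged, and storing undefined removes the entry. The GC round in the
middle additionally checks that the table keeps working after objects move,
which the rough standard-library analogy below does not model (illustration
only, not how V8's ObjectHashTable is implemented):

  #include <cassert>
  #include <unordered_map>

  struct Obj {};  // stands in for a JSObject; the address is the identity

  int main() {
    std::unordered_map<const Obj*, const Obj*> table;  // identity-keyed
    Obj a, b, c;

    table[&a] = &b;                // Put(a, b)
    assert(table.size() == 1);
    assert(table.at(&a) == &b);    // Lookup(a) == b
    assert(table.count(&b) == 0);  // Lookup(b) is "undefined"

    table[&a] = &c;                // overwriting a key keeps the count at 1
    assert(table.size() == 1);
    assert(table.at(&a) != &b);

    table.erase(&a);               // Put(a, undefined) deletes the entry
    assert(table.empty());
    return 0;
  }
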
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 65a2cf3..032e6bc 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -436,14 +436,14 @@
             "ee0faa90       vmov s31, r10");
 
     COMPARE(vabs(d0, d1),
-            "eeb00bc1       vabs d0, d1");
+            "eeb00bc1       vabs.f64 d0, d1");
     COMPARE(vabs(d3, d4, mi),
-            "4eb03bc4       vabsmi d3, d4");
+            "4eb03bc4       vabs.f64mi d3, d4");
 
     COMPARE(vneg(d0, d1),
-            "eeb10b41       vneg d0, d1");
+            "eeb10b41       vneg.f64 d0, d1");
     COMPARE(vneg(d3, d4, mi),
-            "4eb13b44       vnegmi d3, d4");
+            "4eb13b44       vneg.f64mi d3, d4");
 
     COMPARE(vadd(d0, d1, d2),
             "ee310b02       vadd.f64 d0, d1, d2");
@@ -543,3 +543,206 @@
 
   VERIFY_RUN();
 }
+
+
+TEST(LoadStore) {
+  SETUP();
+
+  COMPARE(ldrb(r0, MemOperand(r1)),
+          "e5d10000       ldrb r0, [r1, #+0]");
+  COMPARE(ldrb(r2, MemOperand(r3, 42)),
+          "e5d3202a       ldrb r2, [r3, #+42]");
+  COMPARE(ldrb(r4, MemOperand(r5, -42)),
+          "e555402a       ldrb r4, [r5, #-42]");
+  COMPARE(ldrb(r6, MemOperand(r7, 42, PostIndex)),
+          "e4d7602a       ldrb r6, [r7], #+42");
+  COMPARE(ldrb(r8, MemOperand(r9, -42, PostIndex)),
+          "e459802a       ldrb r8, [r9], #-42");
+  COMPARE(ldrb(r10, MemOperand(fp, 42, PreIndex)),
+          "e5fba02a       ldrb r10, [fp, #+42]!");
+  COMPARE(ldrb(ip, MemOperand(sp, -42, PreIndex)),
+          "e57dc02a       ldrb ip, [sp, #-42]!");
+  COMPARE(ldrb(r0, MemOperand(r1, r2)),
+          "e7d10002       ldrb r0, [r1, +r2]");
+  COMPARE(ldrb(r0, MemOperand(r1, r2, NegOffset)),
+          "e7510002       ldrb r0, [r1, -r2]");
+  COMPARE(ldrb(r0, MemOperand(r1, r2, PostIndex)),
+          "e6d10002       ldrb r0, [r1], +r2");
+  COMPARE(ldrb(r0, MemOperand(r1, r2, NegPostIndex)),
+          "e6510002       ldrb r0, [r1], -r2");
+  COMPARE(ldrb(r0, MemOperand(r1, r2, PreIndex)),
+          "e7f10002       ldrb r0, [r1, +r2]!");
+  COMPARE(ldrb(r0, MemOperand(r1, r2, NegPreIndex)),
+          "e7710002       ldrb r0, [r1, -r2]!");
+
+  COMPARE(strb(r0, MemOperand(r1)),
+          "e5c10000       strb r0, [r1, #+0]");
+  COMPARE(strb(r2, MemOperand(r3, 42)),
+          "e5c3202a       strb r2, [r3, #+42]");
+  COMPARE(strb(r4, MemOperand(r5, -42)),
+          "e545402a       strb r4, [r5, #-42]");
+  COMPARE(strb(r6, MemOperand(r7, 42, PostIndex)),
+          "e4c7602a       strb r6, [r7], #+42");
+  COMPARE(strb(r8, MemOperand(r9, -42, PostIndex)),
+          "e449802a       strb r8, [r9], #-42");
+  COMPARE(strb(r10, MemOperand(fp, 42, PreIndex)),
+          "e5eba02a       strb r10, [fp, #+42]!");
+  COMPARE(strb(ip, MemOperand(sp, -42, PreIndex)),
+          "e56dc02a       strb ip, [sp, #-42]!");
+  COMPARE(strb(r0, MemOperand(r1, r2)),
+          "e7c10002       strb r0, [r1, +r2]");
+  COMPARE(strb(r0, MemOperand(r1, r2, NegOffset)),
+          "e7410002       strb r0, [r1, -r2]");
+  COMPARE(strb(r0, MemOperand(r1, r2, PostIndex)),
+          "e6c10002       strb r0, [r1], +r2");
+  COMPARE(strb(r0, MemOperand(r1, r2, NegPostIndex)),
+          "e6410002       strb r0, [r1], -r2");
+  COMPARE(strb(r0, MemOperand(r1, r2, PreIndex)),
+          "e7e10002       strb r0, [r1, +r2]!");
+  COMPARE(strb(r0, MemOperand(r1, r2, NegPreIndex)),
+          "e7610002       strb r0, [r1, -r2]!");
+
+  COMPARE(ldrh(r0, MemOperand(r1)),
+          "e1d100b0       ldrh r0, [r1, #+0]");
+  COMPARE(ldrh(r2, MemOperand(r3, 42)),
+          "e1d322ba       ldrh r2, [r3, #+42]");
+  COMPARE(ldrh(r4, MemOperand(r5, -42)),
+          "e15542ba       ldrh r4, [r5, #-42]");
+  COMPARE(ldrh(r6, MemOperand(r7, 42, PostIndex)),
+          "e0d762ba       ldrh r6, [r7], #+42");
+  COMPARE(ldrh(r8, MemOperand(r9, -42, PostIndex)),
+          "e05982ba       ldrh r8, [r9], #-42");
+  COMPARE(ldrh(r10, MemOperand(fp, 42, PreIndex)),
+          "e1fba2ba       ldrh r10, [fp, #+42]!");
+  COMPARE(ldrh(ip, MemOperand(sp, -42, PreIndex)),
+          "e17dc2ba       ldrh ip, [sp, #-42]!");
+  COMPARE(ldrh(r0, MemOperand(r1, r2)),
+          "e19100b2       ldrh r0, [r1, +r2]");
+  COMPARE(ldrh(r0, MemOperand(r1, r2, NegOffset)),
+          "e11100b2       ldrh r0, [r1, -r2]");
+  COMPARE(ldrh(r0, MemOperand(r1, r2, PostIndex)),
+          "e09100b2       ldrh r0, [r1], +r2");
+  COMPARE(ldrh(r0, MemOperand(r1, r2, NegPostIndex)),
+          "e01100b2       ldrh r0, [r1], -r2");
+  COMPARE(ldrh(r0, MemOperand(r1, r2, PreIndex)),
+          "e1b100b2       ldrh r0, [r1, +r2]!");
+  COMPARE(ldrh(r0, MemOperand(r1, r2, NegPreIndex)),
+          "e13100b2       ldrh r0, [r1, -r2]!");
+
+  COMPARE(strh(r0, MemOperand(r1)),
+          "e1c100b0       strh r0, [r1, #+0]");
+  COMPARE(strh(r2, MemOperand(r3, 42)),
+          "e1c322ba       strh r2, [r3, #+42]");
+  COMPARE(strh(r4, MemOperand(r5, -42)),
+          "e14542ba       strh r4, [r5, #-42]");
+  COMPARE(strh(r6, MemOperand(r7, 42, PostIndex)),
+          "e0c762ba       strh r6, [r7], #+42");
+  COMPARE(strh(r8, MemOperand(r9, -42, PostIndex)),
+          "e04982ba       strh r8, [r9], #-42");
+  COMPARE(strh(r10, MemOperand(fp, 42, PreIndex)),
+          "e1eba2ba       strh r10, [fp, #+42]!");
+  COMPARE(strh(ip, MemOperand(sp, -42, PreIndex)),
+          "e16dc2ba       strh ip, [sp, #-42]!");
+  COMPARE(strh(r0, MemOperand(r1, r2)),
+          "e18100b2       strh r0, [r1, +r2]");
+  COMPARE(strh(r0, MemOperand(r1, r2, NegOffset)),
+          "e10100b2       strh r0, [r1, -r2]");
+  COMPARE(strh(r0, MemOperand(r1, r2, PostIndex)),
+          "e08100b2       strh r0, [r1], +r2");
+  COMPARE(strh(r0, MemOperand(r1, r2, NegPostIndex)),
+          "e00100b2       strh r0, [r1], -r2");
+  COMPARE(strh(r0, MemOperand(r1, r2, PreIndex)),
+          "e1a100b2       strh r0, [r1, +r2]!");
+  COMPARE(strh(r0, MemOperand(r1, r2, NegPreIndex)),
+          "e12100b2       strh r0, [r1, -r2]!");
+
+  COMPARE(ldr(r0, MemOperand(r1)),
+          "e5910000       ldr r0, [r1, #+0]");
+  COMPARE(ldr(r2, MemOperand(r3, 42)),
+          "e593202a       ldr r2, [r3, #+42]");
+  COMPARE(ldr(r4, MemOperand(r5, -42)),
+          "e515402a       ldr r4, [r5, #-42]");
+  COMPARE(ldr(r6, MemOperand(r7, 42, PostIndex)),
+          "e497602a       ldr r6, [r7], #+42");
+  COMPARE(ldr(r8, MemOperand(r9, -42, PostIndex)),
+          "e419802a       ldr r8, [r9], #-42");
+  COMPARE(ldr(r10, MemOperand(fp, 42, PreIndex)),
+          "e5bba02a       ldr r10, [fp, #+42]!");
+  COMPARE(ldr(ip, MemOperand(sp, -42, PreIndex)),
+          "e53dc02a       ldr ip, [sp, #-42]!");
+  COMPARE(ldr(r0, MemOperand(r1, r2)),
+          "e7910002       ldr r0, [r1, +r2]");
+  COMPARE(ldr(r0, MemOperand(r1, r2, NegOffset)),
+          "e7110002       ldr r0, [r1, -r2]");
+  COMPARE(ldr(r0, MemOperand(r1, r2, PostIndex)),
+          "e6910002       ldr r0, [r1], +r2");
+  COMPARE(ldr(r0, MemOperand(r1, r2, NegPostIndex)),
+          "e6110002       ldr r0, [r1], -r2");
+  COMPARE(ldr(r0, MemOperand(r1, r2, PreIndex)),
+          "e7b10002       ldr r0, [r1, +r2]!");
+  COMPARE(ldr(r0, MemOperand(r1, r2, NegPreIndex)),
+          "e7310002       ldr r0, [r1, -r2]!");
+
+  COMPARE(str(r0, MemOperand(r1)),
+          "e5810000       str r0, [r1, #+0]");
+  COMPARE(str(r2, MemOperand(r3, 42)),
+          "e583202a       str r2, [r3, #+42]");
+  COMPARE(str(r4, MemOperand(r5, -42)),
+          "e505402a       str r4, [r5, #-42]");
+  COMPARE(str(r6, MemOperand(r7, 42, PostIndex)),
+          "e487602a       str r6, [r7], #+42");
+  COMPARE(str(r8, MemOperand(r9, -42, PostIndex)),
+          "e409802a       str r8, [r9], #-42");
+  COMPARE(str(r10, MemOperand(fp, 42, PreIndex)),
+          "e5aba02a       str r10, [fp, #+42]!");
+  COMPARE(str(ip, MemOperand(sp, -42, PreIndex)),
+          "e52dc02a       str ip, [sp, #-42]!");
+  COMPARE(str(r0, MemOperand(r1, r2)),
+          "e7810002       str r0, [r1, +r2]");
+  COMPARE(str(r0, MemOperand(r1, r2, NegOffset)),
+          "e7010002       str r0, [r1, -r2]");
+  COMPARE(str(r0, MemOperand(r1, r2, PostIndex)),
+          "e6810002       str r0, [r1], +r2");
+  COMPARE(str(r0, MemOperand(r1, r2, NegPostIndex)),
+          "e6010002       str r0, [r1], -r2");
+  COMPARE(str(r0, MemOperand(r1, r2, PreIndex)),
+          "e7a10002       str r0, [r1, +r2]!");
+  COMPARE(str(r0, MemOperand(r1, r2, NegPreIndex)),
+          "e7210002       str r0, [r1, -r2]!");
+
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatures::Scope scope(ARMv7);
+    COMPARE(ldrd(r0, r1, MemOperand(r1)),
+            "e1c100d0       ldrd r0, [r1, #+0]");
+    COMPARE(ldrd(r2, r3, MemOperand(r3, 127)),
+            "e1c327df       ldrd r2, [r3, #+127]");
+    COMPARE(ldrd(r4, r5, MemOperand(r5, -127)),
+            "e14547df       ldrd r4, [r5, #-127]");
+    COMPARE(ldrd(r6, r7, MemOperand(r7, 127, PostIndex)),
+            "e0c767df       ldrd r6, [r7], #+127");
+    COMPARE(ldrd(r8, r9, MemOperand(r9, -127, PostIndex)),
+            "e04987df       ldrd r8, [r9], #-127");
+    COMPARE(ldrd(r10, fp, MemOperand(fp, 127, PreIndex)),
+            "e1eba7df       ldrd r10, [fp, #+127]!");
+    COMPARE(ldrd(ip, sp, MemOperand(sp, -127, PreIndex)),
+            "e16dc7df       ldrd ip, [sp, #-127]!");
+
+    COMPARE(strd(r0, r1, MemOperand(r1)),
+            "e1c100f0       strd r0, [r1, #+0]");
+    COMPARE(strd(r2, r3, MemOperand(r3, 127)),
+            "e1c327ff       strd r2, [r3, #+127]");
+    COMPARE(strd(r4, r5, MemOperand(r5, -127)),
+            "e14547ff       strd r4, [r5, #-127]");
+    COMPARE(strd(r6, r7, MemOperand(r7, 127, PostIndex)),
+            "e0c767ff       strd r6, [r7], #+127");
+    COMPARE(strd(r8, r9, MemOperand(r9, -127, PostIndex)),
+            "e04987ff       strd r8, [r9], #-127");
+    COMPARE(strd(r10, fp, MemOperand(fp, 127, PreIndex)),
+            "e1eba7ff       strd r10, [fp, #+127]!");
+    COMPARE(strd(ip, sp, MemOperand(sp, -127, PreIndex)),
+            "e16dc7ff       strd ip, [sp, #-127]!");
+  }
+
+  VERIFY_RUN();
+}
+
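
The expected strings in this test walk each load/store through the three ARM
addressing modes the assembler has to encode: plain offset ([Rn, #imm] or
[Rn, +/-Rm]), pre-indexed with writeback ([Rn, #imm]!), and post-indexed
([Rn], #imm). Their semantics, written out in plain C++ over a byte array
(illustration only, independent of V8):

  #include <cstdint>

  uint8_t LoadOffset(const uint8_t* mem, uintptr_t base, intptr_t off) {
    return mem[base + off];          // ldrb r0, [r1, #off]  (base unchanged)
  }

  uint8_t LoadPreIndex(const uint8_t* mem, uintptr_t* base, intptr_t off) {
    *base += off;                    // write the new address back first
    return mem[*base];               // ldrb r0, [r1, #off]!
  }

  uint8_t LoadPostIndex(const uint8_t* mem, uintptr_t* base, intptr_t off) {
    uint8_t value = mem[*base];      // access through the old address
    *base += off;                    // then write the incremented address back
    return value;                    // ldrb r0, [r1], #off
  }
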
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index cb735c7..9f7d0bb 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -330,11 +330,6 @@
   __ j(less_equal, &Ljcc);
   __ j(greater, &Ljcc);
 
-  // checking hints
-  __ j(zero, &Ljcc, taken);
-  __ j(zero, &Ljcc, not_taken);
-
-  // __ mov(Operand::StaticVariable(Isolate::handler_address()), eax);
   // 0xD9 instructions
   __ nop();
 
diff --git a/test/cctest/test-disasm-mips.cc b/test/cctest/test-disasm-mips.cc
new file mode 100644
index 0000000..5ad99d7
--- /dev/null
+++ b/test/cctest/test-disasm-mips.cc
@@ -0,0 +1,437 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+  // Disable compilation of natives.
+  FLAG_disable_native_files = true;
+  if (env.IsEmpty()) {
+    env = v8::Context::New();
+  }
+}
+
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+  disasm::NameConverter converter;
+  disasm::Disassembler disasm(converter);
+  EmbeddedVector<char, 128> disasm_buffer;
+
+  disasm.InstructionDecode(disasm_buffer, pc);
+
+  if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+    fprintf(stderr,
+            "expected: \n"
+            "%s\n"
+            "disassembled: \n"
+            "%s\n\n",
+            compare_string, disasm_buffer.start());
+    return false;
+  }
+  return true;
+}
+
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SETUP()                                           \
+  InitializeVM();                                         \
+  v8::HandleScope scope;                                  \
+  byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
+  Assembler assm(Isolate::Current(), buffer, 4*1024);     \
+  bool failure = false;
+
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+  { \
+    int pc_offset = assm.pc_offset(); \
+    byte *progcounter = &buffer[pc_offset]; \
+    assm.asm_; \
+    if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+  }
+
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN() \
+if (failure) { \
+    V8_Fatal(__FILE__, __LINE__, "MIPS Disassembler tests failed.\n"); \
+  }
+
+
+TEST(Type0) {
+  SETUP();
+
+  COMPARE(addu(a0, a1, a2),
+          "00a62021       addu    a0, a1, a2");
+  COMPARE(addu(t2, t3, t4),
+          "016c5021       addu    t2, t3, t4");
+  COMPARE(addu(v0, v1, s0),
+          "00701021       addu    v0, v1, s0");
+
+  COMPARE(subu(a0, a1, a2),
+          "00a62023       subu    a0, a1, a2");
+  COMPARE(subu(t2, t3, t4),
+          "016c5023       subu    t2, t3, t4");
+  COMPARE(subu(v0, v1, s0),
+          "00701023       subu    v0, v1, s0");
+
+  COMPARE(mult(a0, a1),
+          "00850018       mult    a0, a1");
+  COMPARE(mult(t2, t3),
+          "014b0018       mult    t2, t3");
+  COMPARE(mult(v0, v1),
+          "00430018       mult    v0, v1");
+
+  COMPARE(multu(a0, a1),
+          "00850019       multu   a0, a1");
+  COMPARE(multu(t2, t3),
+          "014b0019       multu   t2, t3");
+  COMPARE(multu(v0, v1),
+          "00430019       multu   v0, v1");
+
+  COMPARE(div(a0, a1),
+          "0085001a       div     a0, a1");
+  COMPARE(div(t2, t3),
+          "014b001a       div     t2, t3");
+  COMPARE(div(v0, v1),
+          "0043001a       div     v0, v1");
+
+  COMPARE(divu(a0, a1),
+          "0085001b       divu    a0, a1");
+  COMPARE(divu(t2, t3),
+          "014b001b       divu    t2, t3");
+  COMPARE(divu(v0, v1),
+          "0043001b       divu    v0, v1");
+
+  COMPARE(mul(a0, a1, a2),
+          "70a62002       mul     a0, a1, a2");
+  COMPARE(mul(t2, t3, t4),
+          "716c5002       mul     t2, t3, t4");
+  COMPARE(mul(v0, v1, s0),
+          "70701002       mul     v0, v1, s0");
+
+  COMPARE(addiu(a0, a1, 0x0),
+          "24a40000       addiu   a0, a1, 0");
+  COMPARE(addiu(s0, s1, 32767),
+          "26307fff       addiu   s0, s1, 32767");
+  COMPARE(addiu(t2, t3, -32768),
+          "256a8000       addiu   t2, t3, -32768");
+  COMPARE(addiu(v0, v1, -1),
+          "2462ffff       addiu   v0, v1, -1");
+
+  COMPARE(and_(a0, a1, a2),
+          "00a62024       and     a0, a1, a2");
+  COMPARE(and_(s0, s1, s2),
+          "02328024       and     s0, s1, s2");
+  COMPARE(and_(t2, t3, t4),
+          "016c5024       and     t2, t3, t4");
+  COMPARE(and_(v0, v1, a2),
+          "00661024       and     v0, v1, a2");
+
+  COMPARE(or_(a0, a1, a2),
+          "00a62025       or      a0, a1, a2");
+  COMPARE(or_(s0, s1, s2),
+          "02328025       or      s0, s1, s2");
+  COMPARE(or_(t2, t3, t4),
+          "016c5025       or      t2, t3, t4");
+  COMPARE(or_(v0, v1, a2),
+          "00661025       or      v0, v1, a2");
+
+  COMPARE(xor_(a0, a1, a2),
+          "00a62026       xor     a0, a1, a2");
+  COMPARE(xor_(s0, s1, s2),
+          "02328026       xor     s0, s1, s2");
+  COMPARE(xor_(t2, t3, t4),
+          "016c5026       xor     t2, t3, t4");
+  COMPARE(xor_(v0, v1, a2),
+          "00661026       xor     v0, v1, a2");
+
+  COMPARE(nor(a0, a1, a2),
+          "00a62027       nor     a0, a1, a2");
+  COMPARE(nor(s0, s1, s2),
+          "02328027       nor     s0, s1, s2");
+  COMPARE(nor(t2, t3, t4),
+          "016c5027       nor     t2, t3, t4");
+  COMPARE(nor(v0, v1, a2),
+          "00661027       nor     v0, v1, a2");
+
+  COMPARE(andi(a0, a1, 0x1),
+          "30a40001       andi    a0, a1, 0x1");
+  COMPARE(andi(v0, v1, 0xffff),
+          "3062ffff       andi    v0, v1, 0xffff");
+
+  COMPARE(ori(a0, a1, 0x1),
+          "34a40001       ori     a0, a1, 0x1");
+  COMPARE(ori(v0, v1, 0xffff),
+          "3462ffff       ori     v0, v1, 0xffff");
+
+  COMPARE(xori(a0, a1, 0x1),
+          "38a40001       xori    a0, a1, 0x1");
+  COMPARE(xori(v0, v1, 0xffff),
+          "3862ffff       xori    v0, v1, 0xffff");
+
+  COMPARE(lui(a0, 0x1),
+          "3c040001       lui     a0, 0x1");
+  COMPARE(lui(v0, 0xffff),
+          "3c02ffff       lui     v0, 0xffff");
+
+  COMPARE(sll(a0, a1, 0),
+          "00052000       sll     a0, a1, 0");
+  COMPARE(sll(s0, s1, 8),
+          "00118200       sll     s0, s1, 8");
+  COMPARE(sll(t2, t3, 24),
+          "000b5600       sll     t2, t3, 24");
+  COMPARE(sll(v0, v1, 31),
+          "000317c0       sll     v0, v1, 31");
+
+  COMPARE(sllv(a0, a1, a2),
+          "00c52004       sllv    a0, a1, a2");
+  COMPARE(sllv(s0, s1, s2),
+          "02518004       sllv    s0, s1, s2");
+  COMPARE(sllv(t2, t3, t4),
+          "018b5004       sllv    t2, t3, t4");
+  COMPARE(sllv(v0, v1, fp),
+          "03c31004       sllv    v0, v1, fp");
+
+  COMPARE(srl(a0, a1, 0),
+          "00052002       srl     a0, a1, 0");
+  COMPARE(srl(s0, s1, 8),
+          "00118202       srl     s0, s1, 8");
+  COMPARE(srl(t2, t3, 24),
+          "000b5602       srl     t2, t3, 24");
+  COMPARE(srl(v0, v1, 31),
+          "000317c2       srl     v0, v1, 31");
+
+  COMPARE(srlv(a0, a1, a2),
+          "00c52006       srlv    a0, a1, a2");
+  COMPARE(srlv(s0, s1, s2),
+          "02518006       srlv    s0, s1, s2");
+  COMPARE(srlv(t2, t3, t4),
+          "018b5006       srlv    t2, t3, t4");
+  COMPARE(srlv(v0, v1, fp),
+          "03c31006       srlv    v0, v1, fp");
+
+  COMPARE(sra(a0, a1, 0),
+          "00052003       sra     a0, a1, 0");
+  COMPARE(sra(s0, s1, 8),
+          "00118203       sra     s0, s1, 8");
+  COMPARE(sra(t2, t3, 24),
+          "000b5603       sra     t2, t3, 24");
+  COMPARE(sra(v0, v1, 31),
+          "000317c3       sra     v0, v1, 31");
+
+  COMPARE(srav(a0, a1, a2),
+          "00c52007       srav    a0, a1, a2");
+  COMPARE(srav(s0, s1, s2),
+          "02518007       srav    s0, s1, s2");
+  COMPARE(srav(t2, t3, t4),
+          "018b5007       srav    t2, t3, t4");
+  COMPARE(srav(v0, v1, fp),
+          "03c31007       srav    v0, v1, fp");
+
+  if (mips32r2) {
+    COMPARE(rotr(a0, a1, 0),
+            "00252002       rotr    a0, a1, 0");
+    COMPARE(rotr(s0, s1, 8),
+            "00318202       rotr    s0, s1, 8");
+    COMPARE(rotr(t2, t3, 24),
+            "002b5602       rotr    t2, t3, 24");
+    COMPARE(rotr(v0, v1, 31),
+            "002317c2       rotr    v0, v1, 31");
+
+    COMPARE(rotrv(a0, a1, a2),
+            "00c52046       rotrv   a0, a1, a2");
+    COMPARE(rotrv(s0, s1, s2),
+            "02518046       rotrv   s0, s1, s2");
+    COMPARE(rotrv(t2, t3, t4),
+            "018b5046       rotrv   t2, t3, t4");
+    COMPARE(rotrv(v0, v1, fp),
+            "03c31046       rotrv   v0, v1, fp");
+  }
+
+  COMPARE(break_(0),
+          "0000000d       break, code: 0x00000 (0)");
+  COMPARE(break_(261120),
+          "00ff000d       break, code: 0x3fc00 (261120)");
+  COMPARE(break_(1047552),
+          "03ff000d       break, code: 0xffc00 (1047552)");
+
+  COMPARE(tge(a0, a1, 0),
+          "00850030       tge     a0, a1, code: 0x000");
+  COMPARE(tge(s0, s1, 1023),
+          "0211fff0       tge     s0, s1, code: 0x3ff");
+  COMPARE(tgeu(a0, a1, 0),
+          "00850031       tgeu    a0, a1, code: 0x000");
+  COMPARE(tgeu(s0, s1, 1023),
+          "0211fff1       tgeu    s0, s1, code: 0x3ff");
+  COMPARE(tlt(a0, a1, 0),
+          "00850032       tlt     a0, a1, code: 0x000");
+  COMPARE(tlt(s0, s1, 1023),
+          "0211fff2       tlt     s0, s1, code: 0x3ff");
+  COMPARE(tltu(a0, a1, 0),
+          "00850033       tltu    a0, a1, code: 0x000");
+  COMPARE(tltu(s0, s1, 1023),
+          "0211fff3       tltu    s0, s1, code: 0x3ff");
+  COMPARE(teq(a0, a1, 0),
+          "00850034       teq     a0, a1, code: 0x000");
+  COMPARE(teq(s0, s1, 1023),
+          "0211fff4       teq     s0, s1, code: 0x3ff");
+  COMPARE(tne(a0, a1, 0),
+          "00850036       tne     a0, a1, code: 0x000");
+  COMPARE(tne(s0, s1, 1023),
+          "0211fff6       tne     s0, s1, code: 0x3ff");
+
+  COMPARE(mfhi(a0),
+          "00002010       mfhi    a0");
+  COMPARE(mfhi(s2),
+          "00009010       mfhi    s2");
+  COMPARE(mfhi(t4),
+          "00006010       mfhi    t4");
+  COMPARE(mfhi(v1),
+          "00001810       mfhi    v1");
+  COMPARE(mflo(a0),
+          "00002012       mflo    a0");
+  COMPARE(mflo(s2),
+          "00009012       mflo    s2");
+  COMPARE(mflo(t4),
+          "00006012       mflo    t4");
+  COMPARE(mflo(v1),
+          "00001812       mflo    v1");
+
+  COMPARE(slt(a0, a1, a2),
+          "00a6202a       slt     a0, a1, a2");
+  COMPARE(slt(s0, s1, s2),
+          "0232802a       slt     s0, s1, s2");
+  COMPARE(slt(t2, t3, t4),
+          "016c502a       slt     t2, t3, t4");
+  COMPARE(slt(v0, v1, a2),
+          "0066102a       slt     v0, v1, a2");
+  COMPARE(sltu(a0, a1, a2),
+          "00a6202b       sltu    a0, a1, a2");
+  COMPARE(sltu(s0, s1, s2),
+          "0232802b       sltu    s0, s1, s2");
+  COMPARE(sltu(t2, t3, t4),
+          "016c502b       sltu    t2, t3, t4");
+  COMPARE(sltu(v0, v1, a2),
+          "0066102b       sltu    v0, v1, a2");
+
+  COMPARE(slti(a0, a1, 0),
+          "28a40000       slti    a0, a1, 0");
+  COMPARE(slti(s0, s1, 32767),
+          "2a307fff       slti    s0, s1, 32767");
+  COMPARE(slti(t2, t3, -32768),
+          "296a8000       slti    t2, t3, -32768");
+  COMPARE(slti(v0, v1, -1),
+          "2862ffff       slti    v0, v1, -1");
+  COMPARE(sltiu(a0, a1, 0),
+          "2ca40000       sltiu   a0, a1, 0");
+  COMPARE(sltiu(s0, s1, 32767),
+          "2e307fff       sltiu   s0, s1, 32767");
+  COMPARE(sltiu(t2, t3, -32768),
+          "2d6a8000       sltiu   t2, t3, -32768");
+  COMPARE(sltiu(v0, v1, -1),
+          "2c62ffff       sltiu   v0, v1, -1");
+
+  COMPARE(movz(a0, a1, a2),
+          "00a6200a       movz    a0, a1, a2");
+  COMPARE(movz(s0, s1, s2),
+          "0232800a       movz    s0, s1, s2");
+  COMPARE(movz(t2, t3, t4),
+          "016c500a       movz    t2, t3, t4");
+  COMPARE(movz(v0, v1, a2),
+          "0066100a       movz    v0, v1, a2");
+  COMPARE(movn(a0, a1, a2),
+          "00a6200b       movn    a0, a1, a2");
+  COMPARE(movn(s0, s1, s2),
+          "0232800b       movn    s0, s1, s2");
+  COMPARE(movn(t2, t3, t4),
+          "016c500b       movn    t2, t3, t4");
+  COMPARE(movn(v0, v1, a2),
+          "0066100b       movn    v0, v1, a2");
+
+  COMPARE(movt(a0, a1, 1),
+          "00a52001       movt    a0, a1, 1");
+  COMPARE(movt(s0, s1, 2),
+          "02298001       movt    s0, s1, 2");
+  COMPARE(movt(t2, t3, 3),
+          "016d5001       movt    t2, t3, 3");
+  COMPARE(movt(v0, v1, 7),
+          "007d1001       movt    v0, v1, 7");
+  COMPARE(movf(a0, a1, 0),
+          "00a02001       movf    a0, a1, 0");
+  COMPARE(movf(s0, s1, 4),
+          "02308001       movf    s0, s1, 4");
+  COMPARE(movf(t2, t3, 5),
+          "01745001       movf    t2, t3, 5");
+  COMPARE(movf(v0, v1, 6),
+          "00781001       movf    v0, v1, 6");
+
+  COMPARE(clz(a0, a1),
+          "70a42020       clz     a0, a1");
+  COMPARE(clz(s6, s7),
+          "72f6b020       clz     s6, s7");
+  COMPARE(clz(v0, v1),
+          "70621020       clz     v0, v1");
+
+  if (mips32r2) {
+    COMPARE(ins_(a0, a1, 31, 1),
+            "7ca4ffc4       ins     a0, a1, 31, 1");
+    COMPARE(ins_(s6, s7, 30, 2),
+            "7ef6ff84       ins     s6, s7, 30, 2");
+    COMPARE(ins_(v0, v1, 0, 32),
+            "7c62f804       ins     v0, v1, 0, 32");
+    COMPARE(ext_(a0, a1, 31, 1),
+            "7ca407c0       ext     a0, a1, 31, 1");
+    COMPARE(ext_(s6, s7, 30, 2),
+            "7ef60f80       ext     s6, s7, 30, 2");
+    COMPARE(ext_(v0, v1, 0, 32),
+            "7c62f800       ext     v0, v1, 0, 32");
+  }
+
+  VERIFY_RUN();
+}
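
Every expectation above pairs the raw instruction word with the mnemonic, so a
mismatch identifies both the bad encoding and the bad text. As a worked
example, the first word, 00a62021 for "addu a0, a1, a2", splits into the
standard R-type fields: opcode 0 (SPECIAL), rs = 5 (a1), rt = 6 (a2),
rd = 4 (a0), shamt = 0, funct = 0x21 (ADDU). A small field extractor showing
that split (illustration only, not part of V8's disassembler):

  #include <cstdint>
  #include <cstdio>

  struct RType { uint32_t op, rs, rt, rd, shamt, funct; };

  RType DecodeRType(uint32_t insn) {
    return { insn >> 26,        (insn >> 21) & 31, (insn >> 16) & 31,
             (insn >> 11) & 31, (insn >> 6) & 31,  insn & 63 };
  }

  int main() {
    // Prints op=0 rs=5 rt=6 rd=4 shamt=0 funct=0x21 for the addu example.
    RType r = DecodeRType(0x00a62021u);
    std::printf("op=%u rs=%u rt=%u rd=%u shamt=%u funct=0x%x\n",
                r.op, r.rs, r.rt, r.rd, r.shamt, r.funct);
    return 0;
  }
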
diff --git a/test/cctest/test-func-name-inference.cc b/test/cctest/test-func-name-inference.cc
index dea5c47..8f405b7 100644
--- a/test/cctest/test-func-name-inference.cc
+++ b/test/cctest/test-func-name-inference.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,7 +41,7 @@
 using ::v8::internal::Object;
 using ::v8::internal::Runtime;
 using ::v8::internal::Script;
-using ::v8::internal::SmartPointer;
+using ::v8::internal::SmartArrayPointer;
 using ::v8::internal::SharedFunctionInfo;
 using ::v8::internal::String;
 
@@ -96,7 +96,7 @@
       SharedFunctionInfo::cast(shared_func_info_ptr));
 
   // Verify inferred function name.
-  SmartPointer<char> inferred_name =
+  SmartArrayPointer<char> inferred_name =
       shared_func_info->inferred_name()->ToCString();
   CHECK_EQ(ref_inferred_name, *inferred_name);
 #endif  // ENABLE_DEBUGGER_SUPPORT
@@ -281,3 +281,122 @@
       "}");
   CheckFunctionName(script, "return p", "");
 }
+
+
+TEST(MultipleAssignments) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "var fun1 = fun2 = function () { return 1; }\n"
+      "var bar1 = bar2 = bar3 = function () { return 2; }\n"
+      "foo1 = foo2 = function () { return 3; }\n"
+      "baz1 = baz2 = baz3 = function () { return 4; }");
+  CheckFunctionName(script, "return 1", "fun2");
+  CheckFunctionName(script, "return 2", "bar3");
+  CheckFunctionName(script, "return 3", "foo2");
+  CheckFunctionName(script, "return 4", "baz3");
+}
+
+
+TEST(AsConstructorParameter) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "function Foo() {}\n"
+      "var foo = new Foo(function() { return 1; })\n"
+      "var bar = new Foo(function() { return 2; }, function() { return 3; })");
+  CheckFunctionName(script, "return 1", "");
+  CheckFunctionName(script, "return 2", "");
+  CheckFunctionName(script, "return 3", "");
+}
+
+
+TEST(FactoryHashmap) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "function createMyObj() {\n"
+      "  var obj = {};\n"
+      "  obj[\"method1\"] = function() { return 1; }\n"
+      "  obj[\"method2\"] = function() { return 2; }\n"
+      "  return obj;\n"
+      "}");
+  CheckFunctionName(script, "return 1", "obj.method1");
+  CheckFunctionName(script, "return 2", "obj.method2");
+}
+
+
+TEST(FactoryHashmapVariable) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "function createMyObj() {\n"
+      "  var obj = {};\n"
+      "  var methodName = \"method1\";\n"
+      "  obj[methodName] = function() { return 1; }\n"
+      "  methodName = \"method2\";\n"
+      "  obj[methodName] = function() { return 2; }\n"
+      "  return obj;\n"
+      "}");
+  // Can't infer function names statically.
+  CheckFunctionName(script, "return 1", "obj.(anonymous function)");
+  CheckFunctionName(script, "return 2", "obj.(anonymous function)");
+}
+
+
+TEST(FactoryHashmapConditional) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "function createMyObj() {\n"
+      "  var obj = {};\n"
+      "  obj[0 ? \"method1\" : \"method2\"] = function() { return 1; }\n"
+      "  return obj;\n"
+      "}");
+  // Can't infer the function name statically.
+  CheckFunctionName(script, "return 1", "obj.(anonymous function)");
+}
+
+
+TEST(GlobalAssignmentAndCall) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "var Foo = function() {\n"
+      "  return 1;\n"
+      "}();\n"
+      "var Baz = Bar = function() {\n"
+      "  return 2;\n"
+      "}");
+  // The inferred name is empty, because this is an assignment of a result.
+  CheckFunctionName(script, "return 1", "");
+  // See MultipleAssignments test.
+  CheckFunctionName(script, "return 2", "Bar");
+}
+
+
+TEST(AssignmentAndCall) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "(function Enclosing() {\n"
+      "  var Foo;\n"
+      "  Foo = function() {\n"
+      "    return 1;\n"
+      "  }();\n"
+      "  var Baz = Bar = function() {\n"
+      "    return 2;\n"
+      "  }\n"
+      "})();");
+  // The inferred name is empty, because this is an assignment of a result.
+  CheckFunctionName(script, "return 1", "");
+  // See MultipleAssignments test.
+  CheckFunctionName(script, "return 2", "Enclosing.Bar");
+}
diff --git a/test/cctest/test-hashing.cc b/test/cctest/test-hashing.cc
new file mode 100644
index 0000000..a626510
--- /dev/null
+++ b/test/cctest/test-hashing.cc
@@ -0,0 +1,260 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "factory.h"
+#include "macro-assembler.h"
+#include "cctest.h"
+#include "code-stubs.h"
+#include "objects.h"
+
+#ifdef USE_SIMULATOR
+#include "simulator.h"
+#endif
+
+using namespace v8::internal;
+
+
+typedef uint32_t (*HASH_FUNCTION)();
+
+static v8::Persistent<v8::Context> env;
+
+#define __ masm->
+
+
+void generate(MacroAssembler* masm, i::Vector<const char> string) {
+  // GenerateHashInit takes the first character as an argument so it can't
+  // handle the zero length string.
+  ASSERT(string.length() > 0);
+#ifdef V8_TARGET_ARCH_IA32
+  __ push(ebx);
+  __ push(ecx);
+  __ mov(eax, Immediate(0));
+  __ mov(ebx, Immediate(string.at(0)));
+  StringHelper::GenerateHashInit(masm, eax, ebx, ecx);
+  for (int i = 1; i < string.length(); i++) {
+    __ mov(ebx, Immediate(string.at(i)));
+    StringHelper::GenerateHashAddCharacter(masm, eax, ebx, ecx);
+  }
+  StringHelper::GenerateHashGetHash(masm, eax, ecx);
+  __ pop(ecx);
+  __ pop(ebx);
+  __ Ret();
+#elif V8_TARGET_ARCH_X64
+  __ push(kRootRegister);
+  __ InitializeRootRegister();
+  __ push(rbx);
+  __ push(rcx);
+  __ movq(rax, Immediate(0));
+  __ movq(rbx, Immediate(string.at(0)));
+  StringHelper::GenerateHashInit(masm, rax, rbx, rcx);
+  for (int i = 1; i < string.length(); i++) {
+    __ movq(rbx, Immediate(string.at(i)));
+    StringHelper::GenerateHashAddCharacter(masm, rax, rbx, rcx);
+  }
+  StringHelper::GenerateHashGetHash(masm, rax, rcx);
+  __ pop(rcx);
+  __ pop(rbx);
+  __ pop(kRootRegister);
+  __ Ret();
+#elif V8_TARGET_ARCH_ARM
+  __ push(kRootRegister);
+  __ InitializeRootRegister();
+
+  __ mov(r0, Operand(0));
+  __ mov(ip, Operand(string.at(0)));
+  StringHelper::GenerateHashInit(masm, r0, ip);
+  for (int i = 1; i < string.length(); i++) {
+    __ mov(ip, Operand(string.at(i)));
+    StringHelper::GenerateHashAddCharacter(masm, r0, ip);
+  }
+  StringHelper::GenerateHashGetHash(masm, r0);
+  __ pop(kRootRegister);
+  __ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_MIPS
+  __ push(kRootRegister);
+  __ InitializeRootRegister();
+
+  __ li(v0, Operand(0));
+  __ li(t1, Operand(string.at(0)));
+  StringHelper::GenerateHashInit(masm, v0, t1);
+  for (int i = 1; i < string.length(); i++) {
+    __ li(t1, Operand(string.at(i)));
+    StringHelper::GenerateHashAddCharacter(masm, v0, t1);
+  }
+  StringHelper::GenerateHashGetHash(masm, v0);
+  __ pop(kRootRegister);
+  __ jr(ra);
+  __ nop();
+#endif
+}
+
+
+void generate(MacroAssembler* masm, uint32_t key) {
+#ifdef V8_TARGET_ARCH_IA32
+  __ push(ebx);
+  __ mov(eax, Immediate(key));
+  __ GetNumberHash(eax, ebx);
+  __ pop(ebx);
+  __ Ret();
+#elif V8_TARGET_ARCH_X64
+  __ push(kRootRegister);
+  __ InitializeRootRegister();
+  __ push(rbx);
+  __ movq(rax, Immediate(key));
+  __ GetNumberHash(rax, rbx);
+  __ pop(rbx);
+  __ pop(kRootRegister);
+  __ Ret();
+#elif V8_TARGET_ARCH_ARM
+  __ push(kRootRegister);
+  __ InitializeRootRegister();
+  __ mov(r0, Operand(key));
+  __ GetNumberHash(r0, ip);
+  __ pop(kRootRegister);
+  __ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_MIPS
+  __ push(kRootRegister);
+  __ InitializeRootRegister();
+  __ li(v0, Operand(key));
+  __ GetNumberHash(v0, t1);
+  __ pop(kRootRegister);
+  __ jr(ra);
+  __ nop();
+#endif
+}
+
+
+void check(i::Vector<const char> string) {
+  v8::HandleScope scope;
+  v8::internal::byte buffer[2048];
+  MacroAssembler masm(Isolate::Current(), buffer, sizeof buffer);
+
+  generate(&masm, string);
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  Code* code = Code::cast(HEAP->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+  CHECK(code->IsCode());
+
+  HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
+  Handle<String> v8_string = FACTORY->NewStringFromAscii(string);
+  v8_string->set_hash_field(String::kEmptyHashField);
+#ifdef USE_SIMULATOR
+  uint32_t codegen_hash =
+      reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+#else
+  uint32_t codegen_hash = hash();
+#endif
+  uint32_t runtime_hash = v8_string->Hash();
+  CHECK(runtime_hash == codegen_hash);
+}
+
+
+void check(uint32_t key) {
+  v8::HandleScope scope;
+  v8::internal::byte buffer[2048];
+  MacroAssembler masm(Isolate::Current(), buffer, sizeof buffer);
+
+  generate(&masm, key);
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  Code* code = Code::cast(HEAP->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+  CHECK(code->IsCode());
+
+  HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
+#ifdef USE_SIMULATOR
+  uint32_t codegen_hash =
+      reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+#else
+  uint32_t codegen_hash = hash();
+#endif
+
+  uint32_t runtime_hash = ComputeIntegerHash(
+      key,
+      Isolate::Current()->heap()->HashSeed());
+  CHECK(runtime_hash == codegen_hash);
+}
+
+
+void check_twochars(char a, char b) {
+  char ab[2] = {a, b};
+  check(i::Vector<const char>(ab, 2));
+}
+
+
+static uint32_t PseudoRandom(uint32_t i, uint32_t j) {
+  return ~(~((i * 781) ^ (j * 329)));
+}
+
+
+TEST(StringHash) {
+  if (env.IsEmpty()) env = v8::Context::New();
+  for (int a = 0; a < String::kMaxAsciiCharCode; a++) {
+    // Numbers are hashed differently.
+    if (a >= '0' && a <= '9') continue;
+    for (int b = 0; b < String::kMaxAsciiCharCode; b++) {
+      if (b >= '0' && b <= '9') continue;
+      check_twochars(static_cast<char>(a), static_cast<char>(b));
+    }
+  }
+  check(i::Vector<const char>("*",       1));
+  check(i::Vector<const char>(".zZ",     3));
+  check(i::Vector<const char>("muc",     3));
+  check(i::Vector<const char>("(>'_')>", 7));
+  check(i::Vector<const char>("-=[ vee eight ftw ]=-", 21));
+}
+
+
+TEST(NumberHash) {
+  if (env.IsEmpty()) env = v8::Context::New();
+
+  // Some specific numbers
+  for (uint32_t key = 0; key < 42; key += 7) {
+    check(key);
+  }
+
+  // Some pseudo-random numbers
+  static const uint32_t kLimit = 1000;
+  for (uint32_t i = 0; i < 5; i++) {
+    for (uint32_t j = 0; j < 5; j++) {
+      check(PseudoRandom(i, j) % kLimit);
+    }
+  }
+}
+
+#undef __
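
Both check() overloads only pass if the hand-written stub and the C++ runtime
agree bit for bit, which is the point of the test: GenerateHashInit,
GenerateHashAddCharacter and GenerateHashGetHash have to reproduce the string
hash that String::Hash() computes, and GetNumberHash has to reproduce
ComputeIntegerHash(). For orientation, the string hash follows the familiar
add/shift/xor running-hash pattern split into the same three phases; the
sketch below shows that pattern only, and the exact constants, seeding and
finalization used by V8 are not shown in this diff and may differ.

  #include <cstdint>

  // Generic one-at-a-time-style running hash, split into the Init /
  // AddCharacter / GetHash phases the generated stubs mirror. Constants are
  // illustrative, not necessarily V8's.
  uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
    hash += c;
    hash += hash << 10;
    hash ^= hash >> 6;
    return hash;
  }

  uint32_t HashInit(uint32_t first_char) {
    // The stub interface takes the first character separately, which is why
    // generate() above excludes the zero-length string.
    return HashAddCharacter(0, first_char);
  }

  uint32_t HashGetHash(uint32_t hash) {
    hash += hash << 3;
    hash ^= hash >> 11;
    hash += hash << 15;
    return hash;
  }
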
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index bd08d4c..6c2afd4 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -1,388 +1,15 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 //
 // Tests for heap profiler
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #include "v8.h"
+
+#include "cctest.h"
 #include "heap-profiler.h"
 #include "snapshot.h"
-#include "string-stream.h"
-#include "cctest.h"
-#include "zone-inl.h"
+#include "utils-inl.h"
 #include "../include/v8-profiler.h"
 
-namespace i = v8::internal;
-using i::ClustersCoarser;
-using i::JSObjectsCluster;
-using i::JSObjectsRetainerTree;
-using i::JSObjectsClusterTree;
-using i::RetainerHeapProfile;
-
-
-namespace {
-
-class ConstructorHeapProfileTestHelper : public i::ConstructorHeapProfile {
- public:
-  ConstructorHeapProfileTestHelper()
-    : i::ConstructorHeapProfile(),
-      f_name_(FACTORY->NewStringFromAscii(i::CStrVector("F"))),
-      f_count_(0) {
-  }
-
-  void Call(const JSObjectsCluster& cluster,
-            const i::NumberAndSizeInfo& number_and_size) {
-    if (f_name_->Equals(cluster.constructor())) {
-      CHECK_EQ(f_count_, 0);
-      f_count_ = number_and_size.number();
-      CHECK_GT(f_count_, 0);
-    }
-  }
-
-  int f_count() { return f_count_; }
-
- private:
-  i::Handle<i::String> f_name_;
-  int f_count_;
-};
-
-}  // namespace
-
-
-TEST(ConstructorProfile) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  CompileRun(
-      "function F() {}  // A constructor\n"
-      "var f1 = new F();\n"
-      "var f2 = new F();\n");
-
-  ConstructorHeapProfileTestHelper cons_profile;
-  i::AssertNoAllocation no_alloc;
-  i::HeapIterator iterator;
-  for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
-    cons_profile.CollectStats(obj);
-  CHECK_EQ(0, cons_profile.f_count());
-  cons_profile.PrintStats();
-  CHECK_EQ(2, cons_profile.f_count());
-}
-
-
-static JSObjectsCluster AddHeapObjectToTree(JSObjectsRetainerTree* tree,
-                                            i::String* constructor,
-                                            int instance,
-                                            JSObjectsCluster* ref1 = NULL,
-                                            JSObjectsCluster* ref2 = NULL,
-                                            JSObjectsCluster* ref3 = NULL) {
-  JSObjectsCluster o(constructor, reinterpret_cast<i::Object*>(instance));
-  JSObjectsClusterTree* o_tree = new JSObjectsClusterTree();
-  JSObjectsClusterTree::Locator o_loc;
-  if (ref1 != NULL) o_tree->Insert(*ref1, &o_loc);
-  if (ref2 != NULL) o_tree->Insert(*ref2, &o_loc);
-  if (ref3 != NULL) o_tree->Insert(*ref3, &o_loc);
-  JSObjectsRetainerTree::Locator loc;
-  tree->Insert(o, &loc);
-  loc.set_value(o_tree);
-  return o;
-}
-
-
-static void AddSelfReferenceToTree(JSObjectsRetainerTree* tree,
-                                   JSObjectsCluster* self_ref) {
-  JSObjectsRetainerTree::Locator loc;
-  CHECK(tree->Find(*self_ref, &loc));
-  JSObjectsClusterTree::Locator o_loc;
-  CHECK_NE(NULL, loc.value());
-  loc.value()->Insert(*self_ref, &o_loc);
-}
-
-
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source,
-                                     const JSObjectsCluster& expected,
-                                     const char* value_source,
-                                     const JSObjectsCluster& value) {
-  if (JSObjectsCluster::Compare(expected, value) != 0) {
-    i::HeapStringAllocator allocator;
-    i::StringStream stream(&allocator);
-    stream.Add("#  Expected: ");
-    expected.DebugPrint(&stream);
-    stream.Add("\n#  Found: ");
-    value.DebugPrint(&stream);
-    V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n%s",
-             expected_source, value_source,
-             *stream.ToCString());
-  }
-}
-
-
-static inline void CheckNonEqualsHelper(const char* file, int line,
-                                     const char* expected_source,
-                                     const JSObjectsCluster& expected,
-                                     const char* value_source,
-                                     const JSObjectsCluster& value) {
-  if (JSObjectsCluster::Compare(expected, value) == 0) {
-    i::HeapStringAllocator allocator;
-    i::StringStream stream(&allocator);
-    stream.Add("# !Expected: ");
-    expected.DebugPrint(&stream);
-    stream.Add("\n#  Found: ");
-    value.DebugPrint(&stream);
-    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n%s",
-             expected_source, value_source,
-             *stream.ToCString());
-  }
-}
-
-
-TEST(ClustersCoarserSimple) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
-
-  JSObjectsRetainerTree tree;
-  JSObjectsCluster function(HEAP->function_class_symbol());
-  JSObjectsCluster a(*FACTORY->NewStringFromAscii(i::CStrVector("A")));
-  JSObjectsCluster b(*FACTORY->NewStringFromAscii(i::CStrVector("B")));
-
-  // o1 <- Function
-  JSObjectsCluster o1 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100, &function);
-  // o2 <- Function
-  JSObjectsCluster o2 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x200, &function);
-  // o3 <- A, B
-  JSObjectsCluster o3 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x300, &a, &b);
-  // o4 <- B, A
-  JSObjectsCluster o4 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x400, &b, &a);
-  // o5 <- A, B, Function
-  JSObjectsCluster o5 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x500,
-                          &a, &b, &function);
-
-  ClustersCoarser coarser;
-  coarser.Process(&tree);
-
-  CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
-  CHECK_EQ(coarser.GetCoarseEquivalent(o3), coarser.GetCoarseEquivalent(o4));
-  CHECK_NE(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o3));
-  CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o5));
-}
-
-
-TEST(ClustersCoarserMultipleConstructors) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
-
-  JSObjectsRetainerTree tree;
-  JSObjectsCluster function(HEAP->function_class_symbol());
-
-  // o1 <- Function
-  JSObjectsCluster o1 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100, &function);
-  // a1 <- Function
-  JSObjectsCluster a1 =
-      AddHeapObjectToTree(&tree, HEAP->Array_symbol(), 0x1000, &function);
-  // o2 <- Function
-  JSObjectsCluster o2 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x200, &function);
-  // a2 <- Function
-  JSObjectsCluster a2 =
-      AddHeapObjectToTree(&tree, HEAP->Array_symbol(), 0x2000, &function);
-
-  ClustersCoarser coarser;
-  coarser.Process(&tree);
-
-  CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
-  CHECK_EQ(coarser.GetCoarseEquivalent(a1), coarser.GetCoarseEquivalent(a2));
-}
-
-
-TEST(ClustersCoarserPathsTraversal) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
-
-  JSObjectsRetainerTree tree;
-
-  // On the following graph:
-  //
-  // p
-  //   <- o21 <- o11 <-
-  // q                  o
-  //   <- o22 <- o12 <-
-  // r
-  //
-  // we expect that coarser will deduce equivalences: p ~ q ~ r,
-  // o21 ~ o22, and o11 ~ o12.
-
-  JSObjectsCluster o =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100);
-  JSObjectsCluster o11 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x110, &o);
-  JSObjectsCluster o12 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x120, &o);
-  JSObjectsCluster o21 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x210, &o11);
-  JSObjectsCluster o22 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x220, &o12);
-  JSObjectsCluster p =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x300, &o21);
-  JSObjectsCluster q =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x310, &o21, &o22);
-  JSObjectsCluster r =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x320, &o22);
-
-  ClustersCoarser coarser;
-  coarser.Process(&tree);
-
-  CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
-  CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o11));
-  CHECK_EQ(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o12));
-  CHECK_EQ(coarser.GetCoarseEquivalent(o21), coarser.GetCoarseEquivalent(o22));
-  CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o21));
-  CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
-  CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
-  CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
-  CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(p));
-  CHECK_NE(coarser.GetCoarseEquivalent(o21), coarser.GetCoarseEquivalent(p));
-}
-
-
-TEST(ClustersCoarserSelf) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
-
-  JSObjectsRetainerTree tree;
-
-  // On the following graph:
-  //
-  // p (self-referencing)
-  //          <- o1     <-
-  // q (self-referencing)   o
-  //          <- o2     <-
-  // r (self-referencing)
-  //
-  // we expect that coarser will deduce equivalences: p ~ q ~ r, o1 ~ o2;
-
-  JSObjectsCluster o =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100);
-  JSObjectsCluster o1 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x110, &o);
-  JSObjectsCluster o2 =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x120, &o);
-  JSObjectsCluster p =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x300, &o1);
-  AddSelfReferenceToTree(&tree, &p);
-  JSObjectsCluster q =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x310, &o1, &o2);
-  AddSelfReferenceToTree(&tree, &q);
-  JSObjectsCluster r =
-      AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x320, &o2);
-  AddSelfReferenceToTree(&tree, &r);
-
-  ClustersCoarser coarser;
-  coarser.Process(&tree);
-
-  CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
-  CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o1));
-  CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
-  CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
-  CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
-  CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
-  CHECK_NE(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(p));
-}
-
-
-namespace {
-
-class RetainerProfilePrinter : public RetainerHeapProfile::Printer {
- public:
-  RetainerProfilePrinter() : stream_(&allocator_), lines_(100) {}
-
-  void PrintRetainers(const JSObjectsCluster& cluster,
-                      const i::StringStream& retainers) {
-    cluster.Print(&stream_);
-    stream_.Add("%s", *(retainers.ToCString()));
-    stream_.Put('\0');
-  }
-
-  const char* GetRetainers(const char* constructor) {
-    FillLines();
-    const size_t cons_len = strlen(constructor);
-    for (int i = 0; i < lines_.length(); ++i) {
-      if (strncmp(constructor, lines_[i], cons_len) == 0 &&
-          lines_[i][cons_len] == ',') {
-        return lines_[i] + cons_len + 1;
-      }
-    }
-    return NULL;
-  }
-
- private:
-  void FillLines() {
-    if (lines_.length() > 0) return;
-    stream_.Put('\0');
-    stream_str_ = stream_.ToCString();
-    const char* pos = *stream_str_;
-    while (pos != NULL && *pos != '\0') {
-      lines_.Add(pos);
-      pos = strchr(pos, '\0');
-      if (pos != NULL) ++pos;
-    }
-  }
-
-  i::HeapStringAllocator allocator_;
-  i::StringStream stream_;
-  i::SmartPointer<const char> stream_str_;
-  i::List<const char*> lines_;
-};
-
-}  // namespace
-
-
-TEST(RetainerProfile) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  CompileRun(
-      "function A() {}\n"
-      "function B(x) { this.x = x; }\n"
-      "function C(x) { this.x1 = x; this.x2 = x; }\n"
-      "var a = new A();\n"
-      "var b1 = new B(a), b2 = new B(a);\n"
-      "var c = new C(a);");
-
-  RetainerHeapProfile ret_profile;
-  i::AssertNoAllocation no_alloc;
-  i::HeapIterator iterator;
-  for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
-    ret_profile.CollectStats(obj);
-  ret_profile.CoarseAndAggregate();
-  RetainerProfilePrinter printer;
-  ret_profile.DebugPrintStats(&printer);
-  const char* retainers_of_a = printer.GetRetainers("A");
-  // The order of retainers is unspecified, so we check string length, and
-  // verify each retainer separately.
-  CHECK_EQ(i::StrLength("(global property);1,B;2,C;2"),
-           i::StrLength(retainers_of_a));
-  CHECK(strstr(retainers_of_a, "(global property);1") != NULL);
-  CHECK(strstr(retainers_of_a, "B;2") != NULL);
-  CHECK(strstr(retainers_of_a, "C;2") != NULL);
-  CHECK_EQ("(global property);2", printer.GetRetainers("B"));
-  CHECK_EQ("(global property);1", printer.GetRetainers("C"));
-}
-
-
 namespace {
 
 class NamedEntriesDetector {
@@ -414,8 +41,8 @@
   CHECK_EQ(2, snapshot->GetRoot()->GetChildrenCount());
   const v8::HeapGraphNode* global_obj =
       snapshot->GetRoot()->GetChild(0)->GetToNode();
-  CHECK_EQ("Object", const_cast<i::HeapEntry*>(
-      reinterpret_cast<const i::HeapEntry*>(global_obj))->name());
+  CHECK_EQ(0, strncmp("Object", const_cast<i::HeapEntry*>(
+      reinterpret_cast<const i::HeapEntry*>(global_obj))->name(), 6));
   return global_obj;
 }
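
The hunks below swap direct v8::String::New("...") calls for the shorter
v8_str(...) helper. That helper is not shown in this diff; presumably it is
the usual cctest.h convenience wrapper, which would amount to something like:

  // Assumed shape of the helper (not part of this diff):
  static inline v8::Local<v8::String> v8_str(const char* x) {
    return v8::String::New(x);
  }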
 
@@ -458,7 +85,7 @@
       "var b2_1 = new B2(a2), b2_2 = new B2(a2);\n"
       "var c2 = new C2(a2);");
   const v8::HeapSnapshot* snapshot_env2 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("env2"));
   i::HeapSnapshot* i_snapshot_env2 =
       const_cast<i::HeapSnapshot*>(
           reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
@@ -497,7 +124,7 @@
       "x = new X(new X(), new X());\n"
       "(function() { x.a.a = x.b; })();");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("sizes"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("sizes"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* x =
       GetProperty(global, v8::HeapGraphEdge::kShortcut, "x");
@@ -528,7 +155,7 @@
       "function A() { }\n"
       "a = new A;");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("children"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("children"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   for (int i = 0, count = global->GetChildrenCount(); i < count; ++i) {
     const v8::HeapGraphEdge* prop = global->GetChild(i);
@@ -554,7 +181,7 @@
       "var anonymous = (function() { return function() { return 0; } })();\n"
       "compiled(1)");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("code"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("code"));
 
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* compiled =
@@ -616,7 +243,7 @@
       "a = 1;    // a is Smi\n"
       "b = 2.5;  // b is HeapNumber");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("numbers"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("numbers"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_EQ(NULL, GetProperty(global, v8::HeapGraphEdge::kShortcut, "a"));
   const v8::HeapGraphNode* b =
@@ -638,7 +265,7 @@
   global->SetInternalField(0, v8_num(17));
   global->SetInternalField(1, obj);
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("internals"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("internals"));
   const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
   // The first reference will not be present, because it's a Smi.
   CHECK_EQ(NULL, GetProperty(global_node, v8::HeapGraphEdge::kInternal, "0"));
@@ -665,12 +292,12 @@
       "var a = new A();\n"
       "var b = new B(a);");
   const v8::HeapSnapshot* snapshot1 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("s1"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
 
   HEAP->CollectAllGarbage(true);  // Enforce compaction.
 
   const v8::HeapSnapshot* snapshot2 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("s2"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
 
   const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
   const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
@@ -715,7 +342,7 @@
   v8::HandleScope scope;
   LocalContext env;
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("s"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("s"));
   const v8::HeapGraphNode* root1 = snapshot->GetRoot();
   const_cast<i::HeapSnapshot*>(reinterpret_cast<const i::HeapSnapshot*>(
       snapshot))->GetSortedEntriesList();
@@ -724,116 +351,6 @@
 }
 
 
-static const v8::HeapGraphNode* GetChild(
-    const v8::HeapGraphNode* node,
-    v8::HeapGraphNode::Type type,
-    const char* name,
-    const v8::HeapGraphNode* after = NULL) {
-  bool ignore_child = after == NULL ? false : true;
-  for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
-    const v8::HeapGraphEdge* prop = node->GetChild(i);
-    const v8::HeapGraphNode* child = prop->GetToNode();
-    v8::String::AsciiValue child_name(child->GetName());
-    if (!ignore_child
-        && child->GetType() == type
-        && strcmp(name, *child_name) == 0)
-      return child;
-    if (after != NULL && child == after) ignore_child = false;
-  }
-  return NULL;
-}
-
-static bool IsNodeRetainedAs(const v8::HeapGraphNode* node,
-                             int element) {
-  for (int i = 0, count = node->GetRetainersCount(); i < count; ++i) {
-    const v8::HeapGraphEdge* prop = node->GetRetainer(i);
-    if (prop->GetType() == v8::HeapGraphEdge::kElement
-        && element == prop->GetName()->Int32Value())
-      return true;
-  }
-  return false;
-}
-
-TEST(AggregatedHeapSnapshot) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  CompileRun(
-      "function A() {}\n"
-      "function B(x) { this.x = x; }\n"
-      "var a = new A();\n"
-      "var b = new B(a);");
-  const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(
-          v8::String::New("agg"), v8::HeapSnapshot::kAggregated);
-  const v8::HeapGraphNode* strings = GetChild(snapshot->GetRoot(),
-                                              v8::HeapGraphNode::kHidden,
-                                              "STRING_TYPE");
-  CHECK_NE(NULL, strings);
-  CHECK_NE(0, strings->GetSelfSize());
-  CHECK_NE(0, strings->GetInstancesCount());
-  const v8::HeapGraphNode* maps = GetChild(snapshot->GetRoot(),
-                                           v8::HeapGraphNode::kHidden,
-                                           "MAP_TYPE");
-  CHECK_NE(NULL, maps);
-  CHECK_NE(0, maps->GetSelfSize());
-  CHECK_NE(0, maps->GetInstancesCount());
-
-  const v8::HeapGraphNode* a = GetChild(snapshot->GetRoot(),
-                                        v8::HeapGraphNode::kObject,
-                                        "A");
-  CHECK_NE(NULL, a);
-  CHECK_NE(0, a->GetSelfSize());
-  CHECK_EQ(1, a->GetInstancesCount());
-
-  const v8::HeapGraphNode* b = GetChild(snapshot->GetRoot(),
-                                        v8::HeapGraphNode::kObject,
-                                        "B");
-  CHECK_NE(NULL, b);
-  CHECK_NE(0, b->GetSelfSize());
-  CHECK_EQ(1, b->GetInstancesCount());
-
-  const v8::HeapGraphNode* glob_prop = GetChild(snapshot->GetRoot(),
-                                                v8::HeapGraphNode::kObject,
-                                                "(global property)",
-                                                b);
-  CHECK_NE(NULL, glob_prop);
-  CHECK_EQ(0, glob_prop->GetSelfSize());
-  CHECK_EQ(0, glob_prop->GetInstancesCount());
-  CHECK_NE(0, glob_prop->GetChildrenCount());
-
-  const v8::HeapGraphNode* a_from_glob_prop = GetChild(
-      glob_prop,
-      v8::HeapGraphNode::kObject,
-      "A");
-  CHECK_NE(NULL, a_from_glob_prop);
-  CHECK_EQ(0, a_from_glob_prop->GetSelfSize());
-  CHECK_EQ(0, a_from_glob_prop->GetInstancesCount());
-  CHECK_EQ(0, a_from_glob_prop->GetChildrenCount());  // Retains nothing.
-  CHECK(IsNodeRetainedAs(a_from_glob_prop, 1));  // (global propery) has 1 ref.
-
-  const v8::HeapGraphNode* b_with_children = GetChild(
-      snapshot->GetRoot(),
-      v8::HeapGraphNode::kObject,
-      "B",
-      b);
-  CHECK_NE(NULL, b_with_children);
-  CHECK_EQ(0, b_with_children->GetSelfSize());
-  CHECK_EQ(0, b_with_children->GetInstancesCount());
-  CHECK_NE(0, b_with_children->GetChildrenCount());
-
-  const v8::HeapGraphNode* a_from_b = GetChild(
-      b_with_children,
-      v8::HeapGraphNode::kObject,
-      "A");
-  CHECK_NE(NULL, a_from_b);
-  CHECK_EQ(0, a_from_b->GetSelfSize());
-  CHECK_EQ(0, a_from_b->GetInstancesCount());
-  CHECK_EQ(0, a_from_b->GetChildrenCount());  // Retains nothing.
-  CHECK(IsNodeRetainedAs(a_from_b, 1));  // B has 1 ref to A.
-}
-
-
 TEST(HeapEntryDominator) {
   // The graph looks like this:
   //
@@ -863,7 +380,7 @@
       "})();");
 
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("dominators"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("dominators"));
 
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
@@ -946,7 +463,7 @@
       "var a = new A(" STRING_LITERAL_FOR_TEST ");\n"
       "var b = new B(a);");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("json"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("json"));
   TestJSONStream stream;
   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
   CHECK_GT(stream.size(), 0);
@@ -957,17 +474,17 @@
   // Verify that snapshot string is valid JSON.
   AsciiResource json_res(json);
   v8::Local<v8::String> json_string = v8::String::NewExternal(&json_res);
-  env->Global()->Set(v8::String::New("json_snapshot"), json_string);
+  env->Global()->Set(v8_str("json_snapshot"), json_string);
   v8::Local<v8::Value> snapshot_parse_result = CompileRun(
       "var parsed = JSON.parse(json_snapshot); true;");
   CHECK(!snapshot_parse_result.IsEmpty());
 
   // Verify that snapshot object has required fields.
   v8::Local<v8::Object> parsed_snapshot =
-      env->Global()->Get(v8::String::New("parsed"))->ToObject();
-  CHECK(parsed_snapshot->Has(v8::String::New("snapshot")));
-  CHECK(parsed_snapshot->Has(v8::String::New("nodes")));
-  CHECK(parsed_snapshot->Has(v8::String::New("strings")));
+      env->Global()->Get(v8_str("parsed"))->ToObject();
+  CHECK(parsed_snapshot->Has(v8_str("snapshot")));
+  CHECK(parsed_snapshot->Has(v8_str("nodes")));
+  CHECK(parsed_snapshot->Has(v8_str("strings")));
 
   // Get node and edge "member" offsets.
   v8::Local<v8::Value> meta_analysis_result = CompileRun(
@@ -1019,12 +536,12 @@
   int string_obj_pos =
       static_cast<int>(string_obj_pos_val->ToNumber()->Value());
   v8::Local<v8::Object> nodes_array =
-      parsed_snapshot->Get(v8::String::New("nodes"))->ToObject();
+      parsed_snapshot->Get(v8_str("nodes"))->ToObject();
   int string_index = static_cast<int>(
       nodes_array->Get(string_obj_pos + 1)->ToNumber()->Value());
   CHECK_GT(string_index, 0);
   v8::Local<v8::Object> strings_array =
-      parsed_snapshot->Get(v8::String::New("strings"))->ToObject();
+      parsed_snapshot->Get(v8_str("strings"))->ToObject();
   v8::Local<v8::String> string = strings_array->Get(string_index)->ToString();
   v8::Local<v8::String> ref_string =
       CompileRun(STRING_LITERAL_FOR_TEST)->ToString();
@@ -1038,7 +555,7 @@
   v8::HandleScope scope;
   LocalContext env;
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("abort"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("abort"));
   TestJSONStream stream(5);
   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
   CHECK_GT(stream.size(), 0);
@@ -1046,27 +563,12 @@
 }
 
 
-// Must not crash in debug mode.
-TEST(AggregatedHeapSnapshotJSONSerialization) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(
-          v8::String::New("agg"), v8::HeapSnapshot::kAggregated);
-  TestJSONStream stream;
-  snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
-  CHECK_GT(stream.size(), 0);
-  CHECK_EQ(1, stream.eos_signaled());
-}
-
-
 TEST(HeapSnapshotGetNodeById) {
   v8::HandleScope scope;
   LocalContext env;
 
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("id"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("id"));
   const v8::HeapGraphNode* root = snapshot->GetRoot();
   CHECK_EQ(root, snapshot->GetNodeById(root->GetId()));
   for (int i = 0, count = root->GetChildrenCount(); i < count; ++i) {
@@ -1107,7 +609,7 @@
   const int snapshots_count = v8::HeapProfiler::GetSnapshotsCount();
   TestActivityControl aborting_control(3);
   const v8::HeapSnapshot* no_snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("abort"),
+      v8::HeapProfiler::TakeSnapshot(v8_str("abort"),
                                      v8::HeapSnapshot::kFull,
                                      &aborting_control);
   CHECK_EQ(NULL, no_snapshot);
@@ -1116,7 +618,7 @@
 
   TestActivityControl control(-1);  // Don't abort.
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("full"),
+      v8::HeapProfiler::TakeSnapshot(v8_str("full"),
                                      v8::HeapSnapshot::kFull,
                                      &control);
   CHECK_NE(NULL, snapshot);
@@ -1226,7 +728,7 @@
   p_CCC.SetWrapperClassId(2);
   CHECK_EQ(0, TestRetainedObjectInfo::instances.length());
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("retained"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("retained"));
 
   CHECK_EQ(3, TestRetainedObjectInfo::instances.length());
   for (int i = 0; i < TestRetainedObjectInfo::instances.length(); ++i) {
@@ -1270,12 +772,12 @@
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
   v8::HeapProfiler::DeleteAllSnapshots();
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
-  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8::String::New("1")));
+  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8_str("1")));
   CHECK_EQ(1, v8::HeapProfiler::GetSnapshotsCount());
   v8::HeapProfiler::DeleteAllSnapshots();
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
-  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8::String::New("1")));
-  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8::String::New("2")));
+  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8_str("1")));
+  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8_str("2")));
   CHECK_EQ(2, v8::HeapProfiler::GetSnapshotsCount());
   v8::HeapProfiler::DeleteAllSnapshots();
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
@@ -1288,7 +790,7 @@
 
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
   const v8::HeapSnapshot* s1 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("1"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("1"));
   CHECK_NE(NULL, s1);
   CHECK_EQ(1, v8::HeapProfiler::GetSnapshotsCount());
   unsigned uid1 = s1->GetUid();
@@ -1298,14 +800,14 @@
   CHECK_EQ(NULL, v8::HeapProfiler::FindSnapshot(uid1));
 
   const v8::HeapSnapshot* s2 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("2"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("2"));
   CHECK_NE(NULL, s2);
   CHECK_EQ(1, v8::HeapProfiler::GetSnapshotsCount());
   unsigned uid2 = s2->GetUid();
   CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
   CHECK_EQ(s2, v8::HeapProfiler::FindSnapshot(uid2));
   const v8::HeapSnapshot* s3 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("3"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("3"));
   CHECK_NE(NULL, s3);
   CHECK_EQ(2, v8::HeapProfiler::GetSnapshotsCount());
   unsigned uid3 = s3->GetUid();
@@ -1320,4 +822,182 @@
   CHECK_EQ(NULL, v8::HeapProfiler::FindSnapshot(uid3));
 }
 
-#endif  // ENABLE_LOGGING_AND_PROFILING
+
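+// The global object's name should include document.URL when one is set.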
+TEST(DocumentURL) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("document = { URL:\"abcdefgh\" };");
+
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  CHECK_EQ("Object / abcdefgh",
+           const_cast<i::HeapEntry*>(
+               reinterpret_cast<const i::HeapEntry*>(global))->name());
+}
+
+
+TEST(DocumentWithException) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun(
+      "this.__defineGetter__(\"document\", function() { throw new Error(); })");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  CHECK_EQ("Object",
+           const_cast<i::HeapEntry*>(
+               reinterpret_cast<const i::HeapEntry*>(global))->name());
+}
+
+
+TEST(DocumentURLWithException) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun(
+      "function URLWithException() {}\n"
+      "URLWithException.prototype = { get URL() { throw new Error(); } };\n"
+      "document = { URL: new URLWithException() };");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  CHECK_EQ("Object",
+           const_cast<i::HeapEntry*>(
+               reinterpret_cast<const i::HeapEntry*>(global))->name());
+}
+
+
+TEST(NodesIteration) {
+  v8::HandleScope scope;
+  LocalContext env;
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("iteration"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  // Verify that we can find this object by iteration.
+  const int nodes_count = snapshot->GetNodesCount();
+  int count = 0;
+  for (int i = 0; i < nodes_count; ++i) {
+    if (snapshot->GetNode(i) == global)
+      ++count;
+  }
+  CHECK_EQ(1, count);
+}
+
+
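+// Graph nodes should expose the live JS values they correspond to.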
+TEST(GetHeapValue) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("a = { s_prop: \'value\', n_prop: 0.1 };");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("value"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK(global->GetHeapValue()->IsObject());
+  v8::Local<v8::Object> js_global =
+      env->Global()->GetPrototype().As<v8::Object>();
+  CHECK(js_global == global->GetHeapValue());
+  const v8::HeapGraphNode* obj = GetProperty(
+      global, v8::HeapGraphEdge::kShortcut, "a");
+  CHECK(obj->GetHeapValue()->IsObject());
+  v8::Local<v8::Object> js_obj = js_global->Get(v8_str("a")).As<v8::Object>();
+  CHECK(js_obj == obj->GetHeapValue());
+  const v8::HeapGraphNode* s_prop =
+      GetProperty(obj, v8::HeapGraphEdge::kProperty, "s_prop");
+  v8::Local<v8::String> js_s_prop =
+      js_obj->Get(v8_str("s_prop")).As<v8::String>();
+  CHECK(js_s_prop == s_prop->GetHeapValue());
+  const v8::HeapGraphNode* n_prop =
+      GetProperty(obj, v8::HeapGraphEdge::kProperty, "n_prop");
+  v8::Local<v8::Number> js_n_prop =
+      js_obj->Get(v8_str("n_prop")).As<v8::Number>();
+  CHECK(js_n_prop == n_prop->GetHeapValue());
+}
+
+
+TEST(GetHeapValueForDeletedObject) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  // It is impossible to delete a global property, so we are about to delete a
+  // property of the "a" object. Also, the "p" object can't be an empty one
+  // because the empty object is static and isn't actually deleted.
+  CompileRun("a = { p: { r: {} } };");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("snapshot"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* obj = GetProperty(
+      global, v8::HeapGraphEdge::kShortcut, "a");
+  const v8::HeapGraphNode* prop = GetProperty(
+      obj, v8::HeapGraphEdge::kProperty, "p");
+  {
+    // Perform the check inside a nested local scope to avoid creating a
+    // reference to the object we are deleting.
+    v8::HandleScope scope;
+    CHECK(prop->GetHeapValue()->IsObject());
+  }
+  CompileRun("delete a.p;");
+  CHECK(prop->GetHeapValue()->IsUndefined());
+}
+
+
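+// Compares a C string with a v8 internal String; dumps both on mismatch.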
+static int StringCmp(const char* ref, i::String* act) {
+  i::SmartArrayPointer<char> s_act = act->ToCString();
+  int result = strcmp(ref, *s_act);
+  if (result != 0)
+    fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, *s_act);
+  return result;
+}
+
+
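+// The constructor name should be derived from the constructing function or the
+// "constructor" property, falling back to "Object" when neither gives a name.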
+TEST(GetConstructorName) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun(
+      "function Constructor1() {};\n"
+      "var obj1 = new Constructor1();\n"
+      "var Constructor2 = function() {};\n"
+      "var obj2 = new Constructor2();\n"
+      "var obj3 = {};\n"
+      "obj3.constructor = function Constructor3() {};\n"
+      "var obj4 = {};\n"
+      "// Slow properties\n"
+      "for (var i=0; i<2000; ++i) obj4[\"p\" + i] = i;\n"
+      "obj4.constructor = function Constructor4() {};\n"
+      "var obj5 = {};\n"
+      "var obj6 = {};\n"
+      "obj6.constructor = 6;");
+  v8::Local<v8::Object> js_global =
+      env->Global()->GetPrototype().As<v8::Object>();
+  v8::Local<v8::Object> obj1 = js_global->Get(v8_str("obj1")).As<v8::Object>();
+  i::Handle<i::JSObject> js_obj1 = v8::Utils::OpenHandle(*obj1);
+  CHECK_EQ(0, StringCmp(
+      "Constructor1", i::V8HeapExplorer::GetConstructorName(*js_obj1)));
+  v8::Local<v8::Object> obj2 = js_global->Get(v8_str("obj2")).As<v8::Object>();
+  i::Handle<i::JSObject> js_obj2 = v8::Utils::OpenHandle(*obj2);
+  CHECK_EQ(0, StringCmp(
+      "Constructor2", i::V8HeapExplorer::GetConstructorName(*js_obj2)));
+  v8::Local<v8::Object> obj3 = js_global->Get(v8_str("obj3")).As<v8::Object>();
+  i::Handle<i::JSObject> js_obj3 = v8::Utils::OpenHandle(*obj3);
+  CHECK_EQ(0, StringCmp(
+      "Constructor3", i::V8HeapExplorer::GetConstructorName(*js_obj3)));
+  v8::Local<v8::Object> obj4 = js_global->Get(v8_str("obj4")).As<v8::Object>();
+  i::Handle<i::JSObject> js_obj4 = v8::Utils::OpenHandle(*obj4);
+  CHECK_EQ(0, StringCmp(
+      "Constructor4", i::V8HeapExplorer::GetConstructorName(*js_obj4)));
+  v8::Local<v8::Object> obj5 = js_global->Get(v8_str("obj5")).As<v8::Object>();
+  i::Handle<i::JSObject> js_obj5 = v8::Utils::OpenHandle(*obj5);
+  CHECK_EQ(0, StringCmp(
+      "Object", i::V8HeapExplorer::GetConstructorName(*js_obj5)));
+  v8::Local<v8::Object> obj6 = js_global->Get(v8_str("obj6")).As<v8::Object>();
+  i::Handle<i::JSObject> js_obj6 = v8::Utils::OpenHandle(*obj6);
+  CHECK_EQ(0, StringCmp(
+      "Object", i::V8HeapExplorer::GetConstructorName(*js_obj6)));
+}
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index d25f39f..11b8813 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -675,7 +675,7 @@
   CHECK(array->HasFastElements());  // Must be in fast mode.
 
   // array[length] = name.
-  ok = array->SetElement(0, *name, kNonStrictMode)->ToObjectChecked();
+  ok = array->SetElement(0, *name, kNonStrictMode, true)->ToObjectChecked();
   CHECK_EQ(Smi::FromInt(1), array->length());
   CHECK_EQ(array->GetElement(0), *name);
 
@@ -690,7 +690,8 @@
   CHECK(array->HasDictionaryElements());  // Must be in slow mode.
 
   // array[length] = name.
-  ok = array->SetElement(int_length, *name, kNonStrictMode)->ToObjectChecked();
+  ok = array->SetElement(
+      int_length, *name, kNonStrictMode, true)->ToObjectChecked();
   uint32_t new_int_length = 0;
   CHECK(array->length()->ToArrayIndex(&new_int_length));
   CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -717,9 +718,10 @@
   obj->SetProperty(
       *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
 
-  Object* ok = obj->SetElement(0, *first, kNonStrictMode)->ToObjectChecked();
+  Object* ok =
+      obj->SetElement(0, *first, kNonStrictMode, true)->ToObjectChecked();
 
-  ok = obj->SetElement(1, *second, kNonStrictMode)->ToObjectChecked();
+  ok = obj->SetElement(1, *second, kNonStrictMode, true)->ToObjectChecked();
 
   // Make the clone.
   Handle<JSObject> clone = Copy(obj);
@@ -737,8 +739,8 @@
   clone->SetProperty(
       *second, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
 
-  ok = clone->SetElement(0, *second, kNonStrictMode)->ToObjectChecked();
-  ok = clone->SetElement(1, *first, kNonStrictMode)->ToObjectChecked();
+  ok = clone->SetElement(0, *second, kNonStrictMode, true)->ToObjectChecked();
+  ok = clone->SetElement(1, *first, kNonStrictMode, true)->ToObjectChecked();
 
   CHECK_EQ(obj->GetElement(1), clone->GetElement(0));
   CHECK_EQ(obj->GetElement(0), clone->GetElement(1));
diff --git a/test/cctest/test-list.cc b/test/cctest/test-list.cc
index e20ee8a..7520b05 100644
--- a/test/cctest/test-list.cc
+++ b/test/cctest/test-list.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -138,3 +138,14 @@
   list.Clear();
   CHECK_EQ(0, list.length());
 }
+
+
+TEST(DeleteEmpty) {
+  {
+    List<int>* list = new List<int>(0);
+    delete list;
+  }
+  {
+    List<int> list(0);
+  }
+}
diff --git a/test/cctest/test-liveedit.cc b/test/cctest/test-liveedit.cc
index 9232354..2498fca 100644
--- a/test/cctest/test-liveedit.cc
+++ b/test/cctest/test-liveedit.cc
@@ -44,13 +44,13 @@
  public:
   StringCompareInput(const char* s1, const char* s2) : s1_(s1), s2_(s2) {
   }
-  int getLength1() {
+  int GetLength1() {
     return StrLength(s1_);
   }
-  int getLength2() {
+  int GetLength2() {
     return StrLength(s2_);
   }
-  bool equals(int index1, int index2) {
+  bool Equals(int index1, int index2) {
     return s1_[index1] == s2_[index2];
   }
 
@@ -95,7 +95,7 @@
                           int expected_diff_parameter = -1) {
   StringCompareInput input(s1, s2);
 
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
 
   DiffChunkStruct* first_chunk;
   ListDiffOutputWriter writer(&first_chunk);
diff --git a/test/cctest/test-lockers.cc b/test/cctest/test-lockers.cc
new file mode 100644
index 0000000..7360da5
--- /dev/null
+++ b/test/cctest/test-lockers.cc
@@ -0,0 +1,641 @@
+// Copyright 2007-2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
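+// Tests of v8::Locker and v8::Unlocker across multiple threads and isolates.
+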
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::AccessorInfo;
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::String;
+using ::v8::Value;
+using ::v8::V8;
+
+
+// Migrating an isolate
+class KangarooThread : public v8::internal::Thread {
+ public:
+  KangarooThread(v8::Isolate* isolate,
+                 v8::Handle<v8::Context> context, int value)
+      : Thread("KangarooThread"),
+        isolate_(isolate), context_(context), value_(value) {
+  }
+
+  void Run() {
+    {
+      v8::Locker locker(isolate_);
+      v8::Isolate::Scope isolate_scope(isolate_);
+      CHECK_EQ(isolate_, v8::internal::Isolate::Current());
+      v8::HandleScope scope;
+      v8::Context::Scope context_scope(context_);
+      Local<Value> v = CompileRun("getValue()");
+      CHECK(v->IsNumber());
+      CHECK_EQ(30, static_cast<int>(v->NumberValue()));
+    }
+    {
+      v8::Locker locker(isolate_);
+      v8::Isolate::Scope isolate_scope(isolate_);
+      v8::Context::Scope context_scope(context_);
+      v8::HandleScope scope;
+      Local<Value> v = CompileRun("getValue()");
+      CHECK(v->IsNumber());
+      CHECK_EQ(30, static_cast<int>(v->NumberValue()));
+    }
+    isolate_->Dispose();
+  }
+
+ private:
+  v8::Isolate* isolate_;
+  Persistent<v8::Context> context_;
+  int value_;
+};
+
+// Migrates an isolate from one thread to another
+TEST(KangarooIsolates) {
+  v8::Isolate* isolate = v8::Isolate::New();
+  Persistent<v8::Context> context;
+  {
+    v8::Locker locker(isolate);
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope;
+    context = v8::Context::New();
+    v8::Context::Scope context_scope(context);
+    CHECK_EQ(isolate, v8::internal::Isolate::Current());
+    CompileRun("function getValue() { return 30; }");
+  }
+  KangarooThread thread1(isolate, context, 1);
+  thread1.Start();
+  thread1.Join();
+}
+
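+// Compiles and runs a recursive fib(10) and checks that the result is 55.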
+static void CalcFibAndCheck() {
+  Local<Value> v = CompileRun("function fib(n) {"
+                              "  if (n <= 2) return 1;"
+                              "  return fib(n-1) + fib(n-2);"
+                              "}"
+                              "fib(10)");
+  CHECK(v->IsNumber());
+  CHECK_EQ(55, static_cast<int>(v->NumberValue()));
+}
+
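+// Helper base class: Run() executes on a separate thread, and Join() blocks
+// on a semaphore that is signaled once Run() has finished.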
+class JoinableThread {
+ public:
+  explicit JoinableThread(const char* name)
+    : name_(name),
+      semaphore_(i::OS::CreateSemaphore(0)),
+      thread_(this) {
+  }
+
+  virtual ~JoinableThread() {
+    delete semaphore_;
+  }
+
+  void Start() {
+    thread_.Start();
+  }
+
+  void Join() {
+    semaphore_->Wait();
+  }
+
+  virtual void Run() = 0;
+
+ private:
+  class ThreadWithSemaphore : public i::Thread {
+   public:
+    explicit ThreadWithSemaphore(JoinableThread* joinable_thread)
+      : Thread(joinable_thread->name_),
+        joinable_thread_(joinable_thread) {
+    }
+
+    virtual void Run() {
+      joinable_thread_->Run();
+      joinable_thread_->semaphore_->Signal();
+    }
+
+   private:
+    JoinableThread* joinable_thread_;
+  };
+
+  const char* name_;
+  i::Semaphore* semaphore_;
+  ThreadWithSemaphore thread_;
+
+  friend class ThreadWithSemaphore;
+
+  DISALLOW_COPY_AND_ASSIGN(JoinableThread);
+};
+
+
+class IsolateLockingThreadWithLocalContext : public JoinableThread {
+ public:
+  explicit IsolateLockingThreadWithLocalContext(v8::Isolate* isolate)
+    : JoinableThread("IsolateLockingThread"),
+      isolate_(isolate) {
+  }
+
+  virtual void Run() {
+    v8::Locker locker(isolate_);
+    v8::Isolate::Scope isolate_scope(isolate_);
+    v8::HandleScope handle_scope;
+    LocalContext local_context;
+    CHECK_EQ(isolate_, v8::internal::Isolate::Current());
+    CalcFibAndCheck();
+  }
+ private:
+  v8::Isolate* isolate_;
+};
+
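+// Starts every thread in the list, waits for all of them, then deletes them.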
+static void StartJoinAndDeleteThreads(const i::List<JoinableThread*>& threads) {
+  for (int i = 0; i < threads.length(); i++) {
+    threads[i]->Start();
+  }
+  for (int i = 0; i < threads.length(); i++) {
+    threads[i]->Join();
+  }
+  for (int i = 0; i < threads.length(); i++) {
+    delete threads[i];
+  }
+}
+
+
+// Run many threads all locking on the same isolate
+TEST(IsolateLockingStress) {
+  const int kNThreads = 100;
+  i::List<JoinableThread*> threads(kNThreads);
+  v8::Isolate* isolate = v8::Isolate::New();
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new IsolateLockingThreadWithLocalContext(isolate));
+  }
+  StartJoinAndDeleteThreads(threads);
+  isolate->Dispose();
+}
+
+class IsolateNonlockingThread : public JoinableThread {
+ public:
+  explicit IsolateNonlockingThread()
+    : JoinableThread("IsolateNonlockingThread") {
+  }
+
+  virtual void Run() {
+    v8::Isolate* isolate = v8::Isolate::New();
+    {
+      v8::Isolate::Scope isolate_scope(isolate);
+      v8::HandleScope handle_scope;
+      v8::Handle<v8::Context> context = v8::Context::New();
+      v8::Context::Scope context_scope(context);
+      CHECK_EQ(isolate, v8::internal::Isolate::Current());
+      CalcFibAndCheck();
+    }
+    isolate->Dispose();
+  }
+ private:
+};
+
+// Run many threads, each accessing its own isolate without locking
+TEST(MultithreadedParallelIsolates) {
+#ifdef V8_TARGET_ARCH_ARM
+  const int kNThreads = 10;
+#else
+  const int kNThreads = 50;
+#endif
+  i::List<JoinableThread*> threads(kNThreads);
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new IsolateNonlockingThread());
+  }
+  StartJoinAndDeleteThreads(threads);
+}
+
+
+class IsolateNestedLockingThread : public JoinableThread {
+ public:
+  explicit IsolateNestedLockingThread(v8::Isolate* isolate)
+    : JoinableThread("IsolateNestedLocking"), isolate_(isolate) {
+  }
+  virtual void Run() {
+    v8::Locker lock(isolate_);
+    v8::Isolate::Scope isolate_scope(isolate_);
+    v8::HandleScope handle_scope;
+    LocalContext local_context;
+    {
+      v8::Locker another_lock(isolate_);
+      CalcFibAndCheck();
+    }
+    {
+      v8::Locker another_lock(isolate_);
+      CalcFibAndCheck();
+    }
+  }
+ private:
+  v8::Isolate* isolate_;
+};
+
+// Run many threads with nested locks
+TEST(IsolateNestedLocking) {
+  const int kNThreads = 100;
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::List<JoinableThread*> threads(kNThreads);
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new IsolateNestedLockingThread(isolate));
+  }
+  StartJoinAndDeleteThreads(threads);
+}
+
+
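+// Locks and uses isolate1 on this thread while a child thread locks isolate2.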
+class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread {
+ public:
+  SeparateIsolatesLocksNonexclusiveThread(v8::Isolate* isolate1,
+                                          v8::Isolate* isolate2)
+    : JoinableThread("SeparateIsolatesLocksNonexclusiveThread"),
+      isolate1_(isolate1), isolate2_(isolate2) {
+  }
+
+  virtual void Run() {
+    v8::Locker lock(isolate1_);
+    v8::Isolate::Scope isolate_scope(isolate1_);
+    v8::HandleScope handle_scope;
+    LocalContext local_context;
+
+    IsolateLockingThreadWithLocalContext threadB(isolate2_);
+    threadB.Start();
+    CalcFibAndCheck();
+    threadB.Join();
+  }
+ private:
+  v8::Isolate* isolate1_;
+  v8::Isolate* isolate2_;
+};
+
+// Run threads that lock and access different isolates in parallel
+TEST(SeparateIsolatesLocksNonexclusive) {
+#ifdef V8_TARGET_ARCH_ARM
+  const int kNThreads = 50;
+#else
+  const int kNThreads = 100;
+#endif
+  v8::Isolate* isolate1 = v8::Isolate::New();
+  v8::Isolate* isolate2 = v8::Isolate::New();
+  i::List<JoinableThread*> threads(kNThreads);
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new SeparateIsolatesLocksNonexclusiveThread(isolate1,
+                                                             isolate2));
+  }
+  StartJoinAndDeleteThreads(threads);
+  isolate2->Dispose();
+  isolate1->Dispose();
+}
+
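+// Locks the given isolate and runs the fib check in a context that was created
+// on another thread.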
+class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread {
+ public:
+  explicit LockIsolateAndCalculateFibSharedContextThread(
+      v8::Isolate* isolate, v8::Handle<v8::Context> context)
+    : JoinableThread("LockIsolateAndCalculateFibThread"),
+      isolate_(isolate),
+      context_(context) {
+  }
+
+  virtual void Run() {
+    v8::Locker lock(isolate_);
+    v8::Isolate::Scope isolate_scope(isolate_);
+    HandleScope handle_scope;
+    v8::Context::Scope context_scope(context_);
+    CalcFibAndCheck();
+  }
+ private:
+  v8::Isolate* isolate_;
+  Persistent<v8::Context> context_;
+};
+
+class LockerUnlockerThread : public JoinableThread {
+ public:
+  explicit LockerUnlockerThread(v8::Isolate* isolate)
+    : JoinableThread("LockerUnlockerThread"),
+      isolate_(isolate) {
+  }
+
+  virtual void Run() {
+    v8::Locker lock(isolate_);
+    v8::Isolate::Scope isolate_scope(isolate_);
+    v8::HandleScope handle_scope;
+    v8::Handle<v8::Context> context = v8::Context::New();
+    {
+      v8::Context::Scope context_scope(context);
+      CalcFibAndCheck();
+    }
+    {
+      isolate_->Exit();
+      v8::Unlocker unlocker(isolate_);
+      LockIsolateAndCalculateFibSharedContextThread thread(isolate_, context);
+      thread.Start();
+      thread.Join();
+    }
+    isolate_->Enter();
+    {
+      v8::Context::Scope context_scope(context);
+      CalcFibAndCheck();
+    }
+  }
+
+ private:
+  v8::Isolate* isolate_;
+};
+
+// Use an Unlocker inside a Locker, with multiple threads.
+TEST(LockerUnlocker) {
+#ifdef V8_TARGET_ARCH_ARM
+  const int kNThreads = 50;
+#else
+  const int kNThreads = 100;
+#endif
+  i::List<JoinableThread*> threads(kNThreads);
+  v8::Isolate* isolate = v8::Isolate::New();
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new LockerUnlockerThread(isolate));
+  }
+  StartJoinAndDeleteThreads(threads);
+  isolate->Dispose();
+}
+
+class LockTwiceAndUnlockThread : public JoinableThread {
+ public:
+  explicit LockTwiceAndUnlockThread(v8::Isolate* isolate)
+    : JoinableThread("LockTwiceAndUnlockThread"),
+      isolate_(isolate) {
+  }
+
+  virtual void Run() {
+    v8::Locker lock(isolate_);
+    v8::Isolate::Scope isolate_scope(isolate_);
+    v8::HandleScope handle_scope;
+    v8::Handle<v8::Context> context = v8::Context::New();
+    {
+      v8::Context::Scope context_scope(context);
+      CalcFibAndCheck();
+    }
+    {
+      v8::Locker second_lock(isolate_);
+      {
+        isolate_->Exit();
+        v8::Unlocker unlocker(isolate_);
+        LockIsolateAndCalculateFibSharedContextThread thread(isolate_, context);
+        thread.Start();
+        thread.Join();
+      }
+    }
+    isolate_->Enter();
+    {
+      v8::Context::Scope context_scope(context);
+      CalcFibAndCheck();
+    }
+  }
+
+ private:
+  v8::Isolate* isolate_;
+};
+
+// Use Unlocker inside two Lockers.
+TEST(LockTwiceAndUnlock) {
+#ifdef V8_TARGET_ARCH_ARM
+  const int kNThreads = 50;
+#else
+  const int kNThreads = 100;
+#endif
+  i::List<JoinableThread*> threads(kNThreads);
+  v8::Isolate* isolate = v8::Isolate::New();
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new LockTwiceAndUnlockThread(isolate));
+  }
+  StartJoinAndDeleteThreads(threads);
+  isolate->Dispose();
+}
+
+class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
+ public:
+  LockAndUnlockDifferentIsolatesThread(v8::Isolate* isolate1,
+                                       v8::Isolate* isolate2)
+    : JoinableThread("LockAndUnlockDifferentIsolatesThread"),
+      isolate1_(isolate1),
+      isolate2_(isolate2) {
+  }
+
+  virtual void Run() {
+    Persistent<v8::Context> context1;
+    Persistent<v8::Context> context2;
+    v8::Locker lock1(isolate1_);
+    CHECK(v8::Locker::IsLocked(isolate1_));
+    CHECK(!v8::Locker::IsLocked(isolate2_));
+    {
+      v8::Isolate::Scope isolate_scope(isolate1_);
+      v8::HandleScope handle_scope;
+      context1 = v8::Context::New();
+      {
+        v8::Context::Scope context_scope(context1);
+        CalcFibAndCheck();
+      }
+    }
+    v8::Locker lock2(isolate2_);
+    CHECK(v8::Locker::IsLocked(isolate1_));
+    CHECK(v8::Locker::IsLocked(isolate2_));
+    {
+      v8::Isolate::Scope isolate_scope(isolate2_);
+      v8::HandleScope handle_scope;
+      context2 = v8::Context::New();
+      {
+        v8::Context::Scope context_scope(context2);
+        CalcFibAndCheck();
+      }
+    }
+    {
+      v8::Unlocker unlock1(isolate1_);
+      CHECK(!v8::Locker::IsLocked(isolate1_));
+      CHECK(v8::Locker::IsLocked(isolate2_));
+      v8::Isolate::Scope isolate_scope(isolate2_);
+      v8::HandleScope handle_scope;
+      v8::Context::Scope context_scope(context2);
+      LockIsolateAndCalculateFibSharedContextThread thread(isolate1_, context1);
+      thread.Start();
+      CalcFibAndCheck();
+      thread.Join();
+    }
+  }
+
+ private:
+  v8::Isolate* isolate1_;
+  v8::Isolate* isolate2_;
+};
+
+// Lock two isolates and unlock one of them.
+TEST(LockAndUnlockDifferentIsolates) {
+  v8::Isolate* isolate1 = v8::Isolate::New();
+  v8::Isolate* isolate2 = v8::Isolate::New();
+  LockAndUnlockDifferentIsolatesThread thread(isolate1, isolate2);
+  thread.Start();
+  thread.Join();
+  isolate2->Dispose();
+  isolate1->Dispose();
+}
+
+class LockUnlockLockThread : public JoinableThread {
+ public:
+  LockUnlockLockThread(v8::Isolate* isolate, v8::Handle<v8::Context> context)
+    : JoinableThread("LockUnlockLockThread"),
+      isolate_(isolate),
+      context_(context) {
+  }
+
+  virtual void Run() {
+    v8::Locker lock1(isolate_);
+    CHECK(v8::Locker::IsLocked(isolate_));
+    CHECK(!v8::Locker::IsLocked());
+    {
+      v8::Isolate::Scope isolate_scope(isolate_);
+      v8::HandleScope handle_scope;
+      v8::Context::Scope context_scope(context_);
+      CalcFibAndCheck();
+    }
+    {
+      v8::Unlocker unlock1(isolate_);
+      CHECK(!v8::Locker::IsLocked(isolate_));
+      CHECK(!v8::Locker::IsLocked());
+      {
+        v8::Locker lock2(isolate_);
+        v8::Isolate::Scope isolate_scope(isolate_);
+        v8::HandleScope handle_scope;
+        CHECK(v8::Locker::IsLocked(isolate_));
+        CHECK(!v8::Locker::IsLocked());
+        v8::Context::Scope context_scope(context_);
+        CalcFibAndCheck();
+      }
+    }
+  }
+
+ private:
+  v8::Isolate* isolate_;
+  v8::Persistent<v8::Context> context_;
+};
+
+// Locker inside an Unlocker inside a Locker.
+TEST(LockUnlockLockMultithreaded) {
+  const int kNThreads = 100;
+  v8::Isolate* isolate = v8::Isolate::New();
+  Persistent<v8::Context> context;
+  {
+    v8::Locker locker_(isolate);
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope;
+    context = v8::Context::New();
+  }
+  i::List<JoinableThread*> threads(kNThreads);
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new LockUnlockLockThread(isolate, context));
+  }
+  StartJoinAndDeleteThreads(threads);
+}
+
+class LockUnlockLockDefaultIsolateThread : public JoinableThread {
+ public:
+  explicit LockUnlockLockDefaultIsolateThread(v8::Handle<v8::Context> context)
+    : JoinableThread("LockUnlockLockDefaultIsolateThread"),
+      context_(context) {
+  }
+
+  virtual void Run() {
+    v8::Locker lock1;
+    {
+      v8::HandleScope handle_scope;
+      v8::Context::Scope context_scope(context_);
+      CalcFibAndCheck();
+    }
+    {
+      v8::Unlocker unlock1;
+      {
+        v8::Locker lock2;
+        v8::HandleScope handle_scope;
+        v8::Context::Scope context_scope(context_);
+        CalcFibAndCheck();
+      }
+    }
+  }
+
+ private:
+  v8::Persistent<v8::Context> context_;
+};
+
+// Locker inside an Unlocker inside a Locker for default isolate.
+TEST(LockUnlockLockDefaultIsolateMultithreaded) {
+  const int kNThreads = 100;
+  Persistent<v8::Context> context;
+  {
+    v8::Locker locker_;
+    v8::HandleScope handle_scope;
+    context = v8::Context::New();
+  }
+  i::List<JoinableThread*> threads(kNThreads);
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new LockUnlockLockDefaultIsolateThread(context));
+  }
+  StartJoinAndDeleteThreads(threads);
+}
+
+
+TEST(Regress1433) {
+  for (int i = 0; i < 10; i++) {
+    v8::Isolate* isolate = v8::Isolate::New();
+    {
+      v8::Locker lock(isolate);
+      v8::Isolate::Scope isolate_scope(isolate);
+      v8::HandleScope handle_scope;
+      v8::Persistent<Context> context = v8::Context::New();
+      v8::Context::Scope context_scope(context);
+      v8::Handle<String> source = v8::String::New("1+1");
+      v8::Handle<Script> script = v8::Script::Compile(source);
+      v8::Handle<Value> result = script->Run();
+      v8::String::AsciiValue ascii(result);
+      context.Dispose();
+    }
+    isolate->Dispose();
+  }
+}
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index b967c73..f536e6b 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -27,8 +27,6 @@
 //
 // Tests of profiler-related functions from log.h
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #include <stdlib.h>
 
 #include "v8.h"
@@ -56,8 +54,6 @@
 using v8::internal::StackTracer;
 using v8::internal::TickSample;
 
-namespace i = v8::internal;
-
 
 static v8::Persistent<v8::Context> env;
 
@@ -413,5 +409,3 @@
   CompileRun("js_entry_sp_level2();");
   CHECK_EQ(0, GetJsEntrySp());
 }
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-log-utils.cc b/test/cctest/test-log-utils.cc
deleted file mode 100644
index 861be12..0000000
--- a/test/cctest/test-log-utils.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-//
-// Tests of logging utilities from log-utils.h
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#include "v8.h"
-
-#include "log-utils.h"
-#include "cctest.h"
-
-using v8::internal::CStrVector;
-using v8::internal::EmbeddedVector;
-using v8::internal::LogDynamicBuffer;
-using v8::internal::MutableCStrVector;
-using v8::internal::ScopedVector;
-using v8::internal::Vector;
-using v8::internal::StrLength;
-
-// Fills 'ref_buffer' with test data: a sequence of two-digit
-// hex numbers: '0001020304...'. Then writes 'ref_buffer' contents to 'dynabuf'.
-static void WriteData(LogDynamicBuffer* dynabuf, Vector<char>* ref_buffer) {
-  static const char kHex[] = "0123456789ABCDEF";
-  CHECK_GT(ref_buffer->length(), 0);
-  CHECK_GT(513, ref_buffer->length());
-  for (int i = 0, half_len = ref_buffer->length() >> 1; i < half_len; ++i) {
-    (*ref_buffer)[i << 1] = kHex[i >> 4];
-    (*ref_buffer)[(i << 1) + 1] = kHex[i & 15];
-  }
-  if (ref_buffer->length() & 1) {
-    ref_buffer->last() = kHex[ref_buffer->length() >> 5];
-  }
-  CHECK_EQ(ref_buffer->length(),
-           dynabuf->Write(ref_buffer->start(), ref_buffer->length()));
-}
-
-
-static int ReadData(
-    LogDynamicBuffer* dynabuf, int start_pos, i::Vector<char>* buffer) {
-  return dynabuf->Read(start_pos, buffer->start(), buffer->length());
-}
-
-
-// Helper function used by CHECK_EQ to compare Vectors. Templatized to
-// accept both "char" and "const char" vector contents.
-template <typename E, typename V>
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source,
-                                     const Vector<E>& expected,
-                                     const char* value_source,
-                                     const Vector<V>& value) {
-  if (expected.length() != value.length()) {
-    V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
-             "#   Vectors lengths differ: %d expected, %d found\n"
-             "#   Expected: %.*s\n"
-             "#   Found: %.*s",
-             expected_source, value_source,
-             expected.length(), value.length(),
-             expected.length(), expected.start(),
-             value.length(), value.start());
-  }
-  if (strncmp(expected.start(), value.start(), expected.length()) != 0) {
-    V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
-             "#   Vectors contents differ:\n"
-             "#   Expected: %.*s\n"
-             "#   Found: %.*s",
-             expected_source, value_source,
-             expected.length(), expected.start(),
-             value.length(), value.start());
-  }
-}
-
-
-TEST(DynaBufSingleBlock) {
-  LogDynamicBuffer dynabuf(32, 32, "", 0);
-  EmbeddedVector<char, 32> ref_buf;
-  WriteData(&dynabuf, &ref_buf);
-  EmbeddedVector<char, 32> buf;
-  CHECK_EQ(32, dynabuf.Read(0, buf.start(), buf.length()));
-  CHECK_EQ(32, ReadData(&dynabuf, 0, &buf));
-  CHECK_EQ(ref_buf, buf);
-
-  // Verify that we can't read and write past the end.
-  CHECK_EQ(0, dynabuf.Read(32, buf.start(), buf.length()));
-  CHECK_EQ(0, dynabuf.Write(buf.start(), buf.length()));
-}
-
-
-TEST(DynaBufCrossBlocks) {
-  LogDynamicBuffer dynabuf(32, 128, "", 0);
-  EmbeddedVector<char, 48> ref_buf;
-  WriteData(&dynabuf, &ref_buf);
-  CHECK_EQ(48, dynabuf.Write(ref_buf.start(), ref_buf.length()));
-  // Verify that we can't write data when remaining buffer space isn't enough.
-  CHECK_EQ(0, dynabuf.Write(ref_buf.start(), ref_buf.length()));
-  EmbeddedVector<char, 48> buf;
-  CHECK_EQ(48, ReadData(&dynabuf, 0, &buf));
-  CHECK_EQ(ref_buf, buf);
-  CHECK_EQ(48, ReadData(&dynabuf, 48, &buf));
-  CHECK_EQ(ref_buf, buf);
-  CHECK_EQ(0, ReadData(&dynabuf, 48 * 2, &buf));
-}
-
-
-TEST(DynaBufReadTruncation) {
-  LogDynamicBuffer dynabuf(32, 128, "", 0);
-  EmbeddedVector<char, 128> ref_buf;
-  WriteData(&dynabuf, &ref_buf);
-  EmbeddedVector<char, 128> buf;
-  CHECK_EQ(128, ReadData(&dynabuf, 0, &buf));
-  CHECK_EQ(ref_buf, buf);
-  // Try to read near the end with a buffer larger than remaining data size.
-  EmbeddedVector<char, 48> tail_buf;
-  CHECK_EQ(32, ReadData(&dynabuf, 128 - 32, &tail_buf));
-  CHECK_EQ(ref_buf.SubVector(128 - 32, 128), tail_buf.SubVector(0, 32));
-}
-
-
-TEST(DynaBufSealing) {
-  const char* seal = "Sealed";
-  const int seal_size = StrLength(seal);
-  LogDynamicBuffer dynabuf(32, 128, seal, seal_size);
-  EmbeddedVector<char, 100> ref_buf;
-  WriteData(&dynabuf, &ref_buf);
-  // Try to write data that will not fit in the buffer.
-  CHECK_EQ(0, dynabuf.Write(ref_buf.start(), 128 - 100 - seal_size + 1));
-  // Now the buffer is sealed, writing of any amount of data is forbidden.
-  CHECK_EQ(0, dynabuf.Write(ref_buf.start(), 1));
-  EmbeddedVector<char, 100> buf;
-  CHECK_EQ(100, ReadData(&dynabuf, 0, &buf));
-  CHECK_EQ(ref_buf, buf);
-  // Check the seal.
-  EmbeddedVector<char, 50> seal_buf;
-  CHECK_EQ(seal_size, ReadData(&dynabuf, 100, &seal_buf));
-  CHECK_EQ(CStrVector(seal), seal_buf.SubVector(0, seal_size));
-  // Verify that there's no data beyond the seal.
-  CHECK_EQ(0, ReadData(&dynabuf, 100 + seal_size, &buf));
-}
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index b43e0cd..72e663c 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -2,8 +2,6 @@
 //
 // Tests of logging functions from log.h
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #ifdef __linux__
 #include <math.h>
 #include <pthread.h>
@@ -14,7 +12,9 @@
 #include "v8.h"
 #include "log.h"
 #include "cpu-profiler.h"
+#include "natives.h"
 #include "v8threads.h"
+#include "v8utils.h"
 #include "cctest.h"
 #include "vm-state-inl.h"
 
@@ -23,273 +23,76 @@
 using v8::internal::Logger;
 using v8::internal::StrLength;
 
-namespace i = v8::internal;
-
-static void SetUp() {
-  // Log to memory buffer.
-  i::FLAG_logfile = "*";
-  i::FLAG_log = true;
-  ISOLATE->InitializeLoggingAndCounters();
-  LOGGER->Setup();
-}
-
-static void TearDown() {
-  LOGGER->TearDown();
-}
-
-
-TEST(EmptyLog) {
-  SetUp();
-  CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 0));
-  CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 0));
-  CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 100));
-  CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 100));
-  TearDown();
-}
-
-
-TEST(GetMessages) {
-  SetUp();
-  LOGGER->StringEvent("aaa", "bbb");
-  LOGGER->StringEvent("cccc", "dddd");
-  CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 0));
-  char log_lines[100];
-  memset(log_lines, 0, sizeof(log_lines));
-  // See Logger::StringEvent.
-  const char* line_1 = "aaa,\"bbb\"\n";
-  const int line_1_len = StrLength(line_1);
-  // The exact size.
-  CHECK_EQ(line_1_len, LOGGER->GetLogLines(0, log_lines, line_1_len));
-  CHECK_EQ(line_1, log_lines);
-  memset(log_lines, 0, sizeof(log_lines));
-  // A bit more than the first line length.
-  CHECK_EQ(line_1_len, LOGGER->GetLogLines(0, log_lines, line_1_len + 3));
-  log_lines[line_1_len] = '\0';
-  CHECK_EQ(line_1, log_lines);
-  memset(log_lines, 0, sizeof(log_lines));
-  const char* line_2 = "cccc,\"dddd\"\n";
-  const int line_2_len = StrLength(line_2);
-  // Now start with line_2 beginning.
-  CHECK_EQ(0, LOGGER->GetLogLines(line_1_len, log_lines, 0));
-  CHECK_EQ(line_2_len, LOGGER->GetLogLines(line_1_len, log_lines, line_2_len));
-  CHECK_EQ(line_2, log_lines);
-  memset(log_lines, 0, sizeof(log_lines));
-  CHECK_EQ(line_2_len,
-           LOGGER->GetLogLines(line_1_len, log_lines, line_2_len + 3));
-  CHECK_EQ(line_2, log_lines);
-  memset(log_lines, 0, sizeof(log_lines));
-  // Now get entire buffer contents.
-  const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
-  const int all_lines_len = StrLength(all_lines);
-  CHECK_EQ(all_lines_len, LOGGER->GetLogLines(0, log_lines, all_lines_len));
-  CHECK_EQ(all_lines, log_lines);
-  memset(log_lines, 0, sizeof(log_lines));
-  CHECK_EQ(all_lines_len, LOGGER->GetLogLines(0, log_lines, all_lines_len + 3));
-  CHECK_EQ(all_lines, log_lines);
-  memset(log_lines, 0, sizeof(log_lines));
-  TearDown();
-}
-
-
-static int GetLogLines(int start_pos, i::Vector<char>* buffer) {
-  return LOGGER->GetLogLines(start_pos, buffer->start(), buffer->length());
-}
-
-
-TEST(BeyondWritePosition) {
-  SetUp();
-  LOGGER->StringEvent("aaa", "bbb");
-  LOGGER->StringEvent("cccc", "dddd");
-  // See Logger::StringEvent.
-  const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
-  const int all_lines_len = StrLength(all_lines);
-  EmbeddedVector<char, 100> buffer;
-  const int beyond_write_pos = all_lines_len;
-  CHECK_EQ(0, LOGGER->GetLogLines(beyond_write_pos, buffer.start(), 1));
-  CHECK_EQ(0, GetLogLines(beyond_write_pos, &buffer));
-  CHECK_EQ(0, LOGGER->GetLogLines(beyond_write_pos + 1, buffer.start(), 1));
-  CHECK_EQ(0, GetLogLines(beyond_write_pos + 1, &buffer));
-  CHECK_EQ(0, LOGGER->GetLogLines(beyond_write_pos + 100, buffer.start(), 1));
-  CHECK_EQ(0, GetLogLines(beyond_write_pos + 100, &buffer));
-  CHECK_EQ(0, LOGGER->GetLogLines(10 * 1024 * 1024, buffer.start(), 1));
-  CHECK_EQ(0, GetLogLines(10 * 1024 * 1024, &buffer));
-  TearDown();
-}
-
-
-TEST(MemoryLoggingTurnedOff) {
-  // Log to stdout
-  i::FLAG_logfile = "-";
-  i::FLAG_log = true;
-  ISOLATE->InitializeLoggingAndCounters();
-  LOGGER->Setup();
-  CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 0));
-  CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 0));
-  CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 100));
-  CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 100));
-  LOGGER->TearDown();
-}
-
-
-static void CompileAndRunScript(const char *src) {
-  v8::Script::Compile(v8::String::New(src))->Run();
-}
-
-
-namespace v8 {
-namespace internal {
-
-class LoggerTestHelper : public AllStatic {
- public:
-  static bool IsSamplerActive() { return LOGGER->IsProfilerSamplerActive(); }
-  static void ResetSamplesTaken() {
-    reinterpret_cast<Sampler*>(LOGGER->ticker_)->ResetSamplesTaken();
-  }
-  static bool has_samples_taken() {
-    return reinterpret_cast<Sampler*>(LOGGER->ticker_)->samples_taken() > 0;
-  }
-};
-
-}  // namespace v8::internal
-}  // namespace v8
-
-using v8::internal::LoggerTestHelper;
-
-
 namespace {
 
+
 class ScopedLoggerInitializer {
  public:
   explicit ScopedLoggerInitializer(bool prof_lazy)
-      : saved_prof_lazy_(i::FLAG_prof_lazy),
+      : saved_log_(i::FLAG_log),
+        saved_prof_lazy_(i::FLAG_prof_lazy),
         saved_prof_(i::FLAG_prof),
         saved_prof_auto_(i::FLAG_prof_auto),
+        temp_file_(NULL),
+        // Need to run this prior to creating the scope.
         trick_to_run_init_flags_(init_flags_(prof_lazy)),
-        need_to_set_up_logger_(i::V8::IsRunning()),
         scope_(),
         env_(v8::Context::New()) {
-    if (need_to_set_up_logger_) LOGGER->Setup();
     env_->Enter();
   }
 
   ~ScopedLoggerInitializer() {
     env_->Exit();
     LOGGER->TearDown();
+    if (temp_file_ != NULL) fclose(temp_file_);
     i::FLAG_prof_lazy = saved_prof_lazy_;
     i::FLAG_prof = saved_prof_;
     i::FLAG_prof_auto = saved_prof_auto_;
+    i::FLAG_log = saved_log_;
   }
 
   v8::Handle<v8::Context>& env() { return env_; }
 
+  FILE* StopLoggingGetTempFile() {
+    temp_file_ = LOGGER->TearDown();
+    CHECK_NE(NULL, temp_file_);
+    fflush(temp_file_);
+    rewind(temp_file_);
+    return temp_file_;
+  }
+
  private:
   static bool init_flags_(bool prof_lazy) {
+    i::FLAG_log = true;
     i::FLAG_prof = true;
     i::FLAG_prof_lazy = prof_lazy;
     i::FLAG_prof_auto = false;
-    i::FLAG_logfile = "*";
+    i::FLAG_logfile = i::Log::kLogToTemporaryFile;
     return prof_lazy;
   }
 
+  const bool saved_log_;
   const bool saved_prof_lazy_;
   const bool saved_prof_;
   const bool saved_prof_auto_;
+  FILE* temp_file_;
   const bool trick_to_run_init_flags_;
-  const bool need_to_set_up_logger_;
   v8::HandleScope scope_;
   v8::Handle<v8::Context> env_;
 
   DISALLOW_COPY_AND_ASSIGN(ScopedLoggerInitializer);
 };
 
-
-class LogBufferMatcher {
- public:
-  LogBufferMatcher() {
-    // Skip all initially logged stuff.
-    log_pos_ = GetLogLines(0, &buffer_);
-  }
-
-  int log_pos() { return log_pos_; }
-
-  int GetNextChunk() {
-    int chunk_size = GetLogLines(log_pos_, &buffer_);
-    CHECK_GT(buffer_.length(), chunk_size);
-    buffer_[chunk_size] = '\0';
-    log_pos_ += chunk_size;
-    return chunk_size;
-  }
-
-  const char* Find(const char* substr) {
-    return strstr(buffer_.start(), substr);
-  }
-
-  const char* Find(const i::Vector<char>& substr) {
-    return Find(substr.start());
-  }
-
-  bool IsInSequence(const char* s1, const char* s2) {
-    const char* s1_pos = Find(s1);
-    const char* s2_pos = Find(s2);
-    CHECK_NE(NULL, s1_pos);
-    CHECK_NE(NULL, s2_pos);
-    return s1_pos < s2_pos;
-  }
-
-  void PrintBuffer() {
-    puts(buffer_.start());
-  }
-
- private:
-  EmbeddedVector<char, 102400> buffer_;
-  int log_pos_;
-};
-
 }  // namespace
 
 
-static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
-  CHECK(i::RuntimeProfiler::IsEnabled() ||
-        !LoggerTestHelper::IsSamplerActive());
-  LoggerTestHelper::ResetSamplesTaken();
-
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
-  CHECK(LoggerTestHelper::IsSamplerActive());
-
-  // Verify that the current map of compiled functions has been logged.
-  CHECK_GT(matcher->GetNextChunk(), 0);
-  const char* code_creation = "\ncode-creation,";  // eq. to /^code-creation,/
-  CHECK_NE(NULL, matcher->Find(code_creation));
-
-  // Force compiler to generate new code by parametrizing source.
-  EmbeddedVector<char, 100> script_src;
-  i::OS::SNPrintF(script_src,
-                  "function f%d(x) { return %d * x; }"
-                  "for (var i = 0; i < 10000; ++i) { f%d(i); }",
-                  matcher->log_pos(), matcher->log_pos(), matcher->log_pos());
-  // Run code for 200 msecs to get some ticks.
-  const double end_time = i::OS::TimeCurrentMillis() + 200;
-  while (i::OS::TimeCurrentMillis() < end_time) {
-    CompileAndRunScript(script_src.start());
-    // Yield CPU to give Profiler thread a chance to process ticks.
-    i::OS::Sleep(1);
-  }
-
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
-  CHECK(i::RuntimeProfiler::IsEnabled() ||
-        !LoggerTestHelper::IsSamplerActive());
-
-  // Wait 50 msecs to allow Profiler thread to process the last
-  // tick sample it has got.
-  i::OS::Sleep(50);
-
-  // Now we must have compiler and tick records.
-  CHECK_GT(matcher->GetNextChunk(), 0);
-  matcher->PrintBuffer();
-  CHECK_NE(NULL, matcher->Find(code_creation));
-  const char* tick = "\ntick,";
-  const bool ticks_found = matcher->Find(tick) != NULL;
-  CHECK_EQ(LoggerTestHelper::has_samples_taken(), ticks_found);
+static const char* StrNStr(const char* s1, const char* s2, int n) {
+  if (s1[n] == '\0') return strstr(s1, s2);
+  i::ScopedVector<char> str(n + 1);
+  i::OS::StrNCpy(str, s1, static_cast<size_t>(n));
+  str[n] = '\0';
+  char* found = strstr(str.start(), s2);
+  return found != NULL ? s1 + (found - str.start()) : NULL;
 }
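
StrNStr is a bounded strstr: it searches only the first n bytes of s1 but returns a pointer into the original buffer, which lets the tests below both order the marker events and assert that nothing was logged between two of them. A self-contained sketch of the same idea (names and log contents here are illustrative, not part of the patch):

#include <cstdio>
#include <cstring>
#include <string>

// Bounded substring search: look at the first n bytes only, but return a
// pointer into the original haystack so callers can keep slicing the log.
static const char* StrNStrSketch(const char* haystack, const char* needle, size_t n) {
  std::string window(haystack, n);
  size_t pos = window.find(needle);
  return pos == std::string::npos ? NULL : haystack + pos;
}

int main() {
  const char* log = "test-start,\ntest-profiler-start,\ncode-creation,Callback\n"
                    "test-profiler-stop,\n";
  const char* start = StrNStrSketch(log, "test-profiler-start,", strlen(log));
  const char* stop = StrNStrSketch(log, "test-profiler-stop,", strlen(log));
  // Ordering check, as in the CHECK_GT calls below.
  printf("ordered: %d\n", start != NULL && stop != NULL && start < stop);
  // Absence check on a slice, as in the "nothing logged while suspended" CHECKs.
  printf("no code-creation before start: %d\n",
         StrNStrSketch(log, "code-creation,", static_cast<size_t>(start - log)) == NULL);
  return 0;
}
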
 
 
@@ -298,29 +101,61 @@
 
   if (!i::V8::UseCrankshaft()) return;
 
-  // No sampling should happen prior to resuming profiler unless we
-  // are runtime profiling.
-  CHECK(i::RuntimeProfiler::IsEnabled() ||
-        !LoggerTestHelper::IsSamplerActive());
-
-  LogBufferMatcher matcher;
-  // Nothing must be logged until profiling is resumed.
-  CHECK_EQ(0, matcher.log_pos());
-
-  CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
-
-  // Nothing must be logged while profiling is suspended.
-  CHECK_EQ(0, matcher.GetNextChunk());
-
-  CheckThatProfilerWorks(&matcher);
-
-  CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
-
-  // No new data beyond last retrieved position.
-  CHECK_EQ(0, matcher.GetNextChunk());
-
+  LOGGER->StringEvent("test-start", "");
+  CompileRun("var a = (function(x) { return x + 1; })(10);");
+  LOGGER->StringEvent("test-profiler-start", "");
+  v8::V8::ResumeProfiler();
+  CompileRun(
+      "var b = (function(x) { return x + 2; })(10);\n"
+      "var c = (function(x) { return x + 3; })(10);\n"
+      "var d = (function(x) { return x + 4; })(10);\n"
+      "var e = (function(x) { return x + 5; })(10);");
+  v8::V8::PauseProfiler();
+  LOGGER->StringEvent("test-profiler-stop", "");
+  CompileRun("var f = (function(x) { return x + 6; })(10);");
   // Check that profiling can be resumed again.
-  CheckThatProfilerWorks(&matcher);
+  LOGGER->StringEvent("test-profiler-start-2", "");
+  v8::V8::ResumeProfiler();
+  CompileRun(
+      "var g = (function(x) { return x + 7; })(10);\n"
+      "var h = (function(x) { return x + 8; })(10);\n"
+      "var i = (function(x) { return x + 9; })(10);\n"
+      "var j = (function(x) { return x + 10; })(10);");
+  v8::V8::PauseProfiler();
+  LOGGER->StringEvent("test-profiler-stop-2", "");
+  LOGGER->StringEvent("test-stop", "");
+
+  bool exists = false;
+  i::Vector<const char> log(
+      i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+  CHECK(exists);
+
+  const char* test_start_position =
+      StrNStr(log.start(), "test-start,", log.length());
+  CHECK_NE(NULL, test_start_position);
+  const char* test_profiler_start_position =
+      StrNStr(log.start(), "test-profiler-start,", log.length());
+  CHECK_NE(NULL, test_profiler_start_position);
+  CHECK_GT(test_profiler_start_position, test_start_position);
+  const char* test_profiler_stop_position =
+      StrNStr(log.start(), "test-profiler-stop,", log.length());
+  CHECK_NE(NULL, test_profiler_stop_position);
+  CHECK_GT(test_profiler_stop_position, test_profiler_start_position);
+  const char* test_profiler_start_2_position =
+      StrNStr(log.start(), "test-profiler-start-2,", log.length());
+  CHECK_NE(NULL, test_profiler_start_2_position);
+  CHECK_GT(test_profiler_start_2_position, test_profiler_stop_position);
+
+  // Nothing must be logged until profiling is resumed.
+  CHECK_EQ(NULL, StrNStr(test_start_position,
+                         "code-creation,",
+                         static_cast<int>(test_profiler_start_position -
+                                          test_start_position)));
+  // Nothing must be logged while profiling is suspended.
+  CHECK_EQ(NULL, StrNStr(test_profiler_stop_position,
+                         "code-creation,",
+                         static_cast<int>(test_profiler_start_2_position -
+                                          test_profiler_stop_position)));
 }
 
 
@@ -385,7 +220,7 @@
       {
         v8::Context::Scope context_scope(context);
         SignalRunning();
-        CompileAndRunScript(
+        CompileRun(
             "var j; for (var i=0; i<10000; ++i) { j = Math.sin(i); }");
       }
       context.Dispose();
@@ -533,34 +368,34 @@
 
 TEST(LogCallbacks) {
   ScopedLoggerInitializer initialize_logger(false);
-  LogBufferMatcher matcher;
 
   v8::Persistent<v8::FunctionTemplate> obj =
       v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
-  obj->SetClassName(v8::String::New("Obj"));
+  obj->SetClassName(v8_str("Obj"));
   v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
   v8::Local<v8::Signature> signature = v8::Signature::New(obj);
-  proto->Set(v8::String::New("method1"),
+  proto->Set(v8_str("method1"),
              v8::FunctionTemplate::New(ObjMethod1,
                                        v8::Handle<v8::Value>(),
                                        signature),
              static_cast<v8::PropertyAttribute>(v8::DontDelete));
 
   initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
-  CompileAndRunScript("Obj.prototype.method1.toString();");
+  CompileRun("Obj.prototype.method1.toString();");
 
   LOGGER->LogCompiledFunctions();
-  CHECK_GT(matcher.GetNextChunk(), 0);
 
-  const char* callback_rec = "code-creation,Callback,";
-  char* pos = const_cast<char*>(matcher.Find(callback_rec));
-  CHECK_NE(NULL, pos);
-  pos += strlen(callback_rec);
-  EmbeddedVector<char, 100> ref_data;
+  bool exists = false;
+  i::Vector<const char> log(
+      i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+  CHECK(exists);
+
+  i::EmbeddedVector<char, 100> ref_data;
   i::OS::SNPrintF(ref_data,
-                  "0x%" V8PRIxPTR ",1,\"method1\"", ObjMethod1);
-  *(pos + strlen(ref_data.start())) = '\0';
-  CHECK_EQ(ref_data.start(), pos);
+                  "code-creation,Callback,0x%" V8PRIxPTR ",1,\"method1\"\0",
+                  ObjMethod1);
+
+  CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
 
   obj.Dispose();
 }
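
The reference record is built the same way the logger formats a code-creation line, with the callback's address printed through the pointer-sized hex conversion (V8PRIxPTR above). A standalone sketch of constructing and matching such a line, using the standard PRIxPTR as a stand-in:

#include <cinttypes>
#include <cstdio>
#include <cstring>

static void ObjMethod1Sketch() {}

int main() {
  // Build the expected code-creation,Callback,<address>,1,"method1" record.
  char ref[100];
  snprintf(ref, sizeof(ref), "code-creation,Callback,0x%" PRIxPTR ",1,\"method1\"",
           reinterpret_cast<uintptr_t>(&ObjMethod1Sketch));
  // A captured log is then simply searched for this exact substring.
  char log[200];
  snprintf(log, sizeof(log), "tick,0x1234\n%s\ntick,0x5678\n", ref);
  printf("match: %d\n", strstr(log, ref) != NULL);
  return 0;
}
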
@@ -583,527 +418,62 @@
 
 TEST(LogAccessorCallbacks) {
   ScopedLoggerInitializer initialize_logger(false);
-  LogBufferMatcher matcher;
 
   v8::Persistent<v8::FunctionTemplate> obj =
       v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
-  obj->SetClassName(v8::String::New("Obj"));
+  obj->SetClassName(v8_str("Obj"));
   v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
-  inst->SetAccessor(v8::String::New("prop1"), Prop1Getter, Prop1Setter);
-  inst->SetAccessor(v8::String::New("prop2"), Prop2Getter);
+  inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
+  inst->SetAccessor(v8_str("prop2"), Prop2Getter);
 
   LOGGER->LogAccessorCallbacks();
-  CHECK_GT(matcher.GetNextChunk(), 0);
-  matcher.PrintBuffer();
+
+  bool exists = false;
+  i::Vector<const char> log(
+      i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+  CHECK(exists);
 
   EmbeddedVector<char, 100> prop1_getter_record;
   i::OS::SNPrintF(prop1_getter_record,
                   "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop1\"",
                   Prop1Getter);
-  CHECK_NE(NULL, matcher.Find(prop1_getter_record));
+  CHECK_NE(NULL,
+           StrNStr(log.start(), prop1_getter_record.start(), log.length()));
+
   EmbeddedVector<char, 100> prop1_setter_record;
   i::OS::SNPrintF(prop1_setter_record,
                   "code-creation,Callback,0x%" V8PRIxPTR ",1,\"set prop1\"",
                   Prop1Setter);
-  CHECK_NE(NULL, matcher.Find(prop1_setter_record));
+  CHECK_NE(NULL,
+           StrNStr(log.start(), prop1_setter_record.start(), log.length()));
+
   EmbeddedVector<char, 100> prop2_getter_record;
   i::OS::SNPrintF(prop2_getter_record,
                   "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop2\"",
                   Prop2Getter);
-  CHECK_NE(NULL, matcher.Find(prop2_getter_record));
+  CHECK_NE(NULL,
+           StrNStr(log.start(), prop2_getter_record.start(), log.length()));
 
   obj.Dispose();
 }
 
 
-TEST(LogTags) {
-  ScopedLoggerInitializer initialize_logger(false);
-  LogBufferMatcher matcher;
-
-  const char* open_tag = "open-tag,";
-  const char* close_tag = "close-tag,";
-
-  // Check compatibility with the old style behavior.
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  CHECK_EQ(NULL, matcher.Find(open_tag));
-  CHECK_EQ(NULL, matcher.Find(close_tag));
-
-  const char* open_tag1 = "open-tag,1\n";
-  const char* close_tag1 = "close-tag,1\n";
-
-  // Check non-nested tag case.
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  CHECK_GT(matcher.GetNextChunk(), 0);
-  CHECK(matcher.IsInSequence(open_tag1, close_tag1));
-
-  const char* open_tag2 = "open-tag,2\n";
-  const char* close_tag2 = "close-tag,2\n";
-
-  // Check nested tags case.
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  CHECK_GT(matcher.GetNextChunk(), 0);
-  // open_tag1 < open_tag2 < close_tag2 < close_tag1
-  CHECK(matcher.IsInSequence(open_tag1, open_tag2));
-  CHECK(matcher.IsInSequence(open_tag2, close_tag2));
-  CHECK(matcher.IsInSequence(close_tag2, close_tag1));
-
-  // Check overlapped tags case.
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  CHECK_GT(matcher.GetNextChunk(), 0);
-  // open_tag1 < open_tag2 < close_tag1 < close_tag2
-  CHECK(matcher.IsInSequence(open_tag1, open_tag2));
-  CHECK(matcher.IsInSequence(open_tag2, close_tag1));
-  CHECK(matcher.IsInSequence(close_tag1, close_tag2));
-
-  const char* open_tag3 = "open-tag,3\n";
-  const char* close_tag3 = "close-tag,3\n";
-
-  // Check pausing overflow case.
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
-  CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 3);
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 3);
-  CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
-  // Must be no tags, because logging must be disabled.
-  CHECK_EQ(NULL, matcher.Find(open_tag3));
-  CHECK_EQ(NULL, matcher.Find(close_tag3));
-}
-
-
 TEST(IsLoggingPreserved) {
   ScopedLoggerInitializer initialize_logger(false);
 
   CHECK(LOGGER->is_logging());
-  LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+  LOGGER->ResumeProfiler();
   CHECK(LOGGER->is_logging());
-  LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
-  CHECK(LOGGER->is_logging());
-
-  CHECK(LOGGER->is_logging());
-  LOGGER->ResumeProfiler(
-      v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
-  CHECK(LOGGER->is_logging());
-  LOGGER->PauseProfiler(
-      v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
-  CHECK(LOGGER->is_logging());
-
-  CHECK(LOGGER->is_logging());
-  LOGGER->ResumeProfiler(
-      v8::PROFILER_MODULE_CPU |
-      v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
-  CHECK(LOGGER->is_logging());
-  LOGGER->PauseProfiler(
-      v8::PROFILER_MODULE_CPU |
-      v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
+  LOGGER->PauseProfiler();
   CHECK(LOGGER->is_logging());
 }
 
 
-static inline bool IsStringEqualTo(const char* r, const char* s) {
-  return strncmp(r, s, strlen(r)) == 0;
-}
+typedef i::NativesCollection<i::TEST> TestSources;
 
 
-static bool Consume(const char* str, char** buf) {
-  if (IsStringEqualTo(str, *buf)) {
-    *buf += strlen(str);
-    return true;
-  }
-  return false;
-}
-
-
-namespace {
-
-// A code entity is a pointer to a position of code-creation event in buffer log
-// offset to a point where entity size begins, i.e.: '255,"func"\n'. This makes
-// comparing code entities pretty easy.
-typedef char* CodeEntityInfo;
-
-class Interval {
- public:
-  Interval()
-      : min_addr_(reinterpret_cast<Address>(-1)),
-        max_addr_(reinterpret_cast<Address>(0)), next_(NULL) {}
-
-  ~Interval() { delete next_; }
-
-  size_t Length() {
-    size_t result = max_addr_ - min_addr_ + 1;
-    if (next_ != NULL) result += next_->Length();
-    return result;
-  }
-
-  void CloneFrom(Interval* src) {
-    while (src != NULL) {
-      RegisterAddress(src->min_addr_);
-      RegisterAddress(src->max_addr_);
-      src = src->next_;
-    }
-  }
-
-  bool Contains(Address addr) {
-    if (min_addr_ <= addr && addr <= max_addr_) {
-      return true;
-    }
-    if (next_ != NULL) {
-      return next_->Contains(addr);
-    } else {
-      return false;
-    }
-  }
-
-  size_t GetIndex(Address addr) {
-    if (min_addr_ <= addr && addr <= max_addr_) {
-      return addr - min_addr_;
-    }
-    CHECK_NE(NULL, next_);
-    return (max_addr_ - min_addr_ + 1) + next_->GetIndex(addr);
-  }
-
-  Address GetMinAddr() {
-    return next_ == NULL ? min_addr_ : i::Min(min_addr_, next_->GetMinAddr());
-  }
-
-  Address GetMaxAddr() {
-    return next_ == NULL ? max_addr_ : i::Max(max_addr_, next_->GetMaxAddr());
-  }
-
-  void RegisterAddress(Address addr) {
-    if (min_addr_ == reinterpret_cast<Address>(-1)
-        || (size_t)(addr > min_addr_ ?
-           addr - min_addr_ : min_addr_ - addr) < MAX_DELTA) {
-      if (addr < min_addr_) min_addr_ = addr;
-      if (addr > max_addr_) max_addr_ = addr;
-    } else {
-      if (next_ == NULL) next_ = new Interval();
-      next_->RegisterAddress(addr);
-    }
-  }
-
-  Address raw_min_addr() { return min_addr_; }
-
-  Address raw_max_addr() { return max_addr_; }
-
-  Interval* get_next() { return next_; }
-
- private:
-  static const size_t MAX_DELTA = 0x100000;
-  Address min_addr_;
-  Address max_addr_;
-  Interval* next_;
-};
-
-
-// A structure used to return log parsing results.
-class ParseLogResult {
- public:
-  ParseLogResult()
-      : entities_map(NULL), entities(NULL),
-        max_entities(0) {}
-
-  ~ParseLogResult() {
-    i::DeleteArray(entities_map);
-    i::DeleteArray(entities);
-  }
-
-  void AllocateEntities() {
-    // Make sure that the test doesn't operate on a bogus log.
-    CHECK_GT(max_entities, 0);
-    CHECK_GT(bounds.GetMinAddr(), 0);
-    CHECK_GT(bounds.GetMaxAddr(), bounds.GetMinAddr());
-
-    entities = i::NewArray<CodeEntityInfo>(max_entities);
-    for (int i = 0; i < max_entities; ++i) {
-      entities[i] = NULL;
-    }
-    const size_t map_length = bounds.Length();
-    entities_map = i::NewArray<int>(static_cast<int>(map_length));
-    for (size_t i = 0; i < map_length; ++i) {
-      entities_map[i] = -1;
-    }
-  }
-
-  bool HasIndexForAddress(Address addr) {
-    return bounds.Contains(addr);
-  }
-
-  size_t GetIndexForAddress(Address addr) {
-    CHECK(HasIndexForAddress(addr));
-    return bounds.GetIndex(addr);
-  }
-
-  CodeEntityInfo GetEntity(Address addr) {
-    if (HasIndexForAddress(addr)) {
-      size_t idx = GetIndexForAddress(addr);
-      int item = entities_map[idx];
-      return item != -1 ? entities[item] : NULL;
-    }
-    return NULL;
-  }
-
-  void ParseAddress(char* start) {
-    Address addr =
-        reinterpret_cast<Address>(strtoul(start, NULL, 16));  // NOLINT
-    bounds.RegisterAddress(addr);
-  }
-
-  Address ConsumeAddress(char** start) {
-    char* end_ptr;
-    Address addr =
-        reinterpret_cast<Address>(strtoul(*start, &end_ptr, 16));  // NOLINT
-    CHECK(HasIndexForAddress(addr));
-    *start = end_ptr;
-    return addr;
-  }
-
-  Interval bounds;
-  // Memory map of entities start addresses.
-  int* entities_map;
-  // An array of code entities.
-  CodeEntityInfo* entities;
-  // Maximal entities count. Actual entities count can be lower,
-  // empty entity slots are pointing to NULL.
-  int max_entities;
-};
-
-}  // namespace
-
-
-typedef void (*ParserBlock)(char* start, char* end, ParseLogResult* result);
-
-static void ParserCycle(
-    char* start, char* end, ParseLogResult* result,
-    ParserBlock block_creation, ParserBlock block_delete,
-    ParserBlock block_move) {
-
-  const char* code_creation = "code-creation,";
-  const char* code_delete = "code-delete,";
-  const char* code_move = "code-move,";
-
-  const char* lazy_compile = "LazyCompile,";
-  const char* script = "Script,";
-  const char* function = "Function,";
-
-  while (start < end) {
-    if (Consume(code_creation, &start)) {
-      if (Consume(lazy_compile, &start)
-          || Consume(script, &start)
-          || Consume(function, &start)) {
-        block_creation(start, end, result);
-      }
-    } else if (Consume(code_delete, &start)) {
-      block_delete(start, end, result);
-    } else if (Consume(code_move, &start)) {
-      block_move(start, end, result);
-    }
-    while (start < end && *start != '\n') ++start;
-    ++start;
-  }
-}
-
-
-static void Pass1CodeCreation(char* start, char* end, ParseLogResult* result) {
-  result->ParseAddress(start);
-  ++result->max_entities;
-}
-
-
-static void Pass1CodeDelete(char* start, char* end, ParseLogResult* result) {
-  result->ParseAddress(start);
-}
-
-
-static void Pass1CodeMove(char* start, char* end, ParseLogResult* result) {
-  result->ParseAddress(start);
-  // Skip old address.
-  while (start < end && *start != ',') ++start;
-  CHECK_GT(end, start);
-  ++start;  // Skip ','.
-  result->ParseAddress(start);
-}
-
-
-static void Pass2CodeCreation(char* start, char* end, ParseLogResult* result) {
-  Address addr = result->ConsumeAddress(&start);
-  CHECK_GT(end, start);
-  ++start;  // Skip ','.
-
-  size_t idx = result->GetIndexForAddress(addr);
-  result->entities_map[idx] = -1;
-  for (int i = 0; i < result->max_entities; ++i) {
-    // Find an empty slot and fill it.
-    if (result->entities[i] == NULL) {
-      result->entities[i] = start;
-      result->entities_map[idx] = i;
-      break;
-    }
-  }
-  // Make sure that a slot was found.
-  CHECK_GE(result->entities_map[idx], 0);
-}
-
-
-static void Pass2CodeDelete(char* start, char* end, ParseLogResult* result) {
-  Address addr = result->ConsumeAddress(&start);
-  size_t idx = result->GetIndexForAddress(addr);
-  // There can be code deletes that are not related to JS code.
-  if (result->entities_map[idx] >= 0) {
-    result->entities[result->entities_map[idx]] = NULL;
-    result->entities_map[idx] = -1;
-  }
-}
-
-
-static void Pass2CodeMove(char* start, char* end, ParseLogResult* result) {
-  Address from_addr = result->ConsumeAddress(&start);
-  CHECK_GT(end, start);
-  ++start;  // Skip ','.
-  Address to_addr = result->ConsumeAddress(&start);
-  CHECK_GT(end, start);
-
-  size_t from_idx = result->GetIndexForAddress(from_addr);
-  size_t to_idx = result->GetIndexForAddress(to_addr);
-  // There can be code moves that are not related to JS code.
-  if (from_idx != to_idx && result->entities_map[from_idx] >= 0) {
-    CHECK_EQ(-1, result->entities_map[to_idx]);
-    result->entities_map[to_idx] = result->entities_map[from_idx];
-    result->entities_map[from_idx] = -1;
-  };
-}
-
-
-static void ParseLog(char* start, char* end, ParseLogResult* result) {
-  // Pass 1: Calculate boundaries of addresses and entities count.
-  ParserCycle(start, end, result,
-              Pass1CodeCreation, Pass1CodeDelete, Pass1CodeMove);
-
-  printf("min_addr: %p, max_addr: %p, entities: %d\n",
-         result->bounds.GetMinAddr(), result->bounds.GetMaxAddr(),
-         result->max_entities);
-
-  result->AllocateEntities();
-
-  // Pass 2: Fill in code entries data.
-  ParserCycle(start, end, result,
-              Pass2CodeCreation, Pass2CodeDelete, Pass2CodeMove);
-}
-
-
-static inline void PrintCodeEntityInfo(CodeEntityInfo entity) {
-  const int max_len = 50;
-  if (entity != NULL) {
-    char* eol = strchr(entity, '\n');
-    int len = static_cast<int>(eol - entity);
-    len = len <= max_len ? len : max_len;
-    printf("%-*.*s ", max_len, len, entity);
-  } else {
-    printf("%*s", max_len + 1, "");
-  }
-}
-
-
-static void PrintCodeEntitiesInfo(
-    bool is_equal, Address addr,
-    CodeEntityInfo l_entity, CodeEntityInfo r_entity) {
-  printf("%c %p ", is_equal ? ' ' : '*', addr);
-  PrintCodeEntityInfo(l_entity);
-  PrintCodeEntityInfo(r_entity);
-  printf("\n");
-}
-
-
-static inline int StrChrLen(const char* s, char c) {
-  return static_cast<int>(strchr(s, c) - s);
-}
-
-
-static bool AreFuncSizesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) {
-  int ref_len = StrChrLen(ref_s, ',');
-  int new_len = StrChrLen(new_s, ',');
-  return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0;
-}
-
-
-static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) {
-  // Skip size.
-  ref_s = strchr(ref_s, ',') + 1;
-  new_s = strchr(new_s, ',') + 1;
-  CHECK_EQ('"', ref_s[0]);
-  CHECK_EQ('"', new_s[0]);
-  int ref_len = StrChrLen(ref_s + 1, '\"');
-  int new_len = StrChrLen(new_s + 1, '\"');
-  // A special case for ErrorPrototype. Haven't yet figured out why they
-  // are different.
-  const char* error_prototype = "\"ErrorPrototype";
-  if (IsStringEqualTo(error_prototype, ref_s)
-      && IsStringEqualTo(error_prototype, new_s)) {
-    return true;
-  }
-  // Built-in objects have problems too.
-  const char* built_ins[] = {
-      "\"Boolean\"", "\"Function\"", "\"Number\"",
-      "\"Object\"", "\"Script\"", "\"String\""
-  };
-  for (size_t i = 0; i < sizeof(built_ins) / sizeof(*built_ins); ++i) {
-    if (IsStringEqualTo(built_ins[i], new_s)) {
-      return true;
-    }
-  }
-  return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0;
-}
-
-
-static bool AreEntitiesEqual(CodeEntityInfo ref_e, CodeEntityInfo new_e) {
-  if (ref_e == NULL && new_e != NULL) return true;
-  if (ref_e != NULL && new_e != NULL) {
-    return AreFuncSizesEqual(ref_e, new_e) && AreFuncNamesEqual(ref_e, new_e);
-  }
-  if (ref_e != NULL && new_e == NULL) {
-    // args_count entities (argument adapters) are not found by heap traversal,
-    // but they are not needed because they doesn't contain any code.
-    ref_e = strchr(ref_e, ',') + 1;
-    const char* args_count = "\"args_count:";
-    return IsStringEqualTo(args_count, ref_e);
-  }
-  return false;
-}
-
-
-// Test that logging of code create / move / delete events
-// is equivalent to traversal of a resulting heap.
+// Test that logging of code create / move events is equivalent to traversal
+// of the resulting heap.
 TEST(EquivalenceOfLoggingAndTraversal) {
   // This test needs to be run on a "clean" V8 to ensure that snapshot log
   // is loaded. This is always true when running using tools/test.py because
@@ -1114,86 +484,55 @@
   // P.S. No, V8 can't be re-initialized after disposal, see include/v8.h.
   CHECK(!i::V8::IsRunning());
 
-  i::FLAG_logfile = "*";
-  i::FLAG_log = true;
-  i::FLAG_log_code = true;
-
-  // Make sure objects move.
-  bool saved_always_compact = i::FLAG_always_compact;
-  if (!i::FLAG_never_compact) {
-    i::FLAG_always_compact = true;
-  }
-
-  v8::HandleScope scope;
-  v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>();
-  v8::Handle<v8::Context> env = v8::Context::New(
-      0, v8::Handle<v8::ObjectTemplate>(), global_object);
-  env->Enter();
+  // Start with profiling to capture all code events from the beginning.
+  ScopedLoggerInitializer initialize_logger(false);
 
   // Compile and run a function that creates other functions.
-  CompileAndRunScript(
+  CompileRun(
       "(function f(obj) {\n"
       "  obj.test =\n"
       "    (function a(j) { return function b() { return j; } })(100);\n"
       "})(this);");
-  HEAP->CollectAllGarbage(false);
-
-  EmbeddedVector<char, 204800> buffer;
-  int log_size;
-  ParseLogResult ref_result;
-
-  // Retrieve the log.
-  {
-    // Make sure that no GCs occur prior to LogCompiledFunctions call.
-    i::AssertNoAllocation no_alloc;
-
-    log_size = GetLogLines(0, &buffer);
-    CHECK_GT(log_size, 0);
-    CHECK_GT(buffer.length(), log_size);
-
-    // Fill a map of compiled code objects.
-    ParseLog(buffer.start(), buffer.start() + log_size, &ref_result);
-  }
+  v8::V8::PauseProfiler();
+  HEAP->CollectAllGarbage(true);
+  LOGGER->StringEvent("test-logging-done", "");
 
   // Iterate heap to find compiled functions, will write to log.
   LOGGER->LogCompiledFunctions();
-  char* new_log_start = buffer.start() + log_size;
-  const int new_log_size = LOGGER->GetLogLines(
-      log_size, new_log_start, buffer.length() - log_size);
-  CHECK_GT(new_log_size, 0);
-  CHECK_GT(buffer.length(), log_size + new_log_size);
+  LOGGER->StringEvent("test-traversal-done", "");
 
-  // Fill an equivalent map of compiled code objects.
-  ParseLogResult new_result;
-  ParseLog(new_log_start, new_log_start + new_log_size, &new_result);
+  bool exists = false;
+  i::Vector<const char> log(
+      i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+  CHECK(exists);
+  v8::Handle<v8::String> log_str = v8::String::New(log.start(), log.length());
+  initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
 
-  // Test their actual equivalence.
-  Interval combined;
-  combined.CloneFrom(&ref_result.bounds);
-  combined.CloneFrom(&new_result.bounds);
-  Interval* iter = &combined;
-  bool results_equal = true;
-
-  while (iter != NULL) {
-    for (Address addr = iter->raw_min_addr();
-         addr <= iter->raw_max_addr(); ++addr) {
-      CodeEntityInfo ref_entity = ref_result.GetEntity(addr);
-      CodeEntityInfo new_entity = new_result.GetEntity(addr);
-      if (ref_entity != NULL || new_entity != NULL) {
-        const bool equal = AreEntitiesEqual(ref_entity, new_entity);
-        if (!equal) results_equal = false;
-        PrintCodeEntitiesInfo(equal, addr, ref_entity, new_entity);
-      }
-    }
-    iter = iter->get_next();
+  i::Vector<const unsigned char> source = TestSources::GetScriptsSource();
+  v8::Handle<v8::String> source_str = v8::String::New(
+      reinterpret_cast<const char*>(source.start()), source.length());
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Script> script = v8::Script::Compile(source_str, v8_str(""));
+  if (script.IsEmpty()) {
+    v8::String::Utf8Value exception(try_catch.Exception());
+    printf("compile: %s\n", *exception);
+    CHECK(false);
   }
-  // Make sure that all log data is written prior crash due to CHECK failure.
-  fflush(stdout);
-  CHECK(results_equal);
-
-  env->Exit();
-  LOGGER->TearDown();
-  i::FLAG_always_compact = saved_always_compact;
+  v8::Handle<v8::Value> result = script->Run();
+  if (result.IsEmpty()) {
+    v8::String::Utf8Value exception(try_catch.Exception());
+    printf("run: %s\n", *exception);
+    CHECK(false);
+  }
+  // The result is either a "true" literal or a problem description.
+  if (!result->IsTrue()) {
+    v8::Local<v8::String> s = result->ToString();
+    i::ScopedVector<char> data(s->Length() + 1);
+    CHECK_NE(NULL, data.start());
+    s->WriteAscii(data.start());
+    printf("%s\n", data.start());
+    // Make sure that our output is written before the crash due to CHECK failure.
+    fflush(stdout);
+    CHECK(false);
+  }
 }
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 2b06a5c..d98b675 100755
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -31,18 +31,16 @@
 
 #include "v8.h"
 
-#include "isolate.h"
-#include "token.h"
-#include "scanner.h"
-#include "parser.h"
-#include "utils.h"
-#include "execution.h"
-#include "preparser.h"
 #include "cctest.h"
+#include "execution.h"
+#include "isolate.h"
+#include "parser.h"
+#include "preparser.h"
+#include "scanner-character-streams.h"
+#include "token.h"
+#include "utils.h"
 
-namespace i = ::v8::internal;
-
-TEST(KeywordMatcher) {
+TEST(ScanKeywords) {
   struct KeywordToken {
     const char* keyword;
     i::Token::Value token;
@@ -50,86 +48,58 @@
 
   static const KeywordToken keywords[] = {
 #define KEYWORD(t, s, d) { s, i::Token::t },
-#define IGNORE(t, s, d)  /* */
-      TOKEN_LIST(IGNORE, KEYWORD, IGNORE)
+      TOKEN_LIST(IGNORE_TOKEN, KEYWORD)
 #undef KEYWORD
       { NULL, i::Token::IDENTIFIER }
   };
 
-  static const char* future_keywords[] = {
-#define FUTURE(t, s, d) s,
-      TOKEN_LIST(IGNORE, IGNORE, FUTURE)
-#undef FUTURE
-#undef IGNORE
-      NULL
-  };
-
   KeywordToken key_token;
+  i::UnicodeCache unicode_cache;
+  i::byte buffer[32];
   for (int i = 0; (key_token = keywords[i]).keyword != NULL; i++) {
-    i::KeywordMatcher matcher;
-    const char* keyword = key_token.keyword;
-    int length = i::StrLength(keyword);
-    for (int j = 0; j < length; j++) {
-      if (key_token.token == i::Token::INSTANCEOF && j == 2) {
-        // "in" is a prefix of "instanceof". It's the only keyword
-        // that is a prefix of another.
-        CHECK_EQ(i::Token::IN, matcher.token());
-      } else {
-        CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
-      }
-      matcher.AddChar(keyword[j]);
+    const i::byte* keyword =
+        reinterpret_cast<const i::byte*>(key_token.keyword);
+    int length = i::StrLength(key_token.keyword);
+    CHECK(static_cast<int>(sizeof(buffer)) >= length);
+    {
+      i::Utf8ToUC16CharacterStream stream(keyword, length);
+      i::JavaScriptScanner scanner(&unicode_cache);
+      // The scanner should parse 'let' as Token::LET for this test.
+      scanner.SetHarmonyBlockScoping(true);
+      scanner.Initialize(&stream);
+      CHECK_EQ(key_token.token, scanner.Next());
+      CHECK_EQ(i::Token::EOS, scanner.Next());
     }
-    CHECK_EQ(key_token.token, matcher.token());
-    // Adding more characters will make keyword matching fail.
-    matcher.AddChar('z');
-    CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
-    // Adding a keyword later will not make it match again.
-    matcher.AddChar('i');
-    matcher.AddChar('f');
-    CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
-  }
-
-  // Future keywords are not recognized.
-  const char* future_keyword;
-  for (int i = 0; (future_keyword = future_keywords[i]) != NULL; i++) {
-    i::KeywordMatcher matcher;
-    int length = i::StrLength(future_keyword);
-    for (int j = 0; j < length; j++) {
-      matcher.AddChar(future_keyword[j]);
+    // Removing characters will make keyword matching fail.
+    {
+      i::Utf8ToUC16CharacterStream stream(keyword, length - 1);
+      i::JavaScriptScanner scanner(&unicode_cache);
+      scanner.Initialize(&stream);
+      CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
+      CHECK_EQ(i::Token::EOS, scanner.Next());
     }
-    CHECK_EQ(i::Token::IDENTIFIER, matcher.token());
+    // Adding characters will make keyword matching fail.
+    static const char chars_to_append[] = { 'z', '0', '_' };
+    for (int j = 0; j < static_cast<int>(ARRAY_SIZE(chars_to_append)); ++j) {
+      memmove(buffer, keyword, length);
+      buffer[length] = chars_to_append[j];
+      i::Utf8ToUC16CharacterStream stream(buffer, length + 1);
+      i::JavaScriptScanner scanner(&unicode_cache);
+      scanner.Initialize(&stream);
+      CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
+      CHECK_EQ(i::Token::EOS, scanner.Next());
+    }
+    // Replacing characters will make keyword matching fail.
+    {
+      memmove(buffer, keyword, length);
+      buffer[length - 1] = '_';
+      i::Utf8ToUC16CharacterStream stream(buffer, length);
+      i::JavaScriptScanner scanner(&unicode_cache);
+      scanner.Initialize(&stream);
+      CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
+      CHECK_EQ(i::Token::EOS, scanner.Next());
+    }
   }
-
-  // Zero isn't ignored at first.
-  i::KeywordMatcher bad_start;
-  bad_start.AddChar(0);
-  CHECK_EQ(i::Token::IDENTIFIER, bad_start.token());
-  bad_start.AddChar('i');
-  bad_start.AddChar('f');
-  CHECK_EQ(i::Token::IDENTIFIER, bad_start.token());
-
-  // Zero isn't ignored at end.
-  i::KeywordMatcher bad_end;
-  bad_end.AddChar('i');
-  bad_end.AddChar('f');
-  CHECK_EQ(i::Token::IF, bad_end.token());
-  bad_end.AddChar(0);
-  CHECK_EQ(i::Token::IDENTIFIER, bad_end.token());
-
-  // Case isn't ignored.
-  i::KeywordMatcher bad_case;
-  bad_case.AddChar('i');
-  bad_case.AddChar('F');
-  CHECK_EQ(i::Token::IDENTIFIER, bad_case.token());
-
-  // If we mark it as failure, continuing won't help.
-  i::KeywordMatcher full_stop;
-  full_stop.AddChar('i');
-  CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
-  full_stop.Fail();
-  CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
-  full_stop.AddChar('f');
-  CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
 }
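
Each keyword is re-scanned after three mutations (truncate the last character, append a character, replace the last character) and must then come back as a plain IDENTIFIER. A toy sketch of that shape, with a flat keyword table standing in for the real scanner:

#include <cstdio>
#include <set>
#include <string>

// A flat keyword table stands in for the scanner's keyword matching.
static bool IsKeyword(const std::string& s) {
  static const std::set<std::string> kKeywords = {"if", "in", "instanceof", "return", "while"};
  return kKeywords.count(s) != 0;
}

int main() {
  const std::string kw = "return";
  std::string truncated = kw.substr(0, kw.size() - 1);  // "retur"
  std::string appended = kw + "z";                      // "returnz"
  std::string replaced = kw;
  replaced[replaced.size() - 1] = '_';                  // "retur_"
  printf("keyword: %d, truncated: %d, appended: %d, replaced: %d\n",
         IsKeyword(kw), IsKeyword(truncated), IsKeyword(appended), IsKeyword(replaced));
  return 0;
}
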
 
 
@@ -139,8 +109,9 @@
   // Regression test. See:
   //    http://code.google.com/p/chromium/issues/detail?id=53548
   // Tests that --> is correctly interpreted as comment-to-end-of-line if there
-  // is only whitespace before it on the line, even after a multiline-comment
-  // comment. This was not the case if it occurred before the first real token
+  // is only whitespace before it on the line (with comments considered as
+  // whitespace, even a multiline-comment containing a newline).
+  // This was not the case if it occurred before the first real token
   // in the input.
   const char* tests[] = {
       // Before first real token.
@@ -154,6 +125,16 @@
       NULL
   };
 
+  const char* fail_tests[] = {
+      "x --> is eol-comment\nvar y = 37;\n",
+      "\"\\n\" --> is eol-comment\nvar y = 37;\n",
+      "x/* precomment */ --> is eol-comment\nvar y = 37;\n",
+      "x/* precomment\n */ --> is eol-comment\nvar y = 37;\n",
+      "var x = 42; --> is eol-comment\nvar y = 37;\n",
+      "var x = 42; /* precomment\n */ --> is eol-comment\nvar y = 37;\n",
+      NULL
+  };
+
   // Parser/Scanner needs a stack limit.
   int marker;
   i::Isolate::Current()->stack_guard()->SetStackLimit(
@@ -165,6 +146,13 @@
     CHECK(data != NULL && !data->HasError());
     delete data;
   }
+
+  for (int i = 0; fail_tests[i]; i++) {
+    v8::ScriptData* data =
+        v8::ScriptData::PreCompile(fail_tests[i], i::StrLength(fail_tests[i]));
+    CHECK(data == NULL || data->HasError());
+    delete data;
+  }
 }
 
 
@@ -257,7 +245,7 @@
       "{label: 42}",
       "var x = 42;",
       "function foo(x, y) { return x + y; }",
-      "native function foo(); return %ArgleBargle(glop);",
+      "%ArgleBargle(glop);",
       "var x = new new Function('this.x = 42');",
       NULL
   };
@@ -269,7 +257,7 @@
         reinterpret_cast<const i::byte*>(program),
         static_cast<unsigned>(strlen(program)));
     i::CompleteParserRecorder log;
-    i::V8JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+    i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
     scanner.Initialize(&stream);
 
     v8::preparser::PreParser::PreParseResult result =
@@ -301,7 +289,7 @@
   i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
                                       static_cast<unsigned>(strlen(program)));
   i::ScriptDataImpl* data =
-      i::ParserApi::PreParse(&stream, NULL);
+      i::ParserApi::PreParse(&stream, NULL, false);
   CHECK(data->HasError());
   delete data;
 }
@@ -325,7 +313,7 @@
   i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
                                       static_cast<unsigned>(strlen(program)));
   i::ScriptDataImpl* data =
-      i::ParserApi::PartialPreParse(&stream, NULL);
+      i::ParserApi::PartialPreParse(&stream, NULL, false);
   CHECK(!data->HasError());
 
   data->Initialize();
@@ -357,7 +345,7 @@
       reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
 
   size_t kProgramSize = 1024 * 1024;
-  i::SmartPointer<char> program(
+  i::SmartArrayPointer<char> program(
       reinterpret_cast<char*>(malloc(kProgramSize + 1)));
   memset(*program, '(', kProgramSize);
   program[kProgramSize] = '\0';
@@ -368,7 +356,7 @@
       reinterpret_cast<const i::byte*>(*program),
       static_cast<unsigned>(kProgramSize));
   i::CompleteParserRecorder log;
-  i::V8JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(&stream);
 
 
@@ -410,7 +398,7 @@
   if (end == 0) end = length;
   unsigned sub_length = end - start;
   i::HandleScope test_scope;
-  i::SmartPointer<i::uc16> uc16_buffer(new i::uc16[length]);
+  i::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
   for (unsigned i = 0; i < length; i++) {
     uc16_buffer[i] = static_cast<i::uc16>(ascii_source[i]);
   }
@@ -586,7 +574,7 @@
                        i::Token::Value* expected_tokens,
                        int skip_pos = 0,  // Zero means not skipping.
                        int skip_to = 0) {
-  i::V8JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(stream);
 
   int i = 0;
@@ -667,7 +655,7 @@
   i::Utf8ToUC16CharacterStream stream(
        reinterpret_cast<const i::byte*>(re_source),
        static_cast<unsigned>(strlen(re_source)));
-  i::V8JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(&stream);
 
   i::Token::Value start = scanner.peek();
@@ -718,3 +706,179 @@
   TestScanRegExp("/=/", "=");
   TestScanRegExp("/=?/", "=?");
 }
+
+
+void TestParserSync(i::Handle<i::String> source, bool allow_lazy) {
+  uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+
+  // Preparse the data.
+  i::CompleteParserRecorder log;
+  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::GenericStringUC16CharacterStream stream(source, 0, source->length());
+  scanner.Initialize(&stream);
+  v8::preparser::PreParser::PreParseResult result =
+      v8::preparser::PreParser::PreParseProgram(
+          &scanner, &log, allow_lazy, stack_limit);
+  CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+  i::ScriptDataImpl data(log.ExtractData());
+
+  // Parse the data
+  i::Handle<i::Script> script = FACTORY->NewScript(source);
+  i::Parser parser(script, false, NULL, NULL);
+  i::FunctionLiteral* function =
+      parser.ParseProgram(source, true, i::kNonStrictMode);
+
+  i::String* type_string = NULL;
+  if (function == NULL) {
+    // Extract exception from the parser.
+    i::Handle<i::String> type_symbol = FACTORY->LookupAsciiSymbol("type");
+    CHECK(i::Isolate::Current()->has_pending_exception());
+    i::MaybeObject* maybe_object = i::Isolate::Current()->pending_exception();
+    i::JSObject* exception = NULL;
+    CHECK(maybe_object->To(&exception));
+
+    // Get the type string.
+    maybe_object = exception->GetProperty(*type_symbol);
+    CHECK(maybe_object->To(&type_string));
+  }
+
+  // Check that preparsing fails iff parsing fails.
+  if (data.has_error() && function != NULL) {
+    i::OS::Print(
+        "Preparser failed on:\n"
+        "\t%s\n"
+        "with error:\n"
+        "\t%s\n"
+        "However, the parser succeeded",
+        *source->ToCString(), data.BuildMessage());
+    CHECK(false);
+  } else if (!data.has_error() && function == NULL) {
+    i::OS::Print(
+        "Parser failed on:\n"
+        "\t%s\n"
+        "with error:\n"
+        "\t%s\n"
+        "However, the preparser succeeded",
+        *source->ToCString(), *type_string->ToCString());
+    CHECK(false);
+  }
+
+  // Check that preparser and parser produce the same error.
+  if (function == NULL) {
+    if (!type_string->IsEqualTo(i::CStrVector(data.BuildMessage()))) {
+      i::OS::Print(
+          "Expected parser and preparser to produce the same error on:\n"
+          "\t%s\n"
+          "However, found the following error messages\n"
+          "\tparser:    %s\n"
+          "\tpreparser: %s\n",
+          *source->ToCString(), *type_string->ToCString(), data.BuildMessage());
+      CHECK(false);
+    }
+  }
+}
+
+
+TEST(ParserSync) {
+  const char* context_data[][2] = {
+    { "", "" },
+    { "{", "}" },
+    { "if (true) ", " else {}" },
+    { "if (true) {} else ", "" },
+    { "if (true) ", "" },
+    { "do ", " while (false)" },
+    { "while (false) ", "" },
+    { "for (;;) ", "" },
+    { "with ({})", "" },
+    { "switch (12) { case 12: ", "}" },
+    { "switch (12) { default: ", "}" },
+    { "label2: ", "" },
+    { NULL, NULL }
+  };
+
+  const char* statement_data[] = {
+    "{}",
+    "var x",
+    "var x = 1",
+    "const x",
+    "const x = 1",
+    ";",
+    "12",
+    "if (false) {} else ;",
+    "if (false) {} else {}",
+    "if (false) {} else 12",
+    "if (false) ;"
+    "if (false) {}",
+    "if (false) 12",
+    "do {} while (false)",
+    "for (;;) ;",
+    "for (;;) {}",
+    "for (;;) 12",
+    "continue",
+    "continue label",
+    "continue\nlabel",
+    "break",
+    "break label",
+    "break\nlabel",
+    "return",
+    "return  12",
+    "return\n12",
+    "with ({}) ;",
+    "with ({}) {}",
+    "with ({}) 12",
+    "switch ({}) { default: }"
+    "label3: "
+    "throw",
+    "throw  12",
+    "throw\n12",
+    "try {} catch(e) {}",
+    "try {} finally {}",
+    "try {} catch(e) {} finally {}",
+    "debugger",
+    NULL
+  };
+
+  const char* termination_data[] = {
+    "",
+    ";",
+    "\n",
+    ";\n",
+    "\n;",
+    NULL
+  };
+
+  v8::HandleScope handles;
+  v8::Persistent<v8::Context> context = v8::Context::New();
+  v8::Context::Scope context_scope(context);
+
+  int marker;
+  i::Isolate::Current()->stack_guard()->SetStackLimit(
+      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+  for (int i = 0; context_data[i][0] != NULL; ++i) {
+    for (int j = 0; statement_data[j] != NULL; ++j) {
+      for (int k = 0; termination_data[k] != NULL; ++k) {
+        int kPrefixLen = i::StrLength(context_data[i][0]);
+        int kStatementLen = i::StrLength(statement_data[j]);
+        int kTerminationLen = i::StrLength(termination_data[k]);
+        int kSuffixLen = i::StrLength(context_data[i][1]);
+        int kProgramSize = kPrefixLen + kStatementLen + kTerminationLen
+            + kSuffixLen + i::StrLength("label: for (;;) {  }");
+
+        // Plug the source code pieces together.
+        i::Vector<char> program = i::Vector<char>::New(kProgramSize + 1);
+        int length = i::OS::SNPrintF(program,
+            "label: for (;;) { %s%s%s%s }",
+            context_data[i][0],
+            statement_data[j],
+            termination_data[k],
+            context_data[i][1]);
+        CHECK(length == kProgramSize);
+        i::Handle<i::String> source =
+            FACTORY->NewStringFromAscii(i::CStrVector(program.start()));
+        TestParserSync(source, true);
+        TestParserSync(source, false);
+      }
+    }
+  }
+}
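
The buffer arithmetic in the loop above works out because the constant part of the format string, "label: for (;;) {  }" with two spaces where the four %s pieces go, is added to the sum of the four fragment lengths. A standalone check of that bookkeeping (fragment values chosen for illustration):

#include <cassert>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  const char* prefix = "if (true) ";
  const char* statement = "var x = 1";
  const char* termination = ";";
  const char* suffix = " else {}";
  // The wrapper contributes strlen("label: for (;;) {  }") characters once the
  // four %s placeholders are substituted away (note the two spaces).
  int program_size = static_cast<int>(
      strlen(prefix) + strlen(statement) + strlen(termination) + strlen(suffix) +
      strlen("label: for (;;) {  }"));
  std::vector<char> program(program_size + 1);
  int length = snprintf(program.data(), program.size(), "label: for (;;) { %s%s%s%s }",
                        prefix, statement, termination, suffix);
  assert(length == program_size);
  printf("%s\n", program.data());
  return 0;
}
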
diff --git a/test/cctest/test-platform-tls.cc b/test/cctest/test-platform-tls.cc
index b2cb101..7f33aa2 100644
--- a/test/cctest/test-platform-tls.cc
+++ b/test/cctest/test-platform-tls.cc
@@ -48,7 +48,7 @@
 
 class TestThread : public Thread {
  public:
-  TestThread() : Thread(NULL, "TestThread") {}
+  TestThread() : Thread("TestThread") {}
 
   virtual void Run() {
     DoTest();
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index fbe5834..76fd244 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -2,15 +2,11 @@
 //
 // Tests of profiles generator and utilities.
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
 #include "v8.h"
 #include "profile-generator-inl.h"
 #include "cctest.h"
 #include "../include/v8-profiler.h"
 
-namespace i = v8::internal;
-
 using i::CodeEntry;
 using i::CodeMap;
 using i::CpuProfile;
@@ -41,16 +37,16 @@
   TokenEnumerator te;
   CHECK_EQ(TokenEnumerator::kNoSecurityToken, te.GetTokenId(NULL));
   v8::HandleScope hs;
-  v8::Local<v8::String> token1(v8::String::New("1"));
+  v8::Local<v8::String> token1(v8::String::New("1x"));
   CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
   CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
-  v8::Local<v8::String> token2(v8::String::New("2"));
+  v8::Local<v8::String> token2(v8::String::New("2x"));
   CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
   CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
   CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
   {
     v8::HandleScope hs;
-    v8::Local<v8::String> token3(v8::String::New("3"));
+    v8::Local<v8::String> token3(v8::String::New("3x"));
     CHECK_EQ(2, te.GetTokenId(*v8::Utils::OpenHandle(*token3)));
     CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
     CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
@@ -553,13 +549,14 @@
   code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
   CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
   CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
-  code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1800));
+  code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1700));  // Deprecate bbb.
   CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1500)));
-  CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
-  CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1800)));
-  code_map.DeleteCode(ToAddress(0x1700));
+  CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1700)));
+  CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0,
+                   TokenEnumerator::kNoSecurityToken);
+  code_map.AddCode(ToAddress(0x1750), &entry3, 0x100);
   CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1700)));
-  CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1800)));
+  CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1750)));
 }
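
The revised expectations read as follows: moving code onto an occupied address supersedes the old entry, and adding code over a range an existing entry covers drops that entry, which is why FindEntry(0x1700) returns NULL once ccc is added at 0x1750. A rough sketch of a map with those assumed semantics (not the real i::CodeMap):

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct Entry { std::string name; uintptr_t size; };

class CodeMapSketch {
 public:
  void AddCode(uintptr_t addr, const Entry* e) {
    // Adding code drops any entry whose range overlaps the new one.
    for (std::map<uintptr_t, const Entry*>::iterator it = map_.begin(); it != map_.end();) {
      if (addr < it->first + it->second->size && it->first < addr + e->size)
        map_.erase(it++);
      else
        ++it;
    }
    map_[addr] = e;
  }
  void MoveCode(uintptr_t from, uintptr_t to) {
    const Entry* e = map_[from];
    map_.erase(from);
    AddCode(to, e);  // supersedes whatever lived at the destination
  }
  const Entry* FindEntry(uintptr_t addr) const {
    std::map<uintptr_t, const Entry*>::const_iterator it = map_.upper_bound(addr);
    if (it == map_.begin()) return NULL;
    --it;
    return addr < it->first + it->second->size ? it->second : NULL;
  }
 private:
  std::map<uintptr_t, const Entry*> map_;
};

int main() {
  Entry aaa = {"aaa", 0x200}, bbb = {"bbb", 0x100}, ccc = {"ccc", 0x100};
  CodeMapSketch map;
  map.AddCode(0x1500, &aaa);
  map.AddCode(0x1700, &bbb);
  map.MoveCode(0x1500, 0x1700);   // deprecates bbb
  map.AddCode(0x1750, &ccc);      // overlaps aaa's new range, so 0x1700 stops resolving
  printf("0x1700 -> %s\n", map.FindEntry(0x1700) ? map.FindEntry(0x1700)->name.c_str() : "(null)");
  printf("0x1750 -> %s\n", map.FindEntry(0x1750)->name.c_str());
  return 0;
}
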
 
 
@@ -824,5 +821,3 @@
   for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
     i::DeleteArray(titles[i]);
 }
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index fa3c6ea..89a9112 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -30,30 +30,37 @@
 
 #include "v8.h"
 
-#include "string-stream.h"
-#include "cctest.h"
-#include "zone-inl.h"
-#include "parser.h"
 #include "ast.h"
+#include "char-predicates-inl.h"
+#include "cctest.h"
 #include "jsregexp.h"
+#include "parser.h"
 #include "regexp-macro-assembler.h"
 #include "regexp-macro-assembler-irregexp.h"
+#include "string-stream.h"
+#include "zone-inl.h"
 #ifdef V8_INTERPRETED_REGEXP
 #include "interpreter-irregexp.h"
 #else  // V8_INTERPRETED_REGEXP
+#include "macro-assembler.h"
+#include "code.h"
 #ifdef V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm.h"
 #include "arm/macro-assembler-arm.h"
 #include "arm/regexp-macro-assembler-arm.h"
 #endif
 #ifdef V8_TARGET_ARCH_MIPS
+#include "mips/assembler-mips.h"
 #include "mips/macro-assembler-mips.h"
 #include "mips/regexp-macro-assembler-mips.h"
 #endif
 #ifdef V8_TARGET_ARCH_X64
+#include "x64/assembler-x64.h"
 #include "x64/macro-assembler-x64.h"
 #include "x64/regexp-macro-assembler-x64.h"
 #endif
 #ifdef V8_TARGET_ARCH_IA32
+#include "ia32/assembler-ia32.h"
 #include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
 #endif
@@ -65,23 +72,23 @@
 static bool CheckParse(const char* input) {
   V8::Initialize(NULL);
   v8::HandleScope scope;
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   FlatStringReader reader(Isolate::Current(), CStrVector(input));
   RegExpCompileData result;
   return v8::internal::RegExpParser::ParseRegExp(&reader, false, &result);
 }
 
 
-static SmartPointer<const char> Parse(const char* input) {
+static SmartArrayPointer<const char> Parse(const char* input) {
   V8::Initialize(NULL);
   v8::HandleScope scope;
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   FlatStringReader reader(Isolate::Current(), CStrVector(input));
   RegExpCompileData result;
   CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
   CHECK(result.tree != NULL);
   CHECK(result.error.is_null());
-  SmartPointer<const char> output = result.tree->ToString();
+  SmartArrayPointer<const char> output = result.tree->ToString();
   return output;
 }
 
@@ -89,7 +96,7 @@
   V8::Initialize(NULL);
   v8::HandleScope scope;
   unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   FlatStringReader reader(Isolate::Current(), CStrVector(input));
   RegExpCompileData result;
   CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
@@ -107,7 +114,7 @@
   V8::Initialize(NULL);
   v8::HandleScope scope;
   unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   FlatStringReader reader(Isolate::Current(), CStrVector(input));
   RegExpCompileData result;
   CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
@@ -378,13 +385,13 @@
                         const char* expected) {
   V8::Initialize(NULL);
   v8::HandleScope scope;
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   FlatStringReader reader(Isolate::Current(), CStrVector(input));
   RegExpCompileData result;
   CHECK(!v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
   CHECK(result.tree == NULL);
   CHECK(!result.error.is_null());
-  SmartPointer<char> str = result.error->ToCString(ALLOW_NULLS);
+  SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
   CHECK_EQ(expected, *str);
 }
 
@@ -416,7 +423,7 @@
   for (int i = 0; i <= kMaxCaptures; i++) {
     accumulator.Add("()");
   }
-  SmartPointer<const char> many_captures(accumulator.ToCString());
+  SmartArrayPointer<const char> many_captures(accumulator.ToCString());
   ExpectError(*many_captures, kTooManyCaptures);
 }
 
@@ -460,7 +467,7 @@
 
 
 static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
-  ZoneScope scope(DELETE_ON_EXIT);
+  ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
   ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
   CharacterRange::AddClassEscape(c, ranges);
   for (unsigned i = 0; i < (1 << 16); i++) {
@@ -506,7 +513,7 @@
                     bool is_ascii,
                     bool dot_output = false) {
   v8::HandleScope scope;
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   RegExpNode* node = Compile(input, multiline, is_ascii);
   USE(node);
 #ifdef DEBUG
@@ -547,7 +554,7 @@
 TEST(SplayTreeSimple) {
   v8::internal::V8::Initialize(NULL);
   static const unsigned kLimit = 1000;
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   ZoneSplayTree<TestConfig> tree;
   bool seen[kLimit];
   for (unsigned i = 0; i < kLimit; i++) seen[i] = false;
@@ -615,7 +622,7 @@
     }
   }
   // Enter test data into dispatch table.
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   DispatchTable table;
   for (int i = 0; i < kRangeCount; i++) {
     uc16* range = ranges[i];
@@ -682,7 +689,7 @@
 class ContextInitializer {
  public:
   ContextInitializer()
-      : env_(), scope_(), zone_(DELETE_ON_EXIT) {
+      : env_(), scope_(), zone_(Isolate::Current(), DELETE_ON_EXIT) {
     env_ = v8::Context::New();
     env_->Enter();
   }
@@ -1377,7 +1384,7 @@
   static const int kLimit = 1000;
   static const int kRangeCount = 16;
   for (int t = 0; t < 10; t++) {
-    ZoneScope zone_scope(DELETE_ON_EXIT);
+    ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
     ZoneList<CharacterRange>* ranges =
         new ZoneList<CharacterRange>(kRangeCount);
     for (int i = 0; i < kRangeCount; i++) {
@@ -1398,7 +1405,7 @@
       CHECK_EQ(is_on, set->Get(0) == false);
     }
   }
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   ZoneList<CharacterRange>* ranges =
           new ZoneList<CharacterRange>(1);
   ranges->Add(CharacterRange(0xFFF0, 0xFFFE));
@@ -1511,7 +1518,7 @@
 
 static void TestRangeCaseIndependence(CharacterRange input,
                                       Vector<CharacterRange> expected) {
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   int count = expected.length();
   ZoneList<CharacterRange>* list = new ZoneList<CharacterRange>(count);
   input.AddCaseEquivalents(list, false);
@@ -1575,7 +1582,7 @@
 
 TEST(CharClassDifference) {
   v8::internal::V8::Initialize(NULL);
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   ZoneList<CharacterRange>* base = new ZoneList<CharacterRange>(1);
   base->Add(CharacterRange::Everything());
   Vector<const uc16> overlay = CharacterRange::GetWordBounds();
@@ -1602,7 +1609,7 @@
 
 TEST(CanonicalizeCharacterSets) {
   v8::internal::V8::Initialize(NULL);
-  ZoneScope scope(DELETE_ON_EXIT);
+  ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
   ZoneList<CharacterRange>* list = new ZoneList<CharacterRange>(4);
   CharacterSet set(list);
 
@@ -1673,7 +1680,7 @@
 
 TEST(CharacterRangeMerge) {
   v8::internal::V8::Initialize(NULL);
-  ZoneScope zone_scope(DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   ZoneList<CharacterRange> l1(4);
   ZoneList<CharacterRange> l2(4);
   // Create all combinations of intersections of ranges, both singletons and
diff --git a/test/cctest/test-reloc-info.cc b/test/cctest/test-reloc-info.cc
index 0378fb3..5bdc4c3 100644
--- a/test/cctest/test-reloc-info.cc
+++ b/test/cctest/test-reloc-info.cc
@@ -45,7 +45,7 @@
   const int code_size = 10 * KB;
   int relocation_info_size = 10 * KB;
   const int buffer_size = code_size + relocation_info_size;
-  SmartPointer<byte> buffer(new byte[buffer_size]);
+  SmartArrayPointer<byte> buffer(new byte[buffer_size]);
 
   byte* pc = *buffer;
   byte* buffer_end = *buffer + buffer_size;
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 4767fc6..8e85444 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -459,7 +459,9 @@
       CHECK(root->IsString());
     }
     v8::HandleScope handle_scope;
-    Handle<Object>root_handle(root);
+    Handle<Object> root_handle(root);
+
+    ReserveSpaceForPartialSnapshot(file_name);
 
     Object* root2;
     {
@@ -542,7 +544,9 @@
       CHECK(root->IsContext());
     }
     v8::HandleScope handle_scope;
-    Handle<Object>root_handle(root);
+    Handle<Object> root_handle(root);
+
+    ReserveSpaceForPartialSnapshot(file_name);
 
     Object* root2;
     {
diff --git a/test/cctest/test-sockets.cc b/test/cctest/test-sockets.cc
index 5246d09..4af55db 100644
--- a/test/cctest/test-sockets.cc
+++ b/test/cctest/test-sockets.cc
@@ -10,8 +10,8 @@
 
 class SocketListenerThread : public Thread {
  public:
-  explicit SocketListenerThread(Isolate* isolate, int port, int data_size)
-      : Thread(isolate, "SocketListenerThread"),
+  SocketListenerThread(int port, int data_size)
+      : Thread("SocketListenerThread"),
         port_(port),
         data_size_(data_size),
         server_(NULL),
@@ -92,8 +92,7 @@
   OS::SNPrintF(Vector<char>(port_str, kPortBuferLen), "%d", port);
 
   // Create a socket listener.
-  SocketListenerThread* listener = new SocketListenerThread(Isolate::Current(),
-      port, len);
+  SocketListenerThread* listener = new SocketListenerThread(port, len);
   listener->Start();
   listener->WaitForListening();
 
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 9c76d2c..55c2141 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -233,7 +233,7 @@
   InitializeVM();
   v8::HandleScope scope;
   Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS];
-  ZoneScope zone(DELETE_ON_EXIT);
+  ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT);
   InitializeBuildingBlocks(building_blocks);
   Handle<String> flat = ConstructBalanced(building_blocks);
   FlattenString(flat);
@@ -348,7 +348,7 @@
 
 
 TEST(ExternalShortStringAdd) {
-  ZoneScope zone(DELETE_ON_EXIT);
+  ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT);
 
   InitializeVM();
   v8::HandleScope handle_scope;
@@ -430,8 +430,7 @@
     "  return 0;"
     "};"
     "test()";
-  CHECK_EQ(0,
-           v8::Script::Compile(v8::String::New(source))->Run()->Int32Value());
+  CHECK_EQ(0, CompileRun(source)->Int32Value());
 }
 
 
@@ -439,7 +438,7 @@
   // We incorrectly allowed strings to be tagged as array indices even if their
   // values didn't fit in the hash field.
   // See http://code.google.com/p/v8/issues/detail?id=728
-  ZoneScope zone(DELETE_ON_EXIT);
+  ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT);
 
   InitializeVM();
   v8::HandleScope handle_scope;
@@ -481,3 +480,81 @@
     }
   }
 }
+
+
+TEST(SliceFromCons) {
+  FLAG_string_slices = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  Handle<String> string =
+      FACTORY->NewStringFromAscii(CStrVector("parentparentparent"));
+  Handle<String> parent = FACTORY->NewConsString(string, string);
+  CHECK(parent->IsConsString());
+  CHECK(!parent->IsFlat());
+  Handle<String> slice = FACTORY->NewSubString(parent, 1, 25);
+  // After slicing, the original string becomes a flat cons.
+  CHECK(parent->IsFlat());
+  CHECK(slice->IsSlicedString());
+  CHECK_EQ(SlicedString::cast(*slice)->parent(),
+           ConsString::cast(*parent)->first());
+  CHECK(SlicedString::cast(*slice)->parent()->IsSeqString());
+  CHECK(slice->IsFlat());
+}
+
+
+TEST(TrivialSlice) {
+  // This tests whether a slice that contains the entire parent string
+  // actually creates a new string (it should not).
+  FLAG_string_slices = true;
+  InitializeVM();
+  HandleScope scope;
+  v8::Local<v8::Value> result;
+  Handle<String> string;
+  const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
+  const char* check = "str.slice(0,26)";
+  const char* crosscheck = "str.slice(1,25)";
+
+  CompileRun(init);
+
+  result = CompileRun(check);
+  CHECK(result->IsString());
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK(!string->IsSlicedString());
+
+  string = FACTORY->NewSubString(string, 0, 26);
+  CHECK(!string->IsSlicedString());
+  result = CompileRun(crosscheck);
+  CHECK(result->IsString());
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK(string->IsSlicedString());
+  CHECK_EQ("bcdefghijklmnopqrstuvwxy", *(string->ToCString()));
+}
+
+
+TEST(SliceFromSlice) {
+  // This tests that slicing a slice does not create a chain of sliced
+  // strings: the new slice should reference the sequential parent directly.
+  FLAG_string_slices = true;
+  InitializeVM();
+  HandleScope scope;
+  v8::Local<v8::Value> result;
+  Handle<String> string;
+  const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
+  const char* slice = "var slice = str.slice(1,-1); slice";
+  const char* slice_from_slice = "slice.slice(1,-1);";
+
+  CompileRun(init);
+  result = CompileRun(slice);
+  CHECK(result->IsString());
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK(string->IsSlicedString());
+  CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
+  CHECK_EQ("bcdefghijklmnopqrstuvwxy", *(string->ToCString()));
+
+  result = CompileRun(slice_from_slice);
+  CHECK(result->IsString());
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK(string->IsSlicedString());
+  CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
+  CHECK_EQ("cdefghijklmnopqrstuvwx", *(string->ToCString()));
+}
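
Note: the three new string tests above drive the sliced-string optimization from C++, and the SlicedString/SeqString checks are only visible on that side. As a plain-JS restatement of the inputs and expected outputs they exercise (not part of the patch):

    var str = 'abcdefghijklmnopqrstuvwxyz';
    str.slice(0, 26);               // 'abcdefghijklmnopqrstuvwxyz', whole string, no new slice expected
    str.slice(1, -1);               // 'bcdefghijklmnopqrstuvwxy'
    str.slice(1, -1).slice(1, -1);  // 'cdefghijklmnopqrstuvwx'; the cctest checks the second
                                    // slice points at the sequential parent, not at another slice
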
diff --git a/test/cctest/test-thread-termination.cc b/test/cctest/test-thread-termination.cc
index 5635b17..1aa57e3 100644
--- a/test/cctest/test-thread-termination.cc
+++ b/test/cctest/test-thread-termination.cc
@@ -161,12 +161,16 @@
 class TerminatorThread : public v8::internal::Thread {
  public:
   explicit TerminatorThread(i::Isolate* isolate)
-      : Thread(isolate, "TerminatorThread") { }
+      : Thread("TerminatorThread"),
+        isolate_(reinterpret_cast<v8::Isolate*>(isolate)) { }
   void Run() {
     semaphore->Wait();
-    CHECK(!v8::V8::IsExecutionTerminating());
-    v8::V8::TerminateExecution();
+    CHECK(!v8::V8::IsExecutionTerminating(isolate_));
+    v8::V8::TerminateExecution(isolate_);
   }
+
+ private:
+  v8::Isolate* isolate_;
 };
 
 
@@ -196,8 +200,7 @@
 
 class LoopingThread : public v8::internal::Thread {
  public:
-  explicit LoopingThread(i::Isolate* isolate)
-      : Thread(isolate, "LoopingThread") { }
+  LoopingThread() : Thread("LoopingThread") { }
   void Run() {
     v8::Locker locker;
     v8::HandleScope scope;
@@ -221,29 +224,37 @@
 };
 
 
-// Test that multiple threads using V8 can be terminated from another
-// thread when using Lockers and preemption.
-TEST(TerminateMultipleV8Threads) {
+// Test that multiple threads using default isolate can be terminated
+// from another thread when using Lockers and preemption.
+TEST(TerminateMultipleV8ThreadsDefaultIsolate) {
   {
     v8::Locker locker;
     v8::V8::Initialize();
     v8::Locker::StartPreemption(1);
     semaphore = v8::internal::OS::CreateSemaphore(0);
   }
-  LoopingThread thread1(i::Isolate::Current());
-  thread1.Start();
-  LoopingThread thread2(i::Isolate::Current());
-  thread2.Start();
-  // Wait until both threads have signaled the semaphore.
-  semaphore->Wait();
-  semaphore->Wait();
+  const int kThreads = 2;
+  i::List<LoopingThread*> threads(kThreads);
+  for (int i = 0; i < kThreads; i++) {
+    threads.Add(new LoopingThread());
+  }
+  for (int i = 0; i < kThreads; i++) {
+    threads[i]->Start();
+  }
+  // Wait until all threads have signaled the semaphore.
+  for (int i = 0; i < kThreads; i++) {
+    semaphore->Wait();
+  }
   {
     v8::Locker locker;
-    v8::V8::TerminateExecution(thread1.GetV8ThreadId());
-    v8::V8::TerminateExecution(thread2.GetV8ThreadId());
+    for (int i = 0; i < kThreads; i++) {
+      v8::V8::TerminateExecution(threads[i]->GetV8ThreadId());
+    }
   }
-  thread1.Join();
-  thread2.Join();
+  for (int i = 0; i < kThreads; i++) {
+    threads[i]->Join();
+    delete threads[i];
+  }
 
   delete semaphore;
   semaphore = NULL;
diff --git a/test/cctest/test-threads.cc b/test/cctest/test-threads.cc
index c6d5cb0..985b9e5 100644
--- a/test/cctest/test-threads.cc
+++ b/test/cctest/test-threads.cc
@@ -65,7 +65,7 @@
 
 class ThreadA: public v8::internal::Thread {
  public:
-  explicit ThreadA(i::Isolate* isolate) : Thread(isolate, "ThreadA") { }
+  ThreadA() : Thread("ThreadA") { }
   void Run() {
     v8::Locker locker;
     v8::HandleScope scope;
@@ -101,7 +101,7 @@
 
 class ThreadB: public v8::internal::Thread {
  public:
-  explicit ThreadB(i::Isolate* isolate) : Thread(isolate, "ThreadB") { }
+  ThreadB() : Thread("ThreadB") { }
   void Run() {
     do {
       {
@@ -126,8 +126,8 @@
 TEST(JSFunctionResultCachesInTwoThreads) {
   v8::V8::Initialize();
 
-  ThreadA threadA(i::Isolate::Current());
-  ThreadB threadB(i::Isolate::Current());
+  ThreadA threadA;
+  ThreadB threadB;
 
   threadA.Start();
   threadB.Start();
@@ -144,7 +144,7 @@
                            i::List<i::ThreadId>* refs,
                            unsigned int thread_no,
                            i::Semaphore* semaphore)
-    : Thread(NULL, "ThreadRefValidationThread"),
+    : Thread("ThreadRefValidationThread"),
       refs_(refs), thread_no_(thread_no), thread_to_start_(thread_to_start),
       semaphore_(semaphore) {
   }
@@ -161,6 +161,7 @@
     }
     semaphore_->Signal();
   }
+
  private:
   i::List<i::ThreadId>* refs_;
   int thread_no_;
diff --git a/test/cctest/test-type-info.cc b/test/cctest/test-type-info.cc
deleted file mode 100644
index 59dd83d..0000000
--- a/test/cctest/test-type-info.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "cctest.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-TEST(ThreeBitRepresentation) {
-  // Numeric types and unknown should fit into the short
-  // representation.
-  CHECK(TypeInfo::ExpandedRepresentation(
-      TypeInfo::Unknown().ThreeBitRepresentation()).IsUnknown());
-  CHECK(TypeInfo::ExpandedRepresentation(
-      TypeInfo::Number().ThreeBitRepresentation()).IsNumber());
-  CHECK(TypeInfo::ExpandedRepresentation(
-      TypeInfo::Integer32().ThreeBitRepresentation()).IsInteger32());
-  CHECK(TypeInfo::ExpandedRepresentation(
-      TypeInfo::Smi().ThreeBitRepresentation()).IsSmi());
-  CHECK(TypeInfo::ExpandedRepresentation(
-      TypeInfo::Double().ThreeBitRepresentation()).IsDouble());
-
-  // Other types should map to unknown.
-  CHECK(TypeInfo::ExpandedRepresentation(
-      TypeInfo::Primitive().ThreeBitRepresentation()).IsUnknown());
-  CHECK(TypeInfo::ExpandedRepresentation(
-      TypeInfo::String().ThreeBitRepresentation()).IsUnknown());
-}
-
-} }  // namespace v8::internal
diff --git a/test/cctest/test-unbound-queue.cc b/test/cctest/test-unbound-queue.cc
index df5509e..3dc87ae 100644
--- a/test/cctest/test-unbound-queue.cc
+++ b/test/cctest/test-unbound-queue.cc
@@ -6,8 +6,6 @@
 #include "unbound-queue-inl.h"
 #include "cctest.h"
 
-namespace i = v8::internal;
-
 using i::UnboundQueue;
 
 
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index ce53f8e..e4f70df 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,8 +29,9 @@
 
 #include "v8.h"
 
-#include "platform.h"
 #include "cctest.h"
+#include "platform.h"
+#include "utils-inl.h"
 
 using namespace v8::internal;
 
@@ -194,3 +195,15 @@
   }
   result.Dispose();
 }
+
+
+TEST(SequenceCollectorRegression) {
+  SequenceCollector<char> collector(16);
+  collector.StartSequence();
+  collector.Add('0');
+  collector.AddBlock(
+      i::Vector<const char>("12345678901234567890123456789012", 32));
+  i::Vector<char> seq = collector.EndSequence();
+  CHECK_EQ(0, strncmp("0123456789012345678901234567890123",
+                      seq.start(), seq.length()));
+}
diff --git a/test/cctest/test-weakmaps.cc b/test/cctest/test-weakmaps.cc
new file mode 100644
index 0000000..db4db25
--- /dev/null
+++ b/test/cctest/test-weakmaps.cc
@@ -0,0 +1,149 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "global-handles.h"
+#include "snapshot.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+static Handle<JSWeakMap> AllocateJSWeakMap() {
+  Handle<Map> map = FACTORY->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
+  Handle<JSObject> weakmap_obj = FACTORY->NewJSObjectFromMap(map);
+  Handle<JSWeakMap> weakmap(JSWeakMap::cast(*weakmap_obj));
+  // Do not use handles for the hash table, it would make entries strong.
+  Object* table_obj = ObjectHashTable::Allocate(1)->ToObjectChecked();
+  ObjectHashTable* table = ObjectHashTable::cast(table_obj);
+  weakmap->set_table(table);
+  weakmap->set_next(Smi::FromInt(0));
+  return weakmap;
+}
+
+static void PutIntoWeakMap(Handle<JSWeakMap> weakmap,
+                           Handle<JSObject> key,
+                           int value) {
+  Handle<ObjectHashTable> table = PutIntoObjectHashTable(
+      Handle<ObjectHashTable>(weakmap->table()),
+      Handle<JSObject>(JSObject::cast(*key)),
+      Handle<Smi>(Smi::FromInt(value)));
+  weakmap->set_table(*table);
+}
+
+static int NumberOfWeakCalls = 0;
+static void WeakPointerCallback(v8::Persistent<v8::Value> handle, void* id) {
+  ASSERT(id == reinterpret_cast<void*>(1234));
+  NumberOfWeakCalls++;
+  handle.Dispose();
+}
+
+
+TEST(Weakness) {
+  LocalContext context;
+  v8::HandleScope scope;
+  Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
+  GlobalHandles* global_handles = Isolate::Current()->global_handles();
+
+  // Keep global reference to the key.
+  Handle<Object> key;
+  {
+    v8::HandleScope scope;
+    Handle<Map> map = FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+    Handle<JSObject> object = FACTORY->NewJSObjectFromMap(map);
+    key = global_handles->Create(*object);
+  }
+  CHECK(!global_handles->IsWeak(key.location()));
+
+  // Put entry into weak map.
+  {
+    v8::HandleScope scope;
+    PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), 23);
+  }
+  CHECK_EQ(1, weakmap->table()->NumberOfElements());
+
+  // Force a full GC.
+  HEAP->CollectAllGarbage(false);
+  CHECK_EQ(0, NumberOfWeakCalls);
+  CHECK_EQ(1, weakmap->table()->NumberOfElements());
+  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+
+  // Make the global reference to the key weak.
+  {
+    v8::HandleScope scope;
+    global_handles->MakeWeak(key.location(),
+                             reinterpret_cast<void*>(1234),
+                             &WeakPointerCallback);
+  }
+  CHECK(global_handles->IsWeak(key.location()));
+
+  // Force a full GC.
+  // Perform two consecutive GCs because the first one will only clear
+  // weak references whereas the second one will also clear weak maps.
+  HEAP->CollectAllGarbage(false);
+  CHECK_EQ(1, NumberOfWeakCalls);
+  CHECK_EQ(1, weakmap->table()->NumberOfElements());
+  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  HEAP->CollectAllGarbage(false);
+  CHECK_EQ(1, NumberOfWeakCalls);
+  CHECK_EQ(0, weakmap->table()->NumberOfElements());
+  CHECK_EQ(1, weakmap->table()->NumberOfDeletedElements());
+}
+
+
+TEST(Shrinking) {
+  LocalContext context;
+  v8::HandleScope scope;
+  Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
+
+  // Check initial capacity.
+  CHECK_EQ(32, weakmap->table()->Capacity());
+
+  // Fill up weak map to trigger capacity change.
+  {
+    v8::HandleScope scope;
+    Handle<Map> map = FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+    for (int i = 0; i < 32; i++) {
+      Handle<JSObject> object = FACTORY->NewJSObjectFromMap(map);
+      PutIntoWeakMap(weakmap, object, i);
+    }
+  }
+
+  // Check increased capacity.
+  CHECK_EQ(128, weakmap->table()->Capacity());
+
+  // Force a full GC.
+  CHECK_EQ(32, weakmap->table()->NumberOfElements());
+  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  HEAP->CollectAllGarbage(false);
+  CHECK_EQ(0, weakmap->table()->NumberOfElements());
+  CHECK_EQ(32, weakmap->table()->NumberOfDeletedElements());
+
+  // Check shrunk capacity.
+  CHECK_EQ(32, weakmap->table()->Capacity());
+}
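
Note: test-weakmaps.cc builds the internal JSWeakMap and ObjectHashTable directly so the entries stay weak, and the two consecutive GCs follow the comment above (the first clears weak handles, the second clears weak maps). A rough JS-level analogue, assuming a Harmony-style WeakMap is exposed to script in this build, would be:

    var wm = new WeakMap();   // assumption: WeakMap is available to script
    var key = {};
    wm.set(key, 23);          // entry survives only while `key` is strongly reachable
    wm.get(key);              // 23
    key = null;               // after this, a full GC may drop the entry entirely
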
diff --git a/test/cctest/testcfg.py b/test/cctest/testcfg.py
index a137275..b2eabc4 100644
--- a/test/cctest/testcfg.py
+++ b/test/cctest/testcfg.py
@@ -48,7 +48,11 @@
     return self.path[-1]
 
   def BuildCommand(self, name):
-    serialization_file = join('obj', 'test', self.mode, 'serdes')
+    serialization_file = ''
+    if exists(join(self.context.buildspace, 'obj', 'test', self.mode)):
+      serialization_file = join('obj', 'test', self.mode, 'serdes')
+    else:
+      serialization_file = join('obj', 'serdes')
     serialization_file += '_' + self.GetName()
     serialization_file = join(self.context.buildspace, serialization_file)
     serialization_file += ''.join(self.variant_flags).replace('-', '_')
@@ -78,10 +82,15 @@
     return ['cctests']
 
   def ListTests(self, current_path, path, mode, variant_flags):
-    executable = join('obj', 'test', mode, 'cctest')
+    executable = 'cctest'
     if utils.IsWindows():
       executable += '.exe'
     executable = join(self.context.buildspace, executable)
+    if not exists(executable):
+      executable = join('obj', 'test', mode, 'cctest')
+      if utils.IsWindows():
+        executable += '.exe'
+      executable = join(self.context.buildspace, executable)
     output = test.Execute([executable, '--list'], self.context)
     if output.exit_code != 0:
       print output.stdout
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index 1dc90d3..d095a24 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -75,11 +75,11 @@
 # NOT IMPLEMENTED: RegExp.prototype.multiline
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-215: UNIMPLEMENTED
 
-# All of the tests below marked SUBSETFAIL (in 15.2.3.4) fail because 
+# All of the tests below marked SUBSETFAIL (in 15.2.3.4) fail because
 # the tests assumes that objects can not have more properties
-# than those described in the spec - but according to spec they can 
+# than those described in the spec - but according to spec they can
 # have additional properties.
-# All compareArray calls in these tests could be exchanged with a 
+# All compareArray calls in these tests could be exchanged with a
 # isSubsetOfArray call (I will upload a patch to the es5conform site).
 
 # SUBSETFAIL
@@ -172,14 +172,6 @@
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
 
-# Bad test - the spec does not say anything about throwing errors
-# on calling Array.prototype.indexOf with undefined as argument.
-chapter15/15.4/15.4.4/15.4.4.14/15.4.4.14-1-1: FAIL_OK
-
-# Bad test - the spec does not say anything about throwing errors
-# on calling Array.prototype.indexOf with null as argument.
-chapter15/15.4/15.4.4/15.4.4.14/15.4.4.14-1-2: FAIL_OK
-
 # Bad test - the test at the end should be "i === true".
 chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-8-10: FAIL_OK
 
@@ -204,10 +196,6 @@
 # have no effect on the actual array on which reduceRight is called.
 chapter15/15.4/15.4.4/15.4.4.22/15.4.4.22-9-7: FAIL_OK
 
-# We do not implement trim correctly on null and undefined.
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-1-1: FAIL
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-1-2: FAIL
-
 # We do not correctly recognize \uFEFF as whitespace
 chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-10: FAIL
 chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-18: FAIL
diff --git a/test/es5conform/testcfg.py b/test/es5conform/testcfg.py
index af74b8c..b6a17d9 100644
--- a/test/es5conform/testcfg.py
+++ b/test/es5conform/testcfg.py
@@ -97,7 +97,7 @@
     return tests
 
   def GetBuildRequirements(self):
-    return ['sample', 'sample=shell']
+    return ['d8']
 
   def GetTestStatus(self, sections, defs):
     status_file = join(self.root, 'es5conform.status')
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/message/regress/regress-1527.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/message/regress/regress-1527.js
index aa93b25..682e386 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/message/regress/regress-1527.js
@@ -25,12 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var o1 = {foo: 'bar'};
+var o2 = {
+  1: 'blah',
+  2: o1.foo,
+  3: foo
 }
-
-test();
diff --git a/test/message/regress/regress-1527.out b/test/message/regress/regress-1527.out
new file mode 100644
index 0000000..dc17fb3
--- /dev/null
+++ b/test/message/regress/regress-1527.out
@@ -0,0 +1,32 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*%(basename)s:32: ReferenceError: foo is not defined
+  3: foo
+     ^
+ReferenceError: foo is not defined
+    at *%(basename)s:32:6
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/message/replacement-marker-as-argument.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/message/replacement-marker-as-argument.js
index aa93b25..9036654 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/message/replacement-marker-as-argument.js
@@ -25,12 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+"use strict";
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+var o = { get "%3" (){} };
+o["%3"] = 10;
\ No newline at end of file
diff --git a/test/message/replacement-marker-as-argument.out b/test/message/replacement-marker-as-argument.out
new file mode 100644
index 0000000..a91fe5b
--- /dev/null
+++ b/test/message/replacement-marker-as-argument.out
@@ -0,0 +1,32 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*%(basename)s:31: TypeError: Cannot set property %%3 of #<Object> which has only a getter
+o["%%3"] = 10;
+        ^
+TypeError: Cannot set property %%3 of #<Object> which has only a getter
+    at *%(basename)s:31:9
diff --git a/test/message/testcfg.py b/test/message/testcfg.py
index aabbfef..af467e6 100644
--- a/test/message/testcfg.py
+++ b/test/message/testcfg.py
@@ -125,7 +125,7 @@
     return result
 
   def GetBuildRequirements(self):
-    return ['sample', 'sample=shell']
+    return ['d8']
 
   def GetTestStatus(self, sections, defs):
     status_file = join(self.root, 'message.status')
diff --git a/test/mjsunit/apply.js b/test/mjsunit/apply.js
index 613d37d..c166110 100644
--- a/test/mjsunit/apply.js
+++ b/test/mjsunit/apply.js
@@ -33,43 +33,43 @@
   return a;
 }
 
-assertTrue(this === f0.apply(), "1-0");
+assertSame(this, f0.apply(), "1-0");
 
-assertTrue(this === f0.apply(this), "2a");
-assertTrue(this === f0.apply(this, new Array(1)), "2b");
-assertTrue(this === f0.apply(this, new Array(2)), "2c");
-assertTrue(this === f0.apply(this, new Array(4242)), "2d");
+assertSame(this, f0.apply(this), "2a");
+assertSame(this, f0.apply(this, new Array(1)), "2b");
+assertSame(this, f0.apply(this, new Array(2)), "2c");
+assertSame(this, f0.apply(this, new Array(4242)), "2d");
 
-assertTrue(this === f0.apply(null), "3a");
-assertTrue(this === f0.apply(null, new Array(1)), "3b");
-assertTrue(this === f0.apply(null, new Array(2)), "3c");
-assertTrue(this === f0.apply(this, new Array(4242)), "3d");
+assertSame(this, f0.apply(null), "3a");
+assertSame(this, f0.apply(null, new Array(1)), "3b");
+assertSame(this, f0.apply(null, new Array(2)), "3c");
+assertSame(this, f0.apply(this, new Array(4242)), "3d");
 
-assertTrue(this === f0.apply(void 0), "4a");
-assertTrue(this === f0.apply(void 0, new Array(1)), "4b");
-assertTrue(this === f0.apply(void 0, new Array(2)), "4c");
+assertSame(this, f0.apply(void 0), "4a");
+assertSame(this, f0.apply(void 0, new Array(1)), "4b");
+assertSame(this, f0.apply(void 0, new Array(2)), "4c");
 
-assertTrue(void 0 === f1.apply(), "1-1");
+assertEquals(void 0, f1.apply(), "1-1");
 
-assertTrue(void 0 === f1.apply(this), "5a");
-assertTrue(void 0 === f1.apply(this, new Array(1)), "5b");
-assertTrue(void 0 === f1.apply(this, new Array(2)), "5c");
-assertTrue(void 0 === f1.apply(this, new Array(4242)), "5d");
-assertTrue(42 === f1.apply(this, new Array(42, 43)), "5e");
+assertEquals(void 0, f1.apply(this), "5a");
+assertEquals(void 0, f1.apply(this, new Array(1)), "5b");
+assertEquals(void 0, f1.apply(this, new Array(2)), "5c");
+assertEquals(void 0, f1.apply(this, new Array(4242)), "5d");
+assertEquals(42, f1.apply(this, new Array(42, 43)), "5e");
 assertEquals("foo", f1.apply(this, new Array("foo", "bar", "baz", "bo")), "5f");
 
-assertTrue(void 0 === f1.apply(null), "6a");
-assertTrue(void 0 === f1.apply(null, new Array(1)), "6b");
-assertTrue(void 0 === f1.apply(null, new Array(2)), "6c");
-assertTrue(void 0 === f1.apply(null, new Array(4242)), "6d");
-assertTrue(42 === f1.apply(null, new Array(42, 43)), "6e");
+assertEquals(void 0, f1.apply(null), "6a");
+assertEquals(void 0, f1.apply(null, new Array(1)), "6b");
+assertEquals(void 0, f1.apply(null, new Array(2)), "6c");
+assertEquals(void 0, f1.apply(null, new Array(4242)), "6d");
+assertEquals(42, f1.apply(null, new Array(42, 43)), "6e");
 assertEquals("foo", f1.apply(null, new Array("foo", "bar", "baz", "bo")), "6f");
 
-assertTrue(void 0 === f1.apply(void 0), "7a");
-assertTrue(void 0 === f1.apply(void 0, new Array(1)), "7b");
-assertTrue(void 0 === f1.apply(void 0, new Array(2)), "7c");
-assertTrue(void 0 === f1.apply(void 0, new Array(4242)), "7d");
-assertTrue(42 === f1.apply(void 0, new Array(42, 43)), "7e");
+assertEquals(void 0, f1.apply(void 0), "7a");
+assertEquals(void 0, f1.apply(void 0, new Array(1)), "7b");
+assertEquals(void 0, f1.apply(void 0, new Array(2)), "7c");
+assertEquals(void 0, f1.apply(void 0, new Array(4242)), "7d");
+assertEquals(42, f1.apply(void 0, new Array(42, 43)), "7e");
 assertEquals("foo", f1.apply(void 0, new Array("foo", "bar", "ba", "b")), "7f");
 
 var arr = new Array(42, "foo", "fish", "horse");
@@ -108,7 +108,7 @@
 assertEquals("bar42foofishhorse", s.apply("bar", arr), "apply to string");
 
 function al() {
-  assertEquals(345, this);
+  assertEquals(Object(345), this);
   return arguments.length + arguments[arguments.length - 1];
 }
 
@@ -186,7 +186,7 @@
 primes[1] = holey;
 assertThrows("String.prototype.concat.apply.apply('foo', primes)");
 assertEquals("morseper",
-    String.prototype.concat.apply.apply(String.prototype.concat, primes), 
+    String.prototype.concat.apply.apply(String.prototype.concat, primes),
     "morseper-prime");
 
 delete(Array.prototype["1"]);
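
Note: the apply.js hunks above replace `assertTrue(a === b)` with `assertSame`/`assertEquals` and expect boxed receivers such as `Object(345)`. A minimal sketch of the sloppy-mode receiver boxing those expectations rely on (not part of the patch):

    function whoAmI() { return this; }
    var boxed = whoAmI.apply(345);   // sloppy mode boxes a primitive receiver
    // typeof boxed             -> 'object'
    // boxed instanceof Number  -> true
    // boxed === 345            -> false, so identity checks need Object(345), not 345
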
diff --git a/test/mjsunit/argument-assigned.js b/test/mjsunit/argument-assigned.js
new file mode 100644
index 0000000..e30c881
--- /dev/null
+++ b/test/mjsunit/argument-assigned.js
@@ -0,0 +1,133 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+(function() {
+  function f(x) {
+    var arguments = [ 1, 2, 3 ];
+    return x;
+  }
+  assertEquals(7, f(7));
+})();
+
+
+(function() {
+  function f(x) {
+    arguments[0] = 991;
+    var arguments = [ 1, 2, 3 ];
+    return x;
+  }
+  assertEquals(991, f(7));
+})();
+
+
+(function() {
+  function f(x) {
+    arguments[0] = 991;
+    for (var i = 0; i < 10; i++) {
+      if (i == 5) {
+        var arguments = [ 1, 2, 3 ];
+      }
+    }
+    return x;
+  }
+  assertEquals(991, f(7));
+})();
+
+
+(function() {
+  function f(x, s) {
+    eval(s);
+    return x;
+  }
+  assertEquals(7, f(7, "var arguments = [ 1, 2, 3 ];"));
+})();
+
+
+(function() {
+  function f(x, s) {
+    var tmp = arguments[0];
+    eval(s);
+    return tmp;
+  }
+  assertEquals(7, f(7, "var arguments = [ 1, 2, 3 ];"));
+})();
+
+
+(function() {
+  function f(x, s) {
+    var tmp = arguments[0];
+    eval(s);
+    return tmp;
+  }
+  assertEquals(7, f(7, ""));
+})();
+
+
+(function() {
+  function f(x, s) {
+    var tmp = arguments[0];
+    eval(s);
+    return x;
+  }
+  assertEquals(7, f(7, "var arguments = [ 1, 2, 3 ];"));
+})();
+
+
+(function() {
+  function f(x, s) {
+    var tmp = arguments[0];
+    eval(s);
+    return x;
+  }
+  assertEquals(7, f(7, ""));
+})();
+
+
+(function() {
+  function f(x) {
+    function g(y) {
+      x = y;
+    }
+    arguments = {};
+    g(991);
+    return x;
+  }
+  assertEquals(991, f(7));
+})();
+
+
+(function() {
+  function f(x) {
+    function g(y, s) {
+      eval(s);
+    }
+    arguments = {};
+    g(991, "x = y;");
+    return x;
+  }
+  assertEquals(991, f(7));
+})();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/argument-named-arguments.js
similarity index 66%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/argument-named-arguments.js
index aa93b25..2845102 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/argument-named-arguments.js
@@ -25,12 +25,43 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Tests to verify proper arguments handling if the arguments
+// variable is declared as a parameter or local variable.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+function e(a) {
+  assertEquals(9, a.length);
+  assertEquals("arguments", a);
+};
 
-test();
+e("arguments");
+
+
+function f(arguments) {
+  assertEquals(9, arguments.length);
+  assertEquals("arguments", arguments);
+};
+
+f("arguments");
+
+
+function g(x) {
+  var arguments;
+  assertEquals("arguments", x);
+  assertEquals(1, arguments.length);
+  assertEquals("[object Arguments]", '' + arguments);
+};
+
+g("arguments");
+
+
+function h(x) {
+  assertEquals("arguments", x);
+  assertEquals(1, arguments.length);
+  assertEquals("[object Arguments]", '' + arguments);
+  var arguments = "foobar";
+  assertEquals("arguments", x);
+  assertEquals(6, arguments.length);
+  assertEquals("foobar", '' + arguments);
+};
+
+h("arguments");
diff --git a/test/mjsunit/arguments-apply.js b/test/mjsunit/arguments-apply.js
index 5a91228..48c4234 100644
--- a/test/mjsunit/arguments-apply.js
+++ b/test/mjsunit/arguments-apply.js
@@ -73,11 +73,11 @@
   return ReturnReceiver.apply(receiver, arguments);
 }
 
-assertEquals(42, NonObjectReceiver(42));
+assertEquals(Object(42), NonObjectReceiver(42));
 assertEquals("object", typeof NonObjectReceiver(42));
-assertTrue(NonObjectReceiver(42) instanceof Number);
-assertTrue(this === NonObjectReceiver(null));
-assertTrue(this === NonObjectReceiver(void 0));
+assertInstanceof(NonObjectReceiver(42), Number);
+assertSame(this, NonObjectReceiver(null));
+assertSame(this, NonObjectReceiver(void 0));
 
 
 function FunctionReceiver() {
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/arguments-escape.js
similarity index 77%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/arguments-escape.js
index aa93b25..042100c 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/arguments-escape.js
@@ -25,12 +25,35 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function foo(x) {
+  var a = arguments;
+  function bar(i) {
+    assertEquals(i, ++a[0]);
+    assertEquals(i, x);
+  };
+  bar(1);
+  bar(2);
+  bar(3);
+  return bar;
 }
+var baz = foo(0);
+baz(4);
+baz(5);
+baz(6);
 
-test();
+// Test writing a non-smi.
+function foo2(x) {
+  var a = arguments;
+  function bar2(i) {
+    assertEquals(i, ++a[0]);
+    assertEquals(i, x);
+  };
+  bar2(1.5);
+  bar2(2.5);
+  bar2(3.5);
+  return bar2;
+}
+var baz2 = foo2(0.5);
+baz2(4.5);
+baz2(5.5);
+baz2(6.5);
diff --git a/test/mjsunit/arguments-opt.js b/test/mjsunit/arguments-opt.js
index c74fc75..b8280b4 100644
--- a/test/mjsunit/arguments-opt.js
+++ b/test/mjsunit/arguments-opt.js
@@ -79,36 +79,38 @@
 assertTrue(typeof(A(10000, 0)) == 'undefined');
 
 // String access.
-assertEquals(0, A('0'));
-assertEquals(0, A('0',1));
+assertEquals('0', A('0'));
+assertEquals('0', A('0',1));
 assertEquals(2, A('1',2));
 assertEquals(2, A('1',2,3,4,5));
 assertEquals(5, A('4',2,3,4,5));
-assertTrue(typeof A('1') == 'undefined');
-assertTrue(typeof A('3',2,1) == 'undefined');
+assertEquals('undefined', typeof A('1'));
+assertEquals('undefined', typeof A('3',2,1));
 assertEquals(A, A('callee'));
 assertEquals(1, A('length'));
 assertEquals(2, A('length',2));
 assertEquals(5, A('length',2,3,4,5));
 assertEquals({}.toString, A('toString'));
 assertEquals({}.isPrototypeOf, A('isPrototypeOf'));
-assertTrue(typeof A('xxx') == 'undefined');
+assertEquals('undefined', typeof A('xxx'));
 
 // Object access.
 function O(key) {
   return { toString: function() { return key; } };
 }
 
-assertEquals(0, A(O(0)));
-assertEquals(0, A(O(0),1));
+var O0 = O(0);
+assertSame(O0, A(O0));
+assertSame(O0, A(O0,1));
 assertEquals(2, A(O(1),2));
 assertEquals(2, A(O(1),2,3,4,5));
 assertEquals(5, A(O(4),2,3,4,5));
 assertTrue(typeof A(O(1)) == 'undefined');
 assertTrue(typeof A(O(3),2,1) == 'undefined');
 
-assertEquals(0, A(O('0')));
-assertEquals(0, A(O('0'),1));
+O0 = O('0');
+assertSame(O0, A(O0));
+assertSame(O0, A(O0,1));
 assertEquals(2, A(O('1'),2));
 assertEquals(2, A(O('1'),2,3,4,5));
 assertEquals(5, A(O('4'),2,3,4,5));
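
Note: the updated expectations in arguments-opt.js are plain property lookups on the arguments object. Assuming `A` is the usual helper of this test, restated hypothetically below, the new values fall out directly:

    function A(key) { return arguments[key]; }   // hypothetical restatement of the test helper
    A('0');          // '0', because arguments[0] is the key string itself
    A('1', 2);       // 2
    A('length');     // 1, only one argument was passed
    A('callee');     // A
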
diff --git a/test/mjsunit/arguments.js b/test/mjsunit/arguments.js
index 0302739..78b7722 100644
--- a/test/mjsunit/arguments.js
+++ b/test/mjsunit/arguments.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,8 +50,6 @@
 assertEquals(2, argc2(1, 2));
 assertEquals(3, argc2(1, 2, 3));
 
-
-
 var index;
 
 function argv0() {
@@ -95,3 +93,98 @@
 // an unexpected number of arguments works.
 function f(a) { return arguments.length; };
 assertEquals(3, f(1, 2, 3));
+
+function f1(x, y) {
+  function g(a) {
+    a[0] = "three";
+    return a.length;
+  }
+  var l = g(arguments);
+  y = 5;
+  assertEquals(2, l);
+  assertEquals("three", x);
+  assertEquals(5, y);
+}
+f1(3, "five");
+
+
+function f2() {
+  if (arguments[0] > 0) {
+    return arguments.callee(arguments[0] - 1) + arguments[0];
+  }
+  return 0;
+}
+assertEquals(55, f2(10));
+
+
+function f3() {
+  assertEquals(0, arguments.length);
+}
+f3();
+
+
+function f4() {
+  var arguments = 0;
+  assertEquals(void 0, arguments.length);
+}
+f4();
+
+
+function f5(x, y, z) {
+  function g(a) {
+    x = "two";
+    y = "three";
+    a[1] = "drei";
+    a[2] = "fuenf";
+  };
+
+  g(arguments);
+  assertEquals("two", x);
+  assertEquals("drei", y);
+  assertEquals("fuenf", z);
+}
+f5(2, 3, 5);
+
+
+function f6(x, y) {
+  x = "x";
+  arguments[1] = "y";
+  return [arguments.length, arguments[0], y, arguments[2]];
+}
+
+assertArrayEquals([0, void 0, void 0, void 0], f6());
+assertArrayEquals([1, "x", void 0, void 0], f6(1));
+assertArrayEquals([2, "x", "y", void 0], f6(9, 17));
+assertArrayEquals([3, "x", "y", 7], f6(3, 5, 7));
+assertArrayEquals([4, "x", "y", "c"], f6("a", "b", "c", "d"));
+
+
+function list_args(a) {
+  assertEquals("function", typeof a.callee);
+  var result = [];
+  result.push(a.length);
+  for (i = 0; i < a.length; i++) result.push(a[i]);
+  return result;
+}
+
+
+function f1(x, y) {
+  function g(p) {
+    x = p;
+  }
+  g(y);
+  return list_args(arguments);
+}
+
+assertArrayEquals([0], f1());
+assertArrayEquals([1, void 0], f1(3));
+assertArrayEquals([2, 5, 5], f1(3, 5));
+assertArrayEquals([3, 5, 5, 7], f1(3, 5, 7));
+
+// Check out of bounds behavior.
+function arg_get(x) { return arguments[x]; }
+function arg_del(x) { return delete arguments[x]; }
+function arg_set(x) { return (arguments[x] = 117); }
+assertEquals(undefined, arg_get(0xFFFFFFFF));
+assertEquals(true, arg_del(0xFFFFFFFF));
+assertEquals(117, arg_set(0xFFFFFFFF));
\ No newline at end of file
diff --git a/test/mjsunit/array-constructor.js b/test/mjsunit/array-constructor.js
index 063ccde..bf5d3d6 100644
--- a/test/mjsunit/array-constructor.js
+++ b/test/mjsunit/array-constructor.js
@@ -73,7 +73,7 @@
   a = new Array(0, 1, 2, 3, 4, 5, 6, 7, 8);
   assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7, 8], a);
   a = new Array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
-  assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], a);  
+  assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], a);
 }
 
 
@@ -91,9 +91,9 @@
   var a = eval('[' + str + ']');
   var b = eval('new Array(' + str + ')')
   var c = eval('Array(' + str + ')')
-  assertEquals(n, a.length);  
-  assertArrayEquals(a, b);  
-  assertArrayEquals(a, c);  
+  assertEquals(n, a.length);
+  assertArrayEquals(a, b);
+  assertArrayEquals(a, c);
 }
 
 
diff --git a/test/mjsunit/array-iteration.js b/test/mjsunit/array-iteration.js
index f11b51c..0ee2e6e 100644
--- a/test/mjsunit/array-iteration.js
+++ b/test/mjsunit/array-iteration.js
@@ -134,7 +134,7 @@
   a = [0,1];
   assertFalse(a.every(function(n, index, array) { array[index] = n + 1; return n == 1;}));
   assertArrayEquals([1,1], a);
-  
+
   // Only loop through initial part of array even though elements are
   // added.
   a = [1,1];
@@ -156,23 +156,23 @@
 //
 (function() {
   var a = [0,1,2,3,4];
-  
+
   // Simple use.
   var result = [1,2,3,4,5];
   assertArrayEquals(result, a.map(function(n) { return n + 1; }));
   assertEquals(a, a);
-  
+
   // Use specified object as this object when calling the function.
   var o = { delta: 42 }
   result = [42,43,44,45,46];
   assertArrayEquals(result, a.map(function(n) { return this.delta + n; }, o));
-  
+
   // Modify original array.
   a = [0,1,2,3,4];
   result = [1,2,3,4,5];
   assertArrayEquals(result, a.map(function(n, index, array) { array[index] = n + 1; return n + 1;}));
   assertArrayEquals(result, a);
-  
+
   // Only loop through initial part of array even though elements are
   // added.
   a = [0,1,2,3,4];
@@ -197,7 +197,7 @@
   // Simple use.
   assertTrue(a.some(function(n) { return n == 3}));
   assertFalse(a.some(function(n) { return n == 5}));
-  
+
   // Use specified object as this object when calling the function.
   var o = { element: 42 };
   a = [1,42,3];
diff --git a/test/mjsunit/array-join.js b/test/mjsunit/array-join.js
index ddd1496..5c837a5 100644
--- a/test/mjsunit/array-join.js
+++ b/test/mjsunit/array-join.js
@@ -44,7 +44,8 @@
 assertEquals('1,2**********3**********4**********5,6**********', a.join('**********'));
 
 // Replace array.prototype.toString.
-Array.prototype.toString = function() { return "array"; }
+var oldToString = Array.prototype.toString;
+Array.prototype.toString = function() { return "array"; };
 assertEquals('array34arrayarray', a.join(''));
 assertEquals('array*3*4*array*array', a.join('*'));
 assertEquals('array**3**4**array**array', a.join('**'));
@@ -52,7 +53,7 @@
 assertEquals('array********3********4********array********array', a.join('********'));
 assertEquals('array**********3**********4**********array**********array', a.join('**********'));
 
-Array.prototype.toString = function() { throw 42; }
+Array.prototype.toString = function() { throw 42; };
 assertThrows("a.join('')");
 assertThrows("a.join('*')");
 assertThrows("a.join('**')");
@@ -60,7 +61,7 @@
 assertThrows("a.join('********')");
 assertThrows("a.join('**********')");
 
-Array.prototype.toString = function() { return "array"; }
+Array.prototype.toString = function() { return "array"; };
 assertEquals('array34arrayarray', a.join(''));
 assertEquals('array*3*4*array*array', a.join('*'));
 assertEquals('array**3**4**array**array', a.join('**'));
@@ -68,3 +69,25 @@
 assertEquals('array********3********4********array********array', a.join('********'));
 assertEquals('array**********3**********4**********array**********array', a.join('**********'));
 
+// Restore original toString.
+delete Array.prototype.toString;
+if (Array.prototype.toString != oldToString) {
+  Array.prototype.toString = oldToString;
+}
+
+var a = new Array(123123123);
+assertEquals(123123122, String(a).length);
+assertEquals(123123122, a.join(",").length);
+assertEquals(246246244, a.join("oo").length);
+
+a = new Array(Math.pow(2,32) - 1);  // Max length.
+assertEquals("", a.join(""));
+a[123123123] = "o";
+a[1255215215] = "p";
+assertEquals("op", a.join(""));
+
+a = new Array(100001);
+for (var i = 0; i < a.length; i++) a[i] = undefined;
+a[5] = "ab";
+a[90000] = "cd";
+assertEquals("abcd", a.join(""));  // Must not throw.
\ No newline at end of file
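
Note: the new join tests rely on simple length arithmetic: joining an n-element array of holes with a separator of length s yields (n - 1) * s characters, since holes stringify to ''. For the values used above:

    var n = 123123123;
    (n - 1) * 1;   // 123123122 -> String(a).length and a.join(',').length
    (n - 1) * 2;   // 246246244 -> a.join('oo').length
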
diff --git a/test/mjsunit/array-length.js b/test/mjsunit/array-length.js
index 967d720..16867db 100644
--- a/test/mjsunit/array-length.js
+++ b/test/mjsunit/array-length.js
@@ -102,7 +102,7 @@
 
 
 var a = new Array();
-assertEquals(12, a.length = new Number(12));
+assertEquals(Object(12), a.length = new Number(12));
 assertEquals(12, a.length);
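
Note: the changed expectation above hinges on a subtlety worth spelling out: an assignment expression evaluates to the assigned value (the Number wrapper), while the array's length is the coerced primitive. A small sketch, independent of mjsunit:

    var a = [];
    var r = (a.length = new Number(12));
    // r instanceof Number -> true, hence the expected value Object(12)
    // a.length            -> 12
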
 
 
diff --git a/test/mjsunit/array-reduce.js b/test/mjsunit/array-reduce.js
index 83d9023..1e96188 100755
--- a/test/mjsunit/array-reduce.js
+++ b/test/mjsunit/array-reduce.js
@@ -411,67 +411,77 @@
 
 // Test error conditions:
 
+var exception = false;
 try {
   [1].reduce("not a function");
-  assertUnreachable("Reduce callback not a function not throwing");
 } catch (e) {
+  exception = true;
   assertTrue(e instanceof TypeError,
              "reduce callback not a function not throwing TypeError");
   assertEquals("called_non_callable", e.type,
                "reduce non function TypeError type");
 }
+assertTrue(exception);
 
+exception = false;
 try {
   [1].reduceRight("not a function");
-  assertUnreachable("ReduceRight callback not a function not throwing");
 } catch (e) {
+  exception = true;
   assertTrue(e instanceof TypeError,
              "reduceRight callback not a function not throwing TypeError");
   assertEquals("called_non_callable", e.type,
                "reduceRight non function TypeError type");
 }
+assertTrue(exception);
 
-
+exception = false;
 try {
   [].reduce(sum);
-  assertUnreachable("Reduce no initial value not throwing");
 } catch (e) {
+  exception = true;
   assertTrue(e instanceof TypeError,
              "reduce no initial value not throwing TypeError");
   assertEquals("reduce_no_initial", e.type,
                "reduce no initial TypeError type");
 }
+assertTrue(exception);
 
+exception = false;
 try {
   [].reduceRight(sum);
-  assertUnreachable("ReduceRight no initial value not throwing");
 } catch (e) {
+  exception = true;
   assertTrue(e instanceof TypeError,
              "reduceRight no initial value not throwing TypeError");
   assertEquals("reduce_no_initial", e.type,
                "reduceRight no initial TypeError type");
 }
+assertTrue(exception);
 
-
+exception = false;
 try {
   [,,,].reduce(sum);
-  assertUnreachable("Reduce sparse no initial value not throwing");
 } catch (e) {
+  exception = true;
   assertTrue(e instanceof TypeError,
              "reduce sparse no initial value not throwing TypeError");
   assertEquals("reduce_no_initial", e.type,
                "reduce no initial TypeError type");
 }
+assertTrue(exception);
 
+exception = false;
 try {
   [,,,].reduceRight(sum);
-  assertUnreachable("ReduceRight sparse no initial value not throwing");
 } catch (e) {
+  exception = true;
   assertTrue(e instanceof TypeError,
              "reduceRight sparse no initial value not throwing TypeError");
   assertEquals("reduce_no_initial", e.type,
                "reduceRight no initial TypeError type");
 }
+assertTrue(exception);
 
 
 // Array changing length
@@ -511,4 +521,3 @@
             [3, 3, 2, [1, 2, 3, 4, 4, 5], 6],
             [6, 4, 3, [1, 2, 3, 4, 4, 5, 6], 10],
            ], arr, extender, 0);
-
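
Note: the error-condition hunks above replace assertUnreachable with an explicit exception flag so that a silently missing throw still fails the test. The pattern in isolation (plain JS, no mjsunit helpers):

    var threw = false;
    try {
      [].reduce(function(a, b) { return a + b; });  // empty array, no initial value
    } catch (e) {
      threw = e instanceof TypeError;
    }
    if (!threw) throw new Error('expected a TypeError');
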
diff --git a/test/mjsunit/array-sort.js b/test/mjsunit/array-sort.js
index 7060c5f..3fa623a 100644
--- a/test/mjsunit/array-sort.js
+++ b/test/mjsunit/array-sort.js
@@ -70,30 +70,59 @@
   a.sort();
   assertArrayEquals([-1000000000, -10000000000, -1000000001, 1000000000, 10000000000, 1000000001], a);
 
+  // Other cases are tested implicitly in TestSmiLexicographicCompare.
+}
 
-  for (var xb = 1; xb <= 1000 * 1000 * 1000; xb *= 10) {
+TestNumberSort();
+
+function TestSmiLexicographicCompare() {
+
+  assertFalse(%_IsSmi(2147483648), 'Update test for >32 bit Smi');
+
+  // Collect a list of interesting Smis.
+  var seen = {};
+  var smis = [];
+  function add(x) {
+    if ((x | 0) == x) {
+      x = x | 0;  // Canonicalizes to Smi if 32-bit signed and fits in Smi.
+    }
+    if (%_IsSmi(x) && !seen[x]) {
+      seen[x] = 1;
+      smis.push(x);
+    }
+  }
+  function addSigned(x) {
+    add(x);
+    add(-x);
+  }
+
+  var BIGGER_THAN_ANY_SMI = 10 * 1000 * 1000 * 1000;
+  for (var xb = 1; xb <= BIGGER_THAN_ANY_SMI; xb *= 10) {
     for (var xf = 0; xf <= 9; xf++) {
       for (var xo = -1; xo <= 1; xo++) {
-        for (var yb = 1; yb <= 1000 * 1000 * 1000; yb *= 10) {
-          for (var yf = 0; yf <= 9; yf++) {
-            for (var yo = -1; yo <= 1; yo++) {
-              var x = xb * xf + xo;
-              var y = yb * yf + yo;
-              if (!%_IsSmi(x)) continue;
-              if (!%_IsSmi(y)) continue;
-              var lex = %SmiLexicographicCompare(x, y);
-              if (lex < 0) lex = -1;
-              if (lex > 0) lex = 1;
-              assertEquals(lex, (x == y) ? 0 : ((x + "") < (y + "") ? -1 : 1), x + " < " + y);
-            }
-          }
-        }
+        addSigned(xb * xf + xo);
       }
     }
   }
+
+  for (var yb = 1; yb <= BIGGER_THAN_ANY_SMI; yb *= 2) {
+    for (var yo = -2; yo <= 2; yo++) {
+      addSigned(yb + yo);
+    }
+  }
+
+  for (var i = 0; i < smis.length; i++) {
+    for (var j = 0; j < smis.length; j++) {
+      var x = smis[i];
+      var y = smis[j];
+      var lex = %SmiLexicographicCompare(x, y);
+      var expected = (x == y) ? 0 : ((x + "") < (y + "") ? -1 : 1);
+      assertEquals(lex, expected, x + " < " + y);
+    }
+  }
 }
 
-TestNumberSort();
+TestSmiLexicographicCompare();
 
 
 // Test lexicographical string sorting.
@@ -363,7 +392,7 @@
 
 // Test that sort calls compare function with global object as receiver,
 // and with only elements of the array as arguments.
-function o(v) { 
+function o(v) {
   return {__proto__: o.prototype, val: v};
 }
 var arr = [o(1), o(2), o(4), o(8), o(16), o(32), o(64), o(128), o(256), o(-0)];
@@ -374,4 +403,4 @@
   assertTrue(b instanceof o);
   return a.val - b.val;
 }
-arr.sort(cmpTest);
\ No newline at end of file
+arr.sort(cmpTest);
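
Note: TestSmiLexicographicCompare checks %SmiLexicographicCompare against string comparison of the decimal representations. The expected ordering, written out as a plain-JS reference mirroring the `expected` computation in the loop above:

    function lexCompare(x, y) {
      if (x == y) return 0;
      return (x + '') < (y + '') ? -1 : 1;
    }
    lexCompare(9, 10);    // 1, since '9' sorts after '10' lexicographically
    lexCompare(100, 11);  // -1, since '100' sorts before '11'
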
diff --git a/test/mjsunit/assert-opt-and-deopt.js b/test/mjsunit/assert-opt-and-deopt.js
new file mode 100644
index 0000000..c9adb5b
--- /dev/null
+++ b/test/mjsunit/assert-opt-and-deopt.js
@@ -0,0 +1,181 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+/**
+ * This class shows how to use %GetOptimizationCount() and
+ * %GetOptimizationStatus() to infer information about opts and deopts.
+ * It might be nice to put this into mjsunit.js, but mjsunit.js does not
+ * depend on the --allow-natives-syntax flag so far.
+ */
+function OptTracker() {
+  this.opt_counts_ = {};
+}
+
+/**
+ * The possible optimization states of a function. Must be in sync with the
+ * return values of Runtime_GetOptimizationStatus() in runtime.cc!
+ * @enum {number}
+ */
+OptTracker.OptimizationState = {
+    YES: 1,
+    NO: 2,
+    ALWAYS: 3,
+    NEVER: 4
+};
+
+/**
+ * Always call this at the beginning of your test, once for each function
+ * that you later want to track de/optimizations for. It is necessary because
+ * tests are sometimes executed several times in a row, and you want to
+ * disregard counts from previous runs.
+ */
+OptTracker.prototype.CheckpointOptCount = function(func) {
+  this.opt_counts_[func] = %GetOptimizationCount(func);
+};
+
+OptTracker.prototype.AssertOptCount = function(func, optcount) {
+  if (this.DisableAsserts_(func)) {
+    return;
+  }
+  assertEquals(optcount, this.GetOptCount_(func));
+};
+
+OptTracker.prototype.AssertDeoptCount = function(func, deopt_count) {
+  if (this.DisableAsserts_(func)) {
+    return;
+  }
+  assertEquals(deopt_count, this.GetDeoptCount_(func));
+};
+
+OptTracker.prototype.AssertDeoptHappened = function(func, expect_deopt) {
+  if (this.DisableAsserts_(func)) {
+    return;
+  }
+  if (expect_deopt) {
+    assertTrue(this.GetDeoptCount_(func) > 0);
+  } else {
+    assertEquals(0, this.GetDeoptCount_(func));
+  }
+}
+
+OptTracker.prototype.AssertIsOptimized = function(func, expect_optimized) {
+  if (this.DisableAsserts_(func)) {
+    return;
+  }
+  var raw_optimized = %GetOptimizationStatus(func);
+  if (expect_optimized) {
+    assertEquals(OptTracker.OptimizationState.YES, raw_optimized);
+  } else {
+    assertEquals(OptTracker.OptimizationState.NO, raw_optimized);
+  }
+}
+
+/**
+ * @private
+ */
+OptTracker.prototype.GetOptCount_ = function(func) {
+  var raw_count = %GetOptimizationCount(func);
+  if (func in this.opt_counts_) {
+    var checkpointed_count = this.opt_counts_[func];
+    return raw_count - checkpointed_count;
+  }
+  return raw_count;
+}
+
+/**
+ * @private
+ */
+OptTracker.prototype.GetDeoptCount_ = function(func) {
+  var count = this.GetOptCount_(func);
+  if (%GetOptimizationStatus(func) == OptTracker.OptimizationState.YES) {
+    count -= 1;
+  }
+  return count;
+}
+
+/**
+ * @private
+ */
+OptTracker.prototype.DisableAsserts_ = function(func) {
+  switch(%GetOptimizationStatus(func)) {
+    case OptTracker.OptimizationState.YES:
+    case OptTracker.OptimizationState.NO:
+      return false;
+    case OptTracker.OptimizationState.ALWAYS:
+    case OptTracker.OptimizationState.NEVER:
+      return true;
+  }
+  return false;
+}
+// (End of class OptTracker.)
+
+// Example function used by the test below.
+function f(a) {
+  return a+1;
+}
+
+var tracker = new OptTracker();
+tracker.CheckpointOptCount(f);
+
+tracker.AssertOptCount(f, 0);
+tracker.AssertIsOptimized(f, false);
+tracker.AssertDeoptHappened(f, false);
+tracker.AssertDeoptCount(f, 0);
+
+f(1);
+
+tracker.AssertOptCount(f, 0);
+tracker.AssertIsOptimized(f, false);
+tracker.AssertDeoptHappened(f, false);
+tracker.AssertDeoptCount(f, 0);
+
+%OptimizeFunctionOnNextCall(f);
+f(1);
+
+tracker.AssertOptCount(f, 1);
+tracker.AssertIsOptimized(f, true);
+tracker.AssertDeoptHappened(f, false);
+tracker.AssertDeoptCount(f, 0);
+
+%DeoptimizeFunction(f);
+
+tracker.AssertOptCount(f, 1);
+tracker.AssertIsOptimized(f, false);
+tracker.AssertDeoptHappened(f, true);
+tracker.AssertDeoptCount(f, 1);
+
+// Let's trigger optimization for another type.
+for (var i = 0; i < 5; i++) f("a");
+%OptimizeFunctionOnNextCall(f);
+f("b");
+
+tracker.AssertOptCount(f, 2);
+tracker.AssertIsOptimized(f, true);
+tracker.AssertDeoptHappened(f, true);
+tracker.AssertDeoptCount(f, 1);
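The inference OptTracker performs can be restated outside the test harness; this is only a sketch, with optCount and isOptimized standing in for the %GetOptimizationCount() and %GetOptimizationStatus() natives:

// A function that has been optimized optCount times but is not currently
// optimized must have been deoptimized after each of those optimizations;
// if it is still optimized, the latest optimization has not (yet) ended in
// a deopt.
function inferDeoptCount(optCount, isOptimized) {
  return isOptimized ? optCount - 1 : optCount;
}

// Mirrors the assertions above: after %DeoptimizeFunction(f) ...
inferDeoptCount(1, false);  // 1 deopt
// ... and after re-optimizing f for string arguments:
inferDeoptCount(2, true);   // still 1 deopt
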
diff --git a/test/mjsunit/binary-op-newspace.js b/test/mjsunit/binary-op-newspace.js
index 032284c..e3341c4 100644
--- a/test/mjsunit/binary-op-newspace.js
+++ b/test/mjsunit/binary-op-newspace.js
@@ -25,8 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --max-new-space-size=256
-
+// Flags: --max-new-space-size=256 --noopt
 
 // Check that a mod where the stub code hits a failure in heap number
 // allocation still works.
diff --git a/test/mjsunit/bit-not.js b/test/mjsunit/bit-not.js
index 85eccc4..d0316a7 100644
--- a/test/mjsunit/bit-not.js
+++ b/test/mjsunit/bit-not.js
@@ -68,6 +68,8 @@
 // the fast path and just use the slow path instead.
 function TryToGC() {
   var x = 0x40000000;
+  // Put in an eval to foil Crankshaft.
+  eval("");
   for (var i = 0; i < 1000000; i++) {
     assertEquals(~0x40000000, ~x);
   }
diff --git a/test/mjsunit/boolean.js b/test/mjsunit/boolean.js
new file mode 100644
index 0000000..d955855
--- /dev/null
+++ b/test/mjsunit/boolean.js
@@ -0,0 +1,74 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertEquals(Boolean(void 0), false);
+assertEquals(Boolean(null), false);
+assertEquals(Boolean(false), false);
+assertEquals(Boolean(true), true);
+assertEquals(Boolean(0), false);
+assertEquals(Boolean(1), true);
+assertEquals(Boolean(assertEquals), true);
+assertEquals(Boolean(new Object()), true);
+assertTrue(new Boolean(false) !== false);
+assertTrue(new Boolean(false) == false);
+assertTrue(new Boolean(true) !== true);
+assertTrue(new Boolean(true) == true);
+
+assertEquals(true, !false);
+assertEquals(false, !true);
+assertEquals(true, !!true);
+assertEquals(false, !!false);
+
+assertEquals(true, true ? true : false);
+assertEquals(false, false ? true : false);
+
+assertEquals(false, true ? false : true);
+assertEquals(true, false ? false : true);
+
+
+assertEquals(true, true && true);
+assertEquals(false, true && false);
+assertEquals(false, false && true);
+assertEquals(false, false && false);
+
+// Regression.
+var t = 42;
+assertEquals(void 0, t.p);
+assertEquals(void 0, t.p && true);
+assertEquals(void 0, t.p && false);
+assertEquals(void 0, t.p && (t.p == 0));
+assertEquals(void 0, t.p && (t.p == null));
+assertEquals(void 0, t.p && (t.p == t.p));
+
+var o = new Object();
+o.p = 'foo';
+assertEquals('foo', o.p);
+assertEquals('foo', o.p || true);
+assertEquals('foo', o.p || false);
+assertEquals('foo', o.p || (o.p == 0));
+assertEquals('foo', o.p || (o.p == null));
+assertEquals('foo', o.p || (o.p == o.p));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/break.js
similarity index 71%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/break.js
index aa93b25..741263d 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/break.js
@@ -25,12 +25,52 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+function f() {
+  var i = 10;
+  var c = 0;
+  while (i-- > 0) {
+    c++;
+    if (i == 5) ;
+  }
+  assertEquals(10, c);
+}
+f();
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+
+function f2() {
+  var i = 10;
+  var c = 0;
+  while (i-- > 0) {
+    c++;
+    if (i == 5) break;
+  }
+  assertEquals(5, c);
+}
+f2();
+
+
+function f3() {
+  var i = 10;
+  var c = 0;
+  outer: while (i-- > 0) {
+    var j = 10;
+    inner1: inner2: inner3: while (j-- > 0) {
+      c++;
+      if (i == 8)
+        break inner2;
+      if (i == 6)
+        break outer;
+    }
+  }
+  assertEquals(22, c);
+}
+f3();
+
+outer2: {
+  break outer2;
+  assertUnreachable();
 }
 
-test();
+
+outer3: break outer3;  // nop
+l1: l2: l3: break l2;  // nop
diff --git a/test/mjsunit/bugs/618.js b/test/mjsunit/bugs/618.js
index afa9929..ddc0c19 100644
--- a/test/mjsunit/bugs/618.js
+++ b/test/mjsunit/bugs/618.js
@@ -32,14 +32,14 @@
 var c1 = new C1();
 assertEquals(23, c1.x);
 assertEquals("undefined", typeof c1.y);
-  
+
 // Add setter somewhere on the prototype chain after having constructed the
 // first instance.
 C1.prototype = { set x(value) { this.y = 23; } };
 var c1 = new C1();
 assertEquals("undefined", typeof c1.x);
 assertEquals(23, c1.y);
-  
+
 // Simple class using inline constructor.
 function C2() {
   this.x = 23;
diff --git a/test/mjsunit/bugs/bug-618.js b/test/mjsunit/bugs/bug-618.js
index 8f47440..ae84326 100644
--- a/test/mjsunit/bugs/bug-618.js
+++ b/test/mjsunit/bugs/bug-618.js
@@ -33,11 +33,11 @@
   this.x = 23;
 }
 
-// If a setter is added to the prototype chain of a simple constructor setting 
-// one of the properties assigned in the constructor then this setter is 
+// If a setter is added to the prototype chain of a simple constructor setting
+// one of the properties assigned in the constructor then this setter is
 // ignored when constructing new objects from the constructor.
 
-// This only happens if the setter is added _after_ an instance has been 
+// This only happens if the setter is added _after_ an instance has been
 // created.
 
 assertEquals(23, new C().x);
diff --git a/test/mjsunit/bugs/harmony/debug-blockscopes.js b/test/mjsunit/bugs/harmony/debug-blockscopes.js
new file mode 100644
index 0000000..a407c53
--- /dev/null
+++ b/test/mjsunit/bugs/harmony/debug-blockscopes.js
@@ -0,0 +1,224 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --harmony-block-scoping
+// The functions used for testing backtraces. They are at the top to make the
+// testing of source line/column easier.
+
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var test_name;
+var listener_delegate;
+var listener_called;
+var exception;
+var begin_test_count = 0;
+var end_test_count = 0;
+var break_count = 0;
+
+
+// Debug event listener which delegates.
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      break_count++;
+      listener_called = true;
+      listener_delegate(exec_state);
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Initialize for a new test.
+function BeginTest(name) {
+  test_name = name;
+  listener_delegate = null;
+  listener_called = false;
+  exception = null;
+  begin_test_count++;
+}
+
+
+// Check result of a test.
+function EndTest() {
+  assertTrue(listener_called, "listener not called for " + test_name);
+  assertNull(exception, test_name);
+  end_test_count++;
+}
+
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+  assertEquals(scopes.length, exec_state.frame().scopeCount());
+  for (var i = 0; i < scopes.length; i++) {
+    var scope = exec_state.frame().scope(i);
+    assertTrue(scope.isScope());
+    assertEquals(scopes[i], scope.scopeType());
+
+    // Check the global object when hitting the global scope.
+    if (scopes[i] == debug.ScopeType.Global) {
+      // The objects don't have the same class (one is "global", the other
+      // is "Object"), so just check the properties directly.
+      assertPropertiesEqual(this, scope.scopeObject().value());
+    }
+  }
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+  // Send a scopes request and check the result.
+  var json;
+  var request_json = '{"seq":0,"type":"request","command":"scopes"}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scopes.length, response.body.scopes.length);
+  for (var i = 0; i < scopes.length; i++) {
+    assertEquals(i, response.body.scopes[i].index);
+    assertEquals(scopes[i], response.body.scopes[i].type);
+    if (scopes[i] == debug.ScopeType.Local ||
+        scopes[i] == debug.ScopeType.Closure) {
+      assertTrue(response.body.scopes[i].object.ref < 0);
+    } else {
+      assertTrue(response.body.scopes[i].object.ref >= 0);
+    }
+    var found = false;
+    for (var j = 0; j < response.refs.length && !found; j++) {
+      found = response.refs[j].handle == response.body.scopes[i].object.ref;
+    }
+    assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
+  }
+}
+
+// Check that the content of the scope is as expected. For functions just check
+// that there is a function.
+function CheckScopeContent(content, number, exec_state) {
+  var scope = exec_state.frame().scope(number);
+  var count = 0;
+  for (var p in content) {
+    var property_mirror = scope.scopeObject().property(p);
+    if (property_mirror.isUndefined()) {
+      print('property ' + p + ' not found in scope');
+    }
+    assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
+    if (typeof(content[p]) === 'function') {
+      assertTrue(property_mirror.value().isFunction());
+    } else {
+      assertEquals(content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+    }
+    count++;
+  }
+
+  // 'arguments' might be exposed in the local and closure scope. Just
+  // ignore this.
+  var scope_size = scope.scopeObject().properties().length;
+  if (!scope.scopeObject().property('arguments').isUndefined()) {
+    scope_size--;
+  }
+  // Also ignore synthetic variable from catch block.
+  if (!scope.scopeObject().property('.catch-var').isUndefined()) {
+    scope_size--;
+  }
+  // Skip property with empty name.
+  if (!scope.scopeObject().property('').isUndefined()) {
+    scope_size--;
+  }
+  // Also ignore synthetic variable from block scopes.
+  if (!scope.scopeObject().property('.block').isUndefined()) {
+    scope_size--;
+  }
+
+  if (count != scope_size) {
+    print('Names found in scope:');
+    var names = scope.scopeObject().propertyNames();
+    for (var i = 0; i < names.length; i++) {
+      print(names[i]);
+    }
+  }
+  assertEquals(count, scope_size);
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+  // Send a scope request for information on a single scope and check the
+  // result.
+  var request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
+  request_json += scope.scopeIndex();
+  request_json += '}}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scope.scopeType(), response.body.type);
+  assertEquals(number, response.body.index);
+  if (scope.scopeType() == debug.ScopeType.Local ||
+      scope.scopeType() == debug.ScopeType.Closure) {
+    assertTrue(response.body.object.ref < 0);
+  } else {
+    assertTrue(response.body.object.ref >= 0);
+  }
+  var found = false;
+  for (var i = 0; i < response.refs.length && !found; i++) {
+    found = response.refs[i].handle == response.body.object.ref;
+  }
+  assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
+
+
+// Simple closure formed by returning an inner function referring to an outer
+// block-local variable and an outer function's parameter. Due to VM
+// optimizations, parts of the actual closure are missing from the debugger
+// information.
+BeginTest("Closure 1");
+
+function closure_1(a) {
+  var x = 2;
+  let y = 3;
+  if (true) {
+    let z = 4;
+    function f() {
+      debugger;
+      return a + x + y + z;
+    };
+    return f;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Block,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+  CheckScopeContent({z:4}, 1, exec_state);
+  CheckScopeContent({a:1,x:2,y:3}, 2, exec_state);
+};
+closure_1(1)();
+EndTest();
diff --git a/test/mjsunit/builtins.js b/test/mjsunit/builtins.js
new file mode 100644
index 0000000..f2ad544
--- /dev/null
+++ b/test/mjsunit/builtins.js
@@ -0,0 +1,82 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-natives-as=builtins
+
+// Checks that all function properties of the builtin object are neither
+// writable nor configurable. Also, those functions that are actually
+// constructors (recognized by having properties on their .prototype object)
+// have only unconfigurable properties on the prototype, and their methods
+// are also non-writable.
+
+var names = Object.getOwnPropertyNames(builtins);
+
+function isFunction(obj) {
+  return typeof obj == "function";
+}
+
+function checkConstructor(func, name) {
+  // A constructor is a function with a prototype and properties on the
+  // prototype object besides "constructor".
+  if (name.charAt(0) == "$") return;
+  if (typeof func.prototype != "object") return;
+  var propNames = Object.getOwnPropertyNames(func.prototype);
+  if (propNames.length == 0 ||
+      (propNames.length == 1 && propNames[0] == "constructor")) {
+    // Not a constructor.
+    return;
+  }
+  var proto_desc = Object.getOwnPropertyDescriptor(func, "prototype");
+  assertTrue(proto_desc.hasOwnProperty("value"), name);
+  assertFalse(proto_desc.writable, name);
+  assertFalse(proto_desc.configurable, name);
+  var prototype = proto_desc.value;
+  assertEquals(null, prototype.__proto__, name);
+  for (var i = 0; i < propNames.length; i++) {
+    var propName = propNames[i];
+    if (propName == "constructor") continue;
+    var testName = name + "-" + propName;
+    var propDesc = Object.getOwnPropertyDescriptor(prototype, propName);
+    assertTrue(propDesc.hasOwnProperty("value"), testName);
+    assertFalse(propDesc.configurable, testName);
+    if (isFunction(propDesc.value)) {
+      assertFalse(propDesc.writable, testName);
+    }
+  }
+}
+
+for (var i = 0; i < names.length; i++) {
+  var name = names[i];
+  var desc = Object.getOwnPropertyDescriptor(builtins, name);
+  assertTrue(desc.hasOwnProperty("value"));
+  var value = desc.value;
+  if (isFunction(value)) {
+    assertFalse(desc.writable, name);
+    assertFalse(desc.configurable, name);
+    checkConstructor(value, name);
+  }
+}
diff --git a/test/mjsunit/closures.js b/test/mjsunit/closures.js
index ee487a4..7c11971 100644
--- a/test/mjsunit/closures.js
+++ b/test/mjsunit/closures.js
@@ -25,10 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 function runner(f, expected) {
-  for (var i = 0; i < 1000000; i++) {
-    assertEquals(expected, f.call(this));
-  }
+  assertEquals(expected, f.call(this));
 }
 
 function test(n) {
@@ -36,6 +36,8 @@
     var result = n * 2 + arguments.length;
     return result;
   }
+  for (var i = 0; i < 5; ++i) MyFunction();
+  %OptimizeFunctionOnNextCall(MyFunction);
   runner(MyFunction, n * 2);
 }
 
diff --git a/test/mjsunit/compiler/delete.js b/test/mjsunit/compiler/delete.js
index 373a1cb..2aaecb2 100644
--- a/test/mjsunit/compiler/delete.js
+++ b/test/mjsunit/compiler/delete.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -66,6 +66,7 @@
 assertEquals(3, (function (x) { return ((delete x) || 2) + 1; })(0));
 
 
-// 'this' at toplevel is different from all other global variables---not
-// deletable.
+// 'this' is not a Reference so delete returns true (see section 11.4.1,
+// step 2 of ES 5.1).
 assertEquals(true, delete this);
+assertEquals(true, (function () { return delete this; })());
diff --git a/test/mjsunit/compiler/eval-introduced-closure.js b/test/mjsunit/compiler/eval-introduced-closure.js
new file mode 100644
index 0000000..550c7c3
--- /dev/null
+++ b/test/mjsunit/compiler/eval-introduced-closure.js
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that functions introduced by eval work both when there are
+// strict mode and non-strict mode eval in scopes.
+
+// Flags: --allow-natives-syntax
+
+var x = 27;
+
+function f() { return x; }
+
+assertEquals(27, f());
+
+function do_eval(str) {
+  "use strict";
+  return eval(str);
+}
+
+var eval_f = do_eval('(' + f + ')');
+for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
+%OptimizeFunctionOnNextCall(eval_f);
+assertEquals(27, eval_f());
+
+function do_eval_local(str) {
+  "use strict";
+  var x = 42;
+  return eval(str);
+}
+
+eval_f = do_eval_local('(' + f + ')');
+for (var i = 0; i < 5; i++) assertEquals(42, eval_f());
+%OptimizeFunctionOnNextCall(eval_f);
+assertEquals(42, eval_f());
+
+function do_eval_with_other_eval_call(str) {
+  "use strict";
+  var f = eval(str);
+  eval('var x = 1');
+  return f;
+}
+
+eval_f = do_eval_with_other_eval_call('(' + f + ')');
+for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
+%OptimizeFunctionOnNextCall(eval_f);
+assertEquals(27, eval_f());
+
+function test_non_strict_outer_eval() {
+  function strict_eval(str) { "use strict"; return eval(str); }
+  var eval_f = strict_eval('(' + f + ')');
+  for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
+  %OptimizeFunctionOnNextCall(eval_f);
+  assertEquals(27, eval_f());
+  eval("var x = 3");
+  assertEquals(3, eval_f());
+}
+
+test_non_strict_outer_eval();
+
+function test_strict_outer_eval() {
+  "use strict";
+  function strict_eval(str) { "use strict"; return eval(str); }
+  var eval_f = strict_eval('(' + f + ')');
+  for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
+  %OptimizeFunctionOnNextCall(eval_f);
+  assertEquals(27, eval_f());
+  eval("var x = 3");
+  assertEquals(27, eval_f());
+}
+
+test_strict_outer_eval();
diff --git a/test/mjsunit/compiler/global-accessors.js b/test/mjsunit/compiler/global-accessors.js
index bd031a8..337424d 100644
--- a/test/mjsunit/compiler/global-accessors.js
+++ b/test/mjsunit/compiler/global-accessors.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // This test tests that no bailouts are missing by not hitting asserts in debug
-// mode. 
+// mode.
 
 test_count_operation()
 test_compound_assignment()
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/compiler/inline-arguments.js
similarity index 85%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/compiler/inline-arguments.js
index aa93b25..532fc26 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/compiler/inline-arguments.js
@@ -25,12 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+// Test inlining functions that use arguments.
+function f() { return g(1, 2, 3); }
 
-test();
+function g(x, y, z) { return %_ArgumentsLength(); }
+
+for (var i = 0; i < 5; ++i) f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(3, f());
diff --git a/src/mips/virtual-frame-mips-inl.h b/test/mjsunit/compiler/inline-throw.js
similarity index 71%
rename from src/mips/virtual-frame-mips-inl.h
rename to test/mjsunit/compiler/inline-throw.js
index f0d2fab..e3aab39 100644
--- a/src/mips/virtual-frame-mips-inl.h
+++ b/test/mjsunit/compiler/inline-throw.js
@@ -25,34 +25,45 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
-#define V8_VIRTUAL_FRAME_MIPS_INL_H_
+// Flags: --allow-natives-syntax
 
-#include "assembler-mips.h"
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-
-MemOperand VirtualFrame::ParameterAt(int index) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+// Test that inlined functions can contain throw.
+function doThrow() {
+  throw "uha";
 }
 
-
-// The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+function f(x) {
+  if (x == 42) throw doThrow();
+  if (x == 43) throw "wow";
+  return x == 0;
 }
 
-
-void VirtualFrame::Forget(int count) {
-  UNIMPLEMENTED_MIPS();
+function g(x) {
+  return f(x);
 }
 
+for (var i = 0; i < 5; i++) g(0);
+%OptimizeFunctionOnNextCall(g);
+assertEquals(true, g(0));
 
-} }  // namespace v8::internal
+try {
+  g(42);
+} catch(e) {
+  assertEquals("uha", e);
+}
 
-#endif  // V8_VIRTUAL_FRAME_MIPS_INL_H_
+// Test inlining in a test context.
+function h(x) {
+  return f(x) ? "yes" : "no";
+}
+
+for (var i = 0; i < 5; i++) h(0);
+%OptimizeFunctionOnNextCall(h);
+assertEquals("yes", h(0));
+
+try {
+  h(43);
+} catch(e) {
+  assertEquals("wow", e);
+}
+
diff --git a/test/mjsunit/compiler/logical-and.js b/test/mjsunit/compiler/logical-and.js
index 1d31a0a..783edb6 100644
--- a/test/mjsunit/compiler/logical-and.js
+++ b/test/mjsunit/compiler/logical-and.js
@@ -46,8 +46,8 @@
 assertFalse(AndBB(0, 1));
 assertFalse(AndBB(1, 1));
 
-assertFalse(AndBN(0, 0));
-assertTrue(AndBN(0, 1));
+assertEquals(0, AndBN(0, 0));
+assertEquals(1, AndBN(0, 1));
 assertFalse(AndBN(1, 0));
 assertEquals(1, AndBN(0, 1));
 assertEquals(2, AndBN(0, 2));
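The updated expectations reflect that && yields one of its operands rather than a coerced boolean; a quick standalone illustration (plain JavaScript, nothing from the test assumed):

0 && 1;   // 0 -- left operand is falsy and is returned as-is
1 && 0;   // 0 -- left operand is truthy, right operand returned
1 && 2;   // 2
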
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/compiler/regress-106351.js
similarity index 85%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/compiler/regress-106351.js
index aa93b25..2a67a05 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/compiler/regress-106351.js
@@ -25,12 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test Math.round with the input reused in the same expression.
+function test(x) {
+  var v = Math.round(x) - x;
+  assertEquals(0.5, v);
 }
 
-test();
+for (var i = 0; i < 5; ++i) test(0.5);
+%OptimizeFunctionOnNextCall(test);
+test(0.5);
diff --git a/test/mjsunit/compiler/regress-1085.js b/test/mjsunit/compiler/regress-1085.js
index 5d787a4..cea587f 100644
--- a/test/mjsunit/compiler/regress-1085.js
+++ b/test/mjsunit/compiler/regress-1085.js
@@ -25,11 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
 
 // Test correct checks for negative zero.
 // This test relies on specific type feedback for Math.min.
 function f(x) { return 1 / Math.min(1, x); }
 
-for (var i=0; i<1000000; i++) f(1);
+for (var i = 0; i < 5; ++i) f(1);
+%OptimizeFunctionOnNextCall(f);
+%OptimizeFunctionOnNextCall(Math.min);
 
 assertEquals(-Infinity, f(-0));
diff --git a/test/mjsunit/compiler/regress-closures-with-eval.js b/test/mjsunit/compiler/regress-closures-with-eval.js
index 507d74f..57afb16 100644
--- a/test/mjsunit/compiler/regress-closures-with-eval.js
+++ b/test/mjsunit/compiler/regress-closures-with-eval.js
@@ -25,12 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 // Verifies that closures in presence of eval work fine.
 function withEval(expr, filter) {
   function walk(v) {
     for (var i in v) {
       for (var i in v) {}
     }
+    %OptimizeFunctionOnNextCall(filter);
     return filter(v);
   }
 
@@ -46,6 +49,8 @@
 
 var expr = '([' + makeTagInfoJSON(128).join(', ') + '])'
 
-for (var n = 0; n < 300; n++) {
+for (var n = 0; n < 5; n++) {
   withEval(expr, function(a) { return a; });
 }
+%OptimizeFunctionOnNextCall(withEval);
+withEval(expr, function(a) { return a; });
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/compiler/regress-const.js
similarity index 72%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/compiler/regress-const.js
index aa93b25..aa55d0f 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/compiler/regress-const.js
@@ -25,12 +25,44 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test const initialization and assignments.
+function f() {
+  var x = 42;
+  while (true) {
+    const y = x;
+    if (--x == 0) return y;
+  }
 }
 
-test();
+function g() {
+  const x = 42;
+  x += 1;
+  return x;
+}
+
+for (var i = 0; i < 5; i++) {
+  f();
+  g();
+}
+
+%OptimizeFunctionOnNextCall(f);
+%OptimizeFunctionOnNextCall(g);
+
+assertEquals(42, f());
+assertEquals(42, g());
+
+
+function h(a, b) {
+  var r = a + b;
+  const X = 42;
+  return r + X;
+}
+
+for (var i = 0; i < 5; i++) h(1,2);
+
+%OptimizeFunctionOnNextCall(h);
+
+assertEquals(45, h(1,2));
+assertEquals("foo742", h("foo", 7));
diff --git a/test/mjsunit/compiler/regress-funcaller.js b/test/mjsunit/compiler/regress-funcaller.js
index 88db147..5c2a597 100644
--- a/test/mjsunit/compiler/regress-funcaller.js
+++ b/test/mjsunit/compiler/regress-funcaller.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 // Test function.caller.
 function A() {}
 
@@ -40,9 +42,10 @@
 
 var o = new A();
 
-for (var i=0; i<5000000; i++) {
+for (var i=0; i<5; i++) {
   o.g(i);
 }
+%OptimizeFunctionOnNextCall(o.g);
 assertEquals(gee, o.g(0));
 assertEquals(null, o.g(1));
 
@@ -53,9 +56,10 @@
   return o.g(x);
 }
 
-for (var j=0; j<5000000; j++) {
+for (var j=0; j<5; j++) {
   hej(j);
 }
+%OptimizeFunctionOnNextCall(hej);
 assertEquals(gee, hej(0));
 assertEquals(hej, hej(1));
 
@@ -66,8 +70,9 @@
   return o.g(x);
 }
 
-for (var j=0; j<5000000; j++) {
+for (var j=0; j<5; j++) {
   from_eval(j);
 }
+%OptimizeFunctionOnNextCall(from_eval);
 assertEquals(gee, from_eval(0));
 assertEquals(from_eval, from_eval(1));
diff --git a/test/mjsunit/compiler/regress-intoverflow.js b/test/mjsunit/compiler/regress-intoverflow.js
index d3842f1..063a376 100644
--- a/test/mjsunit/compiler/regress-intoverflow.js
+++ b/test/mjsunit/compiler/regress-intoverflow.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 // Test overflow checks in optimized code.
 function testMul(a, b) {
   a *= 2;
@@ -34,7 +36,8 @@
   }
 }
 
-for (var i=0; i<1000000; i++) testMul(0,0);
+for (var i=0; i<5; i++) testMul(0,0);
+%OptimizeFunctionOnNextCall(testMul);
 assertEquals(4611686018427388000, testMul(-0x40000000, -0x40000000));
 
 function testAdd(a, b) {
@@ -45,7 +48,8 @@
   }
 }
 
-for (var i=0; i<1000000; i++) testAdd(0,0);
+for (var i=0; i<5; i++) testAdd(0,0);
+%OptimizeFunctionOnNextCall(testAdd);
 assertEquals(-4294967296, testAdd(-0x40000000, -0x40000000));
 
 
@@ -58,5 +62,6 @@
   }
 }
 
-for (var i=0; i<1000000; i++) testSub(0,0);
+for (var i=0; i<5; i++) testSub(0,0);
+%OptimizeFunctionOnNextCall(testSub);
 assertEquals(-2147483650, testSub(-0x40000000, 1));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/compiler/regress-lazy-deopt.js
similarity index 80%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/compiler/regress-lazy-deopt.js
index aa93b25..d1c3d01 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/compiler/regress-lazy-deopt.js
@@ -25,12 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test lazy deoptimization after CallFunctionStub.
+
+function foo() { return 1; }
+
+function f(x, y) {
+  var a = [0];
+  if (x == 0) {
+    %DeoptimizeFunction(f);
+    return 1;
+  }
+  a[0] = %_CallFunction(null, x - 1, f);
+  return x >> a[0];
 }
 
-test();
+f(42);
+f(42);
+assertEquals(42, f(42));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(42, f(42));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/compiler/regress-max-locals-for-osr.js
similarity index 75%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/compiler/regress-max-locals-for-osr.js
index aa93b25..cc150ed 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/compiler/regress-max-locals-for-osr.js
@@ -25,12 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var limit = %RunningInSimulator() ? 10000 : 10000000;
+
+function f() {
+  var a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
+      a11, a12, a13, a14, a15, a16, a17, a18, a19, a20,
+      a21, a22, a23, a24, a25, a26, a27, a28, a29, a30,
+      a31, a32, a33, a34, a35, a36, a37, a38, a39, a40,
+      a41, a42, a43, a44, a45, a46, a47, a48, a49, a50,
+      a51, a52, a53, a54, a55, a56, a57, a58, a59, a60,
+      a61, a62, a63, a64;
+  for (a1 = 0; a1 < limit; a1++) a2 = 23;
 }
 
-test();
+f();
diff --git a/test/mjsunit/const-redecl.js b/test/mjsunit/const-redecl.js
index 26d765b..9459708 100644
--- a/test/mjsunit/const-redecl.js
+++ b/test/mjsunit/const-redecl.js
@@ -55,7 +55,7 @@
 function TestGlobal(s,e) {
   // Collect the global properties before the call.
   var properties = [];
-  for (var key in this) properties.push(key); 
+  for (var key in this) properties.push(key);
   // Compute the result.
   var result;
   try {
@@ -113,7 +113,7 @@
   // Eval second definition.
   TestAll("TypeError", def0 + '; eval("' + def1 + '")');
   // Eval both definitions separately.
-  TestAll("TypeError", 'eval("' + def0 +'"); eval("' + def1 + '")');  
+  TestAll("TypeError", 'eval("' + def0 +'"); eval("' + def1 + '")');
 }
 
 
diff --git a/test/mjsunit/const.js b/test/mjsunit/const.js
index a48e82d..adb0b7a 100644
--- a/test/mjsunit/const.js
+++ b/test/mjsunit/const.js
@@ -50,20 +50,20 @@
 var valueOfCount = 0;
 
 function g() {
-  const o = { valueOf: function() { valueOfCount++; return 42; } }
-  assertEquals(42, o);
+  const o = { valueOf: function() { valueOfCount++; return 42; } };
+  assertEquals(42, +o);
   assertEquals(1, valueOfCount);
   o++;
-  assertEquals(42, o);
+  assertEquals(42, +o);
   assertEquals(3, valueOfCount);
   ++o;
-  assertEquals(42, o);
+  assertEquals(42, +o);
   assertEquals(5, valueOfCount);
   o--;
-  assertEquals(42, o);
+  assertEquals(42, +o);
   assertEquals(7, valueOfCount);
   --o;
-  assertEquals(42, o);
+  assertEquals(42, +o);
   assertEquals(9, valueOfCount);
 }
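Context for the +o change (a sketch, assuming assertEquals compares with ===): an object with a valueOf is never strictly equal to the number 42, so the unary + is needed to force the valueOf conversion that a looser comparison would have performed implicitly.

var o = { valueOf: function() { return 42; } };
o == 42;    // true  -- loose equality invokes valueOf
o === 42;   // false -- different types, no conversion
+o === 42;  // true  -- unary plus calls valueOf first
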
 
diff --git a/test/mjsunit/cyrillic.js b/test/mjsunit/cyrillic.js
index c5712e6..9b21c4f 100644
--- a/test/mjsunit/cyrillic.js
+++ b/test/mjsunit/cyrillic.js
@@ -187,9 +187,9 @@
       var ignore_case = (j == 0);
       var flag = ignore_case ? "i" : "";
       var re = new RegExp(mixed, flag);
-      assertEquals(ignore_case || (full && add_non_ascii_character_to_subject),
-                   re.test("A" + suffix),
-                   58 + flag + f);
+      var expected =
+          ignore_case || (full && !!add_non_ascii_character_to_subject);
+      assertEquals(expected, re.test("A" + suffix), 58 + flag + f);
       assertTrue(re.test("a" + suffix), 59 + flag + f);
       assertTrue(re.test("~" + suffix), 60 + flag + f);
       assertTrue(re.test(cyrillic.MIDDLE), 61 + flag + f);
diff --git a/test/mjsunit/d8-os.js b/test/mjsunit/d8-os.js
index 630a39e..5640326 100644
--- a/test/mjsunit/d8-os.js
+++ b/test/mjsunit/d8-os.js
@@ -30,6 +30,9 @@
 // implemented on Windows, and even if it were then many of the things
 // we are calling would not be available.
 
+var TEST_DIR = "/tmp/d8-os-test-directory-" + ((Math.random() * (1<<30)) | 0);
+
+
 function arg_error(str) {
   try {
     eval(str);
@@ -53,96 +56,97 @@
 if (this.os && os.system) {
   try {
     // Delete the dir if it is lying around from last time.
-    os.system("ls", ["d8-os-test-directory"]);
-    os.system("rm", ["-r", "d8-os-test-directory"]);
+    os.system("ls", [TEST_DIR]);
+    os.system("rm", ["-r", TEST_DIR]);
   } catch (e) {
   }
-  os.mkdirp("d8-os-test-directory");
-  os.chdir("d8-os-test-directory");
-  // Check the chdir worked.
-  os.system('ls', ['../d8-os-test-directory']);
-  // Simple create dir.
-  os.mkdirp("dir");
-  // Create dir in dir.
-  os.mkdirp("dir/foo");
-  // Check that they are there.
-  os.system('ls', ['dir/foo']);
-  // Check that we can detect when something is not there.
-  assertThrows("os.system('ls', ['dir/bar']);", "dir not there");
-  // Check that mkdirp makes intermediate directories.
-  os.mkdirp("dir2/foo");
-  os.system("ls", ["dir2/foo"]);
-  // Check that mkdirp doesn't mind if the dir is already there.
-  os.mkdirp("dir2/foo");
-  os.mkdirp("dir2/foo/");
-  // Check that mkdirp can cope with trailing /
-  os.mkdirp("dir3/");
-  os.system("ls", ["dir3"]);
-  // Check that we get an error if the name is taken by a file.
-  os.system("sh", ["-c", "echo foo > file1"]);
-  os.system("ls", ["file1"]);
-  assertThrows("os.mkdirp('file1');", "mkdir over file1");
-  assertThrows("os.mkdirp('file1/foo');", "mkdir over file2");
-  assertThrows("os.mkdirp('file1/');", "mkdir over file3");
-  assertThrows("os.mkdirp('file1/foo/');", "mkdir over file4");
-  // Create a dir we cannot read.
-  os.mkdirp("dir4", 0);
-  // This test fails if you are root since root can read any dir.
-  assertThrows("os.chdir('dir4');", "chdir dir4 I");
-  os.rmdir("dir4");
-  assertThrows("os.chdir('dir4');", "chdir dir4 II");
-  // Set umask.
-  var old_umask = os.umask(0777);
-  // Create a dir we cannot read.
-  os.mkdirp("dir5");
-  // This test fails if you are root since root can read any dir.
-  assertThrows("os.chdir('dir5');", "cd dir5 I");
-  os.rmdir("dir5");
-  assertThrows("os.chdir('dir5');", "chdir dir5 II");
-  os.umask(old_umask);
-
-  os.mkdirp("hest/fisk/../fisk/ged");
-  os.system("ls", ["hest/fisk/ged"]);
-
-  os.setenv("FOO", "bar");
-  var environment = os.system("printenv");
-  assertTrue(/FOO=bar/.test(environment));
-
-  // Check we time out.
-  var have_sleep = true;
-  var have_echo = true;
+  os.mkdirp(TEST_DIR);
+  os.chdir(TEST_DIR);
   try {
-    os.system("ls", ["/bin/sleep"]);
-  } catch (e) {
-    have_sleep = false;
-  }
-  try {
-    os.system("ls", ["/bin/echo"]);
-  } catch (e) {
-    have_echo = false;
-  }
-  if (have_sleep) {
-    assertThrows("os.system('sleep', ['2000'], 200);", "sleep 1");
+    // Check the chdir worked.
+    os.system('ls', [TEST_DIR]);
+    // Simple create dir.
+    os.mkdirp("dir");
+    // Create dir in dir.
+    os.mkdirp("dir/foo");
+    // Check that they are there.
+    os.system('ls', ['dir/foo']);
+    // Check that we can detect when something is not there.
+    assertThrows("os.system('ls', ['dir/bar']);", "dir not there");
+    // Check that mkdirp makes intermediate directories.
+    os.mkdirp("dir2/foo");
+    os.system("ls", ["dir2/foo"]);
+    // Check that mkdirp doesn't mind if the dir is already there.
+    os.mkdirp("dir2/foo");
+    os.mkdirp("dir2/foo/");
+    // Check that mkdirp can cope with trailing /
+    os.mkdirp("dir3/");
+    os.system("ls", ["dir3"]);
+    // Check that we get an error if the name is taken by a file.
+    os.system("sh", ["-c", "echo foo > file1"]);
+    os.system("ls", ["file1"]);
+    assertThrows("os.mkdirp('file1');", "mkdir over file1");
+    assertThrows("os.mkdirp('file1/foo');", "mkdir over file2");
+    assertThrows("os.mkdirp('file1/');", "mkdir over file3");
+    assertThrows("os.mkdirp('file1/foo/');", "mkdir over file4");
+    // Create a dir we cannot read.
+    os.mkdirp("dir4", 0);
+    // This test fails if you are root since root can read any dir.
+    assertThrows("os.chdir('dir4');", "chdir dir4 I");
+    os.rmdir("dir4");
+    assertThrows("os.chdir('dir4');", "chdir dir4 II");
+    // Set umask.
+    var old_umask = os.umask(0777);
+    // Create a dir we cannot read.
+    os.mkdirp("dir5");
+    // This test fails if you are root since root can read any dir.
+    assertThrows("os.chdir('dir5');", "cd dir5 I");
+    os.rmdir("dir5");
+    assertThrows("os.chdir('dir5');", "chdir dir5 II");
+    os.umask(old_umask);
 
-    // Check we time out with total time.
-    assertThrows("os.system('sleep', ['2000'], -1, 200);", "sleep 2");
+    os.mkdirp("hest/fisk/../fisk/ged");
+    os.system("ls", ["hest/fisk/ged"]);
 
-    // Check that -1 means no timeout.
-    os.system('sleep', ['1'], -1, -1);
+    os.setenv("FOO", "bar");
+    var environment = os.system("printenv");
+    assertTrue(/FOO=bar/.test(environment));
 
-  }
+    // Check we time out.
+    var have_sleep = true;
+    var have_echo = true;
+    try {
+      os.system("ls", ["/bin/sleep"]);
+    } catch (e) {
+      have_sleep = false;
+    }
+    try {
+      os.system("ls", ["/bin/echo"]);
+    } catch (e) {
+      have_echo = false;
+    }
+    if (have_sleep) {
+      assertThrows("os.system('sleep', ['2000'], 200);", "sleep 1");
 
-  // Check that we don't fill up the process table with zombies.
-  // Disabled because it's too slow.
-  if (have_echo) {
-    //for (var i = 0; i < 65536; i++) {
+      // Check we time out with total time.
+      assertThrows("os.system('sleep', ['2000'], -1, 200);", "sleep 2");
+
+      // Check that -1 means no timeout.
+      os.system('sleep', ['1'], -1, -1);
+
+    }
+
+    // Check that we don't fill up the process table with zombies.
+    // Disabled because it's too slow.
+    if (have_echo) {
+      //for (var i = 0; i < 65536; i++) {
       assertEquals("baz\n", os.system("echo", ["baz"]));
-    //}
+      //}
+    }
+  } finally {
+    os.system("rm", ["-r", TEST_DIR]);
   }
 
-  os.chdir("..");
-  os.system("rm", ["-r", "d8-os-test-directory"]);
-
   // Too few args.
   arg_error("os.umask();");
   arg_error("os.system();");
diff --git a/test/mjsunit/date-parse.js b/test/mjsunit/date-parse.js
index 23a6993..b46e39a 100644
--- a/test/mjsunit/date-parse.js
+++ b/test/mjsunit/date-parse.js
@@ -285,9 +285,9 @@
 
 // Negative tests.
 var testCasesNegative = [
-    'May 25 2008 1:30 (PM)) UTC',
-    'May 25 2008 1:30( )AM (PM)',
-    'May 25 2008 AAA (GMT)'];
+    'May 25 2008 1:30 (PM)) UTC',  // Bad unmatched ')' after number.
+    'May 25 2008 1:30( )AM (PM)',  //
+    'May 25 2008 AAA (GMT)'];      // Unknown word after number.
 
 testCasesNegative.forEach(function (s) {
     assertTrue(isNaN(Date.parse(s)), s + " is not NaN.");
diff --git a/test/mjsunit/date.js b/test/mjsunit/date.js
index f13af82..a7f6cfa 100644
--- a/test/mjsunit/date.js
+++ b/test/mjsunit/date.js
@@ -187,3 +187,123 @@
 assertTrue(isNaN(d.getTime()));
 d = new Date(1969, 12, 1, -Infinity);
 assertTrue(isNaN(d.getTime()));
+
+// Parsing ES5 ISO-8601 dates.
+// When TZ is omitted, it defaults to 'Z' meaning UTC.
+
+// Check epoch.
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00"));
+assertEquals(0, Date.parse("1970-01-01T00:00"));
+assertEquals(0, Date.parse("1970-01-01"));
+
+assertEquals(0, Date.parse("1970-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("1970-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("1970-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970-01T00:00:00.000"));
+assertEquals(0, Date.parse("1970-01T00:00:00"));
+assertEquals(0, Date.parse("1970-01T00:00"));
+assertEquals(0, Date.parse("1970-01"));
+
+assertEquals(0, Date.parse("1970T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("1970T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("1970T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970T00:00:00.000"));
+assertEquals(0, Date.parse("1970T00:00:00"));
+assertEquals(0, Date.parse("1970T00:00"));
+assertEquals(0, Date.parse("1970"));
+
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00"));
+assertEquals(0, Date.parse("+001970-01-01T00:00"));
+assertEquals(0, Date.parse("+001970-01-01"));
+
+assertEquals(0, Date.parse("+001970-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("+001970-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("+001970-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970-01T00:00:00.000"));
+assertEquals(0, Date.parse("+001970-01T00:00:00"));
+assertEquals(0, Date.parse("+001970-01T00:00"));
+assertEquals(0, Date.parse("+001970-01"));
+
+assertEquals(0, Date.parse("+001970T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("+001970T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("+001970T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970T00:00:00.000"));
+assertEquals(0, Date.parse("+001970T00:00:00"));
+assertEquals(0, Date.parse("+001970T00:00"));
+assertEquals(0, Date.parse("+001970"));
+
+// Check random date.
+assertEquals(70671003500, Date.parse("1972-03-28T23:50:03.500+01:00"));
+assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500Z"));
+assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500"));
+assertEquals(70674603000, Date.parse("1972-03-28T23:50:03"));
+assertEquals(70674600000, Date.parse("1972-03-28T23:50"));
+assertEquals(70588800000, Date.parse("1972-03-28"));
+
+assertEquals(68338203500, Date.parse("1972-03T23:50:03.500+01:00"));
+assertEquals(68341803500, Date.parse("1972-03T23:50:03.500Z"));
+assertEquals(68341803500, Date.parse("1972-03T23:50:03.500"));
+assertEquals(68341803000, Date.parse("1972-03T23:50:03"));
+assertEquals(68341800000, Date.parse("1972-03T23:50"));
+assertEquals(68256000000, Date.parse("1972-03"));
+
+assertEquals(63154203500, Date.parse("1972T23:50:03.500+01:00"));
+assertEquals(63157803500, Date.parse("1972T23:50:03.500Z"));
+assertEquals(63157803500, Date.parse("1972T23:50:03.500"));
+assertEquals(63157803000, Date.parse("1972T23:50:03"));
+assertEquals(63072000000, Date.parse("1972"));
+
+assertEquals(70671003500, Date.parse("+001972-03-28T23:50:03.500+01:00"));
+assertEquals(70674603500, Date.parse("+001972-03-28T23:50:03.500Z"));
+assertEquals(70674603500, Date.parse("+001972-03-28T23:50:03.500"));
+assertEquals(70674603000, Date.parse("+001972-03-28T23:50:03"));
+assertEquals(70674600000, Date.parse("+001972-03-28T23:50"));
+assertEquals(70588800000, Date.parse("+001972-03-28"));
+
+assertEquals(68338203500, Date.parse("+001972-03T23:50:03.500+01:00"));
+assertEquals(68341803500, Date.parse("+001972-03T23:50:03.500Z"));
+assertEquals(68341803500, Date.parse("+001972-03T23:50:03.500"));
+assertEquals(68341803000, Date.parse("+001972-03T23:50:03"));
+assertEquals(68341800000, Date.parse("+001972-03T23:50"));
+assertEquals(68256000000, Date.parse("+001972-03"));
+
+assertEquals(63154203500, Date.parse("+001972T23:50:03.500+01:00"));
+assertEquals(63157803500, Date.parse("+001972T23:50:03.500Z"));
+assertEquals(63157803500, Date.parse("+001972T23:50:03.500"));
+assertEquals(63157803000, Date.parse("+001972T23:50:03"));
+assertEquals(63072000000, Date.parse("+001972"));
+
+
+// Ensure that ISO-years in the range 00-99 aren't translated to the range
+// 1950..2049.
+assertEquals(-60904915200000, Date.parse("0040-01-01"));
+assertEquals(-60273763200000, Date.parse("0060-01-01"));
+assertEquals(-62167219200000, Date.parse("0000-01-01"));
+assertEquals(-62167219200000, Date.parse("+000000-01-01"));
+
+// Test negative years.
+assertEquals(-63429523200000, Date.parse("-000040-01-01"));
+assertEquals(-64060675200000, Date.parse("-000060-01-01"));
+assertEquals(-124397510400000, Date.parse("-001972-01-01"));
+
+// Check time-zones.
+assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500Z"));
+for (var i = 0; i < 24; i++) {
+  var hh = (i < 10) ? "0" + i : "" + i;
+  for (var j = 0; j < 60; j += 15) {
+    var mm = (j < 10) ? "0" + j : "" + j;
+    var ms = (i * 60 + j) * 60000;
+    var string = "1972-03-28T23:50:03.500-" + hh + ":" + mm;
+    assertEquals(70674603500 + ms, Date.parse(string), string);
+    string = "1972-03-28T23:50:03.500+" + hh + ":" + mm;
+    assertEquals(70674603500 - ms, Date.parse(string), string);
+  }
+}
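The zone-offset arithmetic exercised by the loop above boils down to: a positive offset means local time is ahead of UTC, so the instant is earlier; a negative offset means it is later. A small worked check (plain JavaScript, no test harness assumed):

var base = Date.parse("1972-03-28T23:50:03.500Z");        // 70674603500
var plus = Date.parse("1972-03-28T23:50:03.500+01:00");   // base - 3600000
var minus = Date.parse("1972-03-28T23:50:03.500-01:00");  // base + 3600000
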
diff --git a/test/mjsunit/debug-backtrace.js b/test/mjsunit/debug-backtrace.js
index d15b2d2..3647913 100644
--- a/test/mjsunit/debug-backtrace.js
+++ b/test/mjsunit/debug-backtrace.js
@@ -195,7 +195,7 @@
       assertEquals("m", response.lookup(frame.func.ref).inferredName);
       assertFalse(frame.constructCall);
       assertEquals(35, frame.line);
-      assertEquals(2, frame.column);
+      assertEquals(6, frame.column);
       assertEquals(0, frame.arguments.length);
 
       json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":3}}'
@@ -269,4 +269,3 @@
 // Make sure that the debug event listener vas invoked.
 assertFalse(exception, "exception in listener");
 assertTrue(listenerCalled);
-
diff --git a/test/mjsunit/debug-compile-event.js b/test/mjsunit/debug-compile-event.js
index b00a907..94dddfa 100644
--- a/test/mjsunit/debug-compile-event.js
+++ b/test/mjsunit/debug-compile-event.js
@@ -81,7 +81,7 @@
       assertTrue('context' in msg.body.script);
 
       // Check that we pick script name from //@ sourceURL, iff present
-      assertEquals(current_source.indexOf('sourceURL') >= 0 ? 
+      assertEquals(current_source.indexOf('sourceURL') >= 0 ?
                      'myscript.js' : undefined,
                    event_data.script().name());
     }
diff --git a/test/mjsunit/debug-evaluate-locals-optimized-double.js b/test/mjsunit/debug-evaluate-locals-optimized-double.js
new file mode 100644
index 0000000..8447df5
--- /dev/null
+++ b/test/mjsunit/debug-evaluate-locals-optimized-double.js
@@ -0,0 +1,185 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var listenerComplete = false;
+var exception = false;
+
+var testingConstructCall = false;
+
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break)
+    {
+      assertEquals(6, exec_state.frameCount());
+
+      for (var i = 0; i < exec_state.frameCount(); i++) {
+        var frame = exec_state.frame(i);
+        if (i < exec_state.frameCount() - 1) {
+          var expected_a = i * 2 + 1 + (i * 2 + 1) / 100;
+          var expected_b = i * 2 + 2 + (i * 2 + 2) / 100;
+          var expected_x = (i + 1) * 2 + 1 + ((i + 1) * 2 + 1) / 100;
+          var expected_y = (i + 1) * 2 + 2 + ((i + 1) * 2 + 2) / 100;
+
+          // All frames except the bottom one have normal variables a and b.
+          var a = ('a' === frame.localName(0)) ? 0 : 1;
+          var b = 1 - a;
+          assertEquals('a', frame.localName(a));
+          assertEquals('b', frame.localName(b));
+          assertEquals(expected_a, frame.localValue(a).value());
+          assertEquals(expected_b, frame.localValue(b).value());
+
+          // All frames except the bottom one have argument variables x and y.
+          assertEquals('x', frame.argumentName(0));
+          assertEquals('y', frame.argumentName(1));
+          assertEquals(expected_x, frame.argumentValue(0).value());
+          assertEquals(expected_y, frame.argumentValue(1).value());
+
+          // All frames except the bottom one have two scopes.
+          assertEquals(2, frame.scopeCount());
+          assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
+          assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
+          assertEquals(expected_a, frame.scope(0).scopeObject().value()['a']);
+          assertEquals(expected_b, frame.scope(0).scopeObject().value()['b']);
+          assertEquals(expected_x, frame.scope(0).scopeObject().value()['x']);
+          assertEquals(expected_y, frame.scope(0).scopeObject().value()['y']);
+
+          // Evaluate in the inlined frame.
+          assertEquals(expected_a, frame.evaluate('a').value());
+          assertEquals(expected_x, frame.evaluate('x').value());
+          assertEquals(expected_x, frame.evaluate('arguments[0]').value());
+          assertEquals(expected_a + expected_b + expected_x + expected_y,
+                       frame.evaluate('a + b + x + y').value());
+          assertEquals(expected_x + expected_y,
+                       frame.evaluate('arguments[0] + arguments[1]').value());
+        } else {
+          // The bottom frame only has the global scope.
+          assertEquals(1, frame.scopeCount());
+          assertEquals(debug.ScopeType.Global, frame.scope(0).scopeType());
+        }
+
+        // Check the frame function.
+        switch (i) {
+          case 0: assertEquals(h, frame.func().value()); break;
+          case 1: assertEquals(g3, frame.func().value()); break;
+          case 2: assertEquals(g2, frame.func().value()); break;
+          case 3: assertEquals(g1, frame.func().value()); break;
+          case 4: assertEquals(f, frame.func().value()); break;
+          case 5: break;
+          default: assertUnreachable();
+        }
+
+        // Check for construct call.
+        assertEquals(testingConstructCall && i == 4, frame.isConstructCall());
+
+        // When function f is optimized (1 means YES, see runtime.cc) we
+        // expect an optimized frame for f with g1, g2 and g3 inlined.
+        if (%GetOptimizationStatus(f) == 1) {
+          if (i == 1 || i == 2 || i == 3) {
+            assertTrue(frame.isOptimizedFrame());
+            assertTrue(frame.isInlinedFrame());
+            assertEquals(4 - i, frame.inlinedFrameIndex());
+          } else if (i == 4) {
+            assertTrue(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          } else {
+            assertFalse(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          }
+        }
+      }
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+    exception = e
+  };
+};
+
+f();f();f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function h(x, y) {
+  var a = 1;
+  var b = 2;
+  a = a + a / 100;
+  b = b + b / 100;
+  debugger;  // Breakpoint.
+};
+
+function g3(x, y) {
+  var a = 3;
+  var b = 4;
+  a = a + a / 100;
+  b = b + b / 100;
+  h(a, b);
+  return a+b;
+};
+
+function g2(x, y) {
+  var a = 5;
+  var b = 6;
+  a = a + a / 100;
+  b = b + b / 100;
+  g3(a, b);
+};
+
+function g1(x, y) {
+  var a = 7;
+  var b = 8;
+  a = a + a / 100;
+  b = b + b / 100;
+  g2(a, b);
+};
+
+function f(x, y) {
+  var a = 9;
+  var b = 10;
+  a = a + a / 100;
+  b = b + b / 100;
+  g1(a, b);
+};
+
+// Test calling f normally and as a constructor.
+f(11.11, 12.12);
+testingConstructCall = true;
+new f(11.11, 12.12);
+
+// Make sure that the debug event listener was invoked.
+assertFalse(exception, "exception in listener " + exception);
+assertTrue(listenerComplete);
+
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-evaluate-locals-optimized.js b/test/mjsunit/debug-evaluate-locals-optimized.js
new file mode 100644
index 0000000..c3cd5eb
--- /dev/null
+++ b/test/mjsunit/debug-evaluate-locals-optimized.js
@@ -0,0 +1,174 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var listenerComplete = false;
+var exception = false;
+
+var testingConstructCall = false;
+
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break)
+    {
+      assertEquals(6, exec_state.frameCount());
+
+      for (var i = 0; i < exec_state.frameCount(); i++) {
+        var frame = exec_state.frame(i);
+        if (i < exec_state.frameCount() - 1) {
+          var expected_a = i * 2 + 1;
+          var expected_b = i * 2 + 2;
+          var expected_x = (i + 1) * 2 + 1;
+          var expected_y = (i + 1) * 2 + 2;
+
+          // All frames except the bottom one have normal variables a and b.
+          var a = ('a' === frame.localName(0)) ? 0 : 1;
+          var b = 1 - a;
+          assertEquals('a', frame.localName(a));
+          assertEquals('b', frame.localName(b));
+          assertEquals(expected_a, frame.localValue(a).value());
+          assertEquals(expected_b, frame.localValue(b).value());
+
+          // All frames except the bottom one have argument variables x and y.
+          assertEquals('x', frame.argumentName(0));
+          assertEquals('y', frame.argumentName(1));
+          assertEquals(expected_x, frame.argumentValue(0).value());
+          assertEquals(expected_y, frame.argumentValue(1).value());
+
+          // All frames except the bottom one have two scopes.
+          assertEquals(2, frame.scopeCount());
+          assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
+          assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
+          assertEquals(expected_a, frame.scope(0).scopeObject().value()['a']);
+          assertEquals(expected_b, frame.scope(0).scopeObject().value()['b']);
+          assertEquals(expected_x, frame.scope(0).scopeObject().value()['x']);
+          assertEquals(expected_y, frame.scope(0).scopeObject().value()['y']);
+
+          // Evaluate in the inlined frame.
+          assertEquals(expected_a, frame.evaluate('a').value());
+          assertEquals(expected_x, frame.evaluate('x').value());
+          assertEquals(expected_x, frame.evaluate('arguments[0]').value());
+          assertEquals(expected_a + expected_b + expected_x + expected_y,
+                       frame.evaluate('a + b + x + y').value());
+          assertEquals(expected_x + expected_y,
+                       frame.evaluate('arguments[0] + arguments[1]').value());
+        } else {
+          // The bottom frame only has the global scope.
+          assertEquals(1, frame.scopeCount());
+          assertEquals(debug.ScopeType.Global, frame.scope(0).scopeType());
+        }
+
+        // Check the frame function.
+        switch (i) {
+          case 0: assertEquals(h, frame.func().value()); break;
+          case 1: assertEquals(g3, frame.func().value()); break;
+          case 2: assertEquals(g2, frame.func().value()); break;
+          case 3: assertEquals(g1, frame.func().value()); break;
+          case 4: assertEquals(f, frame.func().value()); break;
+          case 5: break;
+          default: assertUnreachable();
+        }
+
+        // Check for construct call.
+        assertEquals(testingConstructCall && i == 4, frame.isConstructCall());
+
+        // When function f is optimized (1 means YES, see runtime.cc) we
+        // expect an optimized frame for f with g1, g2 and g3 inlined.
+        if (%GetOptimizationStatus(f) == 1) {
+          if (i == 1 || i == 2 || i == 3) {
+            assertTrue(frame.isOptimizedFrame());
+            assertTrue(frame.isInlinedFrame());
+            assertEquals(4 - i, frame.inlinedFrameIndex());
+          } else if (i == 4) {
+            assertTrue(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          } else {
+            assertFalse(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          }
+        }
+      }
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+    exception = e.stack;
+  };
+};
+
+f();f();f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function h(x, y) {
+  var a = 1;
+  var b = 2;
+  debugger;  // Breakpoint.
+};
+
+function g3(x, y) {
+  var a = 3;
+  var b = 4;
+  h(a, b);
+};
+
+function g2(x, y) {
+  var a = 5;
+  var b = 6;
+  g3(a, b);
+};
+
+function g1(x, y) {
+  var a = 7;
+  var b = 8;
+  g2(a, b);
+};
+
+function f(x, y) {
+  var a = 9;
+  var b = 10;
+  g1(a, b);
+};
+
+// Test calling f normally and as a constructor.
+f(11, 12);
+testingConstructCall = true;
+new f(11, 12);
+
+// Make sure that the debug event listener was invoked.
+assertFalse(exception, "exception in listener " + exception);
+assertTrue(listenerComplete);
+
+Debug.setListener(null);
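
Both new debug-evaluate-locals-optimized tests rely on the warm-up pattern
sketched below (assuming d8 runs with --allow-natives-syntax, as the Flags line
requests); the %-prefixed calls are V8 runtime functions used by these tests,
not standard JavaScript.

  function candidate(x, y) { return x + y; }
  candidate(1, 2); candidate(3, 4);        // run to gather type feedback
  %OptimizeFunctionOnNextCall(candidate);  // request optimization of the next call
  candidate(5, 6);                         // this call runs optimized code
  // %GetOptimizationStatus(candidate) == 1 ("YES", see runtime.cc) is how the
  // listeners above decide whether to expect the optimized frame for f with
  // g1, g2 and g3 inlined.
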
diff --git a/test/mjsunit/debug-evaluate-locals.js b/test/mjsunit/debug-evaluate-locals.js
index 4b87829..61b6dd9 100644
--- a/test/mjsunit/debug-evaluate-locals.js
+++ b/test/mjsunit/debug-evaluate-locals.js
@@ -33,33 +33,73 @@
 exception = false;
 
 
-function checkFrame0(name, value) {
-  assertTrue(name == 'a' || name == 'b');
-  if (name == 'a') {
-    assertEquals(1, value);
-  }
-  if (name == 'b') {
-    assertEquals(2, value);
+function h() {
+  var a = 1;
+  var b = 2;
+  debugger;  // Breakpoint.
+}
+
+function checkFrame0(frame) {
+  // Frame 0 (h) has normal variables a and b.
+  var count = frame.localCount();
+  assertEquals(2, count);
+  for (var i = 0; i < count; ++i) {
+    var name = frame.localName(i);
+    var value = frame.localValue(i).value();
+    if (name == 'a') {
+      assertEquals(1, value);
+    } else {
+      assertEquals('b', name);
+      assertEquals(2, value);
+    }
   }
 }
 
 
-function checkFrame1(name, value) {
-  assertTrue(name == '.arguments' || name == 'a');
-  if (name == 'a') {
-    assertEquals(3, value);
+function g() {
+  var a = 3;
+  eval("var b = 4;");
+  h();
+}
+
+function checkFrame1(frame) {
+  // Frame 1 (g) has normal variable a (and arguments).
+  var count = frame.localCount();
+  assertEquals(2, count);
+  for (var i = 0; i < count; ++i) {
+    var name = frame.localName(i);
+    var value = frame.localValue(i).value();
+    if (name == 'a') {
+      assertEquals(3, value);
+    } else {
+      assertEquals('arguments', name);
+    }
   }
 }
 
 
-function checkFrame2(name, value) {
-  assertTrue(name == '.arguments' || name == 'a' ||
-             name == 'arguments' || name == 'b');
-  if (name == 'a') {
-    assertEquals(5, value);
+function f() {
+  var a = 5;
+  var b = 0;
+  with ({b:6}) {
+    g();
   }
-  if (name == 'b') {
-    assertEquals(0, value);
+}
+
+function checkFrame2(frame) {
+  // Frame 2 (f) has normal variables a and b (and arguments).
+  var count = frame.localCount();
+  assertEquals(3, count);
+  for (var i = 0; i < count; ++i) {
+    var name = frame.localName(i);
+    var value = frame.localValue(i).value();
+    if (name == 'a') {
+      assertEquals(5, value);
+    } else if (name == 'b') {
+      assertEquals(0, value);
+    } else {
+      assertEquals('arguments', name);
+    }
   }
 }
 
@@ -68,23 +108,9 @@
   try {
     if (event == Debug.DebugEvent.Break)
     {
-      // Frame 0 has normal variables a and b.
-      var frame0 = exec_state.frame(0);
-      checkFrame0(frame0.localName(0), frame0.localValue(0).value());
-      checkFrame0(frame0.localName(1), frame0.localValue(1).value());
-
-      // Frame 1 has normal variable a (and the .arguments variable).
-      var frame1 = exec_state.frame(1);
-      checkFrame1(frame1.localName(0), frame1.localValue(0).value());
-      checkFrame1(frame1.localName(1), frame1.localValue(1).value());
-
-      // Frame 2 has normal variables a and b (and both the .arguments and
-      // arguments variable).
-      var frame2 = exec_state.frame(2);
-      checkFrame2(frame2.localName(0), frame2.localValue(0).value());
-      checkFrame2(frame2.localName(1), frame2.localValue(1).value());
-      checkFrame2(frame2.localName(2), frame2.localValue(2).value());
-      checkFrame2(frame2.localName(3), frame2.localValue(3).value());
+      checkFrame0(exec_state.frame(0));
+      checkFrame1(exec_state.frame(1));
+      checkFrame2(exec_state.frame(2));
 
       // Evaluating a and b on frames 0, 1 and 2 produces 1, 2, 3, 4, 5 and 6.
       assertEquals(1, exec_state.frame(0).evaluate('a').value());
@@ -105,26 +131,6 @@
 // Add the debug event listener.
 Debug.setListener(listener);
 
-function h() {
-  var a = 1;
-  var b = 2;
-  debugger;  // Breakpoint.
-};
-
-function g() {
-  var a = 3;
-  eval("var b = 4;");
-  h();
-};
-
-function f() {
-  var a = 5;
-  var b = 0;
-  with ({b:6}) {
-    g();
-  }
-};
-
 f();
 
 // Make sure that the debug event listener vas invoked.
diff --git a/test/mjsunit/debug-evaluate-recursive.js b/test/mjsunit/debug-evaluate-recursive.js
index 6ee391b..f34943e 100644
--- a/test/mjsunit/debug-evaluate-recursive.js
+++ b/test/mjsunit/debug-evaluate-recursive.js
@@ -110,7 +110,7 @@
     if (event == Debug.DebugEvent.Break)
     {
       break_count++;
-      
+
       // Call functions with break using the FrameMirror directly.
       if (break_count == 1) {
         // First break event evaluates with break enabled.
diff --git a/test/mjsunit/debug-evaluate-with.js b/test/mjsunit/debug-evaluate-with.js
index 9d95a9f..c19a707 100644
--- a/test/mjsunit/debug-evaluate-with.js
+++ b/test/mjsunit/debug-evaluate-with.js
@@ -42,13 +42,13 @@
         // Break point in first with block.
         assertEquals(2, exec_state.frame(0).evaluate('a').value());
         assertEquals(2, exec_state.frame(0).evaluate('b').value());
-      } else {
+      } else if (breakPointCount == 2) {
         // Break point in second with block.
         assertEquals(3, exec_state.frame(0).evaluate('a').value());
         assertEquals(1, exec_state.frame(0).evaluate('b').value());
-
-        // Indicate that all was processed.
-        listenerComplete = true;
+      } else if (breakPointCount == 3) {
+        // Break point in eval with block.
+        assertEquals('local', exec_state.frame(0).evaluate('foo').value());
       }
     }
   } catch (e) {
@@ -72,6 +72,10 @@
 };
 
 f();
+
+var foo = "global";
+eval("with({bar:'with'}) { (function g() { var foo = 'local'; debugger; })(); }");
+
 // Make sure that the debug event listener vas invoked.
-assertTrue(listenerComplete);
+assertEquals(3, breakPointCount);
 assertFalse(exception, "exception in listener")
diff --git a/test/mjsunit/debug-evaluate.js b/test/mjsunit/debug-evaluate.js
index 182e2ac..accf656 100644
--- a/test/mjsunit/debug-evaluate.js
+++ b/test/mjsunit/debug-evaluate.js
@@ -91,24 +91,24 @@
       // parameter is passed.
       testRequest(
           dcp,
-          '{"expression":"this.longString","global":true,maxStringLength:-1}',
+          '{"expression":"this.longString","global":true,"maxStringLength":-1}',
           true,
           longString);
       testRequest(
           dcp,
-          '{"expression":"this.longString","global":true,maxStringLength:' +
+          '{"expression":"this.longString","global":true,"maxStringLength":' +
               longString.length + '}',
           true,
           longString);
       var truncatedStringSuffix = '... (length: ' + longString.length + ')';
       testRequest(
           dcp,
-          '{"expression":"this.longString","global":true,maxStringLength:0}',
+          '{"expression":"this.longString","global":true,"maxStringLength":0}',
           true,
           truncatedStringSuffix);
       testRequest(
           dcp,
-          '{"expression":"this.longString","global":true,maxStringLength:1}',
+          '{"expression":"this.longString","global":true,"maxStringLength":1}',
           true,
           longString.charAt(0) + truncatedStringSuffix);
       // Test that by default string is truncated to first 80 chars.
diff --git a/test/mjsunit/debug-handle.js b/test/mjsunit/debug-handle.js
index 98875ce..1582b9f 100644
--- a/test/mjsunit/debug-handle.js
+++ b/test/mjsunit/debug-handle.js
@@ -72,7 +72,7 @@
 
   // The base part of all lookup requests.
   var base_request = '"seq":0,"type":"request","command":"lookup"'
-  
+
   // Generate request with the supplied arguments.
   var request;
   if (arguments) {
@@ -214,7 +214,7 @@
                  'Handle not in the request: ' + handle);
       count++;
     }
-    assertEquals(count, obj.properties.length, 
+    assertEquals(count, obj.properties.length,
                  'Unexpected number of resolved objects');
 
 
diff --git a/test/mjsunit/debug-listbreakpoints.js b/test/mjsunit/debug-listbreakpoints.js
index de0114f..1d4755f 100644
--- a/test/mjsunit/debug-listbreakpoints.js
+++ b/test/mjsunit/debug-listbreakpoints.js
@@ -39,7 +39,7 @@
 // below.  The test checks for these line numbers.
 
 function g() { // line 40
-  var x = 5; 
+  var x = 5;
   var y = 6;
   var z = 7;
 };
diff --git a/test/mjsunit/debug-receiver.js b/test/mjsunit/debug-receiver.js
new file mode 100644
index 0000000..21cdde8
--- /dev/null
+++ b/test/mjsunit/debug-receiver.js
@@ -0,0 +1,126 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var test_name;
+var listener_delegate;
+var listener_called;
+var exception;
+var expected_receiver;
+var begin_test_count = 0;
+var end_test_count = 0;
+var break_count = 0;
+
+// Debug event listener which delegates. Exceptions have to be
+// explicitly caught here and checked later because exceptions in the
+// listener are not propagated to the surrounding JavaScript code.
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      break_count++;
+      listener_called = true;
+      listener_delegate(exec_state);
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Initialize for a new test.
+function BeginTest(name) {
+  test_name = name;
+  listener_called = false;
+  exception = null;
+  begin_test_count++;
+}
+
+
+// Check result of a test.
+function EndTest() {
+  assertTrue(listener_called, "listener not called for " + test_name);
+  assertNull(exception, test_name);
+  end_test_count++;
+}
+
+
+// Check that the debugger correctly reflects that the receiver is not
+// converted to object for strict mode functions.
+function Strict() { "use strict"; debugger; }
+function TestStrict(receiver) {
+  expected_receiver = receiver;
+  Strict.call(receiver);
+}
+
+listener_delegate = function(exec_state) {
+  var receiver = exec_state.frame().receiver();
+  assertTrue(!receiver.isObject());
+  assertEquals(expected_receiver, receiver.value())
+}
+
+BeginTest("strict: undefined"); TestStrict(undefined); EndTest();
+BeginTest("strict: null"); TestStrict(null); EndTest();
+BeginTest("strict: 1"); TestStrict(1); EndTest();
+BeginTest("strict: 1.2"); TestStrict(1.2); EndTest();
+BeginTest("strict: 'asdf'"); TestStrict('asdf'); EndTest();
+BeginTest("strict: true"); TestStrict(true); EndTest();
+
+
+// Check that the debugger correctly reflects the object conversion of
+// the receiver for non-strict mode functions.
+function NonStrict() { debugger; }
+function TestNonStrict(receiver) {
+  // null and undefined should be transformed to the global object and
+  // primitives should be wrapped.
+  expected_receiver = (receiver == null) ? this : Object(receiver);
+  NonStrict.call(receiver);
+}
+
+listener_delegate = function(exec_state) {
+  var receiver = exec_state.frame().receiver();
+  assertTrue(receiver.isObject());
+  assertEquals(expected_receiver, receiver.value());
+}
+
+BeginTest("non-strict: undefined"); TestNonStrict(undefined); EndTest();
+BeginTest("non-strict: null"); TestNonStrict(null); EndTest();
+BeginTest("non-strict: 1"); TestNonStrict(1); EndTest();
+BeginTest("non-strict: 1.2"); TestNonStrict(1.2); EndTest();
+BeginTest("non-strict: 'asdf'"); TestNonStrict('asdf'); EndTest();
+BeginTest("non-strict: true"); TestNonStrict(true); EndTest();
+
+
+assertEquals(begin_test_count, break_count,
+             'one or more tests did not enter the debugger');
+assertEquals(begin_test_count, end_test_count,
+             'one or more tests did not have its result checked');
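
The receiver handling the new test checks is plain ECMAScript call semantics; a
minimal sketch (not part of the patch):

  function sloppy() { return this; }
  function strict() { "use strict"; return this; }

  sloppy.call(7);     // a Number object wrapping 7 (primitive receiver is boxed)
  sloppy.call(null);  // the global object (null/undefined are replaced)
  strict.call(7);     // 7, passed through unconverted
  strict.call(null);  // null, passed through unconverted
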
diff --git a/test/mjsunit/debug-references.js b/test/mjsunit/debug-references.js
index ab6c629..763e354 100644
--- a/test/mjsunit/debug-references.js
+++ b/test/mjsunit/debug-references.js
@@ -52,7 +52,7 @@
   } else {
     request = '{' + base_request + '}'
   }
-  
+
   // Process the request and check expectation.
   var response = safeEval(dcp.processDebugJSONRequest(request));
   if (success) {
@@ -88,7 +88,7 @@
     var response = safeEval(dcp.processDebugJSONRequest(evaluate_point));
     assertTrue(response.success, "Evaluation of Point failed");
     var handle = response.body.handle;
-    
+
     // Test some legal references requests.
     testRequest(dcp, '{"handle":' + handle + ',"type":"referencedBy"}', true);
     testRequest(dcp, '{"handle":' + handle + ',"type":"constructedBy"}',
diff --git a/test/mjsunit/debug-return-value.js b/test/mjsunit/debug-return-value.js
index 3982ea9..02d6a7c 100644
--- a/test/mjsunit/debug-return-value.js
+++ b/test/mjsunit/debug-return-value.js
@@ -103,12 +103,12 @@
         // Position at the end of the function.
         assertEquals(debugger_source_position + 50,
         exec_state.frame(0).sourcePosition());
-        
+
         // Just about to return from the function.
         assertTrue(exec_state.frame(0).isAtReturn())
         assertEquals(expected_return_value,
                      exec_state.frame(0).returnValue().value());
-        
+
         // Check the same using the JSON commands.
         var dcp = exec_state.debugCommandProcessor(false);
         var request = '{"seq":0,"type":"request","command":"backtrace"}';
@@ -118,7 +118,7 @@
         assertTrue(frames[0].atReturn);
         assertEquals(expected_return_value,
                      response.lookup(frames[0].returnValue.ref).value);
-        
+
         listener_complete = true;
       }
     }
@@ -132,7 +132,7 @@
 
 // Four steps from the debugger statement in this function will position us at
 // the function return.
-//             0         1         2         3         4         5 
+//             0         1         2         3         4         5
 //             0123456789012345678901234567890123456789012345678901
 
 function f(x) {debugger; if (x) { return 1; } else { return 2; } };
diff --git a/test/mjsunit/debug-scopes.js b/test/mjsunit/debug-scopes.js
index 674f2da..1c23b0b 100644
--- a/test/mjsunit/debug-scopes.js
+++ b/test/mjsunit/debug-scopes.js
@@ -31,9 +31,9 @@
 
 
 // Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
+Debug = debug.Debug;
 
-var name;
+var test_name;
 var listener_delegate;
 var listener_called;
 var exception;
@@ -48,7 +48,7 @@
     if (event == Debug.DebugEvent.Break) {
       break_count++;
       listener_called = true;
-      listener_delegate(exec_state)
+      listener_delegate(exec_state);
     }
   } catch (e) {
     exception = e;
@@ -59,7 +59,7 @@
 Debug.setListener(listener);
 
 
-// Initialize for a noew test.
+// Initialize for a new test.
 function BeginTest(name) {
   test_name = name;
   listener_delegate = null;
@@ -72,7 +72,7 @@
 // Check result of a test.
 function EndTest() {
   assertTrue(listener_called, "listerner not called for " + test_name);
-  assertNull(exception, test_name)
+  assertNull(exception, test_name);
   end_test_count++;
 }
 
@@ -87,7 +87,9 @@
 
     // Check the global object when hitting the global scope.
     if (scopes[i] == debug.ScopeType.Global) {
-      assertEquals(this, scope.scopeObject().value());
+      // Objects don't have the same class (one is "global", the other is
+      // "Object"), so just check the properties directly.
+      assertPropertiesEqual(this, scope.scopeObject().value());
     }
   }
 
@@ -96,7 +98,7 @@
 
   // Send a scopes request and check the result.
   var json;
-  request_json = '{"seq":0,"type":"request","command":"scopes"}'
+  var request_json = '{"seq":0,"type":"request","command":"scopes"}';
   var response_json = dcp.processDebugJSONRequest(request_json);
   var response = JSON.parse(response_json);
   assertEquals(scopes.length, response.body.scopes.length);
@@ -121,7 +123,7 @@
 // Check that the content of the scope is as expected. For functions just check
 // that there is a function.
 function CheckScopeContent(content, number, exec_state) {
-  var scope = exec_state.frame().scope(number)
+  var scope = exec_state.frame().scope(number);
   var count = 0;
   for (var p in content) {
     var property_mirror = scope.scopeObject().property(p);
@@ -163,9 +165,9 @@
 
   // Send a scope request for information on a single scope and check the
   // result.
-  request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":'
+  var request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
   request_json += scope.scopeIndex();
-  request_json += '}}'
+  request_json += '}}';
   var response_json = dcp.processDebugJSONRequest(request_json);
   var response = JSON.parse(response_json);
   assertEquals(scope.scopeType(), response.body.type);
@@ -195,8 +197,8 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
-}
-local_1()
+};
+local_1();
 EndTest();
 
 
@@ -211,8 +213,8 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1}, 0, exec_state);
-}
-local_2(1)
+};
+local_2(1);
 EndTest();
 
 
@@ -228,8 +230,8 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,x:3}, 0, exec_state);
-}
-local_3(1)
+};
+local_3(1);
 EndTest();
 
 
@@ -246,8 +248,8 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
-}
-local_4(1, 2)
+};
+local_4(1, 2);
 EndTest();
 
 
@@ -263,8 +265,8 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
-}
-local_5()
+};
+local_5();
 EndTest();
 
 
@@ -280,8 +282,8 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({i:5}, 0, exec_state);
-}
-local_6()
+};
+local_6();
 EndTest();
 
 
@@ -301,8 +303,8 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
-}
-local_7(1, 2)
+};
+local_7(1, 2);
 EndTest();
 
 
@@ -320,8 +322,8 @@
                    debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
-}
-with_1()
+};
+with_1();
 EndTest();
 
 
@@ -343,8 +345,8 @@
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
   CheckScopeContent({}, 1, exec_state);
-}
-with_2()
+};
+with_2();
 EndTest();
 
 
@@ -362,8 +364,8 @@
                    debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2}, 0, exec_state);
-}
-with_3()
+};
+with_3();
 EndTest();
 
 
@@ -385,8 +387,8 @@
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:2,b:1}, 0, exec_state);
   CheckScopeContent({a:1,b:2}, 1, exec_state);
-}
-with_4()
+};
+with_4();
 EndTest();
 
 
@@ -411,8 +413,29 @@
   CheckScopeContent(with_object, 1, exec_state);
   assertEquals(exec_state.frame().scope(0).scopeObject(), exec_state.frame().scope(1).scopeObject());
   assertEquals(with_object, exec_state.frame().scope(1).scopeObject().value());
+};
+with_5();
+EndTest();
+
+
+// Nested with blocks using existing object in global code.
+BeginTest("With 6");
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent(with_object, 0, exec_state);
+  CheckScopeContent(with_object, 1, exec_state);
+  assertEquals(exec_state.frame().scope(0).scopeObject(), exec_state.frame().scope(1).scopeObject());
+  assertEquals(with_object, exec_state.frame().scope(1).scopeObject().value());
+};
+
+var with_object = {c:3,d:4};
+with(with_object) {
+  with(with_object) {
+    debugger;
+  }
 }
-with_5()
 EndTest();
 
 
@@ -433,8 +456,8 @@
                    debug.ScopeType.Closure,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1}, 1, exec_state);
-}
-closure_1(1)()
+};
+closure_1(1)();
 EndTest();
 
 
@@ -458,8 +481,8 @@
                    debug.ScopeType.Closure,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,x:3}, 1, exec_state);
-}
-closure_2(1, 2)()
+};
+closure_2(1, 2)();
 EndTest();
 
 
@@ -484,8 +507,8 @@
                    debug.ScopeType.Closure,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4}, 1, exec_state);
-}
-closure_3(1, 2)()
+};
+closure_3(1, 2)();
 EndTest();
 
 
@@ -513,8 +536,8 @@
                    debug.ScopeType.Closure,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
-}
-closure_4(1, 2)()
+};
+closure_4(1, 2)();
 EndTest();
 
 
@@ -541,8 +564,8 @@
                    debug.ScopeType.Closure,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
-}
-closure_5(1, 2)()
+};
+closure_5(1, 2)();
 EndTest();
 
 
@@ -559,7 +582,7 @@
       debugger;
       some_global = a;
       return f;
-    }
+    };
   }
   return f(a, b);
 }
@@ -571,8 +594,8 @@
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1}, 1, exec_state);
   CheckScopeContent({f:function(){}}, 2, exec_state);
-}
-closure_6(1, 2)()
+};
+closure_6(1, 2)();
 EndTest();
 
 
@@ -593,7 +616,7 @@
       debugger;
       some_global = a;
       return f;
-    }
+    };
   }
   return f(a, b);
 }
@@ -606,8 +629,8 @@
   CheckScopeContent({}, 0, exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 1, exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 2, exec_state);
-}
-closure_7(1, 2)()
+};
+closure_7(1, 2)();
 EndTest();
 
 
@@ -623,7 +646,7 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x: 2}, 0, exec_state);
-}
+};
 closure_8();
 EndTest();
 
@@ -633,7 +656,7 @@
   eval("var y = 1;");
   eval("var z = 1;");
   (function inner(x) {
-    y++; 
+    y++;
     z++;
     debugger;
   })(2);
@@ -643,7 +666,7 @@
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
                    debug.ScopeType.Global], exec_state);
-}
+};
 closure_9();
 EndTest();
 
@@ -670,7 +693,7 @@
             return f;
           }
         }
-      }
+      };
     }
   }
   return f(a, b);
@@ -690,8 +713,8 @@
   CheckScopeContent({j:13}, 3, exec_state);
   CheckScopeContent({a:1,b:2,x:9,y:10,i:11,j:12}, 4, exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 5, exec_state);
-}
-the_full_monty(1, 2)()
+};
+the_full_monty(1, 2)();
 EndTest();
 
 
@@ -710,7 +733,7 @@
                    debug.ScopeType.Closure,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x: 2}, 0, exec_state);
-}
+};
 closure_in_with_1();
 EndTest();
 
@@ -735,7 +758,7 @@
   CheckScopeContent({x: 3}, 0, exec_state);
   CheckScopeContent({x: 2}, 1, exec_state);
   CheckScopeContent({x: 1}, 2, exec_state);
-}
+};
 closure_in_with_2();
 EndTest();
 
@@ -750,7 +773,7 @@
          debugger;
        }
      })(2);
-   }
+   };
 }
 
 function closure_in_with_3() {
@@ -769,11 +792,28 @@
 EndTest();
 
 
+BeginTest("Closure inside With 4");
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x: 2}, 0, exec_state);
+  CheckScopeContent({x: 1}, 1, exec_state);
+};
+
+with({x:1}) {
+  (function(x) {
+    debugger;
+  })(2);
+}
+EndTest();
+
+
 // Test global scope.
 BeginTest("Global");
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Global], exec_state);
-}
+};
 debugger;
 EndTest();
 
@@ -793,8 +833,8 @@
                    debug.ScopeType.Local,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({e:'Exception'}, 0, exec_state);
-}
-catch_block_1()
+};
+catch_block_1();
 EndTest();
 
 
@@ -817,8 +857,8 @@
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({n:10}, 0, exec_state);
   CheckScopeContent({e:'Exception'}, 1, exec_state);
-}
-catch_block_2()
+};
+catch_block_2();
 EndTest();
 
 
@@ -841,8 +881,8 @@
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({e:'Exception'}, 0, exec_state);
   CheckScopeContent({y:78}, 1, exec_state);
-}
-catch_block_3()
+};
+catch_block_3();
 EndTest();
 
 
@@ -868,10 +908,49 @@
   CheckScopeContent({n:10}, 0, exec_state);
   CheckScopeContent({e:'Exception'}, 1, exec_state);
   CheckScopeContent({y:98}, 2, exec_state);
-}
-catch_block_4()
+};
+catch_block_4();
 EndTest();
 
 
-assertEquals(begin_test_count, break_count, 'one or more tests did not enter the debugger');
-assertEquals(begin_test_count, end_test_count, 'one or more tests did not have its result checked');
+// Test catch in global scope.
+BeginTest("Catch block 5");
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Catch,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({e:'Exception'}, 0, exec_state);
+};
+
+try {
+  throw 'Exception';
+} catch (e) {
+  debugger;
+}
+
+EndTest();
+
+
+// Closure inside catch in global code.
+BeginTest("Catch block 6");
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Catch,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x: 2}, 0, exec_state);
+  CheckScopeContent({e:'Exception'}, 1, exec_state);
+};
+
+try {
+  throw 'Exception';
+} catch (e) {
+  (function(x) {
+    debugger;
+  })(2);
+}
+EndTest();
+
+
+assertEquals(begin_test_count, break_count,
+             'one or more tests did not enter the debugger');
+assertEquals(begin_test_count, end_test_count,
+             'one or more tests did not have its result checked');
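
The new global-code cases check the same innermost-first scope ordering as the
existing function-based ones; a minimal sketch (not part of the patch) of the
chain reported at the breakpoint in "Catch block 6":

  try {
    throw 'Exception';
  } catch (e) {        // Catch scope: {e: 'Exception'}
    (function(x) {     // Local scope: {x: 2}
      debugger;        // scope chain here: [Local, Catch, Global]
    })(2);
  }
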
diff --git a/test/mjsunit/debug-script.js b/test/mjsunit/debug-script.js
index 643dd8c..9767888 100644
--- a/test/mjsunit/debug-script.js
+++ b/test/mjsunit/debug-script.js
@@ -34,13 +34,19 @@
 
 // Count script types.
 var named_native_count = 0;
+var named_native_names = {};
 var extension_count = 0;
 var normal_count = 0;
 var scripts = Debug.scripts();
 for (i = 0; i < scripts.length; i++) {
   if (scripts[i].type == Debug.ScriptType.Native) {
     if (scripts[i].name) {
-      named_native_count++;
+      // TODO(1641): Remove check for equally named native scripts once the
+      // underlying issue is fixed.
+      if (!named_native_names[scripts[i].name]) {
+        named_native_names[scripts[i].name] = true;
+        named_native_count++;
+      }
     }
   } else if (scripts[i].type == Debug.ScriptType.Extension) {
     extension_count++;
diff --git a/test/mjsunit/debug-scripts-request.js b/test/mjsunit/debug-scripts-request.js
index 41bff0e..faa732e 100644
--- a/test/mjsunit/debug-scripts-request.js
+++ b/test/mjsunit/debug-scripts-request.js
@@ -71,7 +71,7 @@
     testArguments(dcp, '{"types":2}', true);
     testArguments(dcp, '{"types":4}', true);
     testArguments(dcp, '{"types":7}', true);
-    testArguments(dcp, '{"types":0xFF}', true);
+    testArguments(dcp, '{"types":255}', true);
 
     // Test request for all scripts.
     var request = '{' + base_request + '}'
diff --git a/test/mjsunit/debug-step-2.js b/test/mjsunit/debug-step-2.js
new file mode 100644
index 0000000..502b426
--- /dev/null
+++ b/test/mjsunit/debug-step-2.js
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// This test checks that full code compiled without debug break slots
+// is recompiled with debug break slots when debugging is started.
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var bp;
+var done = false;
+var step_count = 0;
+
+// Debug event listener which steps until the global variable done is true.
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    if (!done) exec_state.prepareStep(Debug.StepAction.StepNext);
+    step_count++;
+  }
+};
+
+// Set the global variable state to prepare the stepping test.
+function prepare_step_test() {
+  done = false;
+  step_count = 0;
+}
+
+// Test function to step through.
+function f() {
+  var i = 1;
+  var j = 2;
+  done = true;
+};
+
+prepare_step_test();
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+bp = Debug.setBreakPoint(f, 1);
+
+prepare_step_test();
+f();
+assertEquals(4, step_count);
+Debug.clearBreakPoint(bp);
+
+// Set a breakpoint on the first var statement (line 1).
+bp = Debug.setBreakPoint(f, 1);
+
+// Step through the function ensuring that the var statements are hit as well.
+prepare_step_test();
+f();
+assertEquals(4, step_count);
+
+// Clear the breakpoint and check that no stepping happens.
+Debug.clearBreakPoint(bp);
+prepare_step_test();
+f();
+assertEquals(0, step_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-stepin-call-function-stub.js b/test/mjsunit/debug-stepin-call-function-stub.js
index c5cf8fd..053b8bf 100644
--- a/test/mjsunit/debug-stepin-call-function-stub.js
+++ b/test/mjsunit/debug-stepin-call-function-stub.js
@@ -62,7 +62,7 @@
 Debug.setListener(listener);
 
 
-function g() { 
+function g() {
    return "s";  // expected line
 }
 
@@ -71,7 +71,7 @@
   var s = 1 +f(10);
 }
 
-function g2() { 
+function g2() {
    return "s2";  // expected line
 }
 
diff --git a/test/mjsunit/debug-stepin-constructor.js b/test/mjsunit/debug-stepin-constructor.js
index 6ee3347..5549814 100644
--- a/test/mjsunit/debug-stepin-constructor.js
+++ b/test/mjsunit/debug-stepin-constructor.js
@@ -38,7 +38,7 @@
     if (exec_state.frameCount() > 1) {
       exec_state.prepareStep(Debug.StepAction.StepIn);
     }
-    
+
     // Test that there is a script.
     assertTrue(typeof(event_data.func().script()) == 'object');
   }
diff --git a/test/mjsunit/delete-in-with.js b/test/mjsunit/delete-in-with.js
index 1efc18d..cbcfe99 100644
--- a/test/mjsunit/delete-in-with.js
+++ b/test/mjsunit/delete-in-with.js
@@ -29,6 +29,6 @@
 // objects from within 'with' statements.
 (function(){
   var tmp = { x: 12 };
-  with (tmp) { assertTrue(delete x); }  
+  with (tmp) { assertTrue(delete x); }
   assertFalse("x" in tmp);
 })();
diff --git a/test/mjsunit/div-mod.js b/test/mjsunit/div-mod.js
index 3e343de..c314495 100644
--- a/test/mjsunit/div-mod.js
+++ b/test/mjsunit/div-mod.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 // Test fast div and mod.
 
 function divmod(div_func, mod_func, x, y) {
@@ -190,3 +192,113 @@
 }
 
 negative_zero_modulus_test();
+
+
+function lithium_integer_mod() {
+  var left_operands = [
+    0,
+    305419896,  // 0x12345678
+  ];
+
+  // Test the standard lithium code for modulo operations.
+  var mod_func;
+  for (var i = 0; i < left_operands.length; i++) {
+    for (var j = 0; j < divisors.length; j++) {
+      mod_func = this.eval("(function(left) { return left % " + divisors[j]+ "; })");
+      assertEquals((mod_func)(left_operands[i]), left_operands[i] % divisors[j]);
+      assertEquals((mod_func)(-left_operands[i]), -left_operands[i] % divisors[j]);
+    }
+  }
+
+  var results_powers_of_two = [
+    // 0
+    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+    // 305419896 == 0x12345678
+    [0, 0, 0, 8, 24, 56, 120, 120, 120, 632, 1656, 1656, 5752, 5752, 22136, 22136, 22136, 22136, 284280, 284280, 1332856, 3430008, 3430008, 3430008, 3430008, 36984440, 36984440, 36984440, 305419896, 305419896, 305419896],
+  ];
+
+  // Test the lithium code for modulo operations with a variable power of two
+  // right hand side operand.
+  for (var i = 0; i < left_operands.length; i++) {
+    for (var j = 0; j < 31; j++) {
+      assertEquals(results_powers_of_two[i][j], left_operands[i] % (2 << j));
+      assertEquals(results_powers_of_two[i][j], left_operands[i] % -(2 << j));
+      assertEquals(-results_powers_of_two[i][j], -left_operands[i] % (2 << j));
+      assertEquals(-results_powers_of_two[i][j], -left_operands[i] % -(2 << j));
+    }
+  }
+
+  // Test the lithium code for modulo operations with a constant power of two
+  // right hand side operand.
+  for (var i = 0; i < left_operands.length; i++) {
+    // With positive left hand side operand.
+    assertEquals(results_powers_of_two[i][0], left_operands[i] % -(2 << 0));
+    assertEquals(results_powers_of_two[i][1], left_operands[i] % (2 << 1));
+    assertEquals(results_powers_of_two[i][2], left_operands[i] % -(2 << 2));
+    assertEquals(results_powers_of_two[i][3], left_operands[i] % (2 << 3));
+    assertEquals(results_powers_of_two[i][4], left_operands[i] % -(2 << 4));
+    assertEquals(results_powers_of_two[i][5], left_operands[i] % (2 << 5));
+    assertEquals(results_powers_of_two[i][6], left_operands[i] % -(2 << 6));
+    assertEquals(results_powers_of_two[i][7], left_operands[i] % (2 << 7));
+    assertEquals(results_powers_of_two[i][8], left_operands[i] % -(2 << 8));
+    assertEquals(results_powers_of_two[i][9], left_operands[i] % (2 << 9));
+    assertEquals(results_powers_of_two[i][10], left_operands[i] % -(2 << 10));
+    assertEquals(results_powers_of_two[i][11], left_operands[i] % (2 << 11));
+    assertEquals(results_powers_of_two[i][12], left_operands[i] % -(2 << 12));
+    assertEquals(results_powers_of_two[i][13], left_operands[i] % (2 << 13));
+    assertEquals(results_powers_of_two[i][14], left_operands[i] % -(2 << 14));
+    assertEquals(results_powers_of_two[i][15], left_operands[i] % (2 << 15));
+    assertEquals(results_powers_of_two[i][16], left_operands[i] % -(2 << 16));
+    assertEquals(results_powers_of_two[i][17], left_operands[i] % (2 << 17));
+    assertEquals(results_powers_of_two[i][18], left_operands[i] % -(2 << 18));
+    assertEquals(results_powers_of_two[i][19], left_operands[i] % (2 << 19));
+    assertEquals(results_powers_of_two[i][20], left_operands[i] % -(2 << 20));
+    assertEquals(results_powers_of_two[i][21], left_operands[i] % (2 << 21));
+    assertEquals(results_powers_of_two[i][22], left_operands[i] % -(2 << 22));
+    assertEquals(results_powers_of_two[i][23], left_operands[i] % (2 << 23));
+    assertEquals(results_powers_of_two[i][24], left_operands[i] % -(2 << 24));
+    assertEquals(results_powers_of_two[i][25], left_operands[i] % (2 << 25));
+    assertEquals(results_powers_of_two[i][26], left_operands[i] % -(2 << 26));
+    assertEquals(results_powers_of_two[i][27], left_operands[i] % (2 << 27));
+    assertEquals(results_powers_of_two[i][28], left_operands[i] % -(2 << 28));
+    assertEquals(results_powers_of_two[i][29], left_operands[i] % (2 << 29));
+    assertEquals(results_powers_of_two[i][30], left_operands[i] % -(2 << 30));
+    // With negative left hand side operand.
+    assertEquals(-results_powers_of_two[i][0], -left_operands[i] % -(2 << 0));
+    assertEquals(-results_powers_of_two[i][1], -left_operands[i] % (2 << 1));
+    assertEquals(-results_powers_of_two[i][2], -left_operands[i] % -(2 << 2));
+    assertEquals(-results_powers_of_two[i][3], -left_operands[i] % (2 << 3));
+    assertEquals(-results_powers_of_two[i][4], -left_operands[i] % -(2 << 4));
+    assertEquals(-results_powers_of_two[i][5], -left_operands[i] % (2 << 5));
+    assertEquals(-results_powers_of_two[i][6], -left_operands[i] % -(2 << 6));
+    assertEquals(-results_powers_of_two[i][7], -left_operands[i] % (2 << 7));
+    assertEquals(-results_powers_of_two[i][8], -left_operands[i] % -(2 << 8));
+    assertEquals(-results_powers_of_two[i][9], -left_operands[i] % (2 << 9));
+    assertEquals(-results_powers_of_two[i][10], -left_operands[i] % -(2 << 10));
+    assertEquals(-results_powers_of_two[i][11], -left_operands[i] % (2 << 11));
+    assertEquals(-results_powers_of_two[i][12], -left_operands[i] % -(2 << 12));
+    assertEquals(-results_powers_of_two[i][13], -left_operands[i] % (2 << 13));
+    assertEquals(-results_powers_of_two[i][14], -left_operands[i] % -(2 << 14));
+    assertEquals(-results_powers_of_two[i][15], -left_operands[i] % (2 << 15));
+    assertEquals(-results_powers_of_two[i][16], -left_operands[i] % -(2 << 16));
+    assertEquals(-results_powers_of_two[i][17], -left_operands[i] % (2 << 17));
+    assertEquals(-results_powers_of_two[i][18], -left_operands[i] % -(2 << 18));
+    assertEquals(-results_powers_of_two[i][19], -left_operands[i] % (2 << 19));
+    assertEquals(-results_powers_of_two[i][20], -left_operands[i] % -(2 << 20));
+    assertEquals(-results_powers_of_two[i][21], -left_operands[i] % (2 << 21));
+    assertEquals(-results_powers_of_two[i][22], -left_operands[i] % -(2 << 22));
+    assertEquals(-results_powers_of_two[i][23], -left_operands[i] % (2 << 23));
+    assertEquals(-results_powers_of_two[i][24], -left_operands[i] % -(2 << 24));
+    assertEquals(-results_powers_of_two[i][25], -left_operands[i] % (2 << 25));
+    assertEquals(-results_powers_of_two[i][26], -left_operands[i] % -(2 << 26));
+    assertEquals(-results_powers_of_two[i][27], -left_operands[i] % (2 << 27));
+    assertEquals(-results_powers_of_two[i][28], -left_operands[i] % -(2 << 28));
+    assertEquals(-results_powers_of_two[i][29], -left_operands[i] % (2 << 29));
+    assertEquals(-results_powers_of_two[i][30], -left_operands[i] % -(2 << 30));
+  }
+
+}
+
+lithium_integer_mod();
+%OptimizeFunctionOnNextCall(lithium_integer_mod)
+lithium_integer_mod();
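
The power-of-two divisors above exercise the strength reduction an optimizing
compiler can apply to such moduli; a sketch of the underlying identity (my
formulation, not part of the patch):

  // For a non-negative integer x below 2^31 and 1 <= k <= 30:
  //   x % (1 << k) === (x & ((1 << k) - 1))
  // and a JavaScript % result takes the sign of the dividend, which is why
  // every case is run with both left_operands[i] and -left_operands[i].
  var x = 305419896;  // 0x12345678, the non-trivial left operand above
  var k = 8;
  assertEquals(x % (1 << k), x & ((1 << k) - 1));
  assertEquals(-1, -5 % 4);
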
diff --git a/test/mjsunit/double-equals.js b/test/mjsunit/double-equals.js
index a68d7ea..5ebf92c 100644
--- a/test/mjsunit/double-equals.js
+++ b/test/mjsunit/double-equals.js
@@ -31,84 +31,206 @@
  * implementation of assertEquals.
  */
 
-assertTrue (void 0 == void 0, "void 0 == void 0");
-assertTrue (null == null,     "null == null");
-assertFalse(NaN == NaN,       "NaN == NaN");
-assertFalse(NaN == 0,         "NaN == 0");
-assertFalse(0 == NaN,         "0 == NaN");
-assertFalse(NaN == Infinity,  "NaN == Inf");
-assertFalse(Infinity == NaN,  "Inf == NaN");
+function testEqual(a, b) {
+  assertTrue(a == b);
+  assertTrue(b == a);
+  assertFalse(a != b);
+  assertFalse(b != a);
+}
 
-assertTrue(Number.MAX_VALUE == Number.MAX_VALUE, "MAX == MAX");
-assertTrue(Number.MIN_VALUE == Number.MIN_VALUE, "MIN == MIN");
-assertTrue(Infinity == Infinity,                 "Inf == Inf");
-assertTrue(-Infinity == -Infinity,               "-Inf == -Inf");
+function testNotEqual(a, b) {
+  assertFalse(a == b);
+  assertFalse(b == a);
+  assertTrue(a != b);
+  assertTrue(b != a);
+}
 
-assertTrue(0 == 0,   "0 == 0");
-assertTrue(0 == -0,  "0 == -0");
-assertTrue(-0 == 0,  "-0 == 0");
-assertTrue(-0 == -0, "-0 == -0");
+// Object where ToPrimitive returns the value via valueOf.
+function Wrapper(value) {
+  this.value = value;
+  this.valueOf = function () { return this.value; };
+}
 
-assertFalse(0.9 == 1,             "0.9 == 1");
-assertFalse(0.999999 == 1,        "0.999999 == 1");
-assertFalse(0.9999999999 == 1,    "0.9999999999 == 1");
-assertFalse(0.9999999999999 == 1, "0.9999999999999 == 1");
+// Object where ToPrimitive returns the value by falling back to toString when
+// valueOf isn't a function.
+function Wrapper2(value) {
+  this.value = value;
+  this.valueOf = null;
+  this.toString = function () { return this.value; };
+}
 
-assertTrue('hello' == 'hello', "'hello' == 'hello'");
 
-assertTrue (true == true,   "true == true");
-assertTrue (false == false, "false == false");
-assertFalse(true == false,  "true == false");
-assertFalse(false == true,  "false == true");
+// Compare values of same type.
 
-assertFalse(new Wrapper(null) == new Wrapper(null),   "new Wrapper(null) == new Wrapper(null)");
-assertFalse(new Boolean(true) == new Boolean(true),   "new Boolean(true) == new Boolean(true)");
-assertFalse(new Boolean(false) == new Boolean(false), "new Boolean(false) == new Boolean(false)");
+// Numbers are equal if they have the same value; NaN isn't equal to anything
+// (not even itself), and +0 and -0 are equal to each other.
+
+testNotEqual(NaN, NaN);
+testNotEqual(NaN, 0);
+testNotEqual(NaN, Infinity);
+
+testEqual(Number.MAX_VALUE, Number.MAX_VALUE);
+testEqual(Number.MIN_VALUE, Number.MIN_VALUE);
+testEqual(Infinity, Infinity);
+testEqual(-Infinity, -Infinity);
+
+testEqual(0, 0);
+testEqual(0, -0);
+testEqual(-0, -0);
+
+testNotEqual(0.9, 1);
+testNotEqual(0.999999, 1);
+testNotEqual(0.9999999999, 1);
+testNotEqual(0.9999999999999, 1);
+
+// Strings are equal if they contain the same code points.
+
+testEqual('hello', 'hello');
+testEqual('hello', 'hel' + 'lo');
+testEqual('', '');
+testEqual('\u0020\x20', '  ');  // Escapes are not part of the value.
+
+// Booleans are equal if they are the same.
+
+testEqual(true, true);
+testEqual(false, false);
+testNotEqual(true, false);
+
+// Null and undefined are equal to themselves.
+
+testEqual(null, null);
+testEqual(undefined, undefined);
+
+// Objects are equal only if they are the same object.
+
+testEqual(Math, Math);
+testEqual(Object.prototype, Object.prototype);
+
 
 (function () {
   var x = new Wrapper(null);
   var y = x, z = x;
-  assertTrue(y == x);
+  testEqual(y, x);
 })();
 
 (function () {
   var x = new Boolean(true);
   var y = x, z = x;
-  assertTrue(y == x);
+  testEqual(y, x);
 })();
 
 (function () {
   var x = new Boolean(false);
   var y = x, z = x;
-  assertTrue(y == x);
+  testEqual(y, x);
 })();
 
-assertTrue(null == void 0,             "null == void 0");
-assertTrue(void 0 == null,             "void 0 == null");
-assertFalse(new Wrapper(null) == null, "new Wrapper(null) == null");
-assertFalse(null == new Wrapper(null), "null == new Wrapper(null)");
+// Test comparing values of different types.
 
-assertTrue(1 == '1',       "1 == '1");
-assertTrue(255 == '0xff',  "255 == '0xff'");
-assertTrue(0 == '\r',      "0 == '\\r'");
-assertTrue(1e19 == '1e19', "1e19 == '1e19'");
+// Null and undefined are equal to each other, and to nothing else.
+testEqual(null, undefined);
+testEqual(undefined, null);
 
-assertTrue(new Boolean(true) == true,   "new Boolean(true) == true");
-assertTrue(new Boolean(false) == false, "new Boolean(false) == false");
-assertTrue(true == new Boolean(true),   "true == new Boolean(true)");
-assertTrue(false == new Boolean(false), "false == new Boolean(false)");
+testNotEqual(null, new Wrapper(null));
+testNotEqual(null, 0);
+testNotEqual(null, false);
+testNotEqual(null, "");
+testNotEqual(null, new Object());
+testNotEqual(undefined, new Wrapper(undefined));
+testNotEqual(undefined, 0);
+testNotEqual(undefined, false);
+testNotEqual(undefined, "");
+testNotEqual(undefined, new Object());
 
-assertTrue(Boolean(true) == true,   "Boolean(true) == true");
-assertTrue(Boolean(false) == false, "Boolean(false) == false");
-assertTrue(true == Boolean(true),   "true == Boolean(true)");
-assertTrue(false == Boolean(false), "false == Boolean(false)");
+// Numbers compared to Strings will convert the string to a number using
+// the internal ToNumber conversion.
 
-assertTrue(new Wrapper(true) == true,   "new Wrapper(true) == true");
-assertTrue(new Wrapper(false) == false, "new Wrapper(false) == false");
-assertTrue(true == new Wrapper(true),   "true = new Wrapper(true)");
-assertTrue(false == new Wrapper(false), "false = new Wrapper(false)");
+testEqual(1, '1');
+testEqual(255, '0xff');
+testEqual(0, '\r');  // ToNumber ignores leading and trailing whitespace.
+testEqual(1e19, '1e19');
+testEqual(Infinity, "Infinity");
 
-function Wrapper(value) {
-  this.value = value;
-  this.valueOf = function () { return this.value; };
+// Booleans compared to anything else will be converted to numbers.
+testEqual(false, 0);
+testEqual(true, 1);
+testEqual(false, "0");  // String also converted to number.
+testEqual(true, "1");
+
+// Objects compared to a Number or String (or a Boolean, since that is
+// converted to a Number too) are converted to primitive using ToPrimitive
+// with NO HINT.  Having no hint means Date gets a string hint, and everything
+// else gets a number hint.
+
+testEqual(new Boolean(true), true);
+testEqual(new Boolean(true), 1);  // First to primitive boolean, then to number.
+testEqual(new Boolean(false), false);
+testEqual(new Boolean(false), 0);
+
+testEqual(new Wrapper(true), true);
+testEqual(new Wrapper(true), 1);
+testEqual(new Wrapper(false), false);
+testEqual(new Wrapper(false), 0);
+
+testEqual(new Wrapper2(true), true);
+testEqual(new Wrapper2(true), 1);
+testEqual(new Wrapper2(false), false);
+testEqual(new Wrapper2(false), 0);
+
+testEqual(new Number(1), true);
+testEqual(new Number(1), 1);
+testEqual(new Number(0), false);
+testEqual(new Number(0), 0);
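+
+// A small additional sketch (the 'both' object below is a hypothetical helper,
+// not one of the wrappers above): when an object defines both valueOf and
+// toString, the number hint means valueOf is consulted and toString is never
+// reached.
+var both = {
+  valueOf: function () { return 7; },
+  toString: function () { return "1"; }
+};
+testEqual(both, 7);     // Converted via valueOf.
+testNotEqual(both, 1);  // toString's result is not used.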
+
+// Date objects convert to string, not number (and the string does not
+// convert to the number).
+testEqual(new Date(42), String(new Date(42)));
+testNotEqual(new Date(42), Number(new Date(42)));
+var dnow = new Date();
+testEqual(dnow, dnow);
+testEqual(dnow, String(dnow));
+testNotEqual(dnow, Number(dnow));
+
+// The comparison doesn't just call toString; it uses ToPrimitive (string hint
+// for Date), which tries toString first and falls back to valueOf.
+dnow.toString = null;
+testEqual(dnow, Number(dnow));
+dnow.valueOf = function () { return "42"; };
+testEqual(dnow, 42);
+dnow.toString = function () { return "1"; };
+testEqual(dnow, true);
+
+
+// Objects compared to other objects, or to null and undefined, are not
+// converted to primitive.
+testNotEqual(new Wrapper(null), new Wrapper(null));
+testNotEqual(new Boolean(true), new Boolean(true));
+testNotEqual(new Boolean(false), new Boolean(false));
+testNotEqual(new String("a"), new String("a"));
+testNotEqual(new Number(42), new Number(42));
+testNotEqual(new Date(42), new Date(42));
+testNotEqual(new Array(42), new Array(42));
+testNotEqual(new Object(), new Object());
+
+// Object that can't be converted to primitive.
+var badObject = {
+  valueOf: null,
+  toString: function() {
+    return this;  // Not primitive.
+  }
+};
+
+testEqual(badObject, badObject);
+testNotEqual(badObject, {});
+testNotEqual(badObject, null);
+testNotEqual(badObject, undefined);
+// Forcing conversion will throw.
+function testBadConversion(value) {
+  assertThrows(function() { return badObject == value; });
+  assertThrows(function() { return badObject != value; });
+  assertThrows(function() { return value == badObject; });
+  assertThrows(function() { return value != badObject; });
 }
+testBadConversion(0);
+testBadConversion("string");
+testBadConversion(true);
diff --git a/test/mjsunit/element-kind.js b/test/mjsunit/element-kind.js
new file mode 100644
index 0000000..48a029f
--- /dev/null
+++ b/test/mjsunit/element-kind.js
@@ -0,0 +1,102 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Test element kind of objects
+
+var element_kind = {
+  fast_elements                     :  1,
+  fast_double_elements              :  2,
+  dictionary_elements               :  3,
+  external_byte_elements            :  4,
+  external_unsigned_byte_elements   :  5,
+  external_short_elements           :  6,
+  external_unsigned_short_elements  :  7,
+  external_int_elements             :  8,
+  external_unsigned_int_elements    :  9,
+  external_float_elements           : 10,
+  external_double_elements          : 11,
+  external_pixel_elements           : 12
+}
+
+// We expect an object to only be of one element kind.
+function assertKind(expected, obj){
+  assertEquals(expected == element_kind.fast_elements,
+               %HasFastElements(obj));
+  assertEquals(expected == element_kind.fast_double_elements,
+               %HasFastDoubleElements(obj));
+  assertEquals(expected == element_kind.dictionary_elements,
+               %HasDictionaryElements(obj));
+  assertEquals(expected == element_kind.external_byte_elements,
+               %HasExternalByteElements(obj));
+  assertEquals(expected == element_kind.external_unsigned_byte_elements,
+               %HasExternalUnsignedByteElements(obj));
+  assertEquals(expected == element_kind.external_short_elements,
+               %HasExternalShortElements(obj));
+  assertEquals(expected == element_kind.external_unsigned_short_elements,
+               %HasExternalUnsignedShortElements(obj));
+  assertEquals(expected == element_kind.external_int_elements,
+               %HasExternalIntElements(obj));
+  assertEquals(expected == element_kind.external_unsigned_int_elements,
+               %HasExternalUnsignedIntElements(obj));
+  assertEquals(expected == element_kind.external_float_elements,
+               %HasExternalFloatElements(obj));
+  assertEquals(expected == element_kind.external_double_elements,
+               %HasExternalDoubleElements(obj));
+  assertEquals(expected == element_kind.external_pixel_elements,
+               %HasExternalPixelElements(obj));
+  // Every external kind is also an external array.
+  assertEquals(expected >= element_kind.external_byte_elements,
+               %HasExternalArrayElements(obj));
+}
+
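+// Plain objects and densely filled arrays use fast elements; a large, sparsely
+// allocated array such as new Array(0xC0C0A) falls back to dictionary elements,
+// and each typed external array maps to its own external kind.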
+var me = {};
+assertKind(element_kind.fast_elements, me);
+me.dance = 0xD15C0;
+me.drink = 0xC0C0A;
+assertKind(element_kind.fast_elements, me);
+
+var you = new Array();
+for(i = 0; i < 1337; i++) {
+  you[i] = i;
+}
+assertKind(element_kind.fast_elements, you);
+
+assertKind(element_kind.dictionary_elements, new Array(0xC0C0A));
+
+// fast_double_elements not yet available
+
+
+assertKind(element_kind.external_byte_elements,           new Int8Array(9001));
+assertKind(element_kind.external_unsigned_byte_elements,  new Uint8Array(007));
+assertKind(element_kind.external_short_elements,          new Int16Array(666));
+assertKind(element_kind.external_unsigned_short_elements, new Uint16Array(42));
+assertKind(element_kind.external_int_elements,            new Int32Array(0xF));
+assertKind(element_kind.external_unsigned_int_elements,   new Uint32Array(23));
+assertKind(element_kind.external_float_elements,          new Float32Array(7));
+assertKind(element_kind.external_double_elements,         new Float64Array(0));
+assertKind(element_kind.external_pixel_elements,          new PixelArray(512));
diff --git a/test/mjsunit/error-constructors.js b/test/mjsunit/error-constructors.js
index 8f463fc..966a162 100644
--- a/test/mjsunit/error-constructors.js
+++ b/test/mjsunit/error-constructors.js
@@ -59,3 +59,24 @@
 ReferenceError.prototype.name = "not a reference error";
 assertEquals("ReferenceError", ReferenceError.prototype.name);
 
+// Check that message and name are not enumerable on Error objects.
+var desc = Object.getOwnPropertyDescriptor(Error.prototype, 'name');
+assertFalse(desc['enumerable']);
+desc = Object.getOwnPropertyDescriptor(Error.prototype, 'message');
+assertFalse(desc['enumerable']);
+
+var e = new Error("foobar");
+desc = Object.getOwnPropertyDescriptor(e, 'message');
+assertFalse(desc['enumerable']);
+desc = Object.getOwnPropertyDescriptor(e, 'arguments');
+assertFalse(desc['enumerable']);
+desc = Object.getOwnPropertyDescriptor(e, 'type');
+assertFalse(desc['enumerable']);
+desc = Object.getOwnPropertyDescriptor(e, 'stack');
+assertFalse(desc['enumerable']);
+
+// 'name' is not tested above, but more generally the error object should have
+// no enumerable properties, so we simply assert that.
+for (var v in e) {
+  assertUnreachable();
+}
diff --git a/test/mjsunit/eval.js b/test/mjsunit/eval.js
index 25cfcb6..b6284ba 100644
--- a/test/mjsunit/eval.js
+++ b/test/mjsunit/eval.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -155,3 +155,12 @@
     return (function() { return eval(2); })();
   })();
 assertEquals(4, result);
+
+// Regression test: calling a function named eval found in a context that is
+// not the global context should get the global object as receiver.
+result =
+    (function () {
+      var eval = function (x) { return this; };
+      with ({}) { return eval('ignore'); }
+    })();
+assertEquals(this, result);
diff --git a/test/mjsunit/external-array.js b/test/mjsunit/external-array.js
index 45d8be5..81c6cfe 100644
--- a/test/mjsunit/external-array.js
+++ b/test/mjsunit/external-array.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --expose-gc
 
 // This is a regression test for overlapping key and value registers.
 function f(a) {
@@ -43,15 +43,6 @@
 assertEquals(0, a[0]);
 assertEquals(0, a[1]);
 
-// Test the correct behavior of the |length| property (which is read-only).
-a = new Int32Array(42);
-assertEquals(42, a.length);
-a.length = 2;
-assertEquals(42, a.length);
-assertTrue(delete a.length);
-a.length = 2
-assertEquals(2, a.length);
-
 // Test the correct behavior of the |BYTES_PER_ELEMENT| property (which is
 // "constant", but not read-only).
 a = new Int32Array(2);
@@ -63,3 +54,222 @@
 a = new Int16Array(2);
 assertEquals(2, a.BYTES_PER_ELEMENT);
 
+// Test Float64Arrays.
+function get(a, index) {
+  return a[index];
+}
+function set(a, index, value) {
+  a[index] = value;
+}
+function temp() {
+  var array = new Float64Array(2);
+  for (var i = 0; i < 5; i++) {
+    set(array, 0, 2.5);
+    assertEquals(2.5, array[0]);
+  }
+  %OptimizeFunctionOnNextCall(set);
+  set(array, 0, 2.5);
+  assertEquals(2.5, array[0]);
+  set(array, 1, 3.5);
+  assertEquals(3.5, array[1]);
+  for (var i = 0; i < 5; i++) {
+    assertEquals(2.5, get(array, 0));
+    assertEquals(3.5, array[1]);
+  }
+  %OptimizeFunctionOnNextCall(get);
+  assertEquals(2.5, get(array, 0));
+  assertEquals(3.5, get(array, 1));
+}
+
+// Test non-number parameters.
+var array_with_length_from_non_number = new Int32Array("2");
+assertEquals(2, array_with_length_from_non_number.length);
+array_with_length_from_non_number = new Int32Array(undefined);
+assertEquals(0, array_with_length_from_non_number.length);
+var foo = { valueOf: function() { return 3; } };
+array_with_length_from_non_number = new Int32Array(foo);
+assertEquals(3, array_with_length_from_non_number.length);
+foo = { toString: function() { return "4"; } };
+array_with_length_from_non_number = new Int32Array(foo);
+assertEquals(4, array_with_length_from_non_number.length);
+
+
+// Test loads and stores.
+types = [Array, Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array,
+         Uint32Array, PixelArray, Float32Array, Float64Array];
+
+test_result_nan = [NaN, 0, 0, 0, 0, 0, 0, 0, NaN, NaN];
+test_result_low_int = [-1, -1, 255, -1, 65535, -1, 0xFFFFFFFF, 0, -1, -1];
+test_result_low_double = [-1.25, -1, 255, -1, 65535, -1, 0xFFFFFFFF, 0, -1.25, -1.25];
+test_result_middle = [253.75, -3, 253, 253, 253, 253, 253, 254, 253.75, 253.75];
+test_result_high_int = [256, 0, 0, 256, 256, 256, 256, 255, 256, 256];
+test_result_high_double = [256.25, 0, 0, 256, 256, 256, 256, 255, 256.25, 256.25];
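+
+// Descriptive note on the tables above: the untyped Array keeps stored values
+// as-is, the integer-typed arrays truncate towards zero and wrap modulo their
+// bit width, PixelArray clamps to the range [0, 255], and the float arrays
+// store the double value. For example, storing -1 reads back as 255 from a
+// Uint8Array, as 0 from a PixelArray and as -1 from a Float64Array.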
+
+const kElementCount = 40;
+
+function test_load(array, sum) {
+  for (var i = 0; i < kElementCount; i++) {
+    sum += array[i];
+  }
+  return sum;
+}
+
+function test_load_const_key(array, sum) {
+  sum += array[0];
+  sum += array[1];
+  sum += array[2];
+  return sum;
+}
+
+function test_store(array, sum) {
+  for (var i = 0; i < kElementCount; i++) {
+    sum += array[i] = i+1;
+  }
+  return sum;
+}
+
+function test_store_const_key(array, sum) {
+  sum += array[0] = 1;
+  sum += array[1] = 2;
+  sum += array[2] = 3;
+  return sum;
+}
+
+function zero() {
+  return 0.0;
+}
+
+function test_store_middle_tagged(array, sum) {
+  array[0] = 253.75;
+  return array[0];
+}
+
+function test_store_high_tagged(array, sum) {
+  array[0] = 256.25;
+  return array[0];
+}
+
+function test_store_middle_double(array, sum) {
+  array[0] = 253.75 + zero(); // + forces double type feedback
+  return array[0];
+}
+
+function test_store_high_double(array, sum) {
+  array[0] = 256.25 + zero(); // + forces double type feedback
+  return array[0];
+}
+
+function test_store_low_int(array, sum) {
+  array[0] = -1;
+  return array[0];
+}
+
+function test_store_low_tagged(array, sum) {
+  array[0] = -1.25;
+  return array[0];
+}
+
+function test_store_low_double(array, sum) {
+  array[0] = -1.25 + zero(); // + forces double type feedback
+  return array[0];
+}
+
+function test_store_high_int(array, sum) {
+  array[0] = 256;
+  return array[0];
+}
+
+function test_store_nan(array, sum) {
+  array[0] = NaN;
+  return array[0];
+}
+
+const kRuns = 10;
+
+function run_test(test_func, array, expected_result) {
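+  // Warm up with a few unoptimized calls to gather type feedback, force
+  // optimization of the test function, run it for kRuns iterations, and
+  // finally deoptimize it so the next test starts from a clean slate.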
+  for (var i = 0; i < 5; i++) test_func(array, 0);
+  %OptimizeFunctionOnNextCall(test_func);
+  var sum = 0;
+  for (var i = 0; i < kRuns; i++) {
+    sum = test_func(array, sum);
+  }
+  assertEquals(expected_result, sum);
+  %DeoptimizeFunction(test_func);
+  gc();  // Makes V8 forget about type information for test_func.
+}
+
+function run_bounds_test(a) {
+  assertEquals(undefined, a[kElementCount]);
+  a[kElementCount] = 456;
+  assertEquals(undefined, a[kElementCount]);
+  assertEquals(undefined, a[kElementCount+1]);
+  a[kElementCount+1] = 456;
+  assertEquals(undefined, a[kElementCount+1]);
+}
+
+for (var t = 0; t < types.length; t++) {
+  var type = types[t];
+  var a = new type(kElementCount);
+
+  for (var i = 0; i < kElementCount; i++) {
+    a[i] = i;
+  }
+
+  // Run test functions defined above.
+  run_test(test_load, a, 780 * kRuns);
+  run_test(test_load_const_key, a, 3 * kRuns);
+  run_test(test_store, a, 820 * kRuns);
+  run_test(test_store_const_key, a, 6 * kRuns);
+  run_test(test_store_low_int, a, test_result_low_int[t]);
+  run_test(test_store_low_double, a, test_result_low_double[t]);
+  run_test(test_store_low_tagged, a, test_result_low_double[t]);
+  run_test(test_store_high_int, a, test_result_high_int[t]);
+  run_test(test_store_nan, a, test_result_nan[t]);
+  run_test(test_store_middle_double, a, test_result_middle[t]);
+  run_test(test_store_middle_tagged, a, test_result_middle[t]);
+  run_test(test_store_high_double, a, test_result_high_double[t]);
+  run_test(test_store_high_tagged, a, test_result_high_double[t]);
+
+  // Test the correct behavior of the |length| property (which is read-only).
+  if (t != 0) {
+    assertEquals(kElementCount, a.length);
+    a.length = 2;
+    assertEquals(kElementCount, a.length);
+    assertTrue(delete a.length);
+    a.length = 2;
+    assertEquals(2, a.length);
+
+    // Make sure bounds checks are handled correctly for external arrays.
+    run_bounds_test(a);
+    run_bounds_test(a);
+    run_bounds_test(a);
+    %OptimizeFunctionOnNextCall(run_bounds_test);
+    run_bounds_test(a);
+    %DeoptimizeFunction(run_bounds_test);
+    gc();  // Makes V8 forget about type information for run_bounds_test.
+
+  }
+
+  function array_load_set_smi_check(a) {
+    return a[0] = a[0] = 1;
+  }
+
+  array_load_set_smi_check(a);
+  array_load_set_smi_check(0);
+
+  function array_load_set_smi_check2(a) {
+    return a[0] = a[0] = 1;
+  }
+
+  array_load_set_smi_check2(a);
+  %OptimizeFunctionOnNextCall(array_load_set_smi_check2);
+  array_load_set_smi_check2(a);
+  array_load_set_smi_check2(0);
+  %DeoptimizeFunction(array_load_set_smi_check2);
+  gc();  // Makes V8 forget about type information for array_load_set_smi_check2.
+}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/fast-element-smi-check.js
similarity index 62%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/fast-element-smi-check.js
index aa93b25..d0c45fe 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/fast-element-smi-check.js
@@ -25,12 +25,46 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax --expose-gc
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var a = new Array(10);
+
+function test_load_set_smi(a) {
+  return a[0] = a[0] = 1;
 }
 
-test();
+test_load_set_smi(a);
+test_load_set_smi(a);
+test_load_set_smi(123);
+
+function test_load_set_smi_2(a) {
+  return a[0] = a[0] = 1;
+}
+
+test_load_set_smi_2(a);
+%OptimizeFunctionOnNextCall(test_load_set_smi_2);
+test_load_set_smi_2(a);
+test_load_set_smi_2(0);
+%DeoptimizeFunction(test_load_set_smi_2);
+gc();  // Makes V8 forget about type information for test_load_set_smi_2.
+
+var b = new Object();
+
+function test_load_set_smi_3(b) {
+  return b[0] = b[0] = 1;
+}
+
+test_load_set_smi_3(b);
+test_load_set_smi_3(b);
+test_load_set_smi_3(123);
+
+function test_load_set_smi_4(b) {
+  return b[0] = b[0] = 1;
+}
+
+test_load_set_smi_4(b);
+%OptimizeFunctionOnNextCall(test_load_set_smi_4);
+test_load_set_smi_4(b);
+test_load_set_smi_4(0);
+%DeoptimizeFunction(test_load_set_smi_4);
+gc();  // Makes V8 forget about type information for test_load_set_smi_4.
diff --git a/test/mjsunit/function-bind.js b/test/mjsunit/function-bind.js
index 7a72cd5..e9d0221 100644
--- a/test/mjsunit/function-bind.js
+++ b/test/mjsunit/function-bind.js
@@ -62,7 +62,7 @@
 
 function f_bound_this(z) {
   return z + this.y - this.x;
-} 
+}
 
 assertEquals(3, f_bound_this(1))
 f = f_bound_this.bind(obj);
@@ -75,7 +75,7 @@
 
 // Test chained binds.
 
-// When only giving the thisArg, any number of binds should have 
+// When only giving the thisArg, any number of binds should have
 // the same effect.
 f = foo.bind(foo);
 assertEquals(3, f(1, 1, 1));
@@ -181,4 +181,3 @@
 // Test instanceof obj2 is bar, not f.
 assertTrue(obj2 instanceof bar);
 assertFalse(obj2 instanceof f);
-
diff --git a/test/mjsunit/function-call.js b/test/mjsunit/function-call.js
new file mode 100644
index 0000000..06479ad
--- /dev/null
+++ b/test/mjsunit/function-call.js
@@ -0,0 +1,359 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+var should_throw_on_null_and_undefined =
+    [Object.prototype.toLocaleString,
+     Object.prototype.valueOf,
+     Object.prototype.hasOwnProperty,
+     Object.prototype.isPrototypeOf,
+     Object.prototype.propertyIsEnumerable,
+     Array.prototype.concat,
+     Array.prototype.join,
+     Array.prototype.pop,
+     Array.prototype.push,
+     Array.prototype.reverse,
+     Array.prototype.shift,
+     Array.prototype.slice,
+     Array.prototype.sort,
+     Array.prototype.splice,
+     Array.prototype.unshift,
+     Array.prototype.indexOf,
+     Array.prototype.lastIndexOf,
+     Array.prototype.every,
+     Array.prototype.some,
+     Array.prototype.forEach,
+     Array.prototype.map,
+     Array.prototype.filter,
+     Array.prototype.reduce,
+     Array.prototype.reduceRight,
+     String.prototype.charAt,
+     String.prototype.charCodeAt,
+     String.prototype.concat,
+     String.prototype.indexOf,
+     String.prototype.lastIndexOf,
+     String.prototype.localeCompare,
+     String.prototype.match,
+     String.prototype.replace,
+     String.prototype.search,
+     String.prototype.slice,
+     String.prototype.split,
+     String.prototype.substring,
+     String.prototype.toLowerCase,
+     String.prototype.toLocaleLowerCase,
+     String.prototype.toUpperCase,
+     String.prototype.toLocaleUpperCase,
+     String.prototype.trim,
+     Number.prototype.toLocaleString,
+     Error.prototype.toString];
+
+// Non-generic natives do not work on any input other than their specific
+// type, but since this change allows call to be invoked with undefined or
+// null as this, we still explicitly test that they throw on these values here.
+var non_generic =
+    [Array.prototype.toString,
+     Array.prototype.toLocaleString,
+     Function.prototype.toString,
+     Function.prototype.call,
+     Function.prototype.apply,
+     String.prototype.toString,
+     String.prototype.valueOf,
+     Boolean.prototype.toString,
+     Boolean.prototype.valueOf,
+     Number.prototype.toString,
+     Number.prototype.valueOf,
+     Number.prototype.toFixed,
+     Number.prototype.toExponential,
+     Number.prototype.toPrecision,
+     Date.prototype.toString,
+     Date.prototype.toDateString,
+     Date.prototype.toTimeString,
+     Date.prototype.toLocaleString,
+     Date.prototype.toLocaleDateString,
+     Date.prototype.toLocaleTimeString,
+     Date.prototype.valueOf,
+     Date.prototype.getTime,
+     Date.prototype.getFullYear,
+     Date.prototype.getUTCFullYear,
+     Date.prototype.getMonth,
+     Date.prototype.getUTCMonth,
+     Date.prototype.getDate,
+     Date.prototype.getUTCDate,
+     Date.prototype.getDay,
+     Date.prototype.getUTCDay,
+     Date.prototype.getHours,
+     Date.prototype.getUTCHours,
+     Date.prototype.getMinutes,
+     Date.prototype.getUTCMinutes,
+     Date.prototype.getSeconds,
+     Date.prototype.getUTCSeconds,
+     Date.prototype.getMilliseconds,
+     Date.prototype.getUTCMilliseconds,
+     Date.prototype.getTimezoneOffset,
+     Date.prototype.setTime,
+     Date.prototype.setMilliseconds,
+     Date.prototype.setUTCMilliseconds,
+     Date.prototype.setSeconds,
+     Date.prototype.setUTCSeconds,
+     Date.prototype.setMinutes,
+     Date.prototype.setUTCMinutes,
+     Date.prototype.setHours,
+     Date.prototype.setUTCHours,
+     Date.prototype.setDate,
+     Date.prototype.setUTCDate,
+     Date.prototype.setMonth,
+     Date.prototype.setUTCMonth,
+     Date.prototype.setFullYear,
+     Date.prototype.setUTCFullYear,
+     Date.prototype.toUTCString,
+     Date.prototype.toISOString,
+     Date.prototype.toJSON,
+     RegExp.prototype.exec,
+     RegExp.prototype.test,
+     RegExp.prototype.toString];
+
+
+// Mapping functions.
+var mapping_functions =
+    [Array.prototype.every,
+     Array.prototype.some,
+     Array.prototype.forEach,
+     Array.prototype.map,
+     Array.prototype.filter];
+
+// Reduce functions.
+var reducing_functions =
+    [Array.prototype.reduce,
+     Array.prototype.reduceRight];
+
+// Test that all natives using the ToObject call throw the right exception.
+for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
+  // Sanity check that all functions are correct
+  assertEquals(typeof(should_throw_on_null_and_undefined[i]), "function");
+
+  var exception = false;
+  try {
+    // We call all functions with no parameters, which means that essential
+    // parameters will have the undefined value.
+    // The test for whether the "this" value is null or undefined is always
+    // performed before access to the other parameters, so even if the
+    // undefined value is an invalid argument value, it mustn't change
+    // the result of the test.
+    should_throw_on_null_and_undefined[i].call(null);
+  } catch (e) {
+    exception = true;
+    assertTrue("called_on_null_or_undefined" == e.type ||
+               "null_to_object" == e.type);
+  }
+  assertTrue(exception);
+
+  exception = false;
+  try {
+    should_throw_on_null_and_undefined[i].call(undefined);
+  } catch (e) {
+    exception = true;
+    assertTrue("called_on_null_or_undefined" == e.type ||
+               "null_to_object" == e.type);
+  }
+  assertTrue(exception);
+
+  exception = false;
+  try {
+    should_throw_on_null_and_undefined[i].apply(null);
+  } catch (e) {
+    exception = true;
+    assertTrue("called_on_null_or_undefined" == e.type ||
+               "null_to_object" == e.type);
+  }
+  assertTrue(exception);
+
+  exception = false;
+  try {
+    should_throw_on_null_and_undefined[i].apply(undefined);
+  } catch (e) {
+    exception = true;
+    assertTrue("called_on_null_or_undefined" == e.type ||
+               "null_to_object" == e.type);
+  }
+  assertTrue(exception);
+}
+
+// Test that all natives that are non generic throw on null and undefined.
+for (var i = 0; i < non_generic.length; i++) {
+  // Sanity check that all functions are correct
+  assertEquals(typeof(non_generic[i]), "function");
+
+  exception = false;
+  try {
+    non_generic[i].call(null);
+  } catch (e) {
+    exception = true;
+    assertTrue(e instanceof TypeError);
+  }
+  assertTrue(exception);
+
+  exception = false;
+  try {
+    non_generic[i].call(null);
+  } catch (e) {
+    exception = true;
+    assertTrue(e instanceof TypeError);
+  }
+  assertTrue(exception);
+
+  exception = false;
+  try {
+    non_generic[i].apply(null);
+  } catch (e) {
+    exception = true;
+    assertTrue(e instanceof TypeError);
+  }
+  assertTrue(exception);
+
+  exception = false;
+  try {
+    non_generic[i].apply(null);
+  } catch (e) {
+    exception = true;
+    assertTrue(e instanceof TypeError);
+  }
+  assertTrue(exception);
+}
+
+
+// Test that we still throw when calling with thisArg null or undefined
+// through an array mapping function.
+var array = [1,2,3,4,5];
+for (var j = 0; j < mapping_functions.length; j++) {
+  for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
+    exception = false;
+    try {
+      mapping_functions[j].call(array,
+                                should_throw_on_null_and_undefined[i],
+                                null);
+    } catch (e) {
+      exception = true;
+      assertTrue("called_on_null_or_undefined" == e.type ||
+                 "null_to_object" == e.type);
+    }
+    assertTrue(exception);
+
+    exception = false;
+    try {
+      mapping_functions[j].call(array,
+                                should_throw_on_null_and_undefined[i],
+                                undefined);
+    } catch (e) {
+      exception = true;
+      assertTrue("called_on_null_or_undefined" == e.type ||
+                 "null_to_object" == e.type);
+    }
+    assertTrue(exception);
+  }
+}
+
+for (var j = 0; j < mapping_functions.length; j++) {
+  for (var i = 0; i < non_generic.length; i++) {
+    exception = false;
+    try {
+      mapping_functions[j].call(array,
+                                non_generic[i],
+                                null);
+    } catch (e) {
+      exception = true;
+      assertTrue(e instanceof TypeError);
+    }
+    assertTrue(exception);
+
+    exception = false;
+    try {
+      mapping_functions[j].call(array,
+                                non_generic[i],
+                                undefined);
+    } catch (e) {
+      exception = true;
+      assertTrue(e instanceof TypeError);
+    }
+    assertTrue(exception);
+  }
+}
+
+
+// Reduce functions do a call with null as this argument.
+for (var j = 0; j < reducing_functions.length; j++) {
+  for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
+    exception = false;
+    try {
+      reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
+    } catch (e) {
+      exception = true;
+      assertTrue("called_on_null_or_undefined" == e.type ||
+                 "null_to_object" == e.type);
+    }
+    assertTrue(exception);
+
+    exception = false;
+    try {
+      reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
+    } catch (e) {
+      exception = true;
+      assertTrue("called_on_null_or_undefined" == e.type ||
+                 "null_to_object" == e.type);
+    }
+    assertTrue(exception);
+  }
+}
+
+for (var j = 0; j < reducing_functions.length; j++) {
+  for (var i = 0; i < non_generic.length; i++) {
+    exception = false;
+    try {
+      reducing_functions[j].call(array, non_generic[i]);
+    } catch (e) {
+      exception = true;
+      assertTrue(e instanceof TypeError);
+    }
+    assertTrue(exception);
+
+    exception = false;
+    try {
+      reducing_functions[j].call(array, non_generic[i]);
+    } catch (e) {
+      exception = true;
+      assertTrue(e instanceof TypeError);
+    }
+    assertTrue(exception);
+  }
+}
+
+
+// Object.prototype.toString()
+assertEquals(Object.prototype.toString.call(null),
+             '[object Null]');
+
+assertEquals(Object.prototype.toString.call(undefined),
+             '[object Undefined]');
diff --git a/test/mjsunit/function-caller.js b/test/mjsunit/function-caller.js
index ddc7b5d..bc01750 100644
--- a/test/mjsunit/function-caller.js
+++ b/test/mjsunit/function-caller.js
@@ -46,3 +46,10 @@
 // Check called from eval.
 eval('f(null)');
 
+// Check called from builtin functions. Only show the initially called
+// (publicly exposed) builtin function, not its internal helper functions.
+[Array.prototype.sort, Array.prototype.sort].sort(f);
+
+"abel".replace(/b/g, function h() {
+   assertEquals(String.prototype.replace, h.caller);
+});
diff --git a/test/mjsunit/function-source.js b/test/mjsunit/function-source.js
index 7525775..8f2fc22 100644
--- a/test/mjsunit/function-source.js
+++ b/test/mjsunit/function-source.js
@@ -36,7 +36,7 @@
   }
   h();
 }
-  
+
 function g() {
   function h() {
     assertEquals(Debug.scriptSource(f), Debug.scriptSource(h));
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index cefef0a..ff6677e 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -141,9 +141,12 @@
   "EnableAccessChecks": true,
 
   // These functions should not be callable as runtime functions.
-  "NewContext": true,
+  "NewFunctionContext": true,
   "NewArgumentsFast": true,
-  "PushContext": true,
+  "NewStrictArgumentsFast": true,
+  "PushWithContext": true,
+  "PushCatchContext": true,
+  "PushBlockContext": true,
   "LazyCompile": true,
   "LazyRecompile": true,
   "NotifyDeoptimized": true,
@@ -160,12 +163,16 @@
   "PromoteScheduledException": true,
   "DeleteHandleScopeExtensions": true,
 
+  // Requires integer arguments to be non-negative.
+  "Apply": true,
+
   // That can only be invoked on Array.prototype.
   "FinishArrayPrototypeSetup": true,
 
   "_SwapElements": true,
 
-  // Performance critical function which cannot afford type checks.
+  // Performance critical functions which cannot afford type checks.
+  "_IsNativeOrStrictMode": true,
   "_CallFunction": true,
 
   // Tries to allocate based on argument, and (correctly) throws
diff --git a/test/mjsunit/get-own-property-descriptor.js b/test/mjsunit/get-own-property-descriptor.js
index 79c1fac..abb2420 100644
--- a/test/mjsunit/get-own-property-descriptor.js
+++ b/test/mjsunit/get-own-property-descriptor.js
@@ -27,7 +27,7 @@
 
 // This file only tests very simple descriptors that always have
 // configurable, enumerable, and writable set to true.
-// A range of more elaborate tests are performed in 
+// A range of more elaborate tests are performed in
 // object-define-property.js
 
 function get() { return x; }
diff --git a/test/mjsunit/getter-in-prototype.js b/test/mjsunit/getter-in-prototype.js
index 5563123..01a3473 100644
--- a/test/mjsunit/getter-in-prototype.js
+++ b/test/mjsunit/getter-in-prototype.js
@@ -25,8 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that exceptions are thrown when setting properties on object
-// that have only a getter in a prototype object.
+// Test that exceptions are not thrown when setting properties on objects
+// that have only a getter in a prototype object, except in strict mode,
+// where exceptions should be thrown.
 
 var o = {};
 var p = {};
@@ -34,25 +35,44 @@
 p.__defineGetter__(0, function(){});
 o.__proto__ = p;
 
-assertThrows("o.x = 42");
-assertThrows("o[0] = 42");
+assertDoesNotThrow("o.x = 42");
+assertDoesNotThrow("o[0] = 42");
+
+assertThrows(function() { 'use strict'; o.x = 42; });
+assertThrows(function() { 'use strict'; o[0] = 42; });
 
 function f() {
   with(o) {
     x = 42;
   }
 }
-assertThrows("f()");
+
+assertDoesNotThrow(f);
 
 __proto__ = p;
 function g() {
   eval('1');
   x = 42;
 }
-assertThrows("g()");
+
+function g_strict() {
+  'use strict';
+  eval('1');
+  x = 42;
+}
+
+assertDoesNotThrow(g);
+assertThrows(g_strict);
 
 __proto__ = p;
 function g2() {
   this[0] = 42;
 }
-assertThrows("g2()");
+
+function g2_strict() {
+  'use strict';
+  this[0] = 42;
+}
+
+assertDoesNotThrow(g2);
+assertThrows(g2_strict);
diff --git a/test/mjsunit/global-deleted-property-keyed.js b/test/mjsunit/global-deleted-property-keyed.js
index 1a1d3cb..dba3a4d 100644
--- a/test/mjsunit/global-deleted-property-keyed.js
+++ b/test/mjsunit/global-deleted-property-keyed.js
@@ -33,6 +33,6 @@
 var name = "fisk";
 natives[name] = name;
 function foo() { natives[name] + 12; }
-for(var i = 0; i < 3; i++) foo(); 
+for(var i = 0; i < 3; i++) foo();
 delete natives[name];
 for(var i = 0; i < 3; i++) foo();
diff --git a/test/mjsunit/harmony/block-conflicts.js b/test/mjsunit/harmony/block-conflicts.js
new file mode 100644
index 0000000..8d3de6f
--- /dev/null
+++ b/test/mjsunit/harmony/block-conflicts.js
@@ -0,0 +1,126 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-block-scoping
+
+// Test for conflicting variable bindings.
+
+function CheckException(e) {
+  var string = e.toString();
+  assertTrue(string.indexOf("has already been declared") >= 0 ||
+             string.indexOf("redeclaration") >= 0);  return 'Conflict';
+}
+
+
+function TestFunction(s,e) {
+  try {
+    return eval("(function(){" + s + ";return " + e + "})")();
+  } catch (x) {
+    return CheckException(x);
+  }
+}
+
+
+function TestBlock(s,e) {
+  try {
+    return eval("(function(){ if (true) { " + s + "; }; return " + e + "})")();
+  } catch (x) {
+    return CheckException(x);
+  }
+}
+
+function TestAll(expected,s,opt_e) {
+  var e = "";
+  var msg = s;
+  if (opt_e) { e = opt_e; msg += "; " + opt_e; }
+  assertEquals(expected, TestFunction(s,e), "function:'" + msg + "'");
+  assertEquals(expected, TestBlock(s,e), "block:'" + msg + "'");
+}
+
+
+function TestConflict(s) {
+  TestAll('Conflict', s);
+  TestAll('Conflict', 'eval("' + s + '")');
+}
+
+
+function TestNoConflict(s) {
+  TestAll('NoConflict', s, "'NoConflict'");
+  TestAll('NoConflict', 'eval("' + s + '")', "'NoConflict'");
+}
+
+var letbinds = [ "let x",
+                 "let x = 0",
+                 "let x = undefined",
+                 "function x() { }",
+                 "let x = function() {}",
+                 "let x, y",
+                 "let y, x",
+                 ];
+var varbinds = [ "var x",
+                 "var x = 0",
+                 "var x = undefined",
+                 "var x = function() {}",
+                 "var x, y",
+                 "var y, x",
+                 ];
+
+
+for (var l = 0; l < letbinds.length; ++l) {
+  // Test conflicting let/var bindings.
+  for (var v = 0; v < varbinds.length; ++v) {
+    // Same level.
+    TestConflict(letbinds[l] +'; ' + varbinds[v]);
+    TestConflict(varbinds[v] +'; ' + letbinds[l]);
+    // Different level.
+    TestConflict(letbinds[l] +'; {' + varbinds[v] + '; }');
+    TestConflict('{ ' + varbinds[v] +'; }' + letbinds[l]);
+  }
+
+  // Test conflicting let/let bindings.
+  for (var k = 0; k < letbinds.length; ++k) {
+    // Same level.
+    TestConflict(letbinds[l] +'; ' + letbinds[k]);
+    TestConflict(letbinds[k] +'; ' + letbinds[l]);
+    // Different level.
+    TestNoConflict(letbinds[l] +'; { ' + letbinds[k] + '; }');
+    TestNoConflict('{ ' + letbinds[k] +'; } ' + letbinds[l]);
+  }
+
+  // Test conflicting parameter/let bindings.
+  TestConflict('(function (x) { ' + letbinds[l] + '; })()');
+}
+
+// Test conflicting catch/var bindings.
+for (var v = 0; v < varbinds.length; ++v) {
+  TestConflict('try {} catch (x) { ' + varbinds[v] + '; }');
+}
+
+// Test conflicting parameter/var bindings.
+for (var v = 0; v < varbinds.length; ++v) {
+  TestConflict('(function (x) { ' + varbinds[v] + '; })()');
+}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/harmony/block-lazy-compile.js
similarity index 79%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/harmony/block-lazy-compile.js
index aa93b25..a6efcbf 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/harmony/block-lazy-compile.js
@@ -25,12 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
+// Test deserialization of block contexts during lazy compilation
+// of closures.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function f() {
+  var g;
+  {
+    // TODO(keuchel): introduce let
+    var x = 0;
+    g = function () {
+      x = x + 1;
+      return x;
+    }
+  }
+  return g;
 }
 
-test();
+var o = f();
+assertEquals(1, o());
+assertEquals(2, o());
+assertEquals(3, o());
+%OptimizeFunctionOnNextCall(o);
+assertEquals(4, o());
diff --git a/test/mjsunit/harmony/block-leave.js b/test/mjsunit/harmony/block-leave.js
new file mode 100644
index 0000000..73eaf29
--- /dev/null
+++ b/test/mjsunit/harmony/block-leave.js
@@ -0,0 +1,225 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-block-scoping
+
+// We want to test the context chain shape.  In each of the test cases
+// below, the outer with is to force a runtime lookup of the identifier 'x'
+// to actually verify that the inner context has been discarded.  A static
+// lookup of 'x' might accidentally succeed.
+
+{
+  let x = 2;
+  L: {
+    let x = 3;
+    assertEquals(3, x);
+    break L;
+    assertTrue(false);
+  }
+  assertEquals(2, x);
+}
+
+do {
+  let x = 4;
+  assertEquals(4,x);
+  {
+    let x = 5;
+    assertEquals(5, x);
+    continue;
+    assertTrue(false);
+  }
+} while (false);
+
+var caught = false;
+try {
+  {
+    let xx = 18;
+    throw 25;
+    assertTrue(false);
+  }
+} catch (e) {
+  caught = true;
+  assertEquals(25, e);
+  with ({y:19}) {
+    assertEquals(19, y);
+    try {
+      // NOTE: This checks that the block scope containing xx has been
+      // removed from the context chain.
+      xx;
+      assertTrue(false);  // should not reach here
+    } catch (e2) {
+      assertTrue(e2 instanceof ReferenceError);
+    }
+  }
+}
+assertTrue(caught);
+
+
+with ({x: 'outer'}) {
+  label: {
+    let x = 'inner';
+    break label;
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  label: {
+    let x = 'middle';
+    {
+      let x = 'inner';
+      break label;
+    }
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  for (var i = 0; i < 10; ++i) {
+    let x = 'inner' + i;
+    continue;
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  label: for (var i = 0; i < 10; ++i) {
+    let x = 'middle' + i;
+    for (var j = 0; j < 10; ++j) {
+      let x = 'inner' + j;
+      continue label;
+    }
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  try {
+    let x = 'inner';
+    throw 0;
+  } catch (e) {
+    assertEquals('outer', x);
+  }
+}
+
+
+with ({x: 'outer'}) {
+  try {
+    let x = 'middle';
+    {
+      let x = 'inner';
+      throw 0;
+    }
+  } catch (e) {
+    assertEquals('outer', x);
+  }
+}
+
+
+try {
+  with ({x: 'outer'}) {
+    try {
+      let x = 'inner';
+      throw 0;
+    } finally {
+      assertEquals('outer', x);
+    }
+  }
+} catch (e) {
+  if (e instanceof MjsUnitAssertionError) throw e;
+}
+
+
+try {
+  with ({x: 'outer'}) {
+    try {
+      let x = 'middle';
+      {
+        let x = 'inner';
+        throw 0;
+      }
+    } finally {
+      assertEquals('outer', x);
+    }
+  }
+} catch (e) {
+  if (e instanceof MjsUnitAssertionError) throw e;
+}
+
+
+// Verify that the context is correctly set in the stack frame after exiting
+// from with.
+function f() {}
+
+with ({x: 'outer'}) {
+  label: {
+    let x = 'inner';
+    break label;
+  }
+  f();  // The context could be restored from the stack after the call.
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  for (var i = 0; i < 10; ++i) {
+    let x = 'inner';
+    continue;
+  }
+  f();
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  try {
+    let x = 'inner';
+    throw 0;
+  } catch (e) {
+    f();
+    assertEquals('outer', x);
+  }
+}
+
+
+try {
+  with ({x: 'outer'}) {
+    try {
+      let x = 'inner';
+      throw 0;
+    } finally {
+      f();
+      assertEquals('outer', x);
+    }
+  }
+} catch (e) {
+  if (e instanceof MjsUnitAssertionError) throw e;
+}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/harmony/block-let-crankshaft.js
similarity index 72%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/harmony/block-let-crankshaft.js
index aa93b25..c2fb96b 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/harmony/block-let-crankshaft.js
@@ -25,12 +25,39 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --harmony-block-scoping --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test that temporal dead zone semantics for function and block scoped
+// let bindings are handled by the optimizing compiler.
+
+function f(x, b) {
+  let y = (b ? y : x) + 42;
+  return y;
 }
 
-test();
+function g(x, b) {
+  {
+    let y = (b ? y : x) + 42;
+    return y;
+  }
+}
+
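+// Warm up both functions on the non-throwing path, then force optimization and
+// check that reading y in its own initializer (when b is true) still throws a
+// ReferenceError from optimized code.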
+for (var i=0; i<10; i++) {
+  f(i, false);
+  g(i, false);
+}
+
+%OptimizeFunctionOnNextCall(f);
+%OptimizeFunctionOnNextCall(g);
+
+try {
+  f(42, true);
+} catch (e) {
+  assertInstanceof(e, ReferenceError);
+}
+
+try {
+  g(42, true);
+} catch (e) {
+  assertInstanceof(e, ReferenceError);
+}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/harmony/block-let-declaration.js
similarity index 67%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/harmony/block-let-declaration.js
index aa93b25..49b6348 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/harmony/block-let-declaration.js
@@ -25,12 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --harmony-block-scoping
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test let declarations in various settings.
+
+// Global
+let x;
+let y = 2;
+
+// Block local
+{
+  let y;
+  let x = 3;
 }
 
-test();
+assertEquals(undefined, x);
+assertEquals(2,y);
+
+if (true) {
+  let y;
+  assertEquals(undefined, y);
+}
+
+function TestLocalThrows(str, expect) {
+  assertThrows("(function(){" + str + "})()", expect);
+}
+
+function TestLocalDoesNotThrow(str) {
+  assertDoesNotThrow("(function(){" + str + "})()");
+}
+
+// Unprotected statement
+TestLocalThrows("if (true) let x;", SyntaxError);
+TestLocalThrows("do let x; while (false)", SyntaxError);
+TestLocalThrows("while (false) let x;", SyntaxError);
+
+TestLocalDoesNotThrow("if (true) var x;");
+TestLocalDoesNotThrow("do var x; while (false)");
+TestLocalDoesNotThrow("while (false) var x;");
diff --git a/test/mjsunit/harmony/block-let-semantics.js b/test/mjsunit/harmony/block-let-semantics.js
new file mode 100644
index 0000000..198c3b4
--- /dev/null
+++ b/test/mjsunit/harmony/block-let-semantics.js
@@ -0,0 +1,138 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-block-scoping
+
+// Test temporal dead zone semantics of let bound variables in
+// function and block scopes.
+
+function TestFunctionLocal(s) {
+  try {
+    eval("(function(){" + s + "; })")();
+  } catch (e) {
+    assertInstanceof(e, ReferenceError);
+    return;
+  }
+  assertUnreachable();
+}
+
+function TestBlockLocal(s,e) {
+  try {
+    eval("(function(){ {" + s + ";} })")();
+  } catch (e) {
+    assertInstanceof(e, ReferenceError);
+    return;
+  }
+  assertUnreachable();
+}
+
+
+function TestAll(s) {
+  TestBlockLocal(s);
+  TestFunctionLocal(s);
+}
+
+// Use before initialization in declaration statement.
+TestAll('let x = x + 1');
+TestAll('let x = x += 1');
+TestAll('let x = x++');
+TestAll('let x = ++x');
+
+// Use before initialization in prior statement.
+TestAll('x + 1; let x;');
+TestAll('x = 1; let x;');
+TestAll('x += 1; let x;');
+TestAll('++x; let x;');
+TestAll('x++; let x;');
+
+TestAll('f(); let x; function f() { return x + 1; }');
+TestAll('f(); let x; function f() { x = 1; }');
+TestAll('f(); let x; function f() { x += 1; }');
+TestAll('f(); let x; function f() { ++x; }');
+TestAll('f(); let x; function f() { x++; }');
+
+TestAll('f()(); let x; function f() { return function() { return x + 1; } }');
+TestAll('f()(); let x; function f() { return function() { x = 1; } }');
+TestAll('f()(); let x; function f() { return function() { x += 1; } }');
+TestAll('f()(); let x; function f() { return function() { ++x; } }');
+TestAll('f()(); let x; function f() { return function() { x++; } }');
+
+// Use before initialization with a dynamic lookup.
+TestAll('eval("x + 1;"); let x;');
+TestAll('eval("x = 1;"); let x;');
+TestAll('eval("x += 1;"); let x;');
+TestAll('eval("++x;"); let x;');
+TestAll('eval("x++;"); let x;');
+
+// Test that variables introduced by function declarations are created and
+// initialized upon entering a function / block scope.
+function f() {
+  {
+    assertEquals(2, g1());
+    assertEquals(2, eval("g1()"));
+
+    // block scoped function declaration
+    function g1() {
+      return 2;
+    }
+  }
+
+  assertEquals(3, g2());
+  assertEquals(3, eval("g2()"));
+  // function scoped function declaration
+  function g2() {
+    return 3;
+  }
+}
+f();
+
+// Test that a function declaration introduces a block scoped variable.
+TestAll('{ function k() { return 0; } }; k(); ');
+
+// Test that a function declaration sees the scope it resides in.
+function f2() {
+  let m, n;
+  {
+    m = g;
+    function g() {
+      return a;
+    }
+    let a = 1;
+  }
+  assertEquals(1, m());
+
+  try {
+    throw 2;
+  } catch(b) {
+    n = h;
+    function h() {
+      return b + c;
+    }
+    let b = 3;
+  }
+  assertEquals(5, n());
+}
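+
+// Illustrative sketch, not part of the original change set: a self-contained
+// check of the temporal dead zone behaviour the tests above rely on. Reading
+// a let binding before its declaration in the same block throws.
+function TDZSketch() {
+  try {
+    q;  // q is hoisted to the block but still uninitialized here
+  } catch (e) {
+    return e instanceof ReferenceError;  // true: the read hit the dead zone
+  }
+  let q = 1;  // initialization point; never reached in this sketch
+  return false;
+}
+assertTrue(TDZSketch());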
diff --git a/test/mjsunit/harmony/block-scoping.js b/test/mjsunit/harmony/block-scoping.js
new file mode 100644
index 0000000..266e380
--- /dev/null
+++ b/test/mjsunit/harmony/block-scoping.js
@@ -0,0 +1,216 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --harmony-block-scoping
+// Test functionality of block scopes.
+
+// Hoisting of var declarations.
+function f1() {
+  {
+    var x = 1;
+    var y;
+  }
+  assertEquals(1, x)
+  assertEquals(undefined, y)
+}
+f1();
+
+
+// Dynamic lookup in and through block contexts.
+function f2(one) {
+  var x = one + 1;
+  let y = one + 2;
+  {
+    let z = one + 3;
+    assertEquals(1, eval('one'));
+    assertEquals(2, eval('x'));
+    assertEquals(3, eval('y'));
+    assertEquals(4, eval('z'));
+  }
+}
+f2(1);
+
+
+// Lookup in and through block contexts.
+function f3(one) {
+  var x = one + 1;
+  let y = one + 2;
+  {
+    let z = one + 3;
+    assertEquals(1, one);
+    assertEquals(2, x);
+    assertEquals(3, y);
+    assertEquals(4, z);
+  }
+}
+f3(1);
+
+
+// Dynamic lookup from closure.
+function f4(one) {
+  var x = one + 1;
+  let y = one + 2;
+  {
+    let z = one + 3;
+    function f() {
+      assertEquals(1, eval('one'));
+      assertEquals(2, eval('x'));
+      assertEquals(3, eval('y'));
+      assertEquals(4, eval('z'));
+    };
+  }
+}
+f4(1);
+
+
+// Lookup from closure.
+function f5(one) {
+  var x = one + 1;
+  let y = one + 2;
+  {
+    let z = one + 3;
+    function f() {
+      assertEquals(1, one);
+      assertEquals(2, x);
+      assertEquals(3, y);
+      assertEquals(4, z);
+    };
+  }
+}
+f5(1);
+
+
+// Return from block.
+function f6() {
+  let x = 1;
+  {
+    let y = 2;
+    return x + y;
+  }
+}
+assertEquals(3, f6(6));
+
+
+// Variable shadowing and lookup.
+function f7(a) {
+  let b = 1;
+  var c = 1;
+  var d = 1;
+  { // let variables shadowing argument, let and var variables
+    let a = 2;
+    let b = 2;
+    let c = 2;
+    assertEquals(2,a);
+    assertEquals(2,b);
+    assertEquals(2,c);
+  }
+  try {
+    throw 'stuff1';
+  } catch (a) {
+    assertEquals('stuff1',a);
+    // catch variable shadowing argument
+    a = 2;
+    assertEquals(2,a);
+    {
+      // let variable shadowing catch variable
+      let a = 3;
+      assertEquals(3,a);
+      try {
+        throw 'stuff2';
+      } catch (a) {
+        assertEquals('stuff2',a);
+        // catch variable shadowing let variable
+        a = 4;
+        assertEquals(4,a);
+      }
+      assertEquals(3,a);
+    }
+    assertEquals(2,a);
+  }
+  try {
+    throw 'stuff3';
+  } catch (c) {
+    // catch variable shadowing var variable
+    assertEquals('stuff3',c);
+    try {
+      throw 'stuff4';
+    } catch(c) {
+      assertEquals('stuff4',c);
+      // catch variable shadowing catch variable
+      c = 3;
+      assertEquals(3,c);
+    }
+    (function(c) {
+      // argument shadowing catch variable
+      c = 3;
+      assertEquals(3,c);
+    })();
+    assertEquals('stuff3', c);
+    (function() {
+      // var variable shadowing catch variable
+      var c = 3;
+    })();
+    assertEquals('stuff3', c);
+    c = 2;
+  }
+  assertEquals(1,c);
+  (function(a,b,c) {
+    // arguments shadowing argument, let and var variable
+    a = 2;
+    b = 2;
+    c = 2;
+    assertEquals(2,a);
+    assertEquals(2,b);
+    assertEquals(2,c);
+    // var variable shadowing var variable
+    var d = 2;
+  })(1,1);
+  assertEquals(1,a);
+  assertEquals(1,b);
+  assertEquals(1,c);
+  assertEquals(1,d);
+}
+f7(1);
+
+
+// Ensure let variables are block local and var variables function local.
+function f8() {
+  var let_accessors = [];
+  var var_accessors = [];
+  for (var i = 0; i < 10; i++) {
+    let x = i;
+    var y = i;
+    let_accessors[i] = function() { return x; }
+    var_accessors[i] = function() { return y; }
+  }
+  for (var j = 0; j < 10; j++) {
+    y = j + 10;
+    assertEquals(j, let_accessors[j]());
+    assertEquals(y, var_accessors[j]());
+  }
+}
+f8();
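+
+
+// Illustrative sketch, not part of the original change set: the closure
+// behaviour f8 depends on. Each loop iteration gets a fresh let binding,
+// while var declarations share one function-level slot.
+function closureSketch() {
+  var letFns = [];
+  var varFns = [];
+  for (var i = 0; i < 3; i++) {
+    let a = i;
+    var b = i;
+    letFns[i] = function() { return a; };
+    varFns[i] = function() { return b; };
+  }
+  assertEquals(0, letFns[0]());  // each closure captured its own a
+  assertEquals(1, letFns[1]());
+  assertEquals(2, varFns[0]());  // every closure sees the final value of b
+  assertEquals(2, varFns[1]());
+}
+closureSketch();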
diff --git a/test/mjsunit/harmony/debug-blockscopes.js b/test/mjsunit/harmony/debug-blockscopes.js
new file mode 100644
index 0000000..0230e84
--- /dev/null
+++ b/test/mjsunit/harmony/debug-blockscopes.js
@@ -0,0 +1,466 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --harmony-block-scoping
+// The functions used for testing backtraces. They are at the top to make the
+// testing of source line/column easier.
+
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var test_name;
+var listener_delegate;
+var listener_called;
+var exception;
+var begin_test_count = 0;
+var end_test_count = 0;
+var break_count = 0;
+
+
+// Debug event listener which delegates.
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      break_count++;
+      listener_called = true;
+      listener_delegate(exec_state);
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Initialize for a new test.
+function BeginTest(name) {
+  test_name = name;
+  listener_delegate = null;
+  listener_called = false;
+  exception = null;
+  begin_test_count++;
+}
+
+
+// Check result of a test.
+function EndTest() {
+  assertTrue(listener_called, "listener not called for " + test_name);
+  assertNull(exception, test_name);
+  end_test_count++;
+}
+
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+  assertEquals(scopes.length, exec_state.frame().scopeCount());
+  for (var i = 0; i < scopes.length; i++) {
+    var scope = exec_state.frame().scope(i);
+    assertTrue(scope.isScope());
+    assertEquals(scopes[i], scope.scopeType());
+
+    // Check the global object when hitting the global scope.
+    if (scopes[i] == debug.ScopeType.Global) {
+      // Objects don't have the same class (one is "global", the other is
+      // "Object"), so just check the properties directly.
+      assertPropertiesEqual(this, scope.scopeObject().value());
+    }
+  }
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+  // Send a scopes request and check the result.
+  var json;
+  var request_json = '{"seq":0,"type":"request","command":"scopes"}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scopes.length, response.body.scopes.length);
+  for (var i = 0; i < scopes.length; i++) {
+    assertEquals(i, response.body.scopes[i].index);
+    assertEquals(scopes[i], response.body.scopes[i].type);
+    if (scopes[i] == debug.ScopeType.Local ||
+        scopes[i] == debug.ScopeType.Closure) {
+      assertTrue(response.body.scopes[i].object.ref < 0);
+    } else {
+      assertTrue(response.body.scopes[i].object.ref >= 0);
+    }
+    var found = false;
+    for (var j = 0; j < response.refs.length && !found; j++) {
+      found = response.refs[j].handle == response.body.scopes[i].object.ref;
+    }
+    assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
+  }
+}
+
+// Check that the content of the scope is as expected. For functions just check
+// that there is a function.
+function CheckScopeContent(content, number, exec_state) {
+  var scope = exec_state.frame().scope(number);
+  var count = 0;
+  for (var p in content) {
+    var property_mirror = scope.scopeObject().property(p);
+    if (property_mirror.isUndefined()) {
+      print('property ' + p + ' not found in scope');
+    }
+    assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
+    if (typeof(content[p]) === 'function') {
+      assertTrue(property_mirror.value().isFunction());
+    } else {
+      assertEquals(content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+    }
+    count++;
+  }
+
+  // 'arguments' might be exposed in the local and closure scopes. Just
+  // ignore this.
+  var scope_size = scope.scopeObject().properties().length;
+  if (!scope.scopeObject().property('arguments').isUndefined()) {
+    scope_size--;
+  }
+  // Also ignore synthetic variable from catch block.
+  if (!scope.scopeObject().property('.catch-var').isUndefined()) {
+    scope_size--;
+  }
+  // Skip property with empty name.
+  if (!scope.scopeObject().property('').isUndefined()) {
+    scope_size--;
+  }
+  // Also ignore synthetic variable from block scopes.
+  if (!scope.scopeObject().property('.block').isUndefined()) {
+    scope_size--;
+  }
+
+  if (count != scope_size) {
+    print('Names found in scope:');
+    var names = scope.scopeObject().propertyNames();
+    for (var i = 0; i < names.length; i++) {
+      print(names[i]);
+    }
+  }
+  assertEquals(count, scope_size);
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+  // Send a scope request for information on a single scope and check the
+  // result.
+  var request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
+  request_json += scope.scopeIndex();
+  request_json += '}}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scope.scopeType(), response.body.type);
+  assertEquals(number, response.body.index);
+  if (scope.scopeType() == debug.ScopeType.Local ||
+      scope.scopeType() == debug.ScopeType.Closure) {
+    assertTrue(response.body.object.ref < 0);
+  } else {
+    assertTrue(response.body.object.ref >= 0);
+  }
+  var found = false;
+  for (var i = 0; i < response.refs.length && !found; i++) {
+    found = response.refs[i].handle == response.body.object.ref;
+  }
+  assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
+
+
+// Simple empty block scope in local scope.
+BeginTest("Local block 1");
+
+function local_block_1() {
+  {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+};
+local_block_1();
+EndTest();
+
+
+// Simple empty block scope in local scope with a parameter.
+BeginTest("Local 2");
+
+function local_2(a) {
+  {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1}, 0, exec_state);
+};
+local_2(1);
+EndTest();
+
+
+// Local scope with a parameter and a local variable.
+BeginTest("Local 3");
+
+function local_3(a) {
+  let x = 3;
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,x:3}, 0, exec_state);
+};
+local_3(1);
+EndTest();
+
+
+// Local scope with parameters and local variables.
+BeginTest("Local 4");
+
+function local_4(a, b) {
+  let x = 3;
+  let y = 4;
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
+};
+local_4(1, 2);
+EndTest();
+
+
+// Single variable in a block scope.
+BeginTest("Local 5");
+
+function local_5(a) {
+  {
+    let x = 5;
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:5}, 0, exec_state);
+  CheckScopeContent({a:1}, 1, exec_state);
+};
+local_5(1);
+EndTest();
+
+
+// Two variables in a block scope.
+BeginTest("Local 6");
+
+function local_6(a) {
+  {
+    let x = 6;
+    let y = 7;
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:6,y:7}, 0, exec_state);
+  CheckScopeContent({a:1}, 1, exec_state);
+};
+local_6(1);
+EndTest();
+
+
+// Single variable in a nested block scope.
+BeginTest("Local 7");
+
+function local_7(a) {
+  {
+    {
+      let x = 8;
+      debugger;
+    }
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:8}, 0, exec_state);
+  CheckScopeContent({a:1}, 1, exec_state);
+};
+local_7(1);
+EndTest();
+
+
+// Single empty with block.
+BeginTest("With block 1");
+
+function with_block_1() {
+  with({}) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+  CheckScopeContent({}, 1, exec_state);
+};
+with_block_1();
+EndTest();
+
+
+// Nested empty with blocks.
+BeginTest("With block 2");
+
+function with_block_2() {
+  with({}) {
+    with({}) {
+      debugger;
+    }
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+  CheckScopeContent({}, 1, exec_state);
+  CheckScopeContent({}, 2, exec_state);
+};
+with_block_2();
+EndTest();
+
+
+// With block using an in-place object literal.
+BeginTest("With block 3");
+
+function with_block_3() {
+  with({a:1,b:2}) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2}, 0, exec_state);
+};
+with_block_3();
+EndTest();
+
+
+// Nested with blocks using in-place object literals.
+BeginTest("With block 4");
+
+function with_block_4() {
+  with({a:1,b:2}) {
+    with({a:2,b:1}) {
+      debugger;
+    }
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:2,b:1}, 0, exec_state);
+  CheckScopeContent({a:1,b:2}, 1, exec_state);
+};
+with_block_4();
+EndTest();
+
+
+// With block and a block local variable.
+BeginTest("With block 5");
+
+function with_block_5() {
+  with({a:1}) {
+    let a = 2;
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:2}, 0, exec_state);
+  CheckScopeContent({a:1}, 1, exec_state);
+};
+with_block_5();
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to an outer
+// block local variable and an outer function's parameter.
+BeginTest("Closure 1");
+
+function closure_1(a) {
+  var x = 2;
+  let y = 3;
+  if (true) {
+    let z = 4;
+    function f() {
+      debugger;
+      return a + x + y + z;
+    };
+    return f;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Block,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+  CheckScopeContent({a:1,x:2,y:3}, 2, exec_state);
+};
+closure_1(1)();
+EndTest();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/harmony/debug-evaluate-blockscopes.js
similarity index 63%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/harmony/debug-evaluate-blockscopes.js
index aa93b25..549960a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/harmony/debug-evaluate-blockscopes.js
@@ -25,12 +25,40 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --expose-debug-as debug --harmony-block-scoping
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+// Test debug evaluation for functions without local context, but with
+// nested block and catch contexts.
 
-test();
+function f() {
+  {                   // Line 1.
+    let i = 1;        // Line 2.
+    try {             // Line 3.
+      throw 'stuff';  // Line 4.
+    } catch (e) {     // Line 5.
+      x = 2;          // Line 6.
+    }
+  }
+};
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+// Set breakpoint on line 6.
+var bp = Debug.setBreakPoint(f, 6);
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    result = exec_state.frame().evaluate("i").value();
+  }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+result = -1;
+f();
+assertEquals(1, result);
+
+// Clear breakpoint.
+Debug.clearBreakPoint(bp);
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/harmony/proxies.js b/test/mjsunit/harmony/proxies.js
new file mode 100644
index 0000000..3c4e5f6
--- /dev/null
+++ b/test/mjsunit/harmony/proxies.js
@@ -0,0 +1,1682 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies
+
+
+// TODO(rossberg): for-in for proxies not implemented.
+// TODO(rossberg): inheritance from proxies not implemented.
+// TODO(rossberg): function proxies as constructors not implemented.
+
+
+// Helper.
+
+function TestWithProxies(test, handler) {
+  test(handler, Proxy.create)
+  test(handler, function(h) {return Proxy.createFunction(h, function() {})})
+}
+
+
+// Getters.
+
+function TestGet(handler) {
+  TestWithProxies(TestGet2, handler)
+}
+
+function TestGet2(handler, create) {
+  var p = create(handler)
+  assertEquals(42, p.a)
+  assertEquals(42, p["b"])
+
+  // TODO(rossberg): inheritance from proxies not yet implemented.
+  // var o = Object.create(p, {x: {value: 88}})
+  // assertEquals(42, o.a)
+  // assertEquals(42, o["b"])
+  // assertEquals(88, o.x)
+  // assertEquals(88, o["x"])
+}
+
+TestGet({
+  get: function(r, k) { return 42 }
+})
+
+TestGet({
+  get: function(r, k) { return this.get2(r, k) },
+  get2: function(r, k) { return 42 }
+})
+
+TestGet({
+  getPropertyDescriptor: function(k) { return {value: 42} }
+})
+
+TestGet({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) { return {value: 42} }
+})
+
+TestGet({
+  getPropertyDescriptor: function(k) {
+    return {get value() { return 42 }}
+  }
+})
+
+TestGet({
+  get: undefined,
+  getPropertyDescriptor: function(k) { return {value: 42} }
+})
+
+TestGet(Proxy.create({
+  get: function(pr, pk) {
+    return function(r, k) { return 42 }
+  }
+}))
+
+
+function TestGetCall(handler) {
+  TestWithProxies(TestGetCall2, handler)
+}
+
+function TestGetCall2(handler, create) {
+  var p = create(handler)
+  assertEquals(55, p.f())
+  assertEquals(55, p.f("unused", "arguments"))
+  assertEquals(55, p.f.call(p))
+  assertEquals(55, p.withargs(45, 5))
+  assertEquals(55, p.withargs.call(p, 11, 22))
+  assertEquals("6655", "66" + p)  // calls p.toString
+}
+
+TestGetCall({
+  get: function(r, k) { return function() { return 55 } }
+})
+
+TestGetCall({
+  get: function(r, k) { return this.get2(r, k) },
+  get2: function(r, k) { return function() { return 55 } }
+})
+
+TestGetCall({
+  getPropertyDescriptor: function(k) {
+    return {value: function() { return 55 }}
+  }
+})
+
+TestGetCall({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) {
+    return {value: function() { return 55 }}
+  }
+})
+
+TestGetCall({
+  getPropertyDescriptor: function(k) {
+    return {get value() { return function() { return 55 } }}
+  }
+})
+
+TestGetCall({
+  get: undefined,
+  getPropertyDescriptor: function(k) {
+    return {value: function() { return 55 }}
+  }
+})
+
+TestGetCall({
+  get: function(r, k) {
+    if (k == "gg") {
+      return function() { return 55 }
+    } else if (k == "withargs") {
+      return function(n, m) { return n + m * 2 }
+    } else {
+      return function() { return this.gg() }
+    }
+  }
+})
+
+TestGetCall(Proxy.create({
+  get: function(pr, pk) {
+    return function(r, k) { return function() { return 55 } }
+  }
+}))
+
+
+function TestGetThrow(handler) {
+  TestWithProxies(TestGetThrow2, handler)
+}
+
+function TestGetThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ p.a }, "myexn")
+  assertThrows(function(){ p["b"] }, "myexn")
+}
+
+TestGetThrow({
+  get: function(r, k) { throw "myexn" }
+})
+
+TestGetThrow({
+  get: function(r, k) { return this.get2(r, k) },
+  get2: function(r, k) { throw "myexn" }
+})
+
+TestGetThrow({
+  getPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestGetThrow({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestGetThrow({
+  getPropertyDescriptor: function(k) {
+    return {get value() { throw "myexn" }}
+  }
+})
+
+TestGetThrow({
+  get: undefined,
+  getPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestGetThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestGetThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(r, k) { throw "myexn" }
+  }
+}))
+
+
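+// Illustrative sketch, not part of the original change set: a minimal handler
+// for the pre-ES6 proxy API exercised above. With Proxy.create, property
+// reads are routed through the get(receiver, name) trap when it is defined.
+var sketchLog = []
+var sketchProxy = Proxy.create({
+  get: function(receiver, name) { sketchLog.push(name); return name.length }
+})
+assertEquals(3, sketchProxy.abc)     // trap result: length of the key "abc"
+assertEquals("abc", sketchLog[0])    // trap observed the requested key
+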
+
+// Setters.
+
+var key
+var val
+
+function TestSet(handler, create) {
+  TestWithProxies(TestSet2, handler)
+}
+
+function TestSet2(handler, create) {
+  var p = create(handler)
+  assertEquals(42, p.a = 42)
+  assertEquals("a", key)
+  assertEquals(42, val)
+  assertEquals(43, p["b"] = 43)
+  assertEquals("b", key)
+  assertEquals(43, val)
+}
+
+TestSet({
+  set: function(r, k, v) { key = k; val = v; return true }
+})
+
+TestSet({
+  set: function(r, k, v) { return this.set2(r, k, v) },
+  set2: function(r, k, v) { key = k; val = v; return true }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) { return {writable: true} },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { return {writable: true} },
+  defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
+  defineProperty2: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) {
+    return {get writable() { return true }}
+  },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) {
+    return {set: function(v) { key = k; val = v }}
+  }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) { return {writable: true} },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) {
+    return {get writable() { return true }}
+  },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) {
+    return {set: function(v) { key = k; val = v }}
+  }
+})
+
+TestSet({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) { return null },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSet(Proxy.create({
+  get: function(pr, pk) {
+    return function(r, k, v) { key = k; val = v; return true }
+  }
+}))
+
+
+
+function TestSetThrow(handler, create) {
+  TestWithProxies(TestSetThrow2, handler)
+}
+
+function TestSetThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ p.a = 42 }, "myexn")
+  assertThrows(function(){ p["b"] = 42 }, "myexn")
+}
+
+TestSetThrow({
+  set: function(r, k, v) { throw "myexn" }
+})
+
+TestSetThrow({
+  set: function(r, k, v) { return this.set2(r, k, v) },
+  set2: function(r, k, v) { throw "myexn" }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { return {writable: true} },
+  defineProperty: function(k, desc) { throw "myexn" }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" },
+  defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
+  defineProperty2: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { return {writable: true} },
+  defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
+  defineProperty2: function(k, desc) { throw "myexn" }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return {get writable() { return true }}
+  },
+  defineProperty: function(k, desc) { throw "myexn" }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return {set: function(v) { throw "myexn" }}
+  }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" },
+  getPropertyDescriptor: function(k) { return {writable: true} },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) { throw "myexn" },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) { return {writable: true} },
+  defineProperty: function(k, desc) { throw "myexn" }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) {
+    return {get writable() { throw "myexn" }}
+  },
+  defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) {
+    return {set: function(v) { throw "myexn" }}
+  }
+})
+
+TestSetThrow({
+  getOwnPropertyDescriptor: function(k) { return null },
+  getPropertyDescriptor: function(k) { return null },
+  defineProperty: function(k, desc) { throw "myexn" }
+})
+
+TestSetThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestSetThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(r, k, v) { throw "myexn" }
+  }
+}))
+
+
+
+// Property definition (Object.defineProperty and Object.defineProperties).
+
+var key
+var desc
+
+function TestDefine(handler) {
+  TestWithProxies(TestDefine2, handler)
+}
+
+function TestDefine2(handler, create) {
+  var p = create(handler)
+  assertEquals(p, Object.defineProperty(p, "a", {value: 44}))
+  assertEquals("a", key)
+  assertEquals(1, Object.getOwnPropertyNames(desc).length)
+  assertEquals(44, desc.value)
+
+  assertEquals(p, Object.defineProperty(p, "b", {value: 45, writable: false}))
+  assertEquals("b", key)
+  assertEquals(2, Object.getOwnPropertyNames(desc).length)
+  assertEquals(45, desc.value)
+  assertEquals(false, desc.writable)
+
+  assertEquals(p, Object.defineProperty(p, "c", {value: 46, enumerable: false}))
+  assertEquals("c", key)
+  assertEquals(2, Object.getOwnPropertyNames(desc).length)
+  assertEquals(46, desc.value)
+  assertEquals(false, desc.enumerable)
+
+  var attributes = {configurable: true, mine: 66, minetoo: 23}
+  assertEquals(p, Object.defineProperty(p, "d", attributes))
+  assertEquals("d", key)
+  // Modifying the attributes object after the fact should have no effect.
+  attributes.configurable = false
+  attributes.mine = 77
+  delete attributes.minetoo
+  assertEquals(3, Object.getOwnPropertyNames(desc).length)
+  assertEquals(true, desc.configurable)
+  assertEquals(66, desc.mine)
+  assertEquals(23, desc.minetoo)
+
+  assertEquals(p, Object.defineProperty(p, "e", {get: function(){ return 5 }}))
+  assertEquals("e", key)
+  assertEquals(1, Object.getOwnPropertyNames(desc).length)
+  assertEquals(5, desc.get())
+
+  assertEquals(p, Object.defineProperty(p, "zzz", {}))
+  assertEquals("zzz", key)
+  assertEquals(0, Object.getOwnPropertyNames(desc).length)
+
+// TODO(rossberg): This test requires for-in on proxies.
+//  var d = create({
+//    get: function(r, k) { return (k === "value") ? 77 : void 0 },
+//    getOwnPropertyNames: function() { return ["value"] }
+//  })
+//  assertEquals(1, Object.getOwnPropertyNames(d).length)
+//  assertEquals(77, d.value)
+//  assertEquals(p, Object.defineProperty(p, "p", d))
+//  assertEquals("p", key)
+//  assertEquals(1, Object.getOwnPropertyNames(desc).length)
+//  assertEquals(77, desc.value)
+
+  var props = {
+    'bla': {},
+    blub: {get: function() { return true }},
+    '': {get value() { return 20 }},
+    last: {value: 21, configurable: true, mine: "eyes"}
+  }
+  Object.defineProperty(props, "hidden", {value: "hidden", enumerable: false})
+  assertEquals(p, Object.defineProperties(p, props))
+  assertEquals("last", key)
+  assertEquals(2, Object.getOwnPropertyNames(desc).length)
+  assertEquals(21, desc.value)
+  assertEquals(true, desc.configurable)
+  assertEquals(undefined, desc.mine)  // Arguably a bug in the spec...
+
+  var props = {bla: {get value() { throw "myexn" }}}
+  assertThrows(function(){ Object.defineProperties(p, props) }, "myexn")
+}
+
+TestDefine({
+  defineProperty: function(k, d) { key = k; desc = d; return true }
+})
+
+TestDefine({
+  defineProperty: function(k, d) { return this.defineProperty2(k, d) },
+  defineProperty2: function(k, d) { key = k; desc = d; return true }
+})
+
+TestDefine(Proxy.create({
+  get: function(pr, pk) {
+    return function(k, d) { key = k; desc = d; return true }
+  }
+}))
+
+
+function TestDefineThrow(handler) {
+  TestWithProxies(TestDefineThrow2, handler)
+}
+
+function TestDefineThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ Object.defineProperty(p, "a", {value: 44})}, "myexn")
+
+// TODO(rossberg): These tests require for-in on proxies.
+//  var d1 = create({
+//    get: function(r, k) { throw "myexn" },
+//    getOwnPropertyNames: function() { return ["value"] }
+//  })
+//  assertThrows(function(){ Object.defineProperty(p, "p", d1) }, "myexn")
+//  var d2 = create({
+//    get: function(r, k) { return 77 },
+//    getOwnPropertyNames: function() { throw "myexn" }
+//  })
+//  assertThrows(function(){ Object.defineProperty(p, "p", d2) }, "myexn")
+
+  var props = {bla: {get value() { throw "otherexn" }}}
+  assertThrows(function(){ Object.defineProperties(p, props) }, "otherexn")
+}
+
+TestDefineThrow({
+  defineProperty: function(k, d) { throw "myexn" }
+})
+
+TestDefineThrow({
+  defineProperty: function(k, d) { return this.defineProperty2(k, d) },
+  defineProperty2: function(k, d) { throw "myexn" }
+})
+
+TestDefineThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestDefineThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k, d) { throw "myexn" }
+  }
+}))
+
+
+
+// Property deletion (delete).
+
+var key
+
+function TestDelete(handler) {
+  TestWithProxies(TestDelete2, handler)
+}
+
+function TestDelete2(handler, create) {
+  var p = create(handler)
+  assertEquals(true, delete p.a)
+  assertEquals("a", key)
+  assertEquals(true, delete p["b"])
+  assertEquals("b", key)
+
+  assertEquals(false, delete p.z1)
+  assertEquals("z1", key)
+  assertEquals(false, delete p["z2"])
+  assertEquals("z2", key);
+
+  (function() {
+    "use strict"
+    assertEquals(true, delete p.c)
+    assertEquals("c", key)
+    assertEquals(true, delete p["d"])
+    assertEquals("d", key)
+
+    assertThrows(function(){ delete p.z3 }, TypeError)
+    assertEquals("z3", key)
+    assertThrows(function(){ delete p["z4"] }, TypeError)
+    assertEquals("z4", key)
+  })()
+}
+
+TestDelete({
+  delete: function(k) { key = k; return k < "z" }
+})
+
+TestDelete({
+  delete: function(k) { return this.delete2(k) },
+  delete2: function(k) { key = k; return k < "z" }
+})
+
+TestDelete(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { key = k; return k < "z" }
+  }
+}))
+
+
+function TestDeleteThrow(handler) {
+  TestWithProxies(TestDeleteThrow2, handler)
+}
+
+function TestDeleteThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ delete p.a }, "myexn")
+  assertThrows(function(){ delete p["b"] }, "myexn");
+
+  (function() {
+    "use strict"
+    assertThrows(function(){ delete p.c }, "myexn")
+    assertThrows(function(){ delete p["d"] }, "myexn")
+  })()
+}
+
+TestDeleteThrow({
+  delete: function(k) { throw "myexn" }
+})
+
+TestDeleteThrow({
+  delete: function(k) { return this.delete2(k) },
+  delete2: function(k) { throw "myexn" }
+})
+
+TestDeleteThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestDeleteThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { throw "myexn" }
+  }
+}))
+
+
+
+// Property descriptors (Object.getOwnPropertyDescriptor).
+
+function TestDescriptor(handler) {
+  TestWithProxies(TestDescriptor2, handler)
+}
+
+function TestDescriptor2(handler, create) {
+  var p = create(handler)
+  var descs = [
+    {configurable: true},
+    {value: 34, enumerable: true, configurable: true},
+    {value: 3, writable: false, mine: "eyes", configurable: true},
+    {get value() { return 20 }, get configurable() { return true }},
+    {get: function() { "get" }, set: function() { "set" }, configurable: true}
+  ]
+  for (var i = 0; i < descs.length; ++i) {
+    assertEquals(p, Object.defineProperty(p, i, descs[i]))
+    var desc = Object.getOwnPropertyDescriptor(p, i)
+    for (prop in descs[i]) {
+      // TODO(rossberg): Ignore user attributes as long as the spec isn't
+      // fixed suitably.
+      if (prop != "mine") assertEquals(descs[i][prop], desc[prop])
+    }
+    assertEquals(undefined, Object.getOwnPropertyDescriptor(p, "absent"))
+  }
+}
+
+TestDescriptor({
+  defineProperty: function(k, d) { this["__" + k] = d; return true },
+  getOwnPropertyDescriptor: function(k) { return this["__" + k] }
+})
+
+TestDescriptor({
+  defineProperty: function(k, d) { this["__" + k] = d; return true },
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { return this["__" + k] }
+})
+
+
+function TestDescriptorThrow(handler) {
+  TestWithProxies(TestDescriptorThrow2, handler)
+}
+
+function TestDescriptorThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
+}
+
+TestDescriptorThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestDescriptorThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+
+
+// Comparison.
+
+function TestComparison(eq) {
+  TestWithProxies(TestComparison2, eq)
+}
+
+function TestComparison2(eq, create) {
+  var p1 = create({})
+  var p2 = create({})
+
+  assertTrue(eq(p1, p1))
+  assertTrue(eq(p2, p2))
+  assertTrue(!eq(p1, p2))
+  assertTrue(!eq(p1, {}))
+  assertTrue(!eq({}, p2))
+  assertTrue(!eq({}, {}))
+}
+
+TestComparison(function(o1, o2) { return o1 == o2 })
+TestComparison(function(o1, o2) { return o1 === o2 })
+TestComparison(function(o1, o2) { return !(o1 != o2) })
+TestComparison(function(o1, o2) { return !(o1 !== o2) })
+
+
+
+// Type (typeof).
+
+function TestTypeof() {
+  assertEquals("object", typeof Proxy.create({}))
+  assertTrue(typeof Proxy.create({}) == "object")
+  assertTrue("object" == typeof Proxy.create({}))
+
+  assertEquals("function", typeof Proxy.createFunction({}, function() {}))
+  assertTrue(typeof Proxy.createFunction({}, function() {}) == "function")
+  assertTrue("function" == typeof Proxy.createFunction({}, function() {}))
+}
+
+TestTypeof()
+
+
+
+// Membership test (in).
+
+var key
+
+function TestIn(handler) {
+  TestWithProxies(TestIn2, handler)
+}
+
+function TestIn2(handler, create) {
+  var p = create(handler)
+  assertTrue("a" in p)
+  assertEquals("a", key)
+  assertTrue(99 in p)
+  assertEquals("99", key)
+  assertFalse("z" in p)
+  assertEquals("z", key)
+
+  assertEquals(2, ("a" in p) ? 2 : 0)
+  assertEquals(0, !("a" in p) ? 2 : 0)
+  assertEquals(0, ("zzz" in p) ? 2 : 0)
+  assertEquals(2, !("zzz" in p) ? 2 : 0)
+
+  if ("b" in p) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("b", key)
+
+  if ("zz" in p) {
+    assertTrue(false)
+  }
+  assertEquals("zz", key)
+
+  if (!("c" in p)) {
+    assertTrue(false)
+  }
+  assertEquals("c", key)
+
+  if (!("zzz" in p)) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("zzz", key)
+}
+
+TestIn({
+  has: function(k) { key = k; return k < "z" }
+})
+
+TestIn({
+  has: function(k) { return this.has2(k) },
+  has2: function(k) { key = k; return k < "z" }
+})
+
+TestIn({
+  getPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42} : void 0
+  }
+})
+
+TestIn({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42} : void 0
+  }
+})
+
+TestIn({
+  getPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {get value() { return 42 }} : void 0
+  }
+})
+
+TestIn({
+  get: undefined,
+  getPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42} : void 0
+  }
+})
+
+TestIn(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { key = k; return k < "z" }
+  }
+}))
+
+
+function TestInThrow(handler) {
+  TestWithProxies(TestInThrow2, handler)
+}
+
+function TestInThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ return "a" in p }, "myexn")
+  assertThrows(function(){ return !("a" in p) }, "myexn")
+  assertThrows(function(){ return ("a" in p) ? 2 : 3 }, "myexn")
+  assertThrows(function(){ if ("b" in p) {} }, "myexn")
+  assertThrows(function(){ if (!("b" in p)) {} }, "myexn")
+  assertThrows(function(){ if ("zzz" in p) {} }, "myexn")
+}
+
+TestInThrow({
+  has: function(k) { throw "myexn" }
+})
+
+TestInThrow({
+  has: function(k) { return this.has2(k) },
+  has2: function(k) { throw "myexn" }
+})
+
+TestInThrow({
+  getPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestInThrow({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestInThrow({
+  get: undefined,
+  getPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestInThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestInThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { throw "myexn" }
+  }
+}))
+
+
+
+// Own Properties (Object.prototype.hasOwnProperty).
+
+var key
+
+function TestHasOwn(handler) {
+  TestWithProxies(TestHasOwn2, handler)
+}
+
+function TestHasOwn2(handler, create) {
+  var p = create(handler)
+  assertTrue(Object.prototype.hasOwnProperty.call(p, "a"))
+  assertEquals("a", key)
+  assertTrue(Object.prototype.hasOwnProperty.call(p, 99))
+  assertEquals("99", key)
+  assertFalse(Object.prototype.hasOwnProperty.call(p, "z"))
+  assertEquals("z", key)
+}
+
+TestHasOwn({
+  hasOwn: function(k) { key = k; return k < "z" }
+})
+
+TestHasOwn({
+  hasOwn: function(k) { return this.hasOwn2(k) },
+  hasOwn2: function(k) { key = k; return k < "z" }
+})
+
+TestHasOwn({
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42} : void 0
+  }
+})
+
+TestHasOwn({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42} : void 0
+  }
+})
+
+TestHasOwn({
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {get value() { return 42 }} : void 0
+  }
+})
+
+TestHasOwn({
+  hasOwn: undefined,
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42} : void 0
+  }
+})
+
+TestHasOwn(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { key = k; return k < "z" }
+  }
+}))
+
+
+function TestHasOwnThrow(handler) {
+  TestWithProxies(TestHasOwnThrow2, handler)
+}
+
+function TestHasOwnThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ Object.prototype.hasOwnProperty.call(p, "a")},
+    "myexn")
+  assertThrows(function(){ Object.prototype.hasOwnProperty.call(p, 99)},
+    "myexn")
+}
+
+TestHasOwnThrow({
+  hasOwn: function(k) { throw "myexn" }
+})
+
+TestHasOwnThrow({
+  hasOwn: function(k) { return this.hasOwn2(k) },
+  hasOwn2: function(k) { throw "myexn" }
+})
+
+TestHasOwnThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestHasOwnThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestHasOwnThrow({
+  hasOwn: undefined,
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestHasOwnThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestHasOwnThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { throw "myexn" }
+  }
+}))
+
+
+
+// Instanceof (instanceof)
+
+function TestInstanceof() {
+  var o = {}
+  var p1 = Proxy.create({})
+  var p2 = Proxy.create({}, o)
+  var p3 = Proxy.create({}, p2)
+
+  var f0 = function() {}
+  f0.prototype = o
+  var f1 = function() {}
+  f1.prototype = p1
+  var f2 = function() {}
+  f2.prototype = p2
+
+  assertTrue(o instanceof Object)
+  assertFalse(o instanceof f0)
+  assertFalse(o instanceof f1)
+  assertFalse(o instanceof f2)
+  assertFalse(p1 instanceof Object)
+  assertFalse(p1 instanceof f0)
+  assertFalse(p1 instanceof f1)
+  assertFalse(p1 instanceof f2)
+  assertTrue(p2 instanceof Object)
+  assertTrue(p2 instanceof f0)
+  assertFalse(p2 instanceof f1)
+  assertFalse(p2 instanceof f2)
+  assertTrue(p3 instanceof Object)
+  assertTrue(p3 instanceof f0)
+  assertFalse(p3 instanceof f1)
+  assertTrue(p3 instanceof f2)
+
+  var f = Proxy.createFunction({}, function() {})
+  assertTrue(f instanceof Function)
+}
+
+TestInstanceof()
+
+
+
+// Prototype (Object.getPrototypeOf, Object.prototype.isPrototypeOf).
+
+function TestPrototype() {
+  var o = {}
+  var p1 = Proxy.create({})
+  var p2 = Proxy.create({}, o)
+  var p3 = Proxy.create({}, p2)
+  var p4 = Proxy.create({}, 666)
+
+  assertSame(Object.getPrototypeOf(o), Object.prototype)
+  assertSame(Object.getPrototypeOf(p1), null)
+  assertSame(Object.getPrototypeOf(p2), o)
+  assertSame(Object.getPrototypeOf(p3), p2)
+  assertSame(Object.getPrototypeOf(p4), null)
+
+  assertTrue(Object.prototype.isPrototypeOf(o))
+  assertFalse(Object.prototype.isPrototypeOf(p1))
+  assertTrue(Object.prototype.isPrototypeOf(p2))
+  assertTrue(Object.prototype.isPrototypeOf(p3))
+  assertFalse(Object.prototype.isPrototypeOf(p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o))
+  assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(o, o))
+  assertFalse(Object.prototype.isPrototypeOf.call(o, p1))
+  assertTrue(Object.prototype.isPrototypeOf.call(o, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(o, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(o, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, p1))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, o))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, p2))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(p2, p1))
+  assertFalse(Object.prototype.isPrototypeOf.call(p2, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(p2, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(p2, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(p3, p2))
+
+  var f = Proxy.createFunction({}, function() {})
+  assertSame(Object.getPrototypeOf(f), Function.prototype)
+  assertTrue(Object.prototype.isPrototypeOf(f))
+  assertTrue(Object.prototype.isPrototypeOf.call(Function.prototype, f))
+}
+
+TestPrototype()
+
+
+
+// Property names (Object.getOwnPropertyNames, Object.keys).
+
+function TestPropertyNames(names, handler) {
+  TestWithProxies(TestPropertyNames2, [names, handler])
+}
+
+function TestPropertyNames2(names_handler, create) {
+  var p = create(names_handler[1])
+  assertArrayEquals(names_handler[0], Object.getOwnPropertyNames(p))
+}
+
+TestPropertyNames([], {
+  getOwnPropertyNames: function() { return [] }
+})
+
+TestPropertyNames(["a", "zz", " ", "0"], {
+  getOwnPropertyNames: function() { return ["a", "zz", " ", 0] }
+})
+
+TestPropertyNames(["throw", "function "], {
+  getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
+  getOwnPropertyNames2: function() { return ["throw", "function "] }
+})
+
+TestPropertyNames(["[object Object]"], {
+  get getOwnPropertyNames() {
+    return function() { return [{}] }
+  }
+})
+
+
+function TestPropertyNamesThrow(handler) {
+  TestWithProxies(TestPropertyNamesThrow2, handler)
+}
+
+function TestPropertyNamesThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ Object.getOwnPropertyNames(p) }, "myexn")
+}
+
+TestPropertyNamesThrow({
+  getOwnPropertyNames: function() { throw "myexn" }
+})
+
+TestPropertyNamesThrow({
+  getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
+  getOwnPropertyNames2: function() { throw "myexn" }
+})
+
+
+function TestKeys(names, handler) {
+  TestWithProxies(TestKeys2, [names, handler])
+}
+
+function TestKeys2(names_handler, create) {
+  var p = create(names_handler[1])
+  assertArrayEquals(names_handler[0], Object.keys(p))
+}
+
+TestKeys([], {
+  keys: function() { return [] }
+})
+
+TestKeys(["a", "zz", " ", "0"], {
+  keys: function() { return ["a", "zz", " ", 0] }
+})
+
+TestKeys(["throw", "function "], {
+  keys: function() { return this.keys2() },
+  keys2: function() { return ["throw", "function "] }
+})
+
+TestKeys(["[object Object]"], {
+  get keys() {
+    return function() { return [{}] }
+  }
+})
+
+TestKeys(["a", "0"], {
+  getOwnPropertyNames: function() { return ["a", 23, "zz", "", 0] },
+  getOwnPropertyDescriptor: function(k) { return {enumerable: k.length == 1} }
+})
+
+TestKeys(["23", "zz", ""], {
+  getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
+  getOwnPropertyNames2: function() { return ["a", 23, "zz", "", 0] },
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { return {enumerable: k.length != 1} }
+})
+
+TestKeys(["a", "b", "c", "5"], {
+  get getOwnPropertyNames() {
+    return function() { return ["0", 4, "a", "b", "c", 5] }
+  },
+  get getOwnPropertyDescriptor() {
+    return function(k) { return {enumerable: k >= "44"} }
+  }
+})
+
+TestKeys([], {
+  get getOwnPropertyNames() {
+    return function() { return ["a", "b", "c"] }
+  },
+  getOwnPropertyDescriptor: function(k) { return {} }
+})
+
+
+function TestKeysThrow(handler) {
+  TestWithProxies(TestKeysThrow2, handler)
+}
+
+function TestKeysThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ Object.keys(p) }, "myexn")
+}
+
+TestKeysThrow({
+  keys: function() { throw "myexn" }
+})
+
+TestKeysThrow({
+  keys: function() { return this.keys2() },
+  keys2: function() { throw "myexn" }
+})
+
+TestKeysThrow({
+  getOwnPropertyNames: function() { throw "myexn" },
+  getOwnPropertyDescriptor: function(k) { return true }
+})
+
+TestKeysThrow({
+  getOwnPropertyNames: function() { return [1, 2] },
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestKeysThrow({
+  getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
+  getOwnPropertyNames2: function() { throw "myexn" },
+})
+
+TestKeysThrow({
+  getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
+  getOwnPropertyNames2: function() { return [1, 2] },
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestKeysThrow({
+  get getOwnPropertyNames() { throw "myexn" }
+})
+
+TestKeysThrow({
+  get getOwnPropertyNames() {
+    return function() { throw "myexn" }
+  },
+})
+
+TestKeysThrow({
+  get getOwnPropertyNames() {
+    return function() { return [1, 2] }
+  },
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
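(Editor's note, not part of the patch.) The TestKeys/TestKeysThrow cases above rely on the derived behaviour of Object.keys when the handler has no "keys" trap: the engine falls back to getOwnPropertyNames plus one getOwnPropertyDescriptor call per name, coerces names to strings, and keeps only the enumerable ones. A minimal stand-alone sketch of that fallback, with a hypothetical helper name:

function derivedKeys(handler) {
  var names = handler.getOwnPropertyNames()
  var result = []
  for (var i = 0; i < names.length; i++) {
    var name = String(names[i])  // names are coerced to strings
    var desc = handler.getOwnPropertyDescriptor(name)
    if (desc !== undefined && desc.enumerable) result.push(name)
  }
  return result
}
// Mirrors TestKeys(["a", "0"], ...) above:
// derivedKeys({
//   getOwnPropertyNames: function() { return ["a", 23, "zz", "", 0] },
//   getOwnPropertyDescriptor: function(k) { return {enumerable: k.length == 1} }
// })  returns ["a", "0"]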
+
+
+
+// Fixing (Object.freeze, Object.seal, Object.preventExtensions,
+//         Object.isFrozen, Object.isSealed, Object.isExtensible)
+
+// TODO(rossberg): use TestWithProxies to include function proxies
+function TestFix(names, handler) {
+  var proto = {p: 77}
+  var assertFixing = function(o, s, f, e) {
+    assertEquals(s, Object.isSealed(o))
+    assertEquals(f, Object.isFrozen(o))
+    assertEquals(e, Object.isExtensible(o))
+  }
+
+  var p1 = Proxy.create(handler, proto)
+  assertFixing(p1, false, false, true)
+  Object.seal(p1)
+  assertFixing(p1, true, names.length === 0, false)
+  assertArrayEquals(names.sort(), Object.getOwnPropertyNames(p1).sort())
+  assertArrayEquals(names.filter(function(x) {return x < "z"}).sort(),
+                    Object.keys(p1).sort())
+  assertEquals(proto, Object.getPrototypeOf(p1))
+  assertEquals(77, p1.p)
+  for (var n in p1) {
+    var desc = Object.getOwnPropertyDescriptor(p1, n)
+    if (desc !== undefined) assertFalse(desc.configurable)
+  }
+
+  var p2 = Proxy.create(handler, proto)
+  assertFixing(p2, false, false, true)
+  Object.freeze(p2)
+  assertFixing(p2, true, true, false)
+  assertArrayEquals(names.sort(), Object.getOwnPropertyNames(p2).sort())
+  assertArrayEquals(names.filter(function(x) {return x < "z"}).sort(),
+                    Object.keys(p2).sort())
+  assertEquals(proto, Object.getPrototypeOf(p2))
+  assertEquals(77, p2.p)
+  for (var n in p2) {
+    var desc = Object.getOwnPropertyDescriptor(p2, n)
+    if (desc !== undefined) assertFalse(desc.writable)
+    if (desc !== undefined) assertFalse(desc.configurable)
+  }
+
+  var p3 = Proxy.create(handler, proto)
+  assertFixing(p3, false, false, true)
+  Object.preventExtensions(p3)
+  assertFixing(p3, names.length === 0, names.length === 0, false)
+  assertArrayEquals(names.sort(), Object.getOwnPropertyNames(p3).sort())
+  assertArrayEquals(names.filter(function(x) {return x < "z"}).sort(),
+                    Object.keys(p3).sort())
+  assertEquals(proto, Object.getPrototypeOf(p3))
+  assertEquals(77, p3.p)
+}
+
+TestFix([], {
+  fix: function() { return {} }
+})
+
+TestFix(["a", "b", "c", "d", "zz"], {
+  fix: function() {
+    return {
+      a: {value: "a", writable: true, configurable: false, enumerable: true},
+      b: {value: 33, writable: false, configurable: false, enumerable: true},
+      c: {value: 0, writable: true, configurable: true, enumerable: true},
+      d: {value: true, writable: false, configurable: true, enumerable: true},
+      zz: {value: 0, enumerable: false}
+    }
+  }
+})
+
+TestFix(["a"], {
+  fix: function() { return this.fix2() },
+  fix2: function() {
+    return {a: {value: 4, writable: true, configurable: true, enumerable: true}}
+  }
+})
+
+TestFix(["b"], {
+  get fix() {
+    return function() {
+      return {b: {configurable: true, writable: true, enumerable: true}}
+    }
+  }
+})
+
+
+function TestFixFunction(fix) {
+  var f1 = Proxy.createFunction({
+    fix: function() { return {} }
+  }, function() {})
+  fix(f1)
+  assertEquals(0, f1.length)
+
+  var f2 = Proxy.createFunction({
+    fix: function() { return {length: {value: 3}} }
+  }, function() {})
+  fix(f2)
+  assertEquals(3, f2.length)
+
+  var f3 = Proxy.createFunction({
+    fix: function() { return {length: {value: "huh"}} }
+  }, function() {})
+  fix(f3)
+  assertEquals(0, f1.length)
+}
+
+TestFixFunction(Object.seal)
+TestFixFunction(Object.freeze)
+TestFixFunction(Object.preventExtensions)
+
+
+function TestFixThrow(handler) {
+  TestWithProxies(TestFixThrow2, handler)
+}
+
+function TestFixThrow2(handler) {
+  var p = Proxy.create(handler, {})
+  assertThrows(function(){ Object.seal(p) }, "myexn")
+  assertThrows(function(){ Object.freeze(p) }, "myexn")
+  assertThrows(function(){ Object.preventExtensions(p) }, "myexn")
+}
+
+TestFixThrow({
+  fix: function() { throw "myexn" }
+})
+
+TestFixThrow({
+  fix: function() { return this.fix2() },
+  fix2: function() { throw "myexn" }
+})
+
+TestFixThrow({
+  get fix() { throw "myexn" }
+})
+
+TestFixThrow({
+  get fix() {
+    return function() { throw "myexn" }
+  }
+})
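(Editor's note, not part of the patch.) The TestFix cases above exercise the fix-trap protocol: Object.seal/freeze/preventExtensions on a proxy calls handler.fix(), and the returned property-descriptor map becomes the proxy's fixed set of own properties while the prototype is kept. The real engine fixes the proxy in place; the hypothetical helper below only mirrors the shape of the data the trap returns:

function materializeFixed(handler, proto) {
  var descriptors = handler.fix()  // may throw, as in TestFixThrow
  if (descriptors === undefined) throw new TypeError("proxy cannot be fixed")
  return Object.freeze(Object.create(proto, descriptors))
}
var fixed = materializeFixed({
  fix: function() {
    return {a: {value: 4, writable: true, configurable: true, enumerable: true}}
  }
}, {p: 77})
// fixed.a == 4, fixed.p == 77, Object.isFrozen(fixed) == true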
+
+
+
+// String conversion (Object.prototype.toString,
+//                    Object.prototype.toLocaleString,
+//                    Function.prototype.toString)
+
+var key
+
+function TestToString(handler) {
+  var p = Proxy.create(handler)
+  key = ""
+  assertEquals("[object Object]", Object.prototype.toString.call(p))
+  assertEquals("", key)
+  assertEquals("my_proxy", Object.prototype.toLocaleString.call(p))
+  assertEquals("toString", key)
+
+  var f = Proxy.createFunction(handler, function() {})
+  key = ""
+  assertEquals("[object Function]", Object.prototype.toString.call(f))
+  assertEquals("", key)
+  assertEquals("my_proxy", Object.prototype.toLocaleString.call(f))
+  assertEquals("toString", key)
+  assertDoesNotThrow(function(){ Function.prototype.toString.call(f) })
+}
+
+TestToString({
+  get: function(r, k) { key = k; return function() { return "my_proxy" } }
+})
+
+TestToString({
+  get: function(r, k) { return this.get2(r, k) },
+  get2: function(r, k) { key = k; return function() { return "my_proxy" } }
+})
+
+TestToString(Proxy.create({
+  get: function(pr, pk) {
+    return function(r, k) { key = k; return function() { return "my_proxy" } }
+  }
+}))
+
+
+function TestToStringThrow(handler) {
+  var p = Proxy.create(handler)
+  assertEquals("[object Object]", Object.prototype.toString.call(p))
+  assertThrows(function(){ Object.prototype.toLocaleString.call(p) }, "myexn")
+
+  var f = Proxy.createFunction(handler, function() {})
+  assertEquals("[object Function]", Object.prototype.toString.call(f))
+  assertThrows(function(){ Object.prototype.toLocaleString.call(f) }, "myexn")
+}
+
+TestToStringThrow({
+  get: function(r, k) { throw "myexn" }
+})
+
+TestToStringThrow({
+  get: function(r, k) { return function() { throw "myexn" } }
+})
+
+TestToStringThrow({
+  get: function(r, k) { return this.get2(r, k) },
+  get2: function(r, k) { throw "myexn" }
+})
+
+TestToStringThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestToStringThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(r, k) { throw "myexn" }
+  }
+}))
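(Editor's note, not part of the patch.) The TestToString cases above probe an asymmetry: Object.prototype.toString reports the internal class without consulting the handler (key stays ""), while Object.prototype.toLocaleString calls this.toString() and therefore does go through the get trap. The same split is visible on a plain object:

var obj = { toString: function() { return "my_object" } }
Object.prototype.toString.call(obj)        // "[object Object]"
Object.prototype.toLocaleString.call(obj)  // "my_object"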
+
+
+
+// Value conversion (Object.prototype.valueOf)
+
+function TestValueOf(handler) {
+  TestWithProxies(TestValueOf2, handler)
+}
+
+function TestValueOf2(handler, create) {
+  var p = create(handler)
+  assertSame(p, Object.prototype.valueOf.call(p))
+}
+
+TestValueOf({})
+
+
+
+// Enumerability (Object.prototype.propertyIsEnumerable)
+
+var key
+
+function TestIsEnumerable(handler) {
+  TestWithProxies(TestIsEnumerable2, handler)
+}
+
+function TestIsEnumerable2(handler, create) {
+  var p = create(handler)
+  assertTrue(Object.prototype.propertyIsEnumerable.call(p, "a"))
+  assertEquals("a", key)
+  assertTrue(Object.prototype.propertyIsEnumerable.call(p, 2))
+  assertEquals("2", key)
+  assertFalse(Object.prototype.propertyIsEnumerable.call(p, "z"))
+  assertEquals("z", key)
+}
+
+TestIsEnumerable({
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return {enumerable: k < "z", configurable: true}
+  },
+})
+
+TestIsEnumerable({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k; return {enumerable: k < "z", configurable: true}
+  },
+})
+
+TestIsEnumerable({
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return {get enumerable() { return k < "z" }, configurable: true}
+  },
+})
+
+TestIsEnumerable(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) {
+      key = k; return {enumerable: k < "z", configurable: true}
+    }
+  }
+}))
+
+
+function TestIsEnumerableThrow(handler) {
+  TestWithProxies(TestIsEnumerableThrow2, handler)
+}
+
+function TestIsEnumerableThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ Object.prototype.propertyIsEnumerable.call(p, "a") },
+    "myexn")
+  assertThrows(function(){ Object.prototype.propertyIsEnumerable.call(p, 11) },
+    "myexn")
+}
+
+TestIsEnumerableThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestIsEnumerableThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestIsEnumerableThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return {get enumerable() { throw "myexn" }, configurable: true}
+  },
+})
+
+TestIsEnumerableThrow(Proxy.create({
+  get: function(pr, pk) { throw "myexn" }
+}))
+
+TestIsEnumerableThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { throw "myexn" }
+  }
+}))
+
+
+
+// Calling (call, Function.prototype.call, Function.prototype.apply,
+//          Function.prototype.bind).
+
+var global = this
+var receiver
+
+function TestCall(isStrict, callTrap) {
+  assertEquals(42, callTrap(5, 37))
+// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
+// assertEquals(isStrict ? undefined : global, receiver)
+
+  var f = Proxy.createFunction({fix: function() { return {} }}, callTrap)
+  receiver = 333
+  assertEquals(42, f(11, 31))
+  assertEquals(isStrict ? undefined : global, receiver)
+  var o = {}
+  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+  assertEquals(o, receiver)
+  assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
+  assertEquals(isStrict ? null : global, receiver)
+  assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
+  assertEquals(2, receiver.valueOf())
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+  assertEquals(o, receiver)
+  var ff = Function.prototype.bind.call(f, o, 12)
+  receiver = 333
+  assertEquals(42, ff(30))
+  assertEquals(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+  assertEquals(o, receiver)
+
+  Object.freeze(f)
+  receiver = 333
+  assertEquals(42, f(11, 31))
+// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
+// assertEquals(isStrict ? undefined : global, receiver)
+  receiver = 333
+  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+  assertEquals(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+  assertEquals(o, receiver)
+  receiver = 333
+  assertEquals(42, ff(30))
+  assertEquals(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+  assertEquals(o, receiver)
+}
+
+TestCall(false, function(x, y) {
+  receiver = this; return x + y
+})
+
+TestCall(true, function(x, y) {
+  "use strict";
+  receiver = this; return x + y
+})
+
+TestCall(false, Proxy.createFunction({}, function(x, y) {
+  receiver = this; return x + y
+}))
+
+TestCall(true, Proxy.createFunction({}, function(x, y) {
+  "use strict";
+  receiver = this; return x + y
+}))
+
+var p = Proxy.createFunction({fix: function() {return {}}}, function(x, y) {
+  receiver = this; return x + y
+})
+TestCall(false, p)
+Object.freeze(p)
+TestCall(false, p)
+
+
+function TestCallThrow(callTrap) {
+  var f = Proxy.createFunction({fix: function() {return {}}}, callTrap)
+  assertThrows(function(){ f(11) }, "myexn")
+  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+
+  Object.freeze(f)
+  assertThrows(function(){ f(11) }, "myexn")
+  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+}
+
+TestCallThrow(function() { throw "myexn" })
+TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+
+var p = Proxy.createFunction(
+  {fix: function() {return {}}}, function() { throw "myexn" })
+Object.freeze(p)
+TestCallThrow(p)
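(Editor's note, not part of the patch.) The expected values in TestCall come from a call trap that simply adds its two arguments, so bound arguments combine with call-site arguments and the bound receiver wins over a later thisArg. A plain-function illustration of that arithmetic and receiver behaviour:

var seenReceiver
function addTrap(x, y) { seenReceiver = this; return x + y }
var o = {}
Function.prototype.apply.call(addTrap, o, [17, 15])     // 32, seenReceiver === o
var bound = Function.prototype.bind.call(addTrap, o, 12)
bound(30)                                               // 42: 12 is prepended to 30
bound.apply({}, [20])                                   // 32: receiver stays o, not {}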
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/harmony/typeof.js
similarity index 86%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/harmony/typeof.js
index aa93b25..acde977 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/harmony/typeof.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --harmony-typeof
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+assertFalse(typeof null == 'object')
+assertFalse(typeof null === 'object')
+assertTrue(typeof null == 'null')
+assertTrue(typeof null === 'null')
+assertEquals("null", typeof null)
+assertSame("null", typeof null)
diff --git a/test/mjsunit/harmony/weakmaps.js b/test/mjsunit/harmony/weakmaps.js
new file mode 100644
index 0000000..7b5dcaf
--- /dev/null
+++ b/test/mjsunit/harmony/weakmaps.js
@@ -0,0 +1,167 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-weakmaps --expose-gc
+
+
+// Test valid getter and setter calls
+var m = new WeakMap;
+assertDoesNotThrow(function () { m.get(new Object) });
+assertDoesNotThrow(function () { m.set(new Object) });
+assertDoesNotThrow(function () { m.has(new Object) });
+assertDoesNotThrow(function () { m.delete(new Object) });
+
+
+// Test invalid getter and setter calls
+var m = new WeakMap;
+assertThrows(function () { m.get(undefined) }, TypeError);
+assertThrows(function () { m.set(undefined, 0) }, TypeError);
+assertThrows(function () { m.get(0) }, TypeError);
+assertThrows(function () { m.set(0, 0) }, TypeError);
+assertThrows(function () { m.get('a-key') }, TypeError);
+assertThrows(function () { m.set('a-key', 0) }, TypeError);
+
+
+// Test expected mapping behavior
+var m = new WeakMap;
+function TestMapping(map, key, value) {
+  map.set(key, value);
+  assertSame(value, map.get(key));
+}
+TestMapping(m, new Object, 23);
+TestMapping(m, new Object, 'the-value');
+TestMapping(m, new Object, new Object);
+
+
+// Test expected querying behavior
+var m = new WeakMap;
+var key = new Object;
+TestMapping(m, key, 'to-be-present');
+assertTrue(m.has(key));
+assertFalse(m.has(new Object));
+TestMapping(m, key, undefined);
+assertFalse(m.has(key));
+assertFalse(m.has(new Object));
+
+
+// Test expected deletion behavior
+var m = new WeakMap;
+var key = new Object;
+TestMapping(m, key, 'to-be-deleted');
+assertTrue(m.delete(key));
+assertFalse(m.delete(key));
+assertFalse(m.delete(new Object));
+assertSame(m.get(key), undefined);
+
+
+// Test GC of map with entry
+var m = new WeakMap;
+var key = new Object;
+m.set(key, 'not-collected');
+gc();
+assertSame('not-collected', m.get(key));
+
+
+// Test GC of map with chained entries
+var m = new WeakMap;
+var head = new Object;
+for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
+  m.set(key, new Object);
+}
+gc();
+var count = 0;
+for (key = head; key != undefined; key = m.get(key)) {
+  count++;
+}
+assertEquals(11, count);
+
+
+// Test property attribute [[Enumerable]]
+var m = new WeakMap;
+function props(x) {
+  var array = [];
+  for (var p in x) array.push(p);
+  return array.sort();
+}
+assertArrayEquals([], props(WeakMap));
+assertArrayEquals([], props(WeakMap.prototype));
+assertArrayEquals([], props(m));
+
+
+// Test arbitrary properties on weak maps
+var m = new WeakMap;
+function TestProperty(map, property, value) {
+  map[property] = value;
+  assertEquals(value, map[property]);
+}
+for (i = 0; i < 20; i++) {
+  TestProperty(m, i, 'val' + i);
+  TestProperty(m, 'foo' + i, 'bar' + i);
+}
+TestMapping(m, new Object, 'foobar');
+
+
+// Test direct constructor call
+var m = WeakMap();
+assertTrue(m instanceof WeakMap);
+
+
+// Test some common JavaScript idioms
+var m = new WeakMap;
+assertTrue(m instanceof WeakMap);
+assertTrue(WeakMap.prototype.set instanceof Function);
+assertTrue(WeakMap.prototype.get instanceof Function);
+assertTrue(WeakMap.prototype.has instanceof Function);
+assertTrue(WeakMap.prototype.delete instanceof Function);
+
+
+// Regression test for WeakMap prototype.
+assertTrue(WeakMap.prototype.constructor === WeakMap);
+assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype);
+
+
+// Regression test for issue 1617: The prototype of the WeakMap constructor
+// needs to be unique (i.e. different from the one of the Object constructor).
+assertFalse(WeakMap.prototype === Object.prototype);
+var o = Object.create({});
+assertFalse("get" in o);
+assertFalse("set" in o);
+assertEquals(undefined, o.get);
+assertEquals(undefined, o.set);
+var o = Object.create({}, { myValue: {
+  value: 10,
+  enumerable: false,
+  configurable: true,
+  writable: true
+}});
+assertEquals(10, o.myValue);
+
+
+// Stress Test
+// There is a proposed stress-test available at the es-discuss mailing list
+// which cannot be reasonably automated.  Check it out by hand if you like:
+// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
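(Editor's note, not part of the patch; assumes the same --harmony-weakmaps WeakMap API the file exercises.) The chained-entry GC test above depends on keys that are themselves values of earlier entries, so the whole chain is reachable only through the map and the head reference:

var chain = new WeakMap;
var head = new Object;
var node = head;
for (var i = 0; i < 3; i++) {
  var next = new Object;
  chain.set(node, next);   // each link is reachable only via the previous key
  node = next;
}
var length = 0;
for (var k = head; k !== undefined; k = chain.get(k)) length++;
// length == 4: head plus three links survive gc() as long as head is held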
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/hex-parsing.js
similarity index 80%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/hex-parsing.js
index aa93b25..6a42f49 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/hex-parsing.js
@@ -25,12 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+var k = 0x1000000000000081;
+assertEquals(1152921504606847200, k);
+k = 0x1000000000000281;
+assertEquals(1152921504606847700, k);
+k = 0x10000000000002810;
+assertEquals(18446744073709564000, k);
+k = 0x10000000000002810000;
+assertEquals(7.555786372591437e+22, k);
+k = 0xffffffffffffffff;
+assertEquals(18446744073709552000, k);
+k = 0xffffffffffffffffffff;
+assertEquals(1.2089258196146292e+24, k);
diff --git a/test/mjsunit/html-string-funcs.js b/test/mjsunit/html-string-funcs.js
index 213b7f3..b640639 100644
--- a/test/mjsunit/html-string-funcs.js
+++ b/test/mjsunit/html-string-funcs.js
@@ -29,7 +29,7 @@
 // HTML.
 function CheckSimple(f, tag) {
   assertEquals('<' + tag + '>foo</' + tag + '>',
-               "foo"[f]().toLowerCase()); 
+               "foo"[f]().toLowerCase());
 };
 var simple = { big: 'big', blink: 'blink', bold: 'b',
                fixed: 'tt', italics: 'i', small: 'small',
diff --git a/test/mjsunit/in.js b/test/mjsunit/in.js
index f98db42..cca6187 100644
--- a/test/mjsunit/in.js
+++ b/test/mjsunit/in.js
@@ -86,7 +86,7 @@
 assertFalse(0 in a);
 assertTrue(1 in a);
 assertFalse(2 in a);
-assertFalse('0' in a); 
+assertFalse('0' in a);
 assertTrue('1' in a);
 assertFalse('2' in a);
 assertTrue('toString' in a, "toString");
diff --git a/test/mjsunit/indexed-accessors.js b/test/mjsunit/indexed-accessors.js
index 1634857..b69695a 100644
--- a/test/mjsunit/indexed-accessors.js
+++ b/test/mjsunit/indexed-accessors.js
@@ -81,10 +81,11 @@
 expected[0] = 111;
 testArray();
 
-// Using a setter where only a getter is defined throws an exception.
+// Using a setter where only a getter is defined does not throw an exception,
+// unless we are in strict mode.
 var q = {};
 q.__defineGetter__('0', function() { return 42; });
-assertThrows('q[0] = 7');
+assertDoesNotThrow('q[0] = 7');
 
 // Using a getter where only a setter is defined returns undefined.
 var q1 = {};
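(Editor's note, not part of the patch; assumes the mjsunit assertThrows helper.) The new comment above limits the silent failure to sloppy mode; the strict-mode counterpart would be:

(function() {
  "use strict";
  var qs = {};
  qs.__defineGetter__('0', function() { return 42; });
  // Assigning through a missing setter is a TypeError in strict mode.
  assertThrows(function() { qs[0] = 7; }, TypeError);
})();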
diff --git a/test/mjsunit/instanceof.js b/test/mjsunit/instanceof.js
index 01ea426..050ef2d 100644
--- a/test/mjsunit/instanceof.js
+++ b/test/mjsunit/instanceof.js
@@ -60,10 +60,10 @@
 
 function TestExceptions() {
   function F() { }
-  var items = [ 1, new Number(42), 
-                true, 
+  var items = [ 1, new Number(42),
+                true,
                 'string', new String('hest'),
-                {}, [], 
+                {}, [],
                 F, new F(),
                 Object, String ];
 
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index 812ffeb..bead376 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -67,7 +67,7 @@
           valueOf: "not callable",
           toString: "not callable either",
           toISOString: function() { return 42; }};
-assertThrows("d4.toJSON()", TypeError);  // ToPrimitive throws. 
+assertThrows("d4.toJSON()", TypeError);  // ToPrimitive throws.
 
 var d5 = {toJSON: Date.prototype.toJSON,
           valueOf: "not callable",
@@ -196,9 +196,6 @@
 TestInvalid('"Unterminated string\\"');
 TestInvalid('"Unterminated string\\\\\\"');
 
-// JavaScript RegExp literals not valid in JSON.
-TestInvalid('/true/');
-
 // Test bad JSON that would be good JavaScript (ES5).
 TestInvalid("{true:42}");
 TestInvalid("{false:42}");
@@ -259,6 +256,7 @@
              JSON.stringify([1, 2, [3, [4], 5], 6, 7], null));
 assertEquals("[2,4,[6,[8],10],12,14]",
              JSON.stringify([1, 2, [3, [4], 5], 6, 7], DoubleNumbers));
+assertEquals('["a","ab","abc"]', JSON.stringify(["a","ab","abc"]));
 
 var circular = [1, 2, 3];
 circular[2] = circular;
@@ -382,7 +380,7 @@
 reJSON.toJSON = function() { return "has toJSON"; };
 
 assertEquals(
-    '[37,null,1,"foo","37","true",null,"has toJSON",null,"has toJSON"]',
+    '[37,null,1,"foo","37","true",null,"has toJSON",{},"has toJSON"]',
     JSON.stringify([num37, numFoo, numTrue,
                     strFoo, str37, strTrue,
                     func, funcJSON, re, reJSON]));
@@ -397,6 +395,9 @@
 var counter = { get toJSON() { getCount++;
                                return function() { callCount++;
                                                    return 42; }; } };
+
+// RegExps are not callable, so they are stringified as objects.
+assertEquals('{}', JSON.stringify(/regexp/));
 assertEquals('42', JSON.stringify(counter));
 assertEquals(1, getCount);
 assertEquals(1, callCount);
@@ -419,9 +420,9 @@
 // We don't currently allow plain properties called __proto__ in JSON
 // objects in JSON.parse. Instead we read them as we would JS object
 // literals. If we change that, this test should change with it.
-// 
-// Parse a non-object value as __proto__. This must not create a 
-// __proto__ property different from the original, and should not 
+//
+// Parse a non-object value as __proto__. This must not create a
+// __proto__ property different from the original, and should not
 // change the original.
 var o = JSON.parse('{"__proto__":5}');
 assertEquals(Object.prototype, o.__proto__);  // __proto__ isn't changed.
diff --git a/test/mjsunit/keyed-storage-extend.js b/test/mjsunit/keyed-storage-extend.js
index 04d2f04..d7e157b 100644
--- a/test/mjsunit/keyed-storage-extend.js
+++ b/test/mjsunit/keyed-storage-extend.js
@@ -37,7 +37,7 @@
 }
 
 function GrowKeyed(o) {
-  var names = ['a','b','c','d','e','f']; 
+  var names = ['a','b','c','d','e','f'];
   var i = 0;
   o[names[i++]] = i;
   o[names[i++]] = i;
diff --git a/test/mjsunit/keywords-and-reserved_words.js b/test/mjsunit/keywords-and-reserved_words.js
new file mode 100644
index 0000000..ade4329
--- /dev/null
+++ b/test/mjsunit/keywords-and-reserved_words.js
@@ -0,0 +1,177 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test proper handling of keywords, future reserved words and
+// future reserved words in strict mode as specified by 7.6.1 and 7.6.2
+// in ECMA-262.
+
+// This code is based on:
+// http://trac.webkit.org/export/89109/trunk/LayoutTests/fast/js/script-tests/keywords-and-reserved_words.js
+
+function isKeyword(x)
+{
+  try {
+    eval("var " + x + ";");
+  } catch(e) {
+    return true;
+  }
+
+  return false;
+}
+
+function isStrictKeyword(x)
+{
+  try {
+    eval("'use strict'; var "+x+";");
+  } catch(e) {
+    return true;
+  }
+
+  return false;
+}
+
+function classifyIdentifier(x)
+{
+  if (isKeyword(x)) {
+    // All non-strict keywords are also keywords in strict code.
+    if (!isStrictKeyword(x)) {
+      return "ERROR";
+    }
+    return "keyword";
+  }
+
+  // Check for strict mode future reserved words.
+  if (isStrictKeyword(x)) {
+    return "strict";
+  }
+
+  return "identifier";
+}
+
+function testKeyword(word) {
+  // Classify word
+  assertEquals("keyword", classifyIdentifier(word));
+
+  // Simple use of a keyword
+  assertThrows("var " + word + " = 1;", SyntaxError);
+  if (word != "this") {
+    assertThrows("typeof (" + word + ");", SyntaxError);
+  }
+
+  // object literal properties
+  eval("var x = { " + word + " : 42 };");
+  eval("var x = { get " + word + " () {} };");
+  eval("var x = { set " + word + " (value) {} };");
+
+  // object literal with string literal property names
+  eval("var x = { '" + word + "' : 42 };");
+  eval("var x = { get '" + word + "' () { } };");
+  eval("var x = { set '" + word + "' (value) { } };");
+
+  // Function names and arguments
+  assertThrows("function " + word + " () { }", SyntaxError);
+  assertThrows("function foo (" + word + ") {}", SyntaxError);
+  assertThrows("function foo (a, " + word + ") { }", SyntaxError);
+  assertThrows("function foo (" + word + ", a) { }", SyntaxError);
+  assertThrows("function foo (a, " + word + ", b) { }", SyntaxError);
+  assertThrows("var foo = function (" + word + ") { }", SyntaxError);
+
+  // setter parameter
+  assertThrows("var x = { set foo(" + word + ") { } };", SyntaxError);
+}
+
+// Not keywords - these are all just identifiers.
+var identifiers = [
+  "x",            "keyword",
+  "id",           "strict",
+  "identifier",   "use",
+  // The following are reserved in ES3 but not in ES5.
+  "abstract",     "int",
+  "boolean",      "long",
+  "byte",         "native",
+  "char",         "short",
+  "double",       "synchronized",
+  "final",        "throws",
+  "float",        "transient",
+  "goto",         "volatile" ];
+
+for (var i = 0; i < identifiers.length; i++) {
+  assertEquals ("identifier", classifyIdentifier(identifiers[i]));
+}
+
+// 7.6.1.1 Keywords
+var keywords = [
+  "break",        "in",
+  "case",         "instanceof",
+  "catch",        "new",
+  "continue",     "return",
+  "debugger",     "switch",
+  "default",      "this",
+  "delete",       "throw",
+  "do",           "try",
+  "else",         "typeof",
+  "finally",      "var",
+  "for",          "void",
+  "function",     "while",
+  "if",           "with",
+  // In ES5 "const" is a "future reserved word" but we treat it as a keyword.
+  "const" ];
+
+for (var i = 0; i < keywords.length; i++) {
+  testKeyword(keywords[i]);
+}
+
+// 7.6.1.2 Future Reserved Words (without "const")
+var future_reserved_words = [
+  "class",
+  "enum",
+  "export",
+  "extends",
+  "import",
+  "super" ];
+
+for (var i = 0; i < future_reserved_words.length; i++) {
+  testKeyword(future_reserved_words[i]);
+}
+
+// 7.6.1.2 Future Reserved Words, in strict mode only.
+var future_strict_reserved_words = [
+  "implements",
+  "interface",
+  "let",
+  "package",
+  "private",
+  "protected",
+  "public",
+  "static",
+  "yield" ];
+
+for (var i = 0; i < future_strict_reserved_words.length; i++) {
+  assertEquals ("strict", classifyIdentifier(future_strict_reserved_words[i]));
+}
+
+// More strict mode specific tests can be found in mjsunit/strict-mode.js.
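(Editor's note, not part of the patch.) classifyIdentifier above works by probing eval: a word is a keyword if it cannot be declared at all, "strict" if it can be declared only in sloppy code, and an identifier otherwise. A stand-alone probe showing why "let" lands in the "strict" bucket:

function probe(source) {
  try { eval(source); return true; } catch (e) { return false; }
}
var sloppyOk = probe("var let;");                // true: "let" is not reserved in ES5 sloppy code
var strictOk = probe("'use strict'; var let;");  // false: SyntaxError in strict code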
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/logical.js
similarity index 70%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/logical.js
index aa93b25..23c5390 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/logical.js
@@ -25,12 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+var undef = void 0;
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+assertEquals(1, 1 || 0);
+assertEquals(2, 0 || 2);
+assertEquals('foo', 0 || 'foo');
+assertEquals(undef, undef || undef);
+assertEquals('foo', 'foo' || 'bar');
+assertEquals('bar', undef || 'bar');
+assertEquals('bar', undef || 'bar' || 'baz');
+assertEquals('baz', undef || undef || 'baz');
 
-test();
+assertEquals(0, 1 && 0);
+assertEquals(0, 0 && 2);
+assertEquals(0, 0 && 'foo');
+assertEquals(undef, undef && undef);
+assertEquals('bar', 'foo' && 'bar');
+assertEquals(undef, undef && 'bar');
+assertEquals('baz', 'foo' && 'bar' && 'baz');
+assertEquals(undef, 'foo' && undef && 'baz');
+
+assertEquals(0, undef && undef || 0);
+assertEquals('bar', undef && 0 || 'bar');
diff --git a/test/mjsunit/math-abs.js b/test/mjsunit/math-abs.js
index 174622e..2b07954 100644
--- a/test/mjsunit/math-abs.js
+++ b/test/mjsunit/math-abs.js
@@ -35,13 +35,13 @@
 function test() {
   assertEquals(0, Math.abs(0));
   assertEquals(0, Math.abs(zero()));
-  assertEquals(1/0, 1/Math.abs(-0));  // 0 == -0, so we use reciprocals.
+  assertEquals(0, Math.abs(-0));
   assertEquals(Infinity, Math.abs(Infinity));
   assertEquals(Infinity, Math.abs(-Infinity));
-  assertNaN(Math.abs(NaN));
-  assertNaN(Math.abs(-NaN));
-  assertEquals('Infinity', Math.abs(Number('+Infinity').toString()));
-  assertEquals('Infinity', Math.abs(Number('-Infinity').toString()));
+  assertEquals(NaN, Math.abs(NaN));
+  assertEquals(NaN, Math.abs(-NaN));
+  assertEquals('Infinity', Math.abs(Number('+Infinity')).toString());
+  assertEquals('Infinity', Math.abs(Number('-Infinity')).toString());
   assertEquals('NaN', Math.abs(NaN).toString());
   assertEquals('NaN', Math.abs(-NaN).toString());
 
@@ -85,8 +85,8 @@
   assertEquals(two_31 - 1, Math.abs(two_31 - 1));
   assertEquals(two_31 - 1, Math.abs(-two_31 + 1));
 
-  assertNaN(Math.abs("not a number"));
-  assertNaN(Math.abs([1, 2, 3]));
+  assertEquals(NaN, Math.abs("not a number"));
+  assertEquals(NaN, Math.abs([1, 2, 3]));
   assertEquals(42, Math.abs({valueOf: function() { return 42; } }));
   assertEquals(42, Math.abs({valueOf: function() { return -42; } }));
 }
diff --git a/test/mjsunit/math-floor.js b/test/mjsunit/math-floor.js
index 0d1c0ac..f211ce2 100644
--- a/test/mjsunit/math-floor.js
+++ b/test/mjsunit/math-floor.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --max-new-space-size=256
+// Flags: --max-new-space-size=256 --allow-natives-syntax
+
+var test_id = 0;
+
+function testFloor(expect, input) {
+  var test = new Function('n',
+                          '"' + (test_id++) + '";return Math.floor(n)');
+  assertEquals(expect, test(input));
+  assertEquals(expect, test(input));
+  assertEquals(expect, test(input));
+  %OptimizeFunctionOnNextCall(test);
+  assertEquals(expect, test(input));
+}
 
 function zero() {
   var x = 0.5;
@@ -33,82 +45,95 @@
 }
 
 function test() {
-  assertEquals(0, Math.floor(0));
-  assertEquals(0, Math.floor(zero()));
-  assertEquals(1/-0, 1/Math.floor(-0));  // 0 == -0, so we use reciprocals.
-  assertEquals(Infinity, Math.floor(Infinity));
-  assertEquals(-Infinity, Math.floor(-Infinity));
-  assertNaN(Math.floor(NaN));
+  testFloor(0, 0);
+  testFloor(0, zero());
+  testFloor(-0, -0);
+  testFloor(Infinity, Infinity);
+  testFloor(-Infinity, -Infinity);
+  testFloor(NaN, NaN);
 
-  assertEquals(0, Math.floor(0.1));
-  assertEquals(0, Math.floor(0.5));
-  assertEquals(0, Math.floor(0.7));
-  assertEquals(-1, Math.floor(-0.1));
-  assertEquals(-1, Math.floor(-0.5));
-  assertEquals(-1, Math.floor(-0.7));
-  assertEquals(1, Math.floor(1));
-  assertEquals(1, Math.floor(1.1));
-  assertEquals(1, Math.floor(1.5));
-  assertEquals(1, Math.floor(1.7));
-  assertEquals(-1, Math.floor(-1));
-  assertEquals(-2, Math.floor(-1.1));
-  assertEquals(-2, Math.floor(-1.5));
-  assertEquals(-2, Math.floor(-1.7));
+  // Ensure that a negative zero coming from Math.floor is properly handled
+  // by other operations.
+  function ifloor(x) {
+    return 1 / Math.floor(x);
+  }
+  assertEquals(-Infinity, ifloor(-0));
+  assertEquals(-Infinity, ifloor(-0));
+  assertEquals(-Infinity, ifloor(-0));
+  %OptimizeFunctionOnNextCall(ifloor);
+  assertEquals(-Infinity, ifloor(-0));
 
-  assertEquals(0, Math.floor(Number.MIN_VALUE));
-  assertEquals(-1, Math.floor(-Number.MIN_VALUE));
-  assertEquals(Number.MAX_VALUE, Math.floor(Number.MAX_VALUE));
-  assertEquals(-Number.MAX_VALUE, Math.floor(-Number.MAX_VALUE));
-  assertEquals(Infinity, Math.floor(Infinity));
-  assertEquals(-Infinity, Math.floor(-Infinity));
+  testFloor(0, 0.1);
+  testFloor(0, 0.49999999999999994);
+  testFloor(0, 0.5);
+  testFloor(0, 0.7);
+  testFloor(-1, -0.1);
+  testFloor(-1, -0.49999999999999994);
+  testFloor(-1, -0.5);
+  testFloor(-1, -0.7);
+  testFloor(1, 1);
+  testFloor(1, 1.1);
+  testFloor(1, 1.5);
+  testFloor(1, 1.7);
+  testFloor(-1, -1);
+  testFloor(-2, -1.1);
+  testFloor(-2, -1.5);
+  testFloor(-2, -1.7);
+
+  testFloor(0, Number.MIN_VALUE);
+  testFloor(-1, -Number.MIN_VALUE);
+  testFloor(Number.MAX_VALUE, Number.MAX_VALUE);
+  testFloor(-Number.MAX_VALUE, -Number.MAX_VALUE);
+  testFloor(Infinity, Infinity);
+  testFloor(-Infinity, -Infinity);
 
   // 2^30 is a smi boundary.
   var two_30 = 1 << 30;
 
-  assertEquals(two_30, Math.floor(two_30));
-  assertEquals(two_30, Math.floor(two_30 + 0.1));
-  assertEquals(two_30, Math.floor(two_30 + 0.5));
-  assertEquals(two_30, Math.floor(two_30 + 0.7));
+  testFloor(two_30, two_30);
+  testFloor(two_30, two_30 + 0.1);
+  testFloor(two_30, two_30 + 0.5);
+  testFloor(two_30, two_30 + 0.7);
 
-  assertEquals(two_30 - 1, Math.floor(two_30 - 1));
-  assertEquals(two_30 - 1, Math.floor(two_30 - 1 + 0.1));
-  assertEquals(two_30 - 1, Math.floor(two_30 - 1 + 0.5));
-  assertEquals(two_30 - 1, Math.floor(two_30 - 1 + 0.7));
+  testFloor(two_30 - 1, two_30 - 1);
+  testFloor(two_30 - 1, two_30 - 1 + 0.1);
+  testFloor(two_30 - 1, two_30 - 1 + 0.5);
+  testFloor(two_30 - 1, two_30 - 1 + 0.7);
 
-  assertEquals(-two_30, Math.floor(-two_30));
-  assertEquals(-two_30, Math.floor(-two_30 + 0.1));
-  assertEquals(-two_30, Math.floor(-two_30 + 0.5));
-  assertEquals(-two_30, Math.floor(-two_30 + 0.7));
+  testFloor(-two_30, -two_30);
+  testFloor(-two_30, -two_30 + 0.1);
+  testFloor(-two_30, -two_30 + 0.5);
+  testFloor(-two_30, -two_30 + 0.7);
 
-  assertEquals(-two_30 + 1, Math.floor(-two_30 + 1));
-  assertEquals(-two_30 + 1, Math.floor(-two_30 + 1 + 0.1));
-  assertEquals(-two_30 + 1, Math.floor(-two_30 + 1 + 0.5));
-  assertEquals(-two_30 + 1, Math.floor(-two_30 + 1 + 0.7));
+  testFloor(-two_30 + 1, -two_30 + 1);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.1);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.5);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.7);
 
   // 2^52 is a precision boundary.
   var two_52 = (1 << 30) * (1 << 22);
 
-  assertEquals(two_52, Math.floor(two_52));
-  assertEquals(two_52, Math.floor(two_52 + 0.1));
+  testFloor(two_52, two_52);
+  testFloor(two_52, two_52 + 0.1);
   assertEquals(two_52, two_52 + 0.5);
-  assertEquals(two_52, Math.floor(two_52 + 0.5));
+  testFloor(two_52, two_52 + 0.5);
   assertEquals(two_52 + 1, two_52 + 0.7);
-  assertEquals(two_52 + 1, Math.floor(two_52 + 0.7));
+  testFloor(two_52 + 1, two_52 + 0.7);
 
-  assertEquals(two_52 - 1, Math.floor(two_52 - 1));
-  assertEquals(two_52 - 1, Math.floor(two_52 - 1 + 0.1));
-  assertEquals(two_52 - 1, Math.floor(two_52 - 1 + 0.5));
-  assertEquals(two_52 - 1, Math.floor(two_52 - 1 + 0.7));
+  testFloor(two_52 - 1, two_52 - 1);
+  testFloor(two_52 - 1, two_52 - 1 + 0.1);
+  testFloor(two_52 - 1, two_52 - 1 + 0.5);
+  testFloor(two_52 - 1, two_52 - 1 + 0.7);
 
-  assertEquals(-two_52, Math.floor(-two_52));
-  assertEquals(-two_52, Math.floor(-two_52 + 0.1));
-  assertEquals(-two_52, Math.floor(-two_52 + 0.5));
-  assertEquals(-two_52, Math.floor(-two_52 + 0.7));
+  testFloor(-two_52, -two_52);
+  testFloor(-two_52, -two_52 + 0.1);
+  testFloor(-two_52, -two_52 + 0.5);
+  testFloor(-two_52, -two_52 + 0.7);
 
-  assertEquals(-two_52 + 1, Math.floor(-two_52 + 1));
-  assertEquals(-two_52 + 1, Math.floor(-two_52 + 1 + 0.1));
-  assertEquals(-two_52 + 1, Math.floor(-two_52 + 1 + 0.5));
-  assertEquals(-two_52 + 1, Math.floor(-two_52 + 1 + 0.7));
+  testFloor(-two_52 + 1, -two_52 + 1);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.1);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.5);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.7);
 }
 
 
@@ -116,3 +141,19 @@
 for (var i = 0; i < 500; i++) {
   test();
 }
+
+
+// Regression test for a bug where a negative zero coming from Math.floor
+// was not properly handled by other operations.
+function floorsum(i, n) {
+  var ret = Math.floor(n);
+  while (--i > 0) {
+    ret += Math.floor(n);
+  }
+  return ret;
+}
+assertEquals(-0, floorsum(1, -0));
+%OptimizeFunctionOnNextCall(floorsum);
+// The optimized function will deopt.  Run it with enough iterations to try
+// to optimize via OSR (triggering the bug).
+assertEquals(-0, floorsum(100000, -0));
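(Editor's note, not part of the patch.) The ifloor and floorsum checks above observe the sign of zero indirectly, because -0 === 0; division and addition are the usual probes:

var negZero = Math.floor(-0);
assertEquals(-Infinity, 1 / negZero);              // the zero kept its sign
assertEquals(Infinity, 1 / Math.floor(0));
assertEquals(-Infinity, 1 / (negZero + negZero));  // (-0) + (-0) is -0
assertEquals(Infinity, 1 / (negZero + 0));         // but (-0) + (+0) is +0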
diff --git a/test/mjsunit/math-min-max.js b/test/mjsunit/math-min-max.js
index 13d54a3..0833c5c 100644
--- a/test/mjsunit/math-min-max.js
+++ b/test/mjsunit/math-min-max.js
@@ -76,9 +76,9 @@
 assertEquals(-1, Math.min(-1, +0, -0));
 assertEquals(-1, Math.min(+0, -1, -0));
 assertEquals(-1, Math.min(-0, -1, +0));
-assertNaN(Math.min('oxen'));
-assertNaN(Math.min('oxen', 1));
-assertNaN(Math.min(1, 'oxen'));
+assertEquals(NaN, Math.min('oxen'));
+assertEquals(NaN, Math.min('oxen', 1));
+assertEquals(NaN, Math.min(1, 'oxen'));
 
 
 // Test Math.max().
@@ -109,9 +109,9 @@
 assertEquals(1, Math.max(+1, +0, -0));
 assertEquals(1, Math.max(+0, +1, -0));
 assertEquals(1, Math.max(-0, +1, +0));
-assertNaN(Math.max('oxen'));
-assertNaN(Math.max('oxen', 1));
-assertNaN(Math.max(1, 'oxen'));
+assertEquals(NaN, Math.max('oxen'));
+assertEquals(NaN, Math.max('oxen', 1));
+assertEquals(NaN, Math.max(1, 'oxen'));
 
 assertEquals(Infinity, 1/Math.max(ZERO, -0));
 assertEquals(Infinity, 1/Math.max(-0, ZERO));
diff --git a/test/mjsunit/math-round.js b/test/mjsunit/math-round.js
index 3b06088..102c970 100644
--- a/test/mjsunit/math-round.js
+++ b/test/mjsunit/math-round.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,77 +25,150 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-assertEquals(0, Math.round(0));
-assertEquals(-0, Math.round(-0));
-assertEquals(Infinity, Math.round(Infinity));
-assertEquals(-Infinity, Math.round(-Infinity));
-assertNaN(Math.round(NaN));
+// Flags: --allow-natives-syntax
 
-assertEquals(1, Math.round(0.5));
-assertEquals(1, Math.round(0.7));
-assertEquals(1, Math.round(1));
-assertEquals(1, Math.round(1.1));
-assertEquals(1, Math.round(1.49999));
-assertEquals(1/-0, 1/Math.round(-0.5));  // Test for -0 result.
-assertEquals(-1, Math.round(-0.5000000000000001));
-assertEquals(-1, Math.round(-0.7));
-assertEquals(-1, Math.round(-1));
-assertEquals(-1, Math.round(-1.1));
-assertEquals(-1, Math.round(-1.49999));
-assertEquals(-1, Math.round(-1.5));
+var test_id = 0;
+function testRound(expect, input) {
+  // Make source code different on each invocation to make
+  // sure it gets optimized each time.
+  var doRound = new Function('input',
+                             '"' + (test_id++) + '";return Math.round(input)');
+  assertEquals(expect, doRound(input));
+  assertEquals(expect, doRound(input));
+  assertEquals(expect, doRound(input));
+  %OptimizeFunctionOnNextCall(doRound);
+  assertEquals(expect, doRound(input));
+}
 
-assertEquals(9007199254740990, Math.round(9007199254740990));
-assertEquals(9007199254740991, Math.round(9007199254740991));
-assertEquals(-9007199254740990, Math.round(-9007199254740990));
-assertEquals(-9007199254740991, Math.round(-9007199254740991));
-assertEquals(Number.MAX_VALUE, Math.round(Number.MAX_VALUE));
-assertEquals(-Number.MAX_VALUE, Math.round(-Number.MAX_VALUE));
+testRound(0, 0);
+testRound(-0, -0);
+testRound(Infinity, Infinity);
+testRound(-Infinity, -Infinity);
+testRound(NaN, NaN);
 
-assertEquals(536870911, Math.round(536870910.5));
-assertEquals(536870911, Math.round(536870911));
-assertEquals(536870911, Math.round(536870911.4));
-assertEquals(536870912, Math.round(536870911.5));
-assertEquals(536870912, Math.round(536870912));
-assertEquals(536870912, Math.round(536870912.4));
-assertEquals(536870913, Math.round(536870912.5));
-assertEquals(536870913, Math.round(536870913));
-assertEquals(536870913, Math.round(536870913.4));
-assertEquals(1073741823, Math.round(1073741822.5));
-assertEquals(1073741823, Math.round(1073741823));
-assertEquals(1073741823, Math.round(1073741823.4));
-assertEquals(1073741824, Math.round(1073741823.5));
-assertEquals(1073741824, Math.round(1073741824));
-assertEquals(1073741824, Math.round(1073741824.4));
-assertEquals(1073741825, Math.round(1073741824.5));
-assertEquals(2147483647, Math.round(2147483646.5));
-assertEquals(2147483647, Math.round(2147483647));
-assertEquals(2147483647, Math.round(2147483647.4));
-assertEquals(2147483648, Math.round(2147483647.5));
-assertEquals(2147483648, Math.round(2147483648));
-assertEquals(2147483648, Math.round(2147483648.4));
-assertEquals(2147483649, Math.round(2147483648.5));
+// Regression test for a bug where a negative zero coming from Math.round
+// was not properly handled by other operations.
+function roundsum(i, n) {
+  var ret = Math.round(n);
+  while (--i > 0) {
+    ret += Math.round(n);
+  }
+  return ret;
+}
+assertEquals(-0, roundsum(1, -0));
+%OptimizeFunctionOnNextCall(roundsum);
+// The optimized function will deopt.  Run it with enough iterations to try
+// to optimize via OSR (triggering the bug).
+assertEquals(-0, roundsum(100000, -0));
+
+testRound(1, 0.5);
+testRound(1, 0.7);
+testRound(1, 1);
+testRound(1, 1.1);
+testRound(1, 1.49999);
+testRound(-0, -0.5);
+testRound(-1, -0.5000000000000001);
+testRound(-1, -0.7);
+testRound(-1, -1);
+testRound(-1, -1.1);
+testRound(-1, -1.49999);
+testRound(-1, -1.5);
+
+testRound(9007199254740990, 9007199254740990);
+testRound(9007199254740991, 9007199254740991);
+testRound(-9007199254740990, -9007199254740990);
+testRound(-9007199254740991, -9007199254740991);
+testRound(Number.MAX_VALUE, Number.MAX_VALUE);
+testRound(-Number.MAX_VALUE, -Number.MAX_VALUE);
+
+testRound(536870911, 536870910.5);
+testRound(536870911, 536870911);
+testRound(536870911, 536870911.4);
+testRound(536870912, 536870911.5);
+testRound(536870912, 536870912);
+testRound(536870912, 536870912.4);
+testRound(536870913, 536870912.5);
+testRound(536870913, 536870913);
+testRound(536870913, 536870913.4);
+testRound(1073741823, 1073741822.5);
+testRound(1073741823, 1073741823);
+testRound(1073741823, 1073741823.4);
+testRound(1073741824, 1073741823.5);
+testRound(1073741824, 1073741824);
+testRound(1073741824, 1073741824.4);
+testRound(1073741825, 1073741824.5);
+testRound(2147483647, 2147483646.5);
+testRound(2147483647, 2147483647);
+testRound(2147483647, 2147483647.4);
+testRound(2147483648, 2147483647.5);
+testRound(2147483648, 2147483648);
+testRound(2147483648, 2147483648.4);
+testRound(2147483649, 2147483648.5);
 
 // Tests based on WebKit LayoutTests
 
-assertEquals(0, Math.round(0.4));
-assertEquals(-0, Math.round(-0.4));
-assertEquals(-0, Math.round(-0.5));
-assertEquals(1, Math.round(0.6));
-assertEquals(-1, Math.round(-0.6));
-assertEquals(2, Math.round(1.5));
-assertEquals(2, Math.round(1.6));
-assertEquals(-2, Math.round(-1.6));
-assertEquals(8640000000000000, Math.round(8640000000000000));
-assertEquals(8640000000000001, Math.round(8640000000000001));
-assertEquals(8640000000000002, Math.round(8640000000000002));
-assertEquals(9007199254740990, Math.round(9007199254740990));
-assertEquals(9007199254740991, Math.round(9007199254740991));
-assertEquals(1.7976931348623157e+308, Math.round(1.7976931348623157e+308));
-assertEquals(-8640000000000000, Math.round(-8640000000000000));
-assertEquals(-8640000000000001, Math.round(-8640000000000001));
-assertEquals(-8640000000000002, Math.round(-8640000000000002));
-assertEquals(-9007199254740990, Math.round(-9007199254740990));
-assertEquals(-9007199254740991, Math.round(-9007199254740991));
-assertEquals(-1.7976931348623157e+308, Math.round(-1.7976931348623157e+308));
-assertEquals(Infinity, Math.round(Infinity));
-assertEquals(-Infinity, Math.round(-Infinity));
+testRound(0, 0.4);
+testRound(-0, -0.4);
+testRound(-0, -0.5);
+testRound(1, 0.6);
+testRound(-1, -0.6);
+testRound(2, 1.5);
+testRound(2, 1.6);
+testRound(-2, -1.6);
+testRound(8640000000000000, 8640000000000000);
+testRound(8640000000000001, 8640000000000001);
+testRound(8640000000000002, 8640000000000002);
+testRound(9007199254740990, 9007199254740990);
+testRound(9007199254740991, 9007199254740991);
+testRound(1.7976931348623157e+308, 1.7976931348623157e+308);
+testRound(-8640000000000000, -8640000000000000);
+testRound(-8640000000000001, -8640000000000001);
+testRound(-8640000000000002, -8640000000000002);
+testRound(-9007199254740990, -9007199254740990);
+testRound(-9007199254740991, -9007199254740991);
+testRound(-1.7976931348623157e+308, -1.7976931348623157e+308);
+testRound(Infinity, Infinity);
+testRound(-Infinity, -Infinity);
+
+// Some special double number cases.
+var ulp = Math.pow(2, -1022 - 52);
+var max_denormal = (Math.pow(2, 52) - 1) * ulp;
+var min_normal = Math.pow(2, -1022);
+var max_fraction = Math.pow(2, 52) - 0.5;
+var min_nonfraction = Math.pow(2, 52);
+var max_non_infinite = Number.MAX_VALUE;
+
+var max_smi31 = Math.pow(2,30) - 1;
+var min_smi31 = -Math.pow(2,30);
+var max_smi32 = Math.pow(2,31) - 1;
+var min_smi32 = -Math.pow(2,31);
+
+testRound(0, ulp);
+testRound(0, max_denormal);
+testRound(0, min_normal);
+testRound(0, 0.49999999999999994);
+testRound(1, 0.5);
+testRound(Math.pow(2,52), max_fraction);
+testRound(min_nonfraction, min_nonfraction);
+testRound(max_non_infinite, max_non_infinite);
+
+testRound(max_smi31, max_smi31 - 0.5);
+testRound(max_smi31 + 1, max_smi31 + 0.5);
+testRound(max_smi32, max_smi32 - 0.5);
+testRound(max_smi32 + 1, max_smi32 + 0.5);
+
+testRound(-0, -ulp);
+testRound(-0, -max_denormal);
+testRound(-0, -min_normal);
+testRound(-0, -0.49999999999999994);
+testRound(-0, -0.5);
+testRound(-Math.pow(2,52)+1, -max_fraction);
+testRound(-min_nonfraction, -min_nonfraction);
+testRound(-max_non_infinite, -max_non_infinite);
+
+testRound(min_smi31, min_smi31 - 0.5);
+testRound(min_smi31 + 1, min_smi31 + 0.5);
+testRound(min_smi32, min_smi32 - 0.5);
+testRound(min_smi32 + 1, min_smi32 + 0.5);
+
+
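(Editor's note, not part of the patch.) The testRound expectations encode the spec rule: round to the nearest integer, ties toward +Infinity, with results in (-0.5, -0] coming out as -0. The boundary value 0.49999999999999994 appears because the naive floor(x + 0.5) shortcut gets it wrong:

assertEquals(1, Math.round(0.5));                        // ties go up
assertEquals(-0, Math.round(-0.5));                      // ...which makes this -0, not -1
assertEquals(-1, Math.round(-1.5));
assertEquals(-Infinity, 1 / Math.round(-0.4));           // sign of zero is preserved
assertEquals(0, Math.round(0.49999999999999994));        // largest double below 0.5
assertEquals(1, Math.floor(0.49999999999999994 + 0.5));  // the shortcut: the sum rounds up to 1.0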
diff --git a/test/mjsunit/mirror-array.js b/test/mjsunit/mirror-array.js
index eb8f72a..92e3913 100644
--- a/test/mjsunit/mirror-array.js
+++ b/test/mjsunit/mirror-array.js
@@ -64,7 +64,7 @@
   assertTrue(mirror.protoObject() instanceof debug.Mirror, 'Unexpected mirror hierachy');
   assertTrue(mirror.prototypeObject() instanceof debug.Mirror, 'Unexpected mirror hierachy');
   assertEquals(mirror.length(), a.length, "Length mismatch");
-  
+
   var indexedProperties = mirror.indexedPropertiesFromRange();
   assertEquals(indexedProperties.length, a.length);
   for (var i = 0; i < indexedProperties.length; i++) {
@@ -110,7 +110,7 @@
       var found = false;
       for (var j = 0; j < fromJSON.properties.length; j++) {
         if (names[i] == fromJSON.properties[j].name) {
-          found = true; 
+          found = true;
         }
       }
       assertTrue(found, names[i])
diff --git a/test/mjsunit/mirror-function.js b/test/mjsunit/mirror-function.js
index 58aee3d..cda815d 100644
--- a/test/mjsunit/mirror-function.js
+++ b/test/mjsunit/mirror-function.js
@@ -65,7 +65,7 @@
   assertTrue(mirror.constructorFunction() instanceof debug.ObjectMirror);
   assertTrue(mirror.protoObject() instanceof debug.Mirror);
   assertTrue(mirror.prototypeObject() instanceof debug.Mirror);
-  
+
   // Test text representation
   assertEquals(f.toString(), mirror.toText());
 
diff --git a/test/mjsunit/mirror-number.js b/test/mjsunit/mirror-number.js
index 2db5df4..fc71c12 100644
--- a/test/mjsunit/mirror-number.js
+++ b/test/mjsunit/mirror-number.js
@@ -50,10 +50,10 @@
   // Parse JSON representation and check.
   var fromJSON = eval('(' + json + ')');
   assertEquals('number', fromJSON.type);
-  if (!isNaN(n)) {
+  if (isFinite(n)) {
     assertEquals(n, fromJSON.value);
   } else {
-    // NaN values are encoded as strings.
+    // NaN and Infinity values are encoded as strings.
     assertTrue(typeof fromJSON.value == 'string');
     if (n === Infinity) {
       assertEquals('Infinity', fromJSON.value);
diff --git a/test/mjsunit/mirror-object.js b/test/mjsunit/mirror-object.js
index 1888554..d4d228c 100644
--- a/test/mjsunit/mirror-object.js
+++ b/test/mjsunit/mirror-object.js
@@ -38,7 +38,7 @@
 
 MirrorRefCache.prototype.lookup = function(handle) {
   return this.refs_[handle];
-}
+};
 
 function testObjectMirror(obj, cls_name, ctor_name, hasSpecialProperties) {
   // Create mirror and JSON representation.
@@ -66,7 +66,7 @@
   assertFalse(mirror.hasIndexedInterceptor(), 'No indexed interceptor expected');
 
   var names = mirror.propertyNames();
-  var properties = mirror.properties()
+  var properties = mirror.properties();
   assertEquals(names.length, properties.length);
   for (var i = 0; i < properties.length; i++) {
     assertTrue(properties[i] instanceof debug.Mirror, 'Unexpected mirror hierachy');
@@ -130,15 +130,20 @@
           assertTrue(typeof(fromJSON.properties[i].attributes) === 'undefined', 'Unexpected serialized attributes');
         }
 
-        // Lookup the serialized object from the handle reference.        
+        // Lookup the serialized object from the handle reference.
         var o = refs.lookup(fromJSON.properties[i].ref);
         assertTrue(o != void 0, 'Referenced object is not serialized');
 
         assertEquals(properties[i].value().type(), o.type, 'Unexpected serialized property type for ' + name);
         if (properties[i].value().isPrimitive()) {
-          // Special check for NaN as NaN == NaN is false.
-          if (properties[i].value().isNumber() && isNaN(properties[i].value().value())) {
-            assertEquals('NaN', o.value, 'Unexpected serialized property value for ' + name);
+          if (properties[i].value().type() == "null" ||
+              properties[i].value().type() == "undefined") {
+            // Null and undefined has no value property.
+            assertFalse("value" in o, 'Unexpected value property for ' + name);
+          } else if (properties[i].value().type() == "number" &&
+                     !isFinite(properties[i].value().value())) {
+            assertEquals(String(properties[i].value().value()), o.value,
+                         'Unexpected serialized property value for ' + name);
           } else {
             assertEquals(properties[i].value().value(), o.value, 'Unexpected serialized property value for ' + name);
           }
diff --git a/test/mjsunit/mirror-script.js b/test/mjsunit/mirror-script.js
index 7156170..1d64ac2 100644
--- a/test/mjsunit/mirror-script.js
+++ b/test/mjsunit/mirror-script.js
@@ -62,7 +62,7 @@
   if (eval_from_line) {
     assertEquals(eval_from_line,  mirror.evalFromLocation().line);
   }
-  
+
   // Parse JSON representation and check.
   var fromJSON = JSON.parse(json);
   assertEquals('script', fromJSON.type);
diff --git a/test/mjsunit/mirror-unresolved-function.js b/test/mjsunit/mirror-unresolved-function.js
index c1fe4a3..46f22a0 100644
--- a/test/mjsunit/mirror-unresolved-function.js
+++ b/test/mjsunit/mirror-unresolved-function.js
@@ -64,7 +64,7 @@
 assertEquals('undefined', mirror.constructorFunction().type());
 assertEquals('undefined', mirror.protoObject().type());
 assertEquals('undefined', mirror.prototypeObject().type());
-  
+
 // Parse JSON representation of unresolved functions and check.
 var fromJSON = eval('(' + json + ')');
 assertEquals('function', fromJSON.type, 'Unexpected mirror type in JSON');
diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js
index 436bdc8..faa5a43 100644
--- a/test/mjsunit/mjsunit.js
+++ b/test/mjsunit/mjsunit.js
@@ -31,234 +31,325 @@
   this.stack = new Error("").stack;
 }
 
-MjsUnitAssertionError.prototype.toString = function () {
-  return this.message;
-}
-
 /*
  * This file is included in all mini jsunit test cases.  The test
  * framework expects lines that signal failed tests to start with
  * the f-word and ignore all other lines.
  */
 
-function MjsUnitToString(value) {
-  switch (typeof value) {
-    case "string":
-      return JSON.stringify(value);
-    case "number":
-      if (value === 0 && (1 / value) < 0) return "-0";
-    case "boolean":
-    case "null":
-    case "undefined":
-    case "function":
-      return String(value);
-    case "object":
-      if (value === null) return "null";
-      var clazz = Object.prototype.toString.call(value);
-      clazz = clazz.substring(8, clazz.length - 1);
-      switch (clazz) {
+
+MjsUnitAssertionError.prototype.toString = function () {
+  return this.message;
+};
+
+
+// Expected and found values are the same objects, or the same primitive
+// values.
+// For known primitive values, please use assertEquals.
+var assertSame;
+
+// Expected and found values are identical primitive values or functions
+// or similarly structured objects (checking internal properties
+// of, e.g., Number and Date objects, the elements of arrays
+// and the properties of non-Array objects).
+var assertEquals;
+
+// The found object is an Array with the same length and elements
+// as the expected object. The expected object doesn't need to be an Array,
+// as long as it's "array-ish".
+var assertArrayEquals;
+
+// The found object must have the same enumerable properties as the
+// expected object. The type of object isn't checked.
+var assertPropertiesEqual;
+
+// Assert that the string conversion of the found value is equal to
+// the expected string. Only kept for backwards compatibility; please
+// check the real structure of the found value.
+var assertToStringEquals;
+
+// Checks that the found value is true. Use with boolean expressions
+// for tests that don't have their own assertXXX function.
+var assertTrue;
+
+// Checks that the found value is false.
+var assertFalse;
+
+// Checks that the found value is null. Kept for historical compatibility,
+// please just use assertEquals(null, expected).
+var assertNull;
+
+// Checks that the found value is *not* null.
+var assertNotNull;
+
+// Assert that the passed function or eval code throws an exception.
+// The optional second argument is an exception constructor that the
+// thrown exception is checked against with "instanceof".
+// The optional third argument is a message type string that is compared
+// to the type property on the thrown exception.
+var assertThrows;
+
+// Assert that the passed function or eval code does not throw an exception.
+var assertDoesNotThrow;
+
+// Asserts that the found value is an instance of the constructor passed
+// as the second argument.
+var assertInstanceof;
+
+// Assert that this code is never executed (i.e., always fails if executed).
+var assertUnreachable;
+
+(function () {  // Scope for utility functions.
+
+  function classOf(object) {
+    // Argument must not be null or undefined.
+    var string = Object.prototype.toString.call(object);
+    // String has format [object <ClassName>].
+    return string.substring(8, string.length - 1);
+  }
+
+
+  function PrettyPrint(value) {
+    switch (typeof value) {
+      case "string":
+        return JSON.stringify(value);
+      case "number":
+        if (value === 0 && (1 / value) < 0) return "-0";
+        // FALLTHROUGH.
+      case "boolean":
+      case "undefined":
+      case "function":
+        return String(value);
+      case "object":
+        if (value === null) return "null";
+        var objectClass = classOf(value);
+        switch (objectClass) {
         case "Number":
         case "String":
         case "Boolean":
         case "Date":
-          return clazz + "(" + MjsUnitToString(value.valueOf()) + ")";
+          return objectClass + "(" + PrettyPrint(value.valueOf()) + ")";
         case "RegExp":
           return value.toString();
         case "Array":
-          return "[" + value.map(MjsUnitArrayElementToString).join(",") + "]";
+          return "[" + value.map(PrettyPrintArrayElement).join(",") + "]";
         case "Object":
           break;
         default:
-          return clazz + "()";
-      }
-      // [[Class]] is "Object".
-      var constructor = value.constructor.name;
-      if (name) return name + "()";
-      return "Object()";
-    default:
-      return "-- unknown value --";
-  }
-}
-
-
-function MjsUnitArrayElementToString(value, index, array) {
-  if (value === undefined && !(index in array)) return "";
-  return MjsUnitToString(value);
-}
-
-
-function fail(expected, found, name_opt) {
-  var message = "Fail" + "ure";
-  if (name_opt) {
-    // Fix this when we ditch the old test runner.
-    message += " (" + name_opt + ")";
+          return objectClass + "()";
+        }
+        // [[Class]] is "Object".
+        var name = value.constructor.name;
+        if (name) return name + "()";
+        return "Object()";
+      default:
+        return "-- unknown value --";
+    }
   }
 
-  message += ": expected <" + MjsUnitToString(expected) +
-      "> found <" + MjsUnitToString(found) + ">";
-  throw new MjsUnitAssertionError(message);
-}
+
+  function PrettyPrintArrayElement(value, index, array) {
+    if (value === undefined && !(index in array)) return "";
+    return PrettyPrint(value);
+  }
 
 
-function deepObjectEquals(a, b) {
-  var aProps = [];
-  for (var key in a)
-    aProps.push(key);
-  var bProps = [];
-  for (var key in b)
-    bProps.push(key);
-  aProps.sort();
-  bProps.sort();
-  if (!deepEquals(aProps, bProps))
-    return false;
-  for (var i = 0; i < aProps.length; i++) {
-    if (!deepEquals(a[aProps[i]], b[aProps[i]]))
+  function fail(expectedText, found, name_opt) {
+    var message = "Fail" + "ure";
+    if (name_opt) {
+      // Fix this when we ditch the old test runner.
+      message += " (" + name_opt + ")";
+    }
+
+    message += ": expected <" + expectedText +
+        "> found <" + PrettyPrint(found) + ">";
+    throw new MjsUnitAssertionError(message);
+  }
+
+
+  function deepObjectEquals(a, b) {
+    var aProps = Object.keys(a);
+    aProps.sort();
+    var bProps = Object.keys(b);
+    bProps.sort();
+    if (!deepEquals(aProps, bProps)) {
       return false;
-  }
-  return true;
-}
-
-
-function deepEquals(a, b) {
-  if (a == b) {
-    // Check for -0.
-    if (a === 0 && b === 0) return (1 / a) === (1 / b);
-    return true;
-  }
-  if (typeof a == "number" && typeof b == "number" && isNaN(a) && isNaN(b)) {
-    return true;
-  }
-  if (a == null || b == null) return false;
-  if (a.constructor === RegExp || b.constructor === RegExp) {
-    return (a.constructor === b.constructor) && (a.toString() === b.toString());
-  }
-  if ((typeof a) !== 'object' || (typeof b) !== 'object' ||
-      (a === null) || (b === null))
-    return false;
-  if (a.constructor === Array) {
-    if (b.constructor !== Array)
-      return false;
-    if (a.length != b.length)
-      return false;
-    for (var i = 0; i < a.length; i++) {
-      if (i in a) {
-        if (!(i in b) || !(deepEquals(a[i], b[i])))
-          return false;
-      } else if (i in b) {
+    }
+    for (var i = 0; i < aProps.length; i++) {
+      if (!deepEquals(a[aProps[i]], b[aProps[i]])) {
         return false;
       }
     }
     return true;
-  } else {
+  }
+
+
+  function deepEquals(a, b) {
+    if (a === b) {
+      // Check for -0.
+      if (a === 0) return (1 / a) === (1 / b);
+      return true;
+    }
+    if (typeof a != typeof b) return false;
+    if (typeof a == "number") return isNaN(a) && isNaN(b);
+    if (typeof a !== "object" && typeof a !== "function") return false;
+    // Neither a nor b is primitive.
+    var objectClass = classOf(a);
+    if (objectClass !== classOf(b)) return false;
+    if (objectClass === "RegExp") {
+      // For RegExp, just compare pattern and flags using its toString.
+      return (a.toString() === b.toString());
+    }
+    // Functions are only identical to themselves.
+    if (objectClass === "Function") return false;
+    if (objectClass === "Array") {
+      var elementCount = 0;
+      if (a.length != b.length) {
+        return false;
+      }
+      for (var i = 0; i < a.length; i++) {
+        if (!deepEquals(a[i], b[i])) return false;
+      }
+      return true;
+    }
+    if (objectClass == "String" || objectClass == "Number" ||
+      objectClass == "Boolean" || objectClass == "Date") {
+      if (a.valueOf() !== b.valueOf()) return false;
+    }
     return deepObjectEquals(a, b);
   }
-}
 
 
-function assertSame(expected, found, name_opt) {
-  if (found !== expected) {
-    fail(expected, found, name_opt);
-  }
-}
-
-
-function assertEquals(expected, found, name_opt) {
-  if (!deepEquals(found, expected)) {
-    fail(expected, found, name_opt);
-  }
-}
-
-
-function assertArrayEquals(expected, found, name_opt) {
-  var start = "";
-  if (name_opt) {
-    start = name_opt + " - ";
-  }
-  assertEquals(expected.length, found.length, start + "array length");
-  if (expected.length == found.length) {
-    for (var i = 0; i < expected.length; ++i) {
-      assertEquals(expected[i], found[i], start + "array element at index " + i);
+  assertSame = function assertSame(expected, found, name_opt) {
+    if (found === expected) {
+      if (expected !== 0 || (1 / expected) == (1 / found)) return;
+    } else if (isNaN(expected) && isNaN(found)) {
+      return;
     }
-  }
-}
+    fail(PrettyPrint(expected), found, name_opt);
+  };
 
 
-function assertTrue(value, name_opt) {
-  assertEquals(true, value, name_opt);
-}
-
-
-function assertFalse(value, name_opt) {
-  assertEquals(false, value, name_opt);
-}
-
-
-function assertNaN(value, name_opt) {
-  if (!isNaN(value)) {
-    fail("NaN", value, name_opt);
-  }
-}
-
-
-function assertNull(value, name_opt) {
-  if (value !== null) {
-    fail("null", value, name_opt);
-  }
-}
-
-
-function assertNotNull(value, name_opt) {
-  if (value === null) {
-    fail("not null", value, name_opt);
-  }
-}
-
-
-function assertThrows(code, type_opt, cause_opt) {
-  var threwException = true;
-  try {
-    if (typeof code == 'function') {
-      code();
-    } else {
-      eval(code);
+  assertEquals = function assertEquals(expected, found, name_opt) {
+    if (!deepEquals(found, expected)) {
+      fail(PrettyPrint(expected), found, name_opt);
     }
-    threwException = false;
-  } catch (e) {
-    if (typeof type_opt == 'function')
-      assertInstanceof(e, type_opt);
-    if (arguments.length >= 3)
-      assertEquals(e.type, cause_opt);
-    // Do nothing.
-  }
-  if (!threwException) assertTrue(false, "did not throw exception");
-}
+  };
 
 
-function assertInstanceof(obj, type) {
-  if (!(obj instanceof type)) {
-    assertTrue(false, "Object <" + obj + "> is not an instance of <" + type + ">");
-  }
-}
-
-
-function assertDoesNotThrow(code) {
-  try {
-    if (typeof code == 'function') {
-      code();
-    } else {
-      eval(code);
+  assertArrayEquals = function assertArrayEquals(expected, found, name_opt) {
+    var start = "";
+    if (name_opt) {
+      start = name_opt + " - ";
     }
-  } catch (e) {
-    assertTrue(false, "threw an exception: " + (e.message || e));
-  }
-}
+    assertEquals(expected.length, found.length, start + "array length");
+    if (expected.length == found.length) {
+      for (var i = 0; i < expected.length; ++i) {
+        assertEquals(expected[i], found[i],
+                     start + "array element at index " + i);
+      }
+    }
+  };
 
 
-function assertUnreachable(name_opt) {
-  // Fix this when we ditch the old test runner.
-  var message = "Fail" + "ure: unreachable";
-  if (name_opt) {
-    message += " - " + name_opt;
-  }
-  throw new MjsUnitAssertionError(message);
-}
+  assertPropertiesEqual = function assertPropertiesEqual(expected, found,
+                                                         name_opt) {
+    // Check properties only.
+    if (!deepObjectEquals(expected, found)) {
+      fail(expected, found, name_opt);
+    }
+  };
+
+
+  assertToStringEquals = function assertToStringEquals(expected, found,
+                                                       name_opt) {
+    if (expected != String(found)) {
+      fail(expected, found, name_opt);
+    }
+  };
+
+
+  assertTrue = function assertTrue(value, name_opt) {
+    assertEquals(true, value, name_opt);
+  };
+
+
+  assertFalse = function assertFalse(value, name_opt) {
+    assertEquals(false, value, name_opt);
+  };
+
+
+  assertNull = function assertNull(value, name_opt) {
+    if (value !== null) {
+      fail("null", value, name_opt);
+    }
+  };
+
+
+  assertNotNull = function assertNotNull(value, name_opt) {
+    if (value === null) {
+      fail("not null", value, name_opt);
+    }
+  };
+
+
+  assertThrows = function assertThrows(code, type_opt, cause_opt) {
+    var threwException = true;
+    try {
+      if (typeof code == 'function') {
+        code();
+      } else {
+        eval(code);
+      }
+      threwException = false;
+    } catch (e) {
+      if (typeof type_opt == 'function') {
+        assertInstanceof(e, type_opt);
+      }
+      if (arguments.length >= 3) {
+        assertEquals(e.type, cause_opt);
+      }
+      // Success.
+      return;
+    }
+    throw new MjsUnitAssertionError("Did not throw exception");
+  };
+
+
+  assertInstanceof = function assertInstanceof(obj, type) {
+    if (!(obj instanceof type)) {
+      var actualTypeName = null;
+      var actualConstructor = Object.getPrototypeOf(obj).constructor;
+      if (typeof actualConstructor == "function") {
+        actualTypeName = actualConstructor.name || String(actualConstructor);
+      }
+      fail("Object <" + PrettyPrint(obj) + "> is not an instance of <" +
+               (type.name || type) + ">" +
+               (actualTypeName ? " but of <" + actualTypeName + ">" : ""));
+    }
+  };
+
+
+  assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) {
+    try {
+      if (typeof code == 'function') {
+        code();
+      } else {
+        eval(code);
+      }
+    } catch (e) {
+      fail("threw an exception: ", e.message || e, name_opt);
+    }
+  };
+
+  assertUnreachable = function assertUnreachable(name_opt) {
+    // Fix this when we ditch the old test runner.
+    var message = "Fail" + "ure: unreachable";
+    if (name_opt) {
+      message += " - " + name_opt;
+    }
+    throw new MjsUnitAssertionError(message);
+  };
+
+})();
+
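
For orientation, here is a brief usage sketch of the assertion surface that the rewritten mjsunit.js above forward-declares and then assigns inside the closure. The calls are hypothetical test code, not lines from the patch:

// assertEquals compares via deepEquals: arrays element-wise, plain objects
// by their sorted Object.keys, wrapper objects and Dates by valueOf().
assertEquals({a: [1, 2], b: "x"}, {a: [1, 2], b: "x"});
// assertSame treats NaN as identical to itself but distinguishes -0 from +0.
assertSame(NaN, NaN);
// assertThrows optionally checks the exception constructor with instanceof.
assertThrows(function() { null.foo; }, TypeError);
// assertDoesNotThrow accepts either a function or a string of code to eval.
assertDoesNotThrow("1 + 1");
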
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 8f042ce..bae09b4 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -1,4 +1,4 @@
-# Copyright 2008 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -30,6 +30,13 @@
 # All tests in the bug directory are expected to fail.
 bugs: FAIL
 
+##############################################################################
+# Fails.
+regress/regress-1119: FAIL
+
+#############################################################################
+# Fails due to r10102 which reverts precise stepping on the 3.6 branch.
+debug-step-2: FAIL
 
 ##############################################################################
 # Too slow in debug mode with --stress-opt
@@ -112,6 +119,41 @@
 
 ##############################################################################
 [ $arch == mips ]
+# Run those tests, but expect them to time out.
+array-sort: PASS || TIMEOUT
+mirror-object: PASS || TIMEOUT
 
-# Skip all tests on MIPS.
-*: SKIP
+# Skip long-running tests.
+compiler/alloc-number: SKIP
+compiler/array-length: SKIP
+compiler/assignment-deopt: SKIP
+compiler/deopt-args: SKIP
+compiler/inline-compare: SKIP
+compiler/inline-global-access: SKIP
+compiler/optimized-function-calls: SKIP
+compiler/pic: SKIP
+compiler/property-calls: SKIP
+compiler/recursive-deopt: SKIP
+compiler/regress-4: SKIP
+compiler/regress-funcaller: SKIP
+compiler/regress-gvn: SKIP
+compiler/regress-rep-change: SKIP
+compiler/regress-arguments: SKIP
+compiler/regress-funarguments: SKIP
+compiler/regress-or: SKIP
+compiler/regress-3249650: SKIP
+compiler/simple-deopt: SKIP
+regress/regress-490: SKIP
+regress/regress-634: SKIP
+regress/regress-create-exception: SKIP
+regress/regress-3218915: SKIP
+regress/regress-3247124: SKIP
+regress/regress-1132: SKIP
+regress/regress-1257: SKIP
+regress/regress-91008: SKIP
+
+##############################################################################
+[ $isolates ]
+# d8-os writes temporary files that might interfere with each other when running
+# in multiple threads. Skip this if running with isolates testing.
+d8-os: SKIP
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/multiline.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/multiline.js
index aa93b25..ba538db 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/multiline.js
@@ -25,12 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+var s = 'foo\
+bar';
+assertEquals("foobar", s);
diff --git a/test/mjsunit/no-semicolon.js b/test/mjsunit/no-semicolon.js
index fa6ccba..273ec4b 100644
--- a/test/mjsunit/no-semicolon.js
+++ b/test/mjsunit/no-semicolon.js
@@ -30,7 +30,7 @@
 
 function f() { return }
 
-function g() { 
+function g() {
   return
     4;
 }
@@ -42,4 +42,4 @@
 assertEquals(0, i);
 
 for (var i = 0; i < 10; i++) { continue }
-assertEquals(10, i);
\ No newline at end of file
+assertEquals(10, i);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/not.js
similarity index 74%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/not.js
index aa93b25..550a981 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/not.js
@@ -25,12 +25,35 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function testNot(x) {
+  // The VM constant folds so we use that to check the result.
+  var expected = eval("!(" + x + ")");
+  var actual = !x;
+  assertEquals(expected, actual, "x: " + x);
 }
 
-test();
+testNot(0);
+testNot(1);
+testNot(-1);
+testNot(-0);
+
+testNot(NaN);
+testNot(Infinity);
+testNot(-Infinity);
+
+testNot(true);
+testNot(false);
+
+assertTrue(!"");
+assertFalse(!"foo");
+
+assertFalse(![]);
+assertFalse(![1]);
+assertFalse(![1,2]);
+
+assertFalse(!{});
+assertFalse(!{foo:1});
+assertFalse(!{foo:1,bar:2});
+
+assertFalse(!!0);
+assertTrue(!!1);
diff --git a/test/mjsunit/number-string-index-call.js b/test/mjsunit/number-string-index-call.js
index 6f540c0..85b79d1 100644
--- a/test/mjsunit/number-string-index-call.js
+++ b/test/mjsunit/number-string-index-call.js
@@ -25,8 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --call_regexp
 var callbacks = [ function() {return 'foo'}, "nonobject", /abc/ ];
 assertEquals('foo', callbacks['0']());
 assertThrows("callbacks['1']()");
-assertEquals('abc', callbacks['2']("abcdefg"));
+assertThrows("callbacks['2']('abcdefg')");
diff --git a/test/mjsunit/number-tostring.js b/test/mjsunit/number-tostring.js
index 8312080..35e77e2 100644
--- a/test/mjsunit/number-tostring.js
+++ b/test/mjsunit/number-tostring.js
@@ -55,7 +55,7 @@
 assertEquals("-90.12", (-90.12).toString());
 assertEquals("-0.1", (-0.1).toString());
 assertEquals("-0.01", (-0.01).toString());
-assertEquals("-0.0123", (-0.0123).toString())
+assertEquals("-0.0123", (-0.0123).toString());
 assertEquals("-111111111111111110000", (-111111111111111111111).toString());
 assertEquals("-1.1111111111111111e+21", (-1111111111111111111111).toString());
 assertEquals("-1.1111111111111111e+22", (-11111111111111111111111).toString());
@@ -219,7 +219,7 @@
 // Test that we round up even when the last digit generated is even.
 // dtoa does not do this in its original form.
 assertEquals("1", 0.5.toFixed(0), "0.5.toFixed(0)");
-assertEquals("-1", -0.5.toFixed(0), "-0.5.toFixed(0)");
+assertEquals("-1", (-0.5).toFixed(0), "(-0.5).toFixed(0)");
 assertEquals("1.3", 1.25.toFixed(1), "1.25.toFixed(1)");
 // This is bizare, but Spidermonkey and KJS behave the same.
 assertEquals("234.2040", (234.20405).toFixed(4), "234.2040.toFixed(4)");
diff --git a/test/mjsunit/numops-fuzz.js b/test/mjsunit/numops-fuzz.js
new file mode 100644
index 0000000..bd7e4fa
--- /dev/null
+++ b/test/mjsunit/numops-fuzz.js
@@ -0,0 +1,4609 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function f() {
+  var x = 0;
+  var tmp = 0;
+  assertEquals(0, x /= (tmp = 798469700.4090232, tmp));
+  assertEquals(0, x *= (2714102322.365509));
+  assertEquals(0, x *= x);
+  assertEquals(139516372, x -= (tmp = -139516372, tmp));
+  assertEquals(1, x /= (x%(2620399703.344006)));
+  assertEquals(0, x >>>= x);
+  assertEquals(-2772151192.8633175, x -= (tmp = 2772151192.8633175, tmp));
+  assertEquals(-2786298206.8633175, x -= (14147014));
+  assertEquals(1509750523, x |= ((1073767916)-(tmp = 919311632.2789925, tmp)));
+  assertEquals(2262404051.926751, x += ((752653528.9267509)%x));
+  assertEquals(-270926893, x |= (tmp = 1837232194, tmp));
+  assertEquals(0.17730273401688765, x /= ((tmp = -2657202795, tmp)-(((((x|(tmp = -1187733892.282897, tmp))-x)<<(556523578))-x)+(-57905508.42881298))));
+  assertEquals(122483.56550261026, x *= ((((tmp = 2570017060.15193, tmp)%((-1862621126.9968336)>>x))>>(x>>(tmp = 2388674677, tmp)))>>>(-2919657526.470434)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= (tmp = 2705124845.0455265, tmp));
+  assertEquals(0, x &= (-135286835.07069612));
+  assertEquals(-0, x *= ((tmp = -165810479.10020828, tmp)|x));
+  assertEquals(248741888, x += ((735976871.1308595)<<(-2608055185.0700903)));
+  assertEquals(139526144, x &= (tmp = -1454301068, tmp));
+  assertEquals(-0.047221345672746884, x /= (tmp = -2954726130.994727, tmp));
+  assertEquals(0, x <<= (x>>x));
+  assertEquals(0, x >>>= ((x+(912111201.488966))-(tmp = 1405800042.6070075, tmp)));
+  assertEquals(-1663642733, x |= (((-1663642733.5700119)<<(x^x))<<x));
+  assertEquals(-914358272, x <<= ((((-308411676)-(-618261840.9113789))%(-68488626.58621716))-x));
+  assertEquals(-1996488704, x &= (-1358622641.5848842));
+  assertEquals(-345978263, x += (1650510441));
+  assertEquals(3, x >>>= (-1106714178.701668));
+  assertEquals(1, x %= (((x>>(x>>(tmp = -3052773846.817114, tmp)))*(tmp = 1659218887.379526, tmp))&x));
+  assertEquals(-943225672, x += (-943225673));
+  assertEquals(-0.41714300120060854, x /= (tmp = 2261156652, tmp));
+  assertEquals(0, x >>>= ((3107060934.8863482)<<(tmp = 1902730887, tmp)));
+  assertEquals(0, x &= x);
+  assertEquals(1476628, x |= ((tmp = -2782899841.390033, tmp)>>>(2097653770)));
+  assertEquals(0.0008887648921591833, x /= ((tmp = 1661438264.5253348, tmp)%((tmp = 2555939813, tmp)*(-877024323.6515315))));
+  assertEquals(0, x <<= (tmp = -2366551345, tmp));
+  assertEquals(0, x &= (tmp = 1742843591, tmp));
+  assertEquals(0, x -= x);
+  assertEquals(4239, x += ((-3183564176.232031)>>>(349622674.1255014)));
+  assertEquals(-67560, x -= ((2352742295)>>>x));
+  assertEquals(-67560, x &= x);
+  assertEquals(-0.00003219917807302283, x /= (2098190203.699741));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>= ((((tmp = -869086522.8358297, tmp)/(187820779))-(tmp = -2000970995.1931965, tmp))|(1853528755.6064696)));
+  assertEquals(0, x >>= (-3040509919));
+  assertEquals(0, x %= (((tmp = -2386688049.194946, tmp)<<(tmp = -669711391, tmp))|x));
+  assertEquals(0, x %= (tmp = -298431511.4839926, tmp));
+  assertEquals(0, x /= (2830845091.2793818));
+  assertEquals(0, x /= ((((-2529926178)|x)^((tmp = 2139313707.0894063, tmp)%((-1825768525.0541775)-(-952600362.7758243))))+x));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x -= x);
+  assertEquals(NaN, x /= (tmp = -432944480, tmp));
+  assertEquals(0, x <<= (((((x^((-1777523727)+(2194962794)))>>>(((((-590335134.8224905)%(x*(2198198974)))|(tmp = -2068556796, tmp))/(1060765637))*(-147051676)))/((tmp = -477350113.92686677, tmp)<<((x/(2018712621.0397925))^((tmp = 491163813.3921983, tmp)+(((x|((((x%(1990073256.812654))%((-2024388518.9599915)>>((tmp = 223182187, tmp)*(-722241065))))>>>(tmp = 2517147885.305745, tmp))%(1189996239.11222)))&x)%(-306932860))))))&((tmp = 1117802724.485684, tmp)+((-1391614045)-x)))%((((x>>((2958453447)*x))^(((410825859)|(((tmp = -1119269292.5495896, tmp)>>>(((((((x%(tmp = 648541746.6059314, tmp))*((-2304508480)<<((((x^(1408199888.1454597))|((251623937)|x))/((-382389946.9984102)|(tmp = -2082681143.5893767, tmp)))-(((tmp = 631243472, tmp)>>>(1407556544))/(((x>>>x)>>>(tmp = -6329025.47865057, tmp))>>>(tmp = 948664752.543093, tmp))))))/((((-183248880)>>x)&x)&x))>>x)&(((-978737284.8492057)%(tmp = 2983300011.737006, tmp))&(tmp = 2641937234.2954116, tmp)))<<x)>>(2795416632.9722223)))%((((tmp = -50926632, tmp)/x)&(((tmp = -2510786916, tmp)/x)/(-699755674)))|((((tmp = 1411792593, tmp)>>(924286570.2637128))>>((1609997725)>>(2735658951.0762663)))*(tmp = 726205435, tmp)))))<<(tmp = -2135055357.3156831, tmp)))/(tmp = 1408695065, tmp))^(tmp = -1343267739.8562133, tmp))));
+  assertEquals(0, x %= (-437232116));
+  assertEquals(-2463314518.2747326, x -= (2463314518.2747326));
+  assertEquals(109, x >>= (2401429560));
+  assertEquals(-2687641732.0253763, x += (-2687641841.0253763));
+  assertEquals(-2336375490019484000, x *= (tmp = 869303174.6678596, tmp));
+  assertEquals(5.458650430363785e+36, x *= x);
+  assertEquals(0, x |= ((((-1676972008.797291)*x)*((tmp = 2606991807, tmp)-x))<<x));
+  assertEquals(0, x &= ((-3053393759.3496876)+(-1431008367)));
+  assertEquals(-856728369, x |= (x-(((((764337872)/x)<<((x|(((tmp = 1409368192.1268077, tmp)+(tmp = -848083676, tmp))|(-2797102463.7915916)))^x))/x)^(tmp = 856728369.0589117, tmp))));
+  assertEquals(-0, x %= x);
+  assertEquals(1116550103, x ^= (-3178417193));
+  assertEquals(1116550103, x %= (tmp = -1482481942, tmp));
+  assertEquals(133, x >>>= x);
+  assertEquals(-1.381429241671034e-7, x /= ((tmp = -962771116.8101778, tmp)^x));
+  assertEquals(-1092268961, x |= ((tmp = 3202672531, tmp)-((x-(tmp = 845529357, tmp))>>(tmp = -868680593, tmp))));
+  assertEquals(-1092268961, x %= (tmp = 2670840415.304719, tmp));
+  assertEquals(-122794480, x %= (tmp = 969474481, tmp));
+  assertEquals(-297606521542193600, x *= (2423614820));
+  assertEquals(72460064, x >>>= (tmp = -1230798655, tmp));
+  assertEquals(-203714325373689600, x *= (-2811401400));
+  assertEquals(2154914048, x >>>= (((2241377026.001436)/x)+x));
+  assertEquals(1177864081, x ^= (tmp = -968513903, tmp));
+  assertEquals(35947664, x &= (-2086226758.2704995));
+  assertEquals(20795732539020670, x += (x*(578500247)));
+  assertEquals(-892004992, x >>= x);
+  assertEquals(-7023661.354330708, x /= ((((((1740714214)%((tmp = -459699286, tmp)+(tmp = -1700187400, tmp)))>>(tmp = -3170295237, tmp))+(tmp = -497509780, tmp))+((1971976144.6197853)+(661992813.6077721)))>>>(-1683802728)));
+  assertEquals(-1634205696, x <<= x);
+  assertEquals(-7, x >>= (-3187653764.930914));
+  assertEquals(-5.095345981491203, x -= ((tmp = 748315289, tmp)/(tmp = -392887780, tmp)));
+  assertEquals(1486531570, x &= (1486531570.9300508));
+  assertEquals(5670, x >>= (((tmp = -2486758205.26425, tmp)*(732510414))|x));
+  assertEquals(5670, x >>= (((-1811879946.2553763)%(1797475764))/(((tmp = -2159923884, tmp)|x)+(tmp = -1774410807, tmp))));
+  assertEquals(38, x %= (x>>>x));
+  assertEquals(-151134215, x ^= (((tmp = -2593085609.5622163, tmp)+((tmp = -814992345.7516887, tmp)-(534809571)))|(tmp = -232678571, tmp)));
+  assertEquals(-234881024, x <<= x);
+  assertEquals(-234881024, x <<= (x>>>x));
+  assertEquals(55169095435288580, x *= x);
+  assertEquals(0, x >>= (tmp = 1176612256, tmp));
+  assertEquals(0, x <<= (1321866341.2486475));
+  assertEquals(0, x %= (x-(-602577995)));
+  assertEquals(0, x >>>= (((((tmp = -125628635.79970193, tmp)^(tmp = 1294209955.229382, tmp))&(((tmp = -2353256654.0725203, tmp)|((-1136743028.9425385)|((((950703429.1110399)-(x>>>x))/((((x%(-252705869.21126103))/((tmp = 886957620, tmp)<<(x%((tmp = -1952249741, tmp)*(tmp = -1998149844, tmp)))))|(tmp = 1933366713, tmp))|((tmp = -2957141565, tmp)>>>(tmp = 1408598804, tmp))))+(((((((-2455002047.4910946)%(tmp = -528017836, tmp))&((-2693432769)/(tmp = 2484427670.9045153, tmp)))%(-356969659))-((((((tmp = 3104828644.0753174, tmp)%(x>>>(tmp = 820832137.8175925, tmp)))*((tmp = 763080553.9260503, tmp)+(3173597855)))<<(((-510785437)^x)<<(x|(((x*(x%((tmp = -1391951515, tmp)/x)))-x)|(x-((-522681793.93221474)/((2514619703.2162743)*(2936688324))))))))|x)>>>(-2093210042)))&(763129279.3651779))&x))))-x))%(((-1331164821)&(tmp = 1342684586, tmp))<<(x<<(tmp = 2675008614.588005, tmp))))>>((2625292569.8984914)+(-3185992401))));
+  assertEquals(0, x *= (tmp = 671817215.1147974, tmp));
+  assertEquals(-1608821121, x ^= ((tmp = 2686146175.04077, tmp)>>>x));
+  assertEquals(-0, x %= x);
+  assertEquals(-0, x /= ((tmp = 286794551.0720866, tmp)|(x%x)));
+  assertEquals(0, x <<= (x|(tmp = 1095503996.2285218, tmp)));
+  assertEquals(443296752, x ^= (443296752));
+  assertEquals(110824188, x >>= ((184708570)>>(x&x)));
+  assertEquals(0.7908194935161674, x /= ((((167151154.63381648)&((tmp = -1434120690, tmp)-(tmp = 2346173080, tmp)))/(56656051.87305987))^(140138414)));
+  assertEquals(-0.9027245492678485, x *= ((tmp = 1724366578, tmp)/(((2979477411)<<(((897038568)>>(tmp = 348960298, tmp))%(281056223.2037884)))^((((-1383133388)-(((-1379748375)-((x>>(x&(tmp = 2456582046, tmp)))>>>(-2923911755.565961)))&x))<<(-2825791731))^(tmp = -1979992970, tmp)))));
+  assertEquals(0, x &= (2482304279));
+  assertEquals(-0, x *= (-2284213673));
+  assertEquals(0, x <<= ((2874381218.015819)|x));
+  assertEquals(0, x *= (x>>>(tmp = 2172786480, tmp)));
+  assertEquals(0, x &= (-1638727867.2978938));
+  assertEquals(0, x %= ((tmp = -2213947368.285817, tmp)>>x));
+  assertEquals(0, x >>>= (tmp = -531324706, tmp));
+  assertEquals(0, x %= (tmp = -2338792486, tmp));
+  assertEquals(0, x <<= (((tmp = 351012164, tmp)<<(x|((tmp = -3023836638.5337825, tmp)^(-2678806692))))|x));
+  assertEquals(0, x %= (x-(tmp = -3220231305.45039, tmp)));
+  assertEquals(0, x <<= (-2132833261));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x %= ((2544970469)+(((-2633093458.5911965)&(644108176))-(x>>>(tmp = -949043718, tmp)))));
+  assertEquals(-2750531265, x += (-2750531265));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x *= ((tmp = 1299005700, tmp)-x));
+  assertEquals(0, x >>= x);
+  assertEquals(-1785515304, x -= (((((-806054462.5563161)/x)>>>x)+(1785515304))|((tmp = 2937069788.9396844, tmp)/x)));
+  assertEquals(-3810117159.173689, x -= (2024601855.1736891));
+  assertEquals(-6.276064139320051, x /= (607087033.3053156));
+  assertEquals(134217727, x >>>= (((x%(tmp = 924293127, tmp))^x)|((x>>>(x&((((tmp = -413386639, tmp)/(x>>(tmp = 599075308.8479941, tmp)))^(tmp = -1076703198, tmp))*((tmp = -2239117284, tmp)>>(655036983)))))-x)));
+  assertEquals(134217727, x %= (tmp = 2452642261.038778, tmp));
+  assertEquals(-569504740360507, x *= ((tmp = -1086243941, tmp)>>(tmp = 1850668904.4885683, tmp)));
+  assertEquals(113378806, x >>>= (tmp = -2558233435, tmp));
+  assertEquals(979264375, x -= (((x>>(1950008052))%((2917183569.0209)*(tmp = 1184250640.446752, tmp)))|((((tmp = -691875212, tmp)-(-2872881803))>>(tmp = 44162204.97461021, tmp))^(tmp = 865885647, tmp))));
+  assertEquals(-1127813632, x <<= ((((tmp = -2210499281, tmp)>>>x)-(tmp = 2359697240, tmp))-x));
+  assertEquals(-1707799657, x ^= (653518231.3995534));
+  assertEquals(2916579668449318000, x *= x);
+  assertEquals(2916579669254640600, x += (x&(tmp = 2986558026.399422, tmp)));
+  assertEquals(870995175, x ^= (2598813927.8991632));
+  assertEquals(870995175, x %= (-2857038782));
+  assertEquals(1869503575895591000, x *= (x|(x|(((tmp = 2478650307.4118147, tmp)*((tmp = 2576240847.476932, tmp)>>>x))<<x))));
+  assertEquals(-134947790, x |= ((tmp = 1150911808, tmp)*((2847735464)/(-2603172652.929262))));
+  assertEquals(-137053182, x -= ((tmp = 2155921819.0929346, tmp)>>>(x-(((-1960937402)-(-1907735074.2875962))%((1827808310)^(tmp = -2788307127, tmp))))));
+  assertEquals(-134824702, x |= (((2912578752.2395406)^(x%(((-2585660111.0638976)<<(((((tmp = 747742706, tmp)%(-1630261205))&((((x|(x|(-2619903144.278758)))|((2785710568.8651934)>>((-968301967.5982246)<<(x&x))))>>((x>>>((x>>>(tmp = -1402085797.0310762, tmp))*((tmp = -323729645.2250068, tmp)<<(tmp = 2234667799, tmp))))>>>(-167003745)))>>((924665972.4681011)<<x)))>>>x)<<((((x+x)+x)-(((tmp = 2399203431.0526247, tmp)-(-2872533271))-(((tmp = 914778794.2087344, tmp)-(tmp = 806353942.9502392, tmp))|(((tmp = 262924334.99231672, tmp)&x)|(tmp = -460248836.5602243, tmp)))))/x)))%((-1681000689)/(tmp = -2805054623.654228, tmp)))))*(tmp = 957346233.9619625, tmp)));
+  assertEquals(-3274838, x %= ((((tmp = 3155450543.3524327, tmp)>>>x)<<(tmp = 2103079652.3410985, tmp))>>x));
+  assertEquals(-3274838, x |= ((((tmp = 2148004645.639173, tmp)>>>(tmp = -1285119223, tmp))<<(((((-711596054)>>>(tmp = -2779776371.3473206, tmp))^(((((tmp = -1338880329.383915, tmp)<<((-1245247254.477341)>>x))*(tmp = -2649052844.20065, tmp))>>((1734345880.4600453)%(x/(2723093117.118899))))*(1252918475.3285656)))<<(2911356885))^x))<<(-1019761103)));
+  assertEquals(1703281954, x &= (((tmp = 1036570471.7412028, tmp)+((tmp = 3043119517, tmp)%(2374310816.8346715)))%(tmp = -2979155076, tmp)));
+  assertEquals(1741588391, x |= ((tmp = 1230009575.6003838, tmp)>>>(-1247515003.8152597)));
+  assertEquals(72869474.64782429, x %= (tmp = 1668718916.3521757, tmp));
+  assertEquals(770936242.104203, x += (698066767.4563787));
+  assertEquals(-0.2820604726420833, x /= (tmp = -2733230342, tmp));
+  assertEquals(403480578, x |= ((969730374)&(tmp = 1577889835, tmp)));
+  assertEquals(-1669557233, x ^= ((-1616812135)+(tmp = -456209292, tmp)));
+  assertEquals(-1630427, x >>= ((2327783031.1175823)/(226947662.4579488)));
+  assertEquals(131022, x >>>= ((tmp = -1325018897.2482083, tmp)>>(x&((((((-1588579772.9240348)<<(tmp = -1775580288.356329, tmp))<<(tmp = -1021528325.2075481, tmp))>>((tmp = 2373033451.079956, tmp)*(tmp = 810304612, tmp)))-((tmp = -639152097, tmp)<<(tmp = 513879484, tmp)))&(2593958513)))));
+  assertEquals(1, x >>= ((3033200222)-x));
+  assertEquals(-561146816.4851823, x += (tmp = -561146817.4851823, tmp));
+  assertEquals(-4.347990105831158, x /= ((((-1270435902)*x)%((tmp = 637328492.7386824, tmp)-(x>>(-749100689))))%(x+x)));
+  assertEquals(-1, x >>= x);
+  assertEquals(1, x *= x);
+  assertEquals(111316849706694460, x += ((966274056)*(x|(115202150))));
+  assertEquals(-1001883840, x >>= x);
+  assertEquals(-1001883840, x &= x);
+  assertEquals(-3006880758, x += ((((-2275110637.4054556)/((x+(tmp = -1390035090.4324536, tmp))>>(-5910593)))&(tmp = 378982420, tmp))|(tmp = 2289970378.568629, tmp)));
+  assertEquals(314474, x >>>= (x>>((tmp = -228007336.31281257, tmp)%(tmp = 1127648013, tmp))));
+  assertEquals(-17694827, x ^= ((tmp = 2095133598.1849852, tmp)|(-1978322311)));
+  assertEquals(1, x /= x);
+  assertEquals(1, x %= (-2323617209.7531185));
+  assertEquals(0, x >>>= (x*(tmp = -1574455400.489434, tmp)));
+  assertEquals(0, x >>= (3131854684));
+  assertEquals(2853609824, x += ((-231012098)-(tmp = -3084621922, tmp)));
+  assertEquals(8143089027629311000, x *= x);
+  assertEquals(313052685, x ^= (tmp = 2962303501, tmp));
+  assertEquals(4776, x >>= (tmp = 2271457232, tmp));
+  assertEquals(0.000002812258572702285, x /= (tmp = 1698279115, tmp));
+  assertEquals(0, x >>>= (tmp = 1698465782.0927145, tmp));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x |= ((x<<((-1824760240.3040407)<<(2798263764.39145)))&(tmp = 1795988253.0493627, tmp)));
+  assertEquals(1782206945, x ^= (-2512760351.7881565));
+  assertEquals(7610569113843172000, x *= (((tmp = -44415823.92972565, tmp)&(tmp = 1402483498.9421625, tmp))+(tmp = 2909778666, tmp)));
+  assertEquals(15221138227873292000, x += (x-(tmp = -186948658.394145, tmp)));
+  assertEquals(0, x -= x);
+  assertEquals(-2238823252, x -= ((tmp = 2238823252, tmp)+x));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>= (2976069570));
+  assertEquals(0, x >>= ((tmp = -2358157433, tmp)/x));
+  assertEquals(-949967713, x ^= (tmp = -949967713, tmp));
+  assertEquals(-1, x >>= x);
+  assertEquals(-1522291702.1977966, x *= (1522291702.1977966));
+  assertEquals(-1522291702, x >>= ((((2290279800)|x)|(1793154434.6798015))&((-1161390929.0766077)>>>x)));
+  assertEquals(83894274, x &= (tmp = 1571058486, tmp));
+  assertEquals(43186847.90522933, x += ((tmp = -1131332988.0947707, tmp)%x));
+  assertEquals(0, x >>= (tmp = -1968312707.269359, tmp));
+  assertEquals(0, x &= (2507747643.26175));
+  assertEquals(0, x %= (tmp = 3190525303.366887, tmp));
+  assertEquals(-1968984602, x ^= (((x/(x|(-1607062026.5338054)))<<(tmp = 2207669861.8770065, tmp))+(tmp = 2325982694.956348, tmp)));
+  assertEquals(554, x >>>= (((tmp = -2302283871.993821, tmp)>>>(-3151835112))|(((((x%(-1534374264))/((731246012)<<(((883830997.1194847)<<(((-1337895080.1937215)/(tmp = 3166402571.8157315, tmp))^(tmp = -1563897595.5799441, tmp)))>>(tmp = -556816951.0537591, tmp))))>>(-2682203577))<<(x/((1654294674.865079)+x)))/((x^(-2189474695.4259806))/(-475915245.7363057)))));
+  assertEquals(1372586111, x ^= (1372586581));
+  assertEquals(1166831229, x -= ((-834168138)&(762573579)));
+  assertEquals(2333662456, x -= ((x>>x)-x));
+  assertEquals(-1961304840, x &= x);
+  assertEquals(-2130143128, x &= (2982852718.0711775));
+  assertEquals(1073741824, x <<= (-1446978661.6426942));
+  assertEquals(2097152, x >>>= ((-1424728215)-(((127872198)%(tmp = -2596923298, tmp))&x)));
+  assertEquals(2097152, x >>>= x);
+  assertEquals(0, x &= (x/(tmp = -518419194.42994523, tmp)));
+  assertEquals(0, x >>= ((x/(-1865078245))%(tmp = 2959239210, tmp)));
+  assertEquals(-0, x *= ((x|(-1721307400))|(-3206147171.9491577)));
+  assertEquals(0, x >>>= ((-694741143)&(tmp = -2196513947.699142, tmp)));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x &= ((tmp = 2037824385.8836646, tmp)+((tmp = 1203034986.4647732, tmp)/(x>>>(((-1374881234)/(899771270.3237157))+((-2296524362.8020077)|(-1529870870)))))));
+  assertEquals(0, x >>= (tmp = 2770637816, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(-1861843456, x |= ((632402668)*((x|(tmp = -1032952662.8269436, tmp))|(tmp = 2671272511, tmp))));
+  assertEquals(-1861843456, x >>= (((x>>>x)+x)<<(-1600908842)));
+  assertEquals(-58182608, x >>= (x-(tmp = -2496617861, tmp)));
+  assertEquals(-3636413, x >>= (tmp = -400700028, tmp));
+  assertEquals(-7272826, x += x);
+  assertEquals(-1, x >>= ((tmp = -3184897005.3614545, tmp)-((-1799843014)|(tmp = 2832132915, tmp))));
+  assertEquals(-121800925.94209385, x *= (121800925.94209385));
+  assertEquals(-30450232, x >>= (-979274206.6261561));
+  assertEquals(-30450232, x >>= (tmp = -1028204832.5078967, tmp));
+  assertEquals(-30450232, x |= x);
+  assertEquals(965888871, x ^= (((((-2157753481.3375635)*((tmp = -1810667184.8165767, tmp)&((tmp = 2503908344.422232, tmp)|x)))>>(x>>(1601560785)))<<x)^(tmp = 943867311.6380403, tmp)));
+  assertEquals(7546006, x >>>= x);
+  assertEquals(7546006, x <<= ((tmp = 1388931761.780241, tmp)*(x-(tmp = -1245147647.0070577, tmp))));
+  assertEquals(12985628, x += (x&(-1520746354)));
+  assertEquals(12985628, x &= x);
+  assertEquals(12985628, x %= (tmp = 308641965, tmp));
+  assertEquals(685733278, x |= ((tmp = -1275653544, tmp)-((tmp = -1956798010.3773859, tmp)%(tmp = 2086889575.643448, tmp))));
+  assertEquals(679679376, x &= (2860752368));
+  assertEquals(1770773904, x |= (x<<(3200659207)));
+  assertEquals(1224886544, x &= (-585733767.6876519));
+  assertEquals(1224886544, x %= ((tmp = -114218494, tmp)-x));
+  assertEquals(1208109328, x &= (tmp = 1854361593, tmp));
+  assertEquals(18434, x >>>= x);
+  assertEquals(-349394636955256100, x *= (x*(-1028198742)));
+  assertEquals(-519536600.7713163, x %= (-1054085356.9120367));
+  assertEquals(-1610612736, x ^= ((tmp = -3126078854, tmp)&x));
+  assertEquals(-2637321565906333700, x *= (1637464740.5658746));
+  assertEquals(-2637321568051070500, x -= ((tmp = -1006718806, tmp)<<(3005848133.106345)));
+  assertEquals(368168695, x ^= (x^(tmp = 368168695.6881037, tmp)));
+  assertEquals(43, x >>>= x);
+  assertEquals(-2081297089, x |= ((167169305.77248895)+(-2248466405.3199244)));
+  assertEquals(-2474622167, x -= (tmp = 393325078, tmp));
+  assertEquals(-135109701, x %= (-1169756233));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>= (((((tmp = -164768854, tmp)/(tmp = -1774989993.1909926, tmp))+x)-((-921438912)>>(tmp = -191772028.69249105, tmp)))-(tmp = 558728578.22033, tmp)));
+  assertEquals(0, x %= (tmp = 2188003745, tmp));
+  assertEquals(0, x <<= (((tmp = -999335540, tmp)>>((((325101977)/(tmp = -3036991542, tmp))<<(tmp = -213302488, tmp))+x))|(tmp = -1054204587, tmp)));
+  assertEquals(0, x &= ((2844053429.4720345)>>>x));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x -= (-1481729275.9118822));
+  assertEquals(NaN, x *= (tmp = 1098314618.2397528, tmp));
+  assertEquals(-1073741824, x ^= ((tmp = 1718545772, tmp)<<(((tmp = -81058910, tmp)-(2831123087.424368))+(tmp = 576710057.2361784, tmp))));
+  assertEquals(-2921155898.4793186, x -= (1847414074.4793184));
+  assertEquals(-1295646720, x <<= (2178621744));
+  assertEquals(-0.8906779709597907, x /= ((tmp = -2840292585.6837263, tmp)<<(x&((tmp = 892527695.6172305, tmp)>>>x))));
+  assertEquals(0, x <<= (((tmp = 3149667213.298993, tmp)>>(tmp = 1679370761.7226725, tmp))^(115417747.21537328)));
+  assertEquals(0, x |= x);
+  assertEquals(0, x %= ((-1112849427)>>(-1245508870.7514496)));
+  assertEquals(0, x &= x);
+  assertEquals(0, x |= x);
+  assertEquals(0, x >>>= ((3144100694.930459)>>>(tmp = 2408610503, tmp)));
+  assertEquals(0, x <<= ((tmp = 2671709754.0318713, tmp)%x));
+  assertEquals(0, x >>>= (x|((tmp = -3048578701, tmp)-(674147224))));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x &= ((tmp = -2084883715, tmp)|(((((-3008427069)+(875536047.4283574))>>>x)%(tmp = -450003426.1091652, tmp))%(((-2956878433.269356)|(x/((((x%((((((x<<(((tmp = -1581063482.510351, tmp)^x)-(tmp = 1364458217, tmp)))^((tmp = 1661446342, tmp)+(1307091014)))/(342270750.9901335))>>>(x&((1760980812.898993)&((tmp = 2878165745.6401143, tmp)/(((tmp = -981178013, tmp)/(-2338761668.29912))>>(-958462630))))))*((1807522840)^((tmp = 1885835034, tmp)^(-2538647938))))*(1673607540.0854697)))%x)>>x)<<x)))<<(853348877.2407281)))));
+  assertEquals(0, x >>>= x);
+  assertEquals(-1162790279, x -= (1162790279));
+  assertEquals(-1162790279, x >>= (((-490178658)*x)/((((((tmp = -1883861998.6699312, tmp)/(tmp = -2369967345.240594, tmp))+(3142759868.266447))&(508784917.8158537))&x)>>(-2129532322))));
+  assertEquals(-1360849740.9829152, x -= (x+(1360849740.9829152)));
+  assertEquals(1928392181, x ^= (-602670783));
+  assertEquals(19478708.898989897, x /= (((-2617861994)>>(tmp = 797256920, tmp))%(-1784987906)));
+  assertEquals(-8648903.575540157, x *= (((tmp = 673979276, tmp)/(-1517908716))%(x/x)));
+  assertEquals(-8648903.575540157, x %= ((((643195610.4221292)>>>(tmp = 2342669302, tmp))>>>(tmp = -1682965878, tmp))^((tmp = -208158937.63443017, tmp)>>((907286989)&(x<<(448634893))))));
+  assertEquals(1399288769, x ^= (tmp = -1407486728, tmp));
+  assertEquals(0, x &= (((1999255838.815517)/(tmp = 564646001, tmp))/(-3075888101.3274765)));
+  assertEquals(0, x ^= ((-78451711.59404826)%x));
+  assertEquals(-1351557131, x |= (2943410165));
+  assertEquals(1715626371, x -= (-3067183502));
+  assertEquals(71434240, x &= ((-1800066426)<<(((((x<<(-324796375))+x)<<(tmp = 2696824955.735132, tmp))^x)%(tmp = 444916469, tmp))));
+  assertEquals(71434240, x >>>= (((x&((x%x)|x))+(tmp = 2226992348.3050146, tmp))<<(-305526260)));
+  assertEquals(0, x -= (x%(tmp = 582790928.5832802, tmp)));
+  assertEquals(0, x *= ((x%(1865155340))>>>((x<<(2600488191))^(-308995123))));
+  assertEquals(0, x >>= (x&(-3120043868.8531103)));
+  assertEquals(0, x |= x);
+  assertEquals(-0, x *= (tmp = -172569944, tmp));
+  assertEquals(0, x <<= (-1664372874));
+  assertEquals(1377713344.6784928, x += (tmp = 1377713344.6784928, tmp));
+  assertEquals(1377713344, x |= x);
+  assertEquals(-232833282, x |= (tmp = 2685870654, tmp));
+  assertEquals(84639, x -= (((((2778531079.998492)%(2029165314))>>>(tmp = -468881172.3729558, tmp))^x)|((x>>>((((x%(3044318992.943596))&(1996754328.2214756))^(1985227172.7485228))%(tmp = -1984848676.1347625, tmp)))|((tmp = 2637662639, tmp)<<x))));
+  assertEquals(0, x ^= x);
+  assertEquals(1237720303, x -= (-1237720303));
+  assertEquals(2, x >>= (-2148785379.428976));
+  assertEquals(2, x &= (tmp = -3087007874, tmp));
+  assertEquals(0, x %= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x += x);
+  assertEquals(0, x &= (2055693082));
+  assertEquals(-1349456492, x += (x^(-1349456492.315998)));
+  assertEquals(671088640, x <<= (x>>(-2030805724.5472062)));
+  assertEquals(-417654580004782100, x *= (tmp = -622353822, tmp));
+  assertEquals(1538160360, x |= (195983080.56698656));
+  assertEquals(733, x >>>= (tmp = 661085269, tmp));
+  assertEquals(657, x &= (-1611460943.993404));
+  assertEquals(431649, x *= x);
+  assertEquals(863298, x += x);
+  assertEquals(0, x &= ((1899423003)/((472439729)>>((tmp = 2903738952, tmp)+(tmp = 2164601630.3456993, tmp)))));
+  assertEquals(0, x &= (x>>>(tmp = 1939167951.2828958, tmp)));
+  assertEquals(1557813284, x |= (x-(-1557813284)));
+  assertEquals(72876068, x &= (662438974.2372154));
+  assertEquals(0.6695448637501589, x /= (tmp = 108844189.45702457, tmp));
+  assertEquals(0, x -= x);
+  assertEquals(2944889412, x += (2944889412));
+  assertEquals(3787980288, x -= ((((tmp = -2003814373.2301111, tmp)<<x)>>>(tmp = -3088357284.4405823, tmp))-(843090884)));
+  assertEquals(1, x >>>= (729274079));
+  assertEquals(1, x %= (-148002187.33869123));
+  assertEquals(3073988415.673201, x *= (tmp = 3073988415.673201, tmp));
+  assertEquals(4839166225.673201, x += (tmp = 1765177810, tmp));
+  assertEquals(4529373898.673201, x += (-309792327));
+  assertEquals(3097903.090496063, x %= (-150875866.51942348));
+  assertEquals(1270874112, x <<= ((((((tmp = -960966763.1418135, tmp)>>((((-3208596981.613482)>>>(tmp = 746403937.6913509, tmp))>>>(-2190042854.066803))/(2449323432)))*(-1272232665.791577))<<(-99306767.7209444))^((-1942103828)/((1570981655)/(tmp = 2381666337, tmp))))+(tmp = -1946759395.1558368, tmp)));
+  assertEquals(1273845956, x |= (tmp = -3197282108.6120167, tmp));
+  assertEquals(159230744, x >>= (((tmp = -1036031403.8108604, tmp)>>>(((3084964493)>>((x*x)^x))+(((2980108409.352001)^x)-(tmp = -2501685423.513927, tmp))))&(326263839)));
+  assertEquals(-370091747145550100, x *= (tmp = -2324248055.674161, tmp));
+  assertEquals(143384219.54999557, x /= (tmp = -2581119096, tmp));
+  assertEquals(1843396287, x |= (tmp = 1842718767, tmp));
+  assertEquals(2.4895593465813803, x /= (740450831));
+  assertEquals(2.4895593465813803, x %= ((((((((-3175333618)>>>((tmp = -1403880166, tmp)<<(tmp = -134875360, tmp)))>>>(2721317334.998084))<<(x&(tmp = 2924634208.1484184, tmp)))*((((x>>(tmp = -200319931.15328693, tmp))-(tmp = -495128933, tmp))+((-788052518.6610589)*((((tmp = 107902557, tmp)&(1221562660))%(x<<(((3155498059)*(((tmp = -1354381139.4897022, tmp)^(tmp = 3084557138.332852, tmp))*((((tmp = 1855251464.8464525, tmp)/((-1857403525.2008865)>>x))|x)-(-2061968455.0023944))))*(1917481864.84619))))^(x-(-508176709.52712965)))))+((((x%(-1942063404))+(x%(tmp = 855152281.180481, tmp)))|(-522863804))>>x)))>>>((tmp = -2515550553, tmp)&(((((-801095375)-(tmp = -2298729336.9792976, tmp))^x)/(tmp = 2370468053, tmp))>>(x|(tmp = -900008879, tmp)))))>>>(((tmp = -810295719.9509168, tmp)*((tmp = -1306212963.6226444, tmp)/(((tmp = 3175881540.9514832, tmp)|(-1439142297.819246))+((tmp = -134415617, tmp)|((-245801870)+x)))))>>(tmp = 1889815478, tmp)))-(((tmp = 597031177, tmp)%(858071823.7655672))+((tmp = 2320838665.8243756, tmp)|((938555608)<<(2351739219.6461897))))));
+  assertEquals(6.197905740150709, x *= x);
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>= (-1639664165.9076233));
+  assertEquals(0, x >>= (-3135317748.801177));
+  assertEquals(0, x &= (3185479232.5325994));
+  assertEquals(-0, x *= ((-119759439.19668174)/(tmp = 2123964608, tmp)));
+  assertEquals(0, x /= (-1183061929.2827876));
+  assertEquals(0, x <<= (-1981831198));
+  assertEquals(0, x >>= ((((x<<(((((((-2133752838)&((tmp = -3045157736.9331336, tmp)>>>(x%x)))>>x)%(tmp = 3082217039, tmp))&(tmp = 270770770.97558427, tmp))|((-2212037556)^((((((2089224421)|(tmp = 360979560, tmp))<<x)%((tmp = -1679487690.6940534, tmp)+((173021423)|((tmp = 560900612, tmp)+((244376267.58977115)^x)))))<<(tmp = 2534513699, tmp))^x)))>>>(2915907189.4873834)))+(x*x))%(1637581117))%(tmp = 2363861105.3786244, tmp)));
+  assertEquals(0, x &= ((-2765495757.873004)&(1727406493)));
+  assertEquals(NaN, x -= (((((-1419667515.2616255)|x)-(150530256.48022234))%((((x|x)<<x)>>>(x^x))+x))-((-1216384577.3749187)*(495244398))));
+  assertEquals(NaN, x += (x^((tmp = 2472035493, tmp)+x)));
+  assertEquals(NaN, x %= ((tmp = -1753037412.885754, tmp)|((tmp = 2507058310, tmp)<<(1475945705))));
+  assertEquals(-1008981005, x |= ((tmp = -1140889842.6099494, tmp)-(tmp = -131908837, tmp)));
+  assertEquals(999230327.5872104, x -= (tmp = -2008211332.5872104, tmp));
+  assertEquals(975810, x >>= (((-1211913874)*x)>>>((-2842129009)>>(x&(tmp = -1410865834, tmp)))));
+  assertEquals(7623, x >>= ((tmp = -1051327071, tmp)-(((tmp = -237716102.8005445, tmp)|((2938903833.416546)&x))|(((-1831064579)^x)/((tmp = 2999232092, tmp)-(981996301.2875179))))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x %= (x|(tmp = -666201160.5810485, tmp)));
+  assertEquals(-1347124100, x |= (-1347124100));
+  assertEquals(-0, x %= (x&x));
+  assertEquals(-661607963, x ^= (tmp = -661607963.3794863, tmp));
+  assertEquals(3465, x >>>= (-828119020.8056595));
+  assertEquals(-268431991, x -= (((tmp = -1386256352, tmp)^((tmp = 743629575, tmp)%((x*((tmp = -1719517658, tmp)>>(2019516558)))<<((2637317661)|x))))<<(tmp = -51637065, tmp)));
+  assertEquals(1578876380, x += ((tmp = 1847308371, tmp)&(((((((tmp = 1487934776.1893163, tmp)%(tmp = 1423264469.3137975, tmp))|(((2653260792.5668964)/(-2417905016.043802))>>>(2097411118.4501896)))^x)^(((tmp = -71334226, tmp)|x)>>>(tmp = -2771758874.7696714, tmp)))^((tmp = -1464849031.3240793, tmp)%(tmp = 2349739690.6430283, tmp)))/x)));
+  assertEquals(3269293934, x += (1690417554));
+  assertEquals(4025392608.031957, x -= (((tmp = 268501120.7225704, tmp)<<(tmp = 2841620654.8903794, tmp))+((tmp = 1606704462.8455591, tmp)/((-2601879963)/(tmp = 2966620168.989736, tmp)))));
+  assertEquals(7, x >>>= (x^(-1913800035)));
+  assertEquals(1.4326776816275493e-8, x /= ((((tmp = -2703417892, tmp)/x)^((-2693772270.396241)>>>((x-(tmp = 615999818.5666655, tmp))>>((((2308121439.3702726)<<((-1794701502)>>(x+(tmp = -2253406035.972883, tmp))))<<((tmp = -197103799.0624652, tmp)|(629975898)))>>>x))))>>>((tmp = 2833656803, tmp)^(x^(tmp = -1580436025, tmp)))));
+  assertEquals(0, x >>>= (tmp = 1525372830.2126007, tmp));
+  assertEquals(0, x %= ((2354010949.24469)>>>(x<<x)));
+  assertEquals(0, x ^= (((1112335059.6922574)*(tmp = -1874363935, tmp))&(((((2154894295.8360596)<<x)&(tmp = -270736315.13505507, tmp))&x)>>>(-2205692260.552064))));
+  assertEquals(0, x >>>= (x<<((1488533932)*(tmp = 1707754286, tmp))));
+  assertEquals(0, x >>= (((tmp = 1232547376.463387, tmp)%((x>>(711691823.1608362))>>>x))>>(((895039781.7478573)*(((((-334946524)&x)*(tmp = -1214529640, tmp))^(tmp = -1586820245, tmp))*(1062595445)))+x)));
+  assertEquals(0, x *= (1863299863.2631998));
+  assertEquals(0, x /= (tmp = 1858428705.1330547, tmp));
+  assertEquals(0, x &= x);
+  assertEquals(611788028, x += (x^(611788028.1510412)));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>= ((tmp = -1617320707.1784317, tmp)-((-2139400380)-(-1402777976))));
+  assertEquals(0, x >>= (415866827.34665));
+  assertEquals(-1990811897, x -= (tmp = 1990811897, tmp));
+  assertEquals(-1990811895, x += ((x>>>(tmp = -2175453282.769696, tmp))&(tmp = -1459450498.7327478, tmp)));
+  assertEquals(-2377017935.149517, x += (-386206040.1495173));
+  assertEquals(1946129845, x |= (tmp = -2890956796.936539, tmp));
+  assertEquals(0, x %= x);
+  assertEquals(0, x <<= (1616188263));
+  assertEquals(-1081213596, x ^= (tmp = 3213753700, tmp));
+  assertEquals(3213753700, x >>>= (tmp = -3211181312, tmp));
+  assertEquals(-1081213596, x &= x);
+  assertEquals(-1081213583, x ^= (((tmp = 1599988273.4926577, tmp)>>((((-1061394954.6331315)^x)+((-1835761078)*x))+(x%(tmp = -696221869, tmp))))/((tmp = -1156966790.3436491, tmp)^x)));
+  assertEquals(0, x ^= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x += (-1257400530.9263027));
+  assertEquals(NaN, x /= (753062089));
+  assertEquals(NaN, x *= ((tmp = 305418865.57012296, tmp)^(((-2797769706)+((((tmp = -33288276.988654375, tmp)%(tmp = 1242979846, tmp))|(-316574800))-((tmp = -1766083579.4203427, tmp)*(((x*(tmp = -2400342309.2349987, tmp))>>(tmp = 2632061795, tmp))^(tmp = -1001440809, tmp)))))^((((x-(tmp = -1469542637.6925495, tmp))-x)-(3184196890))%(((((((633226688)*((tmp = -2692547856, tmp)>>(((tmp = -1244311756, tmp)>>>x)+((1746013631.405202)>>>(941829464.1962085)))))%(x-x))+(995681795))-(tmp = -3047070551.3642616, tmp))/(1968259705))-((-2853237880)^(tmp = -2746628223.4540343, tmp)))))));
+  assertEquals(0, x >>= x);
+  assertEquals(0.5713172378854926, x += (((x+(((x+x)/(tmp = 2642822318, tmp))*(-2590095885.4280834)))|(tmp = -1769210836, tmp))/(tmp = -3096722308.8665104, tmp)));
+  assertEquals(-0.000002311097780334994, x /= ((2269858877.9010344)>>(-2992512915.984787)));
+  assertEquals(-0.000002311097780334994, x %= (-1139222821));
+  assertEquals(-0.000004622195560669988, x += x);
+  assertEquals(1, x /= x);
+  assertEquals(1, x >>>= (((3002169429.6061807)/(-3068577366))>>>((tmp = -1844537620, tmp)%((((tmp = 2087505119, tmp)>>>x)+x)&(2179989542)))));
+  assertEquals(-534213071, x *= (-534213071));
+  assertEquals(-534213077.3716287, x -= (((tmp = -2390432951.154034, tmp)^x)/(-290501980)));
+  assertEquals(1836305, x >>>= (x&x));
+  assertEquals(1836305, x %= ((x|((3070123855)^(49986396)))+((-1863644960.4202995)>>>((tmp = 1886126804.6019692, tmp)^x))));
+  assertEquals(28692, x >>>= ((2561362139.491764)>>(((((tmp = -1347469854.7413375, tmp)/(((x|(x+x))^((x^(tmp = -2737413775.4595394, tmp))^x))<<(((tmp = 225344844.07128417, tmp)&x)&(tmp = 145794498, tmp))))*x)<<(1424529187))/((-2924344715)/(tmp = -2125770148, tmp)))));
+  assertEquals(-2089419535.2717648, x += (-2089448227.2717648));
+  assertEquals(18957929, x ^= (tmp = 2186590872, tmp));
+  assertEquals(-708972800, x -= (727930729));
+  assertEquals(-4198593, x |= (799483455.1885371));
+  assertEquals(-1, x >>= (-2330654693.6413193));
+  assertEquals(-1, x |= (((tmp = -116877155, tmp)>>>((((tmp = -1677422314.1333556, tmp)/(tmp = -3108738499.0798397, tmp))%((x&(x/x))%((tmp = -695607185.1561592, tmp)-(tmp = 2302449181.622259, tmp))))^(((-1482743646.5604773)^((897705064)>>>x))-(tmp = -2933836669, tmp))))%(((tmp = -2991584625, tmp)|(((x>>x)+(-1101066835))-x))>>(-33192973.819939613))));
+  assertEquals(-1, x &= x);
+  assertEquals(-524288, x <<= (-1177513101.3087924));
+  assertEquals(1978770334.9189441, x += (tmp = 1979294622.9189441, tmp));
+  assertEquals(901783582, x &= ((-368584615)^(((((-478030699.2647903)<<x)<<x)+(tmp = 708725752, tmp))^((tmp = -3081556856, tmp)/(tmp = 1149958711.0676727, tmp)))));
+  assertEquals(-1480333211.8654308, x += (tmp = -2382116793.865431, tmp));
+  assertEquals(956930239.6783283, x *= ((tmp = 956930239.6783283, tmp)/x));
+  assertEquals(1277610.4668602513, x /= ((tmp = 1571029828, tmp)>>(tmp = 2417481141, tmp)));
+  assertEquals(-1077333228, x ^= (tmp = 3218755006, tmp));
+  assertEquals(-50218, x |= (tmp = -1044436526.6435988, tmp));
+  assertEquals(-1, x >>= (-154655245.18921852));
+  assertEquals(0.00006276207290978003, x *= (((tmp = 2234286992.9800305, tmp)>>(tmp = 2132564046.0696363, tmp))/((((tmp = -2565534644.3428087, tmp)>>>(tmp = 2622809851.043325, tmp))>>>((tmp = 311277386, tmp)&x))-(tmp = -2003980974, tmp))));
+  assertEquals(0, x %= x);
+  assertEquals(1282114076, x += ((((422838227)>>>((tmp = 1024613366.1899053, tmp)-((368275340)<<(((tmp = -3066121318, tmp)+(-2319101378))&x))))^(x>>(tmp = 1920136319.803412, tmp)))^(1282264803.3968434)));
+  assertEquals(-277097604, x |= (-283585688.9123297));
+  assertEquals(553816692, x &= (x&(tmp = 554082036.676608, tmp)));
+  assertEquals(658505728, x <<= x);
+  assertEquals(658505728, x &= (x%(2846071230)));
+  assertEquals(39, x >>= (334728536.5172192));
+  assertEquals(0, x -= x);
+  assertEquals(0, x += x);
+  assertEquals(0, x &= (tmp = -335285336, tmp));
+  assertEquals(0, x <<= (tmp = 1255594828.3430014, tmp));
+  assertEquals(0, x %= (-630772751.1248167));
+  assertEquals(NaN, x /= ((((x&(tmp = -1576090612, tmp))%x)>>>x)*((-1038073094.2787619)>>>x)));
+  assertEquals(NaN, x += x);
+  assertEquals(NaN, x -= (((tmp = -2663887803, tmp)&((x+(-1402421046))/x))/(-2675654483)));
+  assertEquals(NaN, x %= (x&(tmp = 672002093, tmp)));
+  assertEquals(0, x |= x);
+  assertEquals(-2698925754, x += (tmp = -2698925754, tmp));
+  assertEquals(-2057748993, x += ((tmp = -2263466497, tmp)^x));
+  assertEquals(1, x /= x);
+  assertEquals(-2769559719.4045835, x -= (2769559720.4045835));
+  assertEquals(-1.3964174646069973, x /= (tmp = 1983332198, tmp));
+  assertEquals(-2140716624.3964174, x += (tmp = -2140716623, tmp));
+  assertEquals(0, x <<= ((2589073007)-(-816764911.8571186)));
+  assertEquals(-2837097288.161354, x -= (tmp = 2837097288.161354, tmp));
+  assertEquals(-1445059927.161354, x += (tmp = 1392037361, tmp));
+  assertEquals(155197984, x &= (tmp = -2694712730.924674, tmp));
+  assertEquals(155197984, x |= (x>>>(tmp = 69118015.20305443, tmp)));
+  assertEquals(155197984, x >>>= (((x^(-1353660241))*x)<<(((((x%(tmp = -1905584634, tmp))>>>(tmp = -860171244.5963638, tmp))&(-1084415001.7039547))+(x-(((tmp = 298064661, tmp)>>x)>>((tmp = 378629912.383446, tmp)-(x%x)))))+(((3212580683)/(((((x^x)>>(tmp = -1502887218, tmp))<<x)%(-142779025))|(((tmp = 1361745708, tmp)*(((((tmp = 1797072528.0673332, tmp)+x)%(tmp = 167297609, tmp))%(-287345856.1791787))^(((((((x*(tmp = -640510459.1514752, tmp))<<(x^(tmp = 1387982082.5646644, tmp)))>>(tmp = 2473373497.467914, tmp))^((234025940)*x))+(tmp = 520098202.9546956, tmp))*(x*(tmp = -362929250.1775775, tmp)))^(-2379972900))))*(tmp = -1385817972, tmp))))+(-1788631834)))));
+  assertEquals(0, x >>= ((tmp = -18671049, tmp)/((tmp = 651261550.6716013, tmp)>>(-58105114.70740628))));
+  assertEquals(0, x *= ((((x>>(tmp = 2256492150.737681, tmp))<<(x<<(((-2738910707)&x)<<(1892428322))))*(tmp = 1547934638, tmp))>>((((319464033.7888391)|(((((tmp = 2705641070, tmp)<<((tmp = 1566904759.36666, tmp)*((-682175559.7540412)&(-691692016.3021002))))%(tmp = 1118101737, tmp))|(902774462))<<x))^((tmp = -388997180, tmp)<<(x<<((((((-88462733)+(x>>>x))%x)*(tmp = -20297481.556210756, tmp))>>>(1927423855.1719701))-((2047811185.6278129)-(tmp = 2952219346.72126, tmp))))))|(-1685518403.7513878))));
+  assertEquals(0, x /= (tmp = 1858074757.563318, tmp));
+  assertEquals(-1351623058, x ^= (-1351623058.4756806));
+  assertEquals(1, x /= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x -= (x&(997878144.9798675)));
+  assertEquals(-0, x /= (-2769731277));
+  assertEquals(0, x >>>= ((-2598508325)>>(-1355571351)));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x -= (x&(tmp = 1672810223, tmp)));
+  assertEquals(-924449908.1999881, x -= (924449908.1999881));
+  assertEquals(-0, x %= x);
+  assertEquals(-0, x /= (tmp = 2007131382.059545, tmp));
+  assertEquals(-0, x += x);
+  assertEquals(225132064, x += ((((tmp = -2422670578.1260514, tmp)|x)+x)^(1660142894.7066057)));
+  assertEquals(Infinity, x /= (x-x));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= x);
+  assertEquals(-2455424946.732606, x -= (2455424946.732606));
+  assertEquals(1208029258, x &= ((tmp = 1823728509, tmp)+x));
+  assertEquals(1.3682499724725645, x /= ((((tmp = 1267938464.3854322, tmp)%((tmp = 2510853574, tmp)+(((2979355693.866435)-(tmp = 1989726095.7746763, tmp))<<x)))%((-1382092141.1627176)+(((-901799353)+((-2936414080.8254457)>>>(2515004943.0865674)))-(2532799222.353197))))<<(tmp = -2168058960.2694826, tmp)));
+  assertEquals(0.13799826710735907, x %= ((-1090423235)/(tmp = 2659024727, tmp)));
+  assertEquals(0, x >>= (1688542889.082693));
+  assertEquals(0, x <<= x);
+  assertEquals(NaN, x %= ((((tmp = 1461037539, tmp)<<((x<<(tmp = 2101282906.5302017, tmp))>>(-2792197742)))%(((x%x)^(((tmp = 1399565526, tmp)^(tmp = 643902, tmp))-((tmp = -1449543738, tmp)|x)))/x))*(x<<(471967867))));
+  assertEquals(0, x &= ((tmp = -2121748100.6824129, tmp)>>(tmp = -2817271480.6497793, tmp)));
+  assertEquals(0, x &= (3169130964.6291866));
+  assertEquals(-0, x /= (-2303316806));
+  assertEquals(0, x <<= (tmp = 120185946.51617038, tmp));
+  assertEquals(449448375, x ^= ((((tmp = -836410266.014014, tmp)/x)&((x>>>(tmp = -2602671283, tmp))+x))+(tmp = 449448375, tmp)));
+  assertEquals(202003841790140640, x *= x);
+  assertEquals(202003840800829020, x += (((tmp = -1339865843, tmp)+(tmp = 350554234.15375435, tmp))<<((((((tmp = -1798499687.8208885, tmp)>>(((x-(x^x))|((tmp = 463627396.23932934, tmp)/(2714928060)))&(tmp = 3048222568.1103754, tmp)))&(-3127578553))<<(tmp = -2569797028.8299003, tmp))&x)<<((tmp = 2104393646, tmp)/((tmp = 2314471015.742891, tmp)<<((2704090554.1746845)>>(((tmp = 1935999696, tmp)*(((1348554815)>>>x)>>>(146665093.82445252)))%x)))))));
+  assertEquals(202003841764125400, x -= (tmp = -963296372.2846234, tmp));
+  assertEquals(-413485056, x <<= (tmp = -2474480506.6054573, tmp));
+  assertEquals(-3171894580.186845, x += ((tmp = -1261111102, tmp)+(tmp = -1497298422.1868448, tmp)));
+  assertEquals(17136, x >>= (tmp = 3055058160, tmp));
+  assertEquals(17136, x %= (tmp = 1706784063.3577294, tmp));
+  assertEquals(17136, x >>= ((tmp = 2161213808, tmp)*x));
+  assertEquals(-17136, x /= ((((tmp = -1492618154, tmp)>>x)|(1381949066))>>(tmp = 2014457960, tmp)));
+  assertEquals(-34272, x += x);
+  assertEquals(-1498690902, x += (-1498656630));
+  assertEquals(-1168674482, x ^= (486325220));
+  assertEquals(-1168674482, x <<= ((x^x)*x));
+  assertEquals(794521557347068000, x *= (-679848469));
+  assertEquals(1.3330392590424505e+26, x *= (tmp = 167778866, tmp));
+  assertEquals(0, x <<= (tmp = -2501540637.3664584, tmp));
+  assertEquals(0, x >>>= (x-(x*(-890638026.1825848))));
+  assertEquals(0, x %= ((-285010538.2813468)&(1314684460.7634423)));
+  assertEquals(0, x -= x);
+  assertEquals(0, x *= x);
+  assertEquals(NaN, x %= (x*(x<<x)));
+  assertEquals(NaN, x %= (x<<(((tmp = -1763171810.601149, tmp)&(-138151449.18303752))^(x|x))));
+  assertEquals(0, x |= (x>>x));
+  assertEquals(0, x &= (tmp = 1107152048, tmp));
+  assertEquals(0, x >>= (1489117056.8200984));
+  assertEquals(518749976, x ^= (518749976.20107937));
+  assertEquals(356718654, x += (tmp = -162031322, tmp));
+  assertEquals(356718654, x %= (((x>>>((tmp = -373747439.09634733, tmp)*(tmp = 563665566, tmp)))*(tmp = 2853322586.588251, tmp))*((1303537213)%(-2995314284))));
+  assertEquals(5573728, x >>= (tmp = -2095997978, tmp));
+  assertEquals(5573728, x <<= x);
+  assertEquals(5573728, x >>= (((((tmp = 1745399178.334154, tmp)<<(tmp = 2647999783.8219824, tmp))^(tmp = 1571286759, tmp))%x)/(2166250345.181711)));
+  assertEquals(10886, x >>>= ((682837289)+(x*x)));
+  assertEquals(170, x >>>= x);
+  assertEquals(169.95167497151652, x -= (((tmp = 527356024.19706845, tmp)+((tmp = 1263164619.2954736, tmp)|(tmp = 2942471886, tmp)))/((3017909419.131321)+(tmp = 2137746252.8006272, tmp))));
+  assertEquals(-1915170061, x ^= (tmp = -1915170214, tmp));
+  assertEquals(206045792, x &= (((tmp = 887031922, tmp)>>>x)-((-1861922770)|(9633541))));
+  assertEquals(-1940321674, x |= (tmp = -2012149162.1817405, tmp));
+  assertEquals(-1940321674, x &= x);
+  assertEquals(1128412272.160699, x += (tmp = 3068733946.160699, tmp));
+  assertEquals(0.47486363523180236, x /= (tmp = 2376286976.807289, tmp));
+  assertEquals(-1.4931079540252477e-10, x /= (tmp = -3180370407.5892467, tmp));
+  assertEquals(0, x |= (((1220765170.5933602)*(884017786))*((x%(tmp = -2538196897.226384, tmp))<<(x^x))));
+  assertEquals(-525529894, x += (tmp = -525529894, tmp));
+  assertEquals(1621426184, x &= ((3046517714)*(((((-162481040.8033898)+(x/((x&(1489724492))/((x|(tmp = 943542303, tmp))>>>((-1840491388.1365871)<<(2338177232))))))+(((-2268887573.2430763)>>>(((tmp = 2919141667, tmp)+((tmp = 1326295559.692003, tmp)<<(-2256653815)))>>>(((((tmp = 1602731976.7514615, tmp)*(856036244.3730336))^x)>>>((((2846316421.252943)&(915324162))%(tmp = 1144577211.0221815, tmp))%x))*(x*x))))%(tmp = -2641416560, tmp)))*(x+(x>>>x)))>>x)));
+  assertEquals(1621426184, x %= (tmp = 1898223948, tmp));
+  assertEquals(-3.383396676504762, x /= ((tmp = 2211088034.5234556, tmp)^x));
+  assertEquals(7120923705.122882, x *= (((((tmp = 2632382342.914504, tmp)/(-615440284.1762738))&(2162453853.6658797))<<(-849038082.5298986))|(tmp = -2104667110.5603983, tmp)));
+  assertEquals(-1469010887, x &= x);
+  assertEquals(850767635866964700, x *= (tmp = -579143179.5338116, tmp));
+  assertEquals(0, x %= x);
+  assertEquals(-571457, x |= ((2849326490.8464212)|(tmp = 1450592063, tmp)));
+  assertEquals(-571457, x &= x);
+  assertEquals(-0.00018638416434019244, x /= (3066016912.021368));
+  assertEquals(0, x <<= (2058262829));
+  assertEquals(NaN, x %= ((x|((x%x)>>>x))%((tmp = -2970314895.6974382, tmp)+x)));
+  assertEquals(NaN, x *= (-698693934.9483855));
+  assertEquals(NaN, x += (-100150720.64391875));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x -= (-530301478));
+  assertEquals(NaN, x /= (1507673244));
+  assertEquals(0, x <<= (x%(tmp = 2977838420.857235, tmp)));
+  assertEquals(0, x <<= (tmp = 3200877763, tmp));
+  assertEquals(0, x <<= (tmp = -2592127060, tmp));
+  assertEquals(NaN, x -= (((((((1930632619)*(3018666359))<<((tmp = 2676511886, tmp)&(-2786714482.25468)))%x)-(-633193192))<<((tmp = 403293598, tmp)*(-2765170226)))%x));
+  assertEquals(530062092, x |= (tmp = 530062092, tmp));
+  assertEquals(129409, x >>>= x);
+  assertEquals(-152430382316341.78, x *= (-1177896300.229055));
+  assertEquals(-304860764632683.56, x += x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x %= (tmp = -63071565.367660046, tmp));
+  assertEquals(0, x &= ((((tmp = -1007464338, tmp)<<(x<<((x^(tmp = -726826835, tmp))|x)))>>>x)*(((tmp = 469293335.9161849, tmp)<<(((((tmp = 1035077379, tmp)*(tmp = -555174353.7567515, tmp))&(3109222796.8286266))-(((((x-(tmp = 1128900353.6650414, tmp))|(tmp = 3119921303, tmp))&((-1353827690)&(x%((-924615958)&x))))>>>x)+(tmp = 1167787910, tmp)))+x))%((605363594)>>(1784370958.269381)))));
+  assertEquals(0, x %= (2953812835.9781704));
+  assertEquals(0, x -= x);
+  assertEquals(0, x <<= x);
+  assertEquals(-901209266, x += (-901209266));
+  assertEquals(-901209266, x &= x);
+  assertEquals(404, x >>>= (-3195686249));
+  assertEquals(824237108, x ^= (824237472));
+  assertEquals(497790936.1853996, x /= ((tmp = 1253776028, tmp)/(757207285)));
+  assertEquals(497790936, x >>>= ((tmp = -2212598336, tmp)<<(x^(1335355792.9363852))));
+  assertEquals(0, x %= x);
+  assertEquals(-2659887352.6415873, x += (tmp = -2659887352.6415873, tmp));
+  assertEquals(1635079945, x |= ((x&(1234659380))>>((((tmp = 2694276886.979136, tmp)|x)^((tmp = 132795582, tmp)<<((-1089828902)>>>x)))<<((((tmp = -2098728613.0310376, tmp)<<(x/(tmp = -2253865599, tmp)))*((x+(x>>>((48633053.82579231)-(385301592))))*(tmp = -1847454853.333535, tmp)))/((-540428068.8583717)+x)))));
+  assertEquals(1, x /= x);
+  assertEquals(33554432, x <<= ((((2803140769)<<x)|(tmp = -1965793804, tmp))>>>(tmp = -2273336965.575082, tmp)));
+  assertEquals(67108864, x += x);
+  assertEquals(9007199254740992, x *= (x+((x>>x)%(2674760854))));
+  assertEquals(55369784, x %= (x|(-170725544.20038843)));
+  assertEquals(55369784, x %= (-1186186787));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= x);
+  assertEquals(NaN, x /= ((-2968110098)-((x/(x|(((((x|((x&((-130329882)>>>(((-135670650)|(x<<(tmp = 1280371822, tmp)))^x)))-(-1183024707.2230911)))&(-1072829280))>>>(-340696948.41492534))>>>(tmp = 436308526.4938295, tmp))<<(((tmp = 3113787500, tmp)*((2038309320)>>>(-1818917055)))&((2808000707)/(774731251))))))%x)));
+  assertEquals(0, x |= (x*(tmp = -843074864, tmp)));
+  assertEquals(0, x &= (tmp = -752261173.8090212, tmp));
+  assertEquals(0, x >>>= (tmp = 1532349931.7517128, tmp));
+  assertEquals(0, x <<= ((tmp = -8628768, tmp)-((((tmp = 225928543, tmp)%(x>>>(x+x)))^((tmp = -2051536806.5249376, tmp)-x))-((tmp = -2274310376.9964137, tmp)%(tmp = 2251342739, tmp)))));
+  assertEquals(0, x >>= (1011388449));
+  assertEquals(0, x += x);
+  assertEquals(0, x >>>= x);
+  assertEquals(-0, x *= ((-1781234179.8663826)>>(((1514201119.9761915)>>(((((1174857164.90042)^(tmp = 1124973934, tmp))^x)+((-1059246013.8834443)<<(2997611138.4876065)))%(((798188010)*(-1428293122))>>>(tmp = -3087267036.8035297, tmp))))<<x)));
+  assertEquals(1752554372, x ^= (tmp = -2542412924, tmp));
+  assertEquals(1752554372, x %= (tmp = 3037553410.2298307, tmp));
+  assertEquals(1859383977, x -= (x^(2446603103)));
+  assertEquals(1183048193, x &= ((tmp = -962336957, tmp)/(x/x)));
+  assertEquals(67738157, x %= ((((tmp = -1813911745.5223546, tmp)+x)<<(x-(((-1980179168)^x)|x)))|(1913769561.1308007)));
+  assertEquals(67698724, x &= ((1801574998.3142045)*((tmp = -2057492249, tmp)/((1713854494.72282)>>x))));
+  assertEquals(0, x -= x);
+  assertEquals(-25232836, x -= ((tmp = 25232836, tmp)|x));
+  assertEquals(-49, x >>= (x+((tmp = 2201204630.2897243, tmp)|(-1929326509))));
+  assertEquals(-1605632, x <<= x);
+  assertEquals(-165965313, x += (tmp = -164359681, tmp));
+  assertEquals(9.220413724941365e-10, x /= (((((tmp = 2579760013.0808706, tmp)*(tmp = -2535370639.9805303, tmp))>>((tmp = 2138199747.0301933, tmp)-(tmp = -2698019325.0972376, tmp)))*(tmp = -425284716, tmp))/((-1951538149.6611228)/(x^(2632919130)))));
+  assertEquals(0, x &= x);
+  assertEquals(0, x &= ((-645189137)/(tmp = 800952748, tmp)));
+  assertEquals(0, x &= (tmp = -1773606925, tmp));
+  assertEquals(0, x += x);
+  assertEquals(0, x >>>= (tmp = 211399355.0741787, tmp));
+  assertEquals(0, x <<= ((-1317040231.5737965)/((((((tmp = 838897586.0147077, tmp)|((-1902447594)|(tmp = 404942728.83034873, tmp)))^(2462760692.2907705))%((((((x%(tmp = -2888980287, tmp))<<(-368505224.49609876))-((x>>>(532513369))&(((((((tmp = -1298067543, tmp)^(tmp = -3130435881.100909, tmp))>>x)/(tmp = -3041161992, tmp))>>(x|(-431685991.95776653)))^((tmp = 1031777777, tmp)^((-105610810)>>>((-631433779)>>(tmp = -2577780871.167671, tmp)))))%(tmp = -3170517650.088039, tmp))))-(((tmp = 2175146237.968785, tmp)-((384631158.50508535)>>((893912279.4646157)|(tmp = -1478803924.5338967, tmp))))%(x/(-1089156420))))<<(tmp = -2024709456, tmp))>>x))*(tmp = -1423824994.6993582, tmp))%(tmp = 1739143409, tmp))));
+  assertEquals(-1799353648, x |= ((-1799353648.3589036)>>>((((x&(-923571640.1012449))%x)+((tmp = 971885508, tmp)>>((tmp = -2207464428.2123804, tmp)+(-3108177894.0459776))))-(-2048954486.7014258))));
+  assertEquals(-3666808032.2958965, x -= (tmp = 1867454384.2958965, tmp));
+  assertEquals(-260069478915415100, x *= (tmp = 70925305.23136711, tmp));
+  assertEquals(1142096768, x &= (tmp = 1866401706.9144325, tmp));
+  assertEquals(1, x >>>= (tmp = 2701377150.5717473, tmp));
+  assertEquals(1865946805, x |= (tmp = -2429020492, tmp));
+  assertEquals(1424222287, x ^= ((((tmp = 433781338, tmp)>>(x>>>((-2914418422.4829016)/(tmp = 1600920669, tmp))))|(tmp = 588320482.9566053, tmp))>>>((((((x+(tmp = -2556387365.5071325, tmp))+(tmp = -2381889946.1830974, tmp))/(3154278191))>>>(-1069701268.8022757))>>(((tmp = 182049089.28866422, tmp)>>x)>>>(tmp = -447146173, tmp)))/(x-(2103883357.0929923)))));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x -= (x%(3036884806)));
+  assertEquals(0, x >>>= (tmp = -652793480.3870945, tmp));
+  assertEquals(0, x += x);
+  assertEquals(304031003, x ^= ((tmp = -900156495, tmp)^(-666397014.0711515)));
+  assertEquals(1, x /= x);
+  assertEquals(-1974501681, x |= (x^(-1974501681.4628205)));
+  assertEquals(-1.3089278317616264, x /= (((-1723703186.962839)>>>x)|((2061022161.6239533)<<x)));
+  assertEquals(-1, x |= (tmp = -1987006457, tmp));
+  assertEquals(-0.14285714285714285, x /= ((((((x|(-1767793799.7595732))-(-1391656680))<<x)|(x>>(tmp = -2301588485.2811003, tmp)))>>>(((tmp = 1812723993, tmp)>>>((x^(((tmp = -3154100157.951021, tmp)%((tmp = -1254955564.4553523, tmp)-(((x>>>(((-1762886343)*x)*x))*(x^(x*(-750918563.4387553))))*x)))|((x>>x)>>(x<<((((-1766797454.5634143)^(tmp = -2251474340, tmp))-(-787637516.5276759))<<((1390653368)^(-1937605249.245374)))))))|(((tmp = 1156611894, tmp)<<x)<<(x>>((((x+(tmp = 2170166060.881797, tmp))&(x>>>(tmp = -1749295923.1498983, tmp)))>>(((-1014973878)|x)&(1302866805.684057)))*(tmp = 560439074.4002491, tmp))))))|(-2758270803.4510045)))&x));
+  assertEquals(0, x |= x);
+  assertEquals(0, x += ((x>>((x+(tmp = -2776680860.870219, tmp))-(((688502468)<<(((tmp = 475364260.57888806, tmp)<<x)+(329071671)))/(-1097134948))))*(tmp = -1281834214.3416953, tmp)));
+  assertEquals(0, x *= ((((1159762330)<<(tmp = -1892429200, tmp))%x)<<x));
+  assertEquals(0, x >>>= (-770595225));
+  assertEquals(NaN, x += (((x>>x)/(tmp = 281621135, tmp))/x));
+  assertEquals(0, x >>= (1363890241));
+  assertEquals(1639023942.9945002, x += (1639023942.9945002));
+  assertEquals(-2568590958567747000, x *= (-1567146697));
+  assertEquals(1793554700, x ^= (tmp = 3215813388.405799, tmp));
+  assertEquals(437879, x >>= x);
+  assertEquals(1339485943, x |= (1339220210));
+  assertEquals(1, x /= x);
+  assertEquals(512, x <<= (2509226729.1477118));
+  assertEquals(512, x <<= ((x>>(1326274040.7181284))<<(tmp = -760670199, tmp)));
+  assertEquals(1, x /= (x<<(x^x)));
+  assertEquals(0, x >>>= (((((1382512625.8298302)&(x>>>x))*(tmp = -815316595, tmp))>>>x)-(-95538051)));
+  assertEquals(-544344229.3548596, x -= (tmp = 544344229.3548596, tmp));
+  assertEquals(-1088688458.7097192, x += x);
+  assertEquals(-1022850479579041900, x *= (939525418.3104812));
+  assertEquals(2069622661, x |= (-2632744187.7721186));
+  assertEquals(-1353480538017756400, x -= ((tmp = 1308085980, tmp)*((x>>>(-629663391.5165792))&(tmp = 3182319856.674114, tmp))));
+  assertEquals(1.3702811563654176e+27, x *= ((((3061414617.6321163)/(tmp = 2628865442, tmp))+(-1549548261))+(x&((tmp = 809684398, tmp)|(x^(tmp = 801765002, tmp))))));
+  assertEquals(0, x >>>= ((-2988504159)&((tmp = -260444190.02252054, tmp)^(2178729442.260293))));
+  assertEquals(-1518607002, x -= (tmp = 1518607002, tmp));
+  assertEquals(724566016, x <<= (tmp = 1042915731.7055794, tmp));
+  assertEquals(707584, x >>>= (-208959862.93305588));
+  assertEquals(0, x >>>= (((tmp = 877181764, tmp)>>(-970697753.3318911))%x));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x += x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x /= (x^((x/(-2903618412.4936123))+(tmp = 1169288899, tmp))));
+  assertEquals(0, x >>>= x);
+  assertEquals(-1302645245, x ^= ((1855892732.3544865)+(tmp = 1136429319.5633948, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x &= (-1384534597.409375));
+  assertEquals(-0, x /= (tmp = -680466419.8289509, tmp));
+  assertEquals(-0, x *= (318728599.95017374));
+  assertEquals(NaN, x %= (x>>(2019695267)));
+  assertEquals(0, x >>= (tmp = 1280789995, tmp));
+  assertEquals(0, x *= (tmp = 2336951458, tmp));
+  assertEquals(0, x >>= ((2981466013.758637)%(731947033)));
+  assertEquals(0, x -= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x /= ((((3068070149.1452317)>>x)%(((1448965452)*((tmp = -2961594129, tmp)+(1829082104.0681171)))>>(-2331499703)))>>>(tmp = -3206314941.2626476, tmp)));
+  assertEquals(0, x >>= (x%(1869217101.9823673)));
+  assertEquals(0, x <<= (x+x));
+  assertEquals(0, x >>>= ((1202130282)>>>x));
+  assertEquals(0, x += x);
+  assertEquals(2603245248.6273212, x += (tmp = 2603245248.6273212, tmp));
+  assertEquals(-1691864471, x ^= (x>>>(2504513614.117516)));
+  assertEquals(136835305, x -= ((-1618979896)&(-746953306)));
+  assertEquals(-2568499564.1261334, x += (tmp = -2705334869.1261334, tmp));
+  assertEquals(1038075700, x ^= (1530399136));
+  assertEquals(2076151400, x += x);
+  assertEquals(-524018410.1751909, x -= ((2398973627.175191)-(-201196183)));
+  assertEquals(0.327110599608614, x /= ((3181340288.602796)&x));
+  assertEquals(0.327110599608614, x %= (tmp = -2284484060, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(403217947.5779772, x += (tmp = 403217947.5779772, tmp));
+  assertEquals(403217947, x |= x);
+  assertEquals(-Infinity, x *= ((58693583.845808744)+(((tmp = -1527787016, tmp)*x)/((((2532689893.3191843)/(tmp = 2781746479.850424, tmp))|(((((460850355.9211761)/((((tmp = 626683450, tmp)<<((tmp = 1349974710, tmp)-((tmp = -1349602292, tmp)/(-2199808871.1229663))))>>((x/(-3092436372.3078623))&(tmp = -1190631012.0323825, tmp)))^((-2907082828.4552956)-(tmp = 1858683340.1157017, tmp))))^(-1513755598.5398848))%x)/x))&(1147739260.136806)))));
+  assertEquals(0, x &= (tmp = -3047356844.109563, tmp));
+  assertEquals(637934616, x -= (tmp = -637934616, tmp));
+  assertEquals(-1553350083, x ^= (-2056266203.094929));
+  assertEquals(-0.13467351026547192, x %= ((tmp = 824736251, tmp)/(2544186314)));
+  assertEquals(1, x /= x);
+  assertEquals(1, x |= x);
+  assertEquals(0, x >>>= (2166609431.9515543));
+  assertEquals(0, x <<= (x|(tmp = 121899222.14603412, tmp)));
+  assertEquals(0, x *= (1300447849.6595674));
+  assertEquals(0, x %= (tmp = -2360500865.3944597, tmp));
+  assertEquals(0, x %= (tmp = -1693401247, tmp));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x /= (471265307));
+  assertEquals(257349748, x ^= (257349748.689448));
+  assertEquals(257349748, x &= x);
+  assertEquals(981, x >>>= (tmp = -1959001422, tmp));
+  assertEquals(0, x >>= ((-79932778.18114972)/x));
+  assertEquals(0, x <<= (((-2599621472)^(tmp = 662071103, tmp))%(tmp = -2675822640.7641535, tmp)));
+  assertEquals(0, x &= (tmp = 2582354953.878623, tmp));
+  assertEquals(0, x /= ((-953254484)/((-2571632163.376176)-(tmp = -342034471, tmp))));
+  assertEquals(0, x <<= ((x-(tmp = -3013057672, tmp))&(tmp = -3204761036, tmp)));
+  assertEquals(0, x ^= ((x&((515934453)>>>x))/x));
+  assertEquals(1, x |= ((-1914707646.2075093)>>>(tmp = -1918045025, tmp)));
+  assertEquals(-2002844120.8792589, x += (tmp = -2002844121.8792589, tmp));
+  assertEquals(573030794, x >>>= (tmp = 1707788162, tmp));
+  assertEquals(1.917619109627369, x /= ((1909436830.484202)%((123114323)<<(tmp = -1288988388.6444468, tmp))));
+  assertEquals(-1400358045, x |= (-1400358046));
+  assertEquals(-2043022529.4273133, x += (tmp = -642664484.4273133, tmp));
+  assertEquals(-81408068.86728716, x %= (tmp = -980807230.2800131, tmp));
+  assertEquals(0.1436896445024992, x /= (((tmp = 3201789924.913518, tmp)%(tmp = -962242528.6008646, tmp))^((tmp = -338830119.55884504, tmp)*(tmp = -916120166, tmp))));
+  assertEquals(0.1436896445024992, x %= (tmp = 2598469263, tmp));
+  assertEquals(0, x *= (x-x));
+  assertEquals(-1409286144, x += (((-111514798.64745283)|(2372059654))<<(tmp = 175644313, tmp)));
+  assertEquals(-2393905467.0073113, x += (-984619323.0073113));
+  assertEquals(-835111172.0073113, x %= (x^(-765900532.5585573)));
+  assertEquals(-835111172.0073113, x %= (tmp = -946478116, tmp));
+  assertEquals(-100, x >>= ((-1020515908)>>(((x&((x^(169474253.53811646))>>(-221739002)))+x)*((201939882.92880356)/(tmp = -50402570, tmp)))));
+  assertEquals(2131506964, x &= (tmp = -2163460268, tmp));
+  assertEquals(1074275840, x &= ((-1561930379.8719592)*(tmp = -2871750052.876917, tmp)));
+  assertEquals(-954232605.5377102, x -= (tmp = 2028508445.5377102, tmp));
+  assertEquals(-29, x >>= (-279577351.87217045));
+  assertEquals(-232, x <<= x);
+  assertEquals(-70, x |= (215185578));
+  assertEquals(-1, x >>= (x>>(-1691303095)));
+  assertEquals(1, x /= x);
+  assertEquals(3149465364.2236686, x *= (3149465364.2236686));
+  assertEquals(3304787832.3790073, x += (tmp = 155322468.15533853, tmp));
+  assertEquals(100068712.23500109, x %= (tmp = 3204719120.1440063, tmp));
+  assertEquals(91628864, x &= (tmp = 629090241, tmp));
+  assertEquals(-113202292046379710, x *= (-1235443583));
+  assertEquals(122, x >>>= (tmp = 3196555256, tmp));
+  assertEquals(122, x >>>= (((2226535734)-x)^(2248399036.393125)));
+  assertEquals(6.904199169070746e-8, x /= (tmp = 1767040564.9149356, tmp));
+  assertEquals(-212687449.99999994, x += ((((2244322375)*(((2515994102)^x)>>x))<<(x-(-832407685.3251972)))^(2266670502)));
+  assertEquals(366515938514778750, x *= (tmp = -1723260768.3940866, tmp));
+  assertEquals(366515938514778750, x += ((-1643386193.9159095)/(tmp = 425161225.95316494, tmp)));
+  assertEquals(654872716.4123061, x /= ((-1377382984)-(tmp = -1937058061.811642, tmp)));
+  assertEquals(654872716, x &= x);
+  assertEquals(-86260926.17813063, x -= (tmp = 741133642.1781306, tmp));
+  assertEquals(1052176592, x >>>= x);
+  assertEquals(2020882856, x ^= (-3107796616));
+  assertEquals(0, x <<= ((606939871.9812952)|(tmp = -3127138319.1557302, tmp)));
+  assertEquals(NaN, x -= ((x%((1120711400.2242608)%x))*(tmp = -930171286.7999947, tmp)));
+  assertEquals(NaN, x %= (3215044180));
+  assertEquals(NaN, x %= (tmp = 2882893804.20102, tmp));
+  assertEquals(NaN, x %= ((217170359.5778643)^x));
+  assertEquals(0, x &= ((-1095125960.9903677)>>(x^(-2227981276))));
+  assertEquals(-748549860, x += (-748549860));
+  assertEquals(1816208256, x <<= (-610872411.3826082));
+  assertEquals(201400576, x &= (((tmp = 1910394603.4836266, tmp)<<x)^x));
+  assertEquals(0, x %= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x <<= (((((2670901339.6696005)%(2180020861))*((2134469504)/(2237096063.0680027)))*((tmp = 1203829756, tmp)>>((765467065)+(x|(2673651811.9494815)))))<<((-1463378514)|(((x/(tmp = -1075050081, tmp))-((-879974865)+x))>>>(tmp = 2172883926, tmp)))));
+  assertEquals(433013198, x ^= (433013198.2833413));
+  assertEquals(0, x >>= ((((-2404431196)%(x%(tmp = 1443152875.8809233, tmp)))&(x|((1414364997.0517852)/((tmp = -435854369, tmp)+(tmp = 2737625141, tmp)))))|(((tmp = 2241746562.2197237, tmp)^(tmp = -1606928010.1992552, tmp))|((tmp = -3083227418.686173, tmp)>>(tmp = -2717460410, tmp)))));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x *= ((tmp = 2302521322, tmp)>>>(((((((tmp = 344089066.9725498, tmp)%(tmp = 1765830559, tmp))-x)|x)^(((-2450263325)/(tmp = 371928405.17475057, tmp))>>>(1330100413.7731652)))^(((173024329)%(tmp = -2927276187, tmp))+(x>>>(-1042229940.308507))))|(((((tmp = 379074096, tmp)+((142762508)-((-2773070834.526266)-(x&((tmp = 57957493, tmp)<<(2189553500))))))+((36991093)+(tmp = 339487168.58069587, tmp)))*(-1257565451))&(tmp = 645233114, tmp)))));
+  assertEquals(-2644503151.1185284, x += (-2644503151.1185284));
+  assertEquals(-5289006302.237057, x += x);
+  assertEquals(-4008773824.2370567, x -= (tmp = -1280232478, tmp));
+  assertEquals(1975449413, x |= ((tmp = 1957832005.4285066, tmp)>>((1681236712.9715524)&(-675823978))));
+  assertEquals(-146472960, x <<= (-648510672.5644083));
+  assertEquals(-3, x |= (((((x>>>(tmp = 2271744104, tmp))+(tmp = -210058133.30147195, tmp))+(tmp = -2827493425, tmp))/(tmp = 765962538, tmp))%(tmp = 1048631551, tmp)));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>= (1070524782.5154183));
+  assertEquals(0, x <<= (462502504));
+  assertEquals(0, x %= (540589670.0730014));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x /= ((-1268640098)%x));
+  assertEquals(NaN, x %= (1741157613.744652));
+  assertEquals(NaN, x += x);
+  assertEquals(NaN, x %= ((x|(tmp = 1992323492.7000637, tmp))*x));
+  assertEquals(NaN, x /= ((tmp = -2271503368.0341196, tmp)>>((tmp = 1224449194, tmp)>>>(tmp = 2976803997, tmp))));
+  assertEquals(NaN, x += (tmp = -1078313742.1633894, tmp));
+  assertEquals(NaN, x += (-787923311));
+  assertEquals(NaN, x %= x);
+  assertEquals(-1299878219, x ^= (2995089077));
+  assertEquals(536887953, x &= ((625660571.2651105)&(x^(((tmp = 950150725.2319129, tmp)+(-2122154205.466675))/(tmp = 1754964696.974752, tmp)))));
+  assertEquals(4096, x >>>= x);
+  assertEquals(1, x /= x);
+  assertEquals(-82508517, x ^= (((-930231800)%(tmp = -423861640.4356506, tmp))+x));
+  assertEquals(-82508517, x &= (x&x));
+  assertEquals(-479519, x %= ((tmp = 1861364600.595756, tmp)|x));
+  assertEquals(479518, x ^= (((x>>(-1539139751.6860313))>>(tmp = -456165734, tmp))|(-2786433531)));
+  assertEquals(959036, x += x);
+  assertEquals(29, x >>>= ((tmp = -1049329009.7632706, tmp)^(((((((1117739997)/(((-841179741.4939663)*(-1211599672))>>>((-413696355)%(tmp = -1753423217.2170188, tmp))))<<(tmp = 1599076219.09274, tmp))>>>(-1382960317))^(((x^(tmp = 515115394, tmp))>>>(tmp = -388476217, tmp))>>>(x/x)))^x)<<(136327532.213817))));
+  assertEquals(24, x &= (2388755418));
+  assertEquals(0, x >>>= (tmp = -405535917, tmp));
+  assertEquals(0, x &= (tmp = -1427139674, tmp));
+  assertEquals(NaN, x /= (x^((1530470340)%x)));
+  assertEquals(0, x |= ((x>>(-1429690909.8472774))*((((tmp = 2033516515, tmp)/(1314782862))>>>x)>>(tmp = 1737186497.6441216, tmp))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x %= (3115422786));
+  assertEquals(-0, x *= (x+(tmp = -2558930842.267017, tmp)));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x &= (2695531252.254449));
+  assertEquals(-613178182, x ^= (-613178182));
+  assertEquals(54, x >>>= (x%(((tmp = 2277868389, tmp)^((((tmp = -1143932265.3616111, tmp)^((x&((x-((-2100384445.7850044)|(tmp = 908075129.3456883, tmp)))*x))+(((tmp = 1031013284.0275401, tmp)*((((tmp = -233393205, tmp)>>>(tmp = -111859419, tmp))*(-1199307178))|(tmp = -1998399599, tmp)))>>>((((-731759641.9036775)>>>(tmp = 2147849691, tmp))>>>(tmp = -2121899736, tmp))>>>(x>>>x)))))>>((1900348757.360562)^(tmp = 2726336203.6149445, tmp)))>>>((x*((tmp = -2697628471.0234947, tmp)%((x^(tmp = -2751379613.9474974, tmp))*x)))+(x>>(tmp = 42868998.384643435, tmp)))))+(598988941))));
+  assertEquals(34, x &= ((tmp = 2736218794.4991407, tmp)%(2169273288.1339874)));
+  assertEquals(2.086197133417468, x /= ((tmp = 2176358852.297597, tmp)%x));
+  assertEquals(2, x <<= (((tmp = -1767330075, tmp)|(-3107230779.8512735))&x));
+  assertEquals(4194304, x <<= (tmp = 1061841749.105744, tmp));
+  assertEquals(48609515, x ^= (44415211.320786595));
+  assertEquals(48609515, x %= (1308576139));
+  assertEquals(23735, x >>>= ((-324667786)-x));
+  assertEquals(23735, x <<= ((-1270911229)<<(((((tmp = -882992909.2692418, tmp)+(tmp = 394833767.947718, tmp))-x)<<(702856751))/x)));
+  assertEquals(-31080872939240, x *= (tmp = -1309495384, tmp));
+  assertEquals(-14625.31935626114, x /= ((668084131)+(1457057357)));
+  assertEquals(-14625.31935626114, x %= (266351304.6585492));
+  assertEquals(-12577, x |= (-945583977.619837));
+  assertEquals(-4097, x |= ((tmp = -2621808583.2322493, tmp)-(tmp = -2219802863.9072213, tmp)));
+  assertEquals(-1004843865, x &= ((-1004839768)+((tmp = 2094772311, tmp)/(-1340720370.275643))));
+  assertEquals(-31401371, x >>= ((2035921047)>>>((tmp = -1756995278, tmp)>>>(-537713689))));
+  assertEquals(1791746374.016472, x -= ((tmp = -1823147745, tmp)-(x/(tmp = -1906333520, tmp))));
+  assertEquals(3.7289343120517406, x /= (tmp = 480498240, tmp));
+  assertEquals(7.457868624103481, x += x);
+  assertEquals(234881024, x <<= (-781128807.2532628));
+  assertEquals(67108864, x &= (tmp = -2060391332, tmp));
+  assertEquals(-605958718, x -= (673067582));
+  assertEquals(-605958718, x <<= ((x%x)&((tmp = 1350579401.0801518, tmp)|x)));
+  assertEquals(-109268090.4715271, x %= (tmp = -496690627.5284729, tmp));
+  assertEquals(-109268090, x <<= (((-2004197436.8023896)%((x|((tmp = 271117765.61283946, tmp)-((1595775845.0754795)*(555248692.2512416))))/x))<<x));
+  assertEquals(-652725370, x &= (-543590449));
+  assertEquals(0.321858133298825, x /= (tmp = -2027990914.2267523, tmp));
+  assertEquals(1959498446, x ^= (1959498446));
+  assertEquals(1959498446, x &= (x%(tmp = 3155552362.973523, tmp)));
+  assertEquals(14949, x >>>= ((tmp = 586618136, tmp)>>>(tmp = 699144121.9458897, tmp)));
+  assertEquals(-28611391568319.285, x *= (tmp = -1913933478.3811147, tmp));
+  assertEquals(1680557633, x &= (((tmp = 2606436319.199714, tmp)<<(1575299025.6917372))|((-1092689109)/(735420388))));
+  assertEquals(1680361024, x &= ((tmp = 1860756552.2186172, tmp)|(-360434860.1699109)));
+  assertEquals(820488, x >>>= (1788658731));
+  assertEquals(820488, x >>= (-1555444352));
+  assertEquals(2104296413, x ^= (2103543509));
+  assertEquals(16843328, x &= ((x<<((-2920883149)/(1299091676)))-(((((tmp = 3199460211, tmp)+(-237287821.61504316))&(tmp = -1524515028.3596857, tmp))-(tmp = -700644414.6785603, tmp))+(-180715428.86124516))));
+  assertEquals(1326969834, x |= (tmp = -2968063574.793867, tmp));
+  assertEquals(0, x %= (x>>>(tmp = 1350490461.0012388, tmp)));
+  assertEquals(0, x &= ((-2620439260.902854)+x));
+  assertEquals(-1775533561, x |= ((-1775533561)|(((x>>>((861896808.2264911)>>>(970216466.6532537)))%x)%(tmp = 2007357223.8893046, tmp))));
+  assertEquals(-1775533561, x &= x);
+  assertEquals(-23058877.415584415, x /= ((tmp = -3002439857, tmp)>>((((x-(tmp = 1583620685.137125, tmp))|x)%(-2568798248.6863875))^x)));
+  assertEquals(-577.4155844151974, x %= (((-1440361053.047877)+((tmp = 821546785.0910633, tmp)-(((tmp = 1023830881.1444875, tmp)/(-754884477))+(tmp = 651938896.6258571, tmp))))>>(tmp = 346467413.8959185, tmp)));
+  assertEquals(-1, x >>= (tmp = 2993867511, tmp));
+  assertEquals(-1, x |= (tmp = 823150253.4916545, tmp));
+  assertEquals(-0, x %= x);
+  assertEquals(-0, x /= ((tmp = 997969036, tmp)&((((tmp = 928480121, tmp)>>(((-2610875857.086055)>>>(tmp = -2251704283, tmp))|x))+(10781750))>>x)));
+  assertEquals(0, x >>>= ((tmp = -1872319523, tmp)>>>(-278173884)));
+  assertEquals(0, x |= (x/(x*x)));
+  assertEquals(0, x %= ((77912826.10575807)^(tmp = 2770214585.3019757, tmp)));
+  assertEquals(0, x &= (tmp = 722275824, tmp));
+  assertEquals(-1417226266, x |= (tmp = 2877741030.1195555, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x %= (tmp = -1740126105, tmp));
+  assertEquals(910709964, x |= (tmp = 910709964, tmp));
+  assertEquals(-1744830464, x <<= (tmp = -2445932551.1762686, tmp));
+  assertEquals(318767104, x >>>= (tmp = -2465332061.628887, tmp));
+  assertEquals(301989888, x &= (-2771167302.022801));
+  assertEquals(301989888, x |= x);
+  assertEquals(37748736, x >>= (tmp = -835820125, tmp));
+  assertEquals(1474977371, x ^= (tmp = -2857738661.6610327, tmp));
+  assertEquals(470467500, x += (-1004509871));
+  assertEquals(0.30466562575942585, x /= (((tmp = 1515955042, tmp)<<(x+((1607647367)-(tmp = 1427642709.697169, tmp))))^x));
+  assertEquals(1.0348231148499734e-10, x /= (tmp = 2944132397, tmp));
+  assertEquals(0, x >>= (x>>>(tmp = -2847037519.569043, tmp)));
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x >>>= (-1817784819.9058492));
+  assertEquals(0, x >>= x);
+  assertEquals(-0, x *= ((tmp = -1387748473, tmp)|(x+(352432111))));
+  assertEquals(-0, x *= (((-2591789329)/(tmp = -2144460203, tmp))>>(tmp = -568837912.5033123, tmp)));
+  assertEquals(0, x <<= (-2963600437.305708));
+  assertEquals(0, x &= ((588720662)>>>x));
+  assertEquals(1561910729, x += (1561910729));
+  assertEquals(0, x ^= x);
+  assertEquals(-0, x *= (-2722445702));
+  assertEquals(0, x &= (tmp = -2738643199.732308, tmp));
+  assertEquals(0, x /= (((1859901899.227291)>>>((tmp = -1067365693, tmp)+((-1975435278)|x)))|((1844023313.3719304)&(tmp = -624215417.0227654, tmp))));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x %= (-2852766277));
+  assertEquals(0, x <<= (-1482859558));
+  assertEquals(0, x >>= x);
+  assertEquals(-1196775786, x += (tmp = -1196775786, tmp));
+  assertEquals(-68176201, x |= ((tmp = 2336517643, tmp)+x));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>= (2969141362.868086));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x >>= ((x-((((tmp = -905994835, tmp)|(tmp = 2850569869.33876, tmp))<<((-2405056608.27147)>>(tmp = 1280271785, tmp)))&(-1942926558)))*(tmp = 707499803.177796, tmp)));
+  assertEquals(0, x &= ((-697565829.8780258)+((2978584888.549406)%x)));
+  assertEquals(0, x >>= (748642824.4181392));
+  assertEquals(0, x += x);
+  assertEquals(0, x >>>= (-1701028721));
+  assertEquals(92042539, x -= ((-92042539)|(x*(x%(-293705541.00228095)))));
+  assertEquals(0, x %= x);
+  assertEquals(0, x >>= x);
+  assertEquals(0, x %= (-2278672472.458228));
+  assertEquals(0, x %= (((-2374117528.0359464)/((tmp = -2809986062, tmp)|(tmp = 895734980, tmp)))&(tmp = 1564711307.41494, tmp)));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x += x);
+  assertEquals(-0, x /= ((tmp = -2749286790.3666043, tmp)<<(x^(-2966741582.324482))));
+  assertEquals(0, x *= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(-1882562314, x ^= (2412404982.782115));
+  assertEquals(-806620, x %= (((tmp = 1527219936.5232096, tmp)*(-1139841417))>>>(tmp = 201632907.3236668, tmp)));
+  assertEquals(-1613240, x += x);
+  assertEquals(-1664766177387640, x *= (1031939561));
+  assertEquals(-9.478083550117849e+23, x *= (tmp = 569334221.1571662, tmp));
+  assertEquals(-8.462574598319509e+21, x /= ((x-(tmp = -2985531211.114498, tmp))>>(tmp = 174615992.91117632, tmp)));
+  assertEquals(1638924288, x <<= (((((x>>((-1823401733.4788911)+((tmp = 1362371590, tmp)>>>x)))^(tmp = -56634380, tmp))/(tmp = 2387980757.1540084, tmp))%((((tmp = -3175469977, tmp)^(tmp = -1816794042, tmp))+(232726694))*(tmp = 822706176, tmp)))/(tmp = 1466729893.836311, tmp)));
+  assertEquals(2686072821796307000, x *= x);
+  assertEquals(-1007977445.9812208, x /= (-2664814408.800125));
+  assertEquals(-1007977445, x &= x);
+  assertEquals(322314656346249100, x *= (tmp = -319763758.54942775, tmp));
+  assertEquals(197436885.26815608, x /= (tmp = 1632494637, tmp));
+  assertEquals(-67191339, x |= ((-399580815.1746769)/((1335558363)/(tmp = 224694526, tmp))));
+  assertEquals(1229588737, x &= (tmp = 1296763683.5732255, tmp));
+  assertEquals(1229588737, x -= ((((1171546503)|((tmp = -2701891308, tmp)%(-2155432197.022206)))/(-306122816.85682726))>>x));
+  assertEquals(4162606632, x -= (tmp = -2933017895, tmp));
+  assertEquals(1.6487311395551163, x /= (2524733434.1748486));
+  assertEquals(-1929308648.9913044, x += (-1929308650.6400356));
+  assertEquals(-3858617297.982609, x += x);
+  assertEquals(788529152, x <<= (x^(1401824663)));
+  assertEquals(6160384, x >>>= ((((((x>>>x)>>((((x*(tmp = -1958877151, tmp))>>>(1310891043))-(tmp = 564909413.9962088, tmp))%(-175978438)))%x)|((tmp = -1193552419.7837512, tmp)*(tmp = 1508330424.9068346, tmp)))|(1428324616.3303494))-((1828673751)/(tmp = 1281364779, tmp))));
+  assertEquals(6160384, x |= x);
+  assertEquals(1, x /= x);
+  assertEquals(1, x &= (tmp = -855689741, tmp));
+  assertEquals(0, x >>>= x);
+  assertEquals(-1088569655.3528988, x -= (tmp = 1088569655.3528988, tmp));
+  assertEquals(-1088569655, x >>= ((tmp = 2429646226.626727, tmp)<<((-1539293782.4487276)>>(x^((tmp = 1140855945.537702, tmp)+x)))));
+  assertEquals(-311, x %= ((x/x)<<x));
+  assertEquals(1.2007722007722008, x /= (x|(tmp = 448796341.87655175, tmp)));
+  assertEquals(3, x |= (x+x));
+  assertEquals(-9.32416092168023e-10, x /= (-3217447688));
+  assertEquals(0, x >>= (615837464.0921166));
+  assertEquals(0, x >>>= (tmp = -2993750670.683118, tmp));
+  assertEquals(0, x >>>= (x%x));
+  assertEquals(1610612736, x ^= ((-1322905256.6770213)<<(-2567950598)));
+  assertEquals(1693676493, x ^= (83063757.63660407));
+  assertEquals(-758030371, x ^= (tmp = -1239274480, tmp));
+  assertEquals(-758030371, x %= (tmp = 1961339006, tmp));
+  assertEquals(-1509754528, x ^= (tmp = 1960027837, tmp));
+  assertEquals(-1509754528, x <<= x);
+  assertEquals(-1509754528, x -= (((tmp = -50690205.33559728, tmp)/((tmp = -1364565380, tmp)<<(tmp = 2585052504, tmp)))<<(tmp = -2356889596, tmp)));
+  assertEquals(1, x >>>= (-3204164321));
+  assertEquals(1, x *= x);
+  assertEquals(1114370230.591965, x *= ((tmp = 1114370229.591965, tmp)+x));
+  assertEquals(-4.886305275432552, x /= ((-228059887.33344483)%(2841553631.3685856)));
+  assertEquals(2.358309397373389e-9, x /= (((x*(tmp = 203428818.08174622, tmp))&(x-(((510438355)*x)+x)))+x));
+  assertEquals(0, x >>>= ((tmp = 1444810010, tmp)&(tmp = -3135701995.2235208, tmp)));
+  assertEquals(0, x /= (1865982928.6819582));
+  assertEquals(0, x *= x);
+  assertEquals(2078726016.3772051, x -= (tmp = -2078726016.3772051, tmp));
+  assertEquals(1580337898, x ^= ((tmp = -2714629398.447015, tmp)^x));
+  assertEquals(1268363034, x -= ((x+((tmp = 1144068248.3834887, tmp)&(-954104940.155973)))<<(tmp = 1270573731.7828264, tmp)));
+  assertEquals(1744830464, x <<= (((1444869551.7830744)>>>((((x+(tmp = -904688528, tmp))<<x)-((tmp = 121151912.85873199, tmp)/(tmp = -2414150217.66479, tmp)))|(((-472906698)|(3215236833.8417764))+(907737193.9056952))))-((x&(-732223723))|(-221800427.7392578))));
+  assertEquals(717338523283226600, x *= (x^(tmp = -2407450097.0604715, tmp)));
+  assertEquals(402653184, x >>= ((-3191405201.168252)*((tmp = -1941299639.695196, tmp)|(((x>>(((3215741220)>>>x)/(x+x)))^(((tmp = -2144862025.9842231, tmp)|((tmp = -1966913385, tmp)&x))%x))*((tmp = -1124749626.6112225, tmp)/(tmp = 837842574, tmp))))));
+  assertEquals(402653184, x &= ((x|x)>>x));
+  assertEquals(134217728, x &= ((2720231644.3849487)*x));
+  assertEquals(134217726.75839183, x -= ((2438054684.738043)/(((((-984359711)*(x|((tmp = 177559682, tmp)^x)))/(-1253443505))/((2727868438.416792)*(x+((x<<(((tmp = 3023774345, tmp)&(-705699616.0846889))/x))<<x))))^(1963626488.548761))));
+  assertEquals(1, x /= x);
+  assertEquals(245781494, x += ((tmp = 2551445099, tmp)^(2528486814)));
+  assertEquals(-1474427807, x ^= (-1497868393.342241));
+  assertEquals(-1057271682, x += ((((((x>>x)%(-1556081693))|(x/(((1166243186.6325684)-(((tmp = 2870118257.1019487, tmp)/(x+(-69909960)))^(2270610694.671496)))/((1463187204.5849519)-x))))-x)-(x<<(-3077313003)))%x));
+  assertEquals(-1065725846, x &= ((tmp = -1808223767, tmp)|(-481628214.3871765)));
+  assertEquals(-1065725846, x ^= (x&(((tmp = -1785170598, tmp)-(tmp = -2525350446.346484, tmp))/((((((-1783948056)^(tmp = 3027265884.41588, tmp))|((((tmp = 2195362566.2237773, tmp)<<(-2919444619))<<((tmp = -2507253075.2897573, tmp)^(x^((tmp = 1067516137, tmp)+((667737752)^(x*(tmp = -1187604212.7293758, tmp)))))))%(-617406719.5140038)))*(tmp = 511060465.6632478, tmp))*((tmp = 2580189800.752836, tmp)|((((tmp = 2357895660, tmp)%((-814381220)*(x-((x>>>(((x<<x)<<(tmp = 1919573020, tmp))-x))>>>((-2756011312.136148)>>(tmp = -1603458856, tmp))))))/((tmp = -1609199312, tmp)&(-3127643445)))%x)))<<(-2261731798)))));
+  assertEquals(1.6020307924030301, x /= (tmp = -665234308.2628405, tmp));
+  assertEquals(-1120020556.697667, x *= (tmp = -699125486.2321637, tmp));
+  assertEquals(-215875188, x -= (((((tmp = -1307845034, tmp)>>>((((-2820720421)^x)-(((x<<x)|(tmp = -3042092997.57406, tmp))+(((-1294857544)+((tmp = -668029108.1487186, tmp)>>(x<<x)))^(912144065.5274727))))^(389671596.2983854)))|(-2774264897.146559))%(x-((tmp = 1378085269, tmp)^x)))+((-1659377450.5247462)&(((1613063452.834885)>>>((-344896580.0694165)>>>((-13450558)+x)))^x))));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>>= (2355750790));
+  assertEquals(1969435421.4409347, x += (1969435421.4409347));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>>= (((x*((-1022802960.6953495)<<(tmp = -2848428731.8339424, tmp)))^(-1630921485))%(1532937011)));
+  assertEquals(0, x <<= ((x+((x^(x^(tmp = 2017651860, tmp)))&(((x<<(((tmp = -1913317290.8189478, tmp)|(x-((((x%((tmp = -3035245210, tmp)+(-2270863807)))>>>((-2351852712)*(x^(-2422943296.0239563))))&((((-1578312517)%x)*x)*(-65592270.28452802)))>>>(tmp = 1104329727.2094703, tmp))))-(tmp = -1431159990.3340137, tmp)))&x)|((tmp = -2589292678.801344, tmp)&(x+((((tmp = -2557773457.456996, tmp)>>(451910805.309445))-x)>>(((tmp = -1937832765.7654495, tmp)^x)%x)))))))%x));
+  assertEquals(0, x %= (tmp = -626944459, tmp));
+  assertEquals(-732310021, x |= (tmp = -732310021, tmp));
+  assertEquals(-732310021, x |= x);
+  assertEquals(671352839, x ^= (x-((-3087309090.7153115)|x)));
+  assertEquals(134479872, x &= (tmp = 2357183984, tmp));
+  assertEquals(18084835973136384, x *= x);
+  assertEquals(0, x <<= ((1040482277)-(tmp = -357113781.82650447, tmp)));
+  assertEquals(74957, x |= ((((tmp = -70789345.7489841, tmp)%(tmp = 1415750131, tmp))&x)|((307027314)>>(2284275468))));
+  assertEquals(9, x >>>= x);
+  assertEquals(0, x &= (x&((x*((x*(x%x))%(x>>x)))/x)));
+  assertEquals(-1872875060, x |= (2422092236.6850452));
+  assertEquals(9, x >>>= (-382763684));
+  assertEquals(4608, x <<= x);
+  assertEquals(40.480234260614935, x /= (((((((tmp = 814638767.5666755, tmp)&((tmp = 2081507162, tmp)^(x>>>(1460148331.2229118))))&(tmp = 1187669197.7318723, tmp))<<(412000677.93339765))^((tmp = 556111951, tmp)>>(tmp = -2232569601.292395, tmp)))&(-3006386864))/x));
+  assertEquals(32, x &= (-3053435209.383913));
+  assertEquals(418357217, x ^= (418357185));
+  assertEquals(204275, x >>= ((-1188650337.9010527)^((51494580)%(-2544545273))));
+  assertEquals(982392804, x += (((x+(((tmp = -982596937.9757051, tmp)+x)%(-2298479347)))^((((tmp = 1610297674.0732534, tmp)>>>x)*(((x>>(-2746780903.08599))&(-2376190704.247188))^(((20545353)/(tmp = 1468302977, tmp))-(x<<x))))>>(((-1434332028.0447056)/((tmp = 1983686888, tmp)&((tmp = 2324500847, tmp)%(394330230.6163173))))%(((-1129687479.2158055)+((-3127595161)*((-3066570223)&((tmp = 3192134577.4963055, tmp)/(-2697915283.3233275)))))+(-1112243977.5306559)))))|(x&(-2622725228))));
+  assertEquals(-2735750653096133600, x *= (-2784782870.9218984));
+  assertEquals(-1876329472, x |= ((((((2752866171)<<(-1681590319))/x)>>((tmp = 1451415208, tmp)>>>(1126858636.6634417)))+(((tmp = 2165569430.4844217, tmp)/x)^(((tmp = -1675421843.4364457, tmp)-(-2187743422.2866993))|x)))*x));
+  assertEquals(3520612287495799000, x *= x);
+  assertEquals(-200278016, x |= ((((-2379590931)%((((-1558827450.833285)&x)>>(-665140792))-((tmp = -445783631.05567217, tmp)+(tmp = 93938389.53113222, tmp))))/(3103476273.734701))^x));
+  assertEquals(-9178285062592.75, x *= ((2042671875.7211144)%(((tmp = 589269308.0452716, tmp)/x)<<(-130695915.9934752))));
+  assertEquals(60048960, x |= (x<<x));
+  assertEquals(60048960, x <<= ((((((tmp = -2793966650, tmp)/(-2882180652))&(((x<<((tmp = -384468710, tmp)+(2236162820.9930468)))>>>((((969371919)>>((tmp = -3153268403.2565875, tmp)-((((573811084)/x)^(tmp = -968372697.4844134, tmp))>>>(((-3096129189)>>x)/(tmp = 830228804.6249363, tmp)))))<<(((1243972633.3592157)|x)&((-1687610429)&(tmp = -1945063977.458529, tmp))))<<(((tmp = -217456781.37068868, tmp)-(400259171.68077815))^x)))>>>x))%(((2728450651.300167)/(((-2713666705.089135)%(tmp = 740472459, tmp))^x))|x))^x)*(-2463032364)));
+  assertEquals(60048960, x %= (tmp = -442107222.9513445, tmp));
+  assertEquals(-1573781504, x <<= (960581227));
+  assertEquals(1297, x >>>= (tmp = -1692919563, tmp));
+  assertEquals(1297, x &= x);
+  assertEquals(-3113308397155.233, x *= (tmp = -2400391979.3024154, tmp));
+  assertEquals(-3115513013486.233, x -= (2204616331));
+  assertEquals(-3113809649082.233, x -= (-1703364404));
+  assertEquals(0, x >>>= (((-1181206665)-(550946816.586771))|(tmp = -2346300456, tmp)));
+  assertEquals(0, x %= (tmp = 1649529739.2785435, tmp));
+  assertEquals(0, x ^= ((tmp = -2452761827.2870226, tmp)%(((1090281070.5550141)/(tmp = 992149154.6500508, tmp))*(x<<((((((x>>>x)|((tmp = -2410892363, tmp)%(tmp = 2585150431.0231533, tmp)))/x)*(tmp = 1541294271, tmp))+x)&((97566561.77126992)&((((-640933510.1287451)&(((((x>>>((-1821077041)<<((tmp = -1138504062.093695, tmp)-(tmp = -181292160, tmp))))%x)-(x>>((x&(((tmp = 1067551355, tmp)/(x|(1004837864.8550552)))&(x-(-103229639.25084043))))&((tmp = 2064184671.210937, tmp)+((((tmp = -2245728052, tmp)|(1538407002.8365717))+(x<<((x>>((76549490)/(tmp = 628901902.6084052, tmp)))<<((x<<x)^(-1907669184)))))+(-1409123688))))))>>>((((-1911547456.933543)-((-512313175)+((tmp = -2620903017, tmp)^(tmp = 2148757592.244808, tmp))))<<((-1740876865)>>>x))+((tmp = 691314720.9488736, tmp)<<(614057604.4104803))))|(x^((tmp = -3040687.291528702, tmp)/(x^(((x+(-2899641915))^((tmp = -1220211746, tmp)/x))%x))))))^(tmp = 119850608, tmp))%(2091975696))))))));
+  assertEquals(291273239, x -= (tmp = -291273239, tmp));
+  assertEquals(2206394018, x += (1915120779));
+  assertEquals(235641480, x <<= (x&(x&(-1810963865.1415658))));
+  assertEquals(28764, x >>= ((tmp = -1927011875, tmp)^((tmp = -1986461808, tmp)|((-868139264.8399222)*((421956566)%(3068424525))))));
+  assertEquals(-99780626900900, x *= ((tmp = -1512869526.3223472, tmp)+(tmp = -1956071751, tmp)));
+  assertEquals(51218520, x &= (((-2353401311)>>>x)-(2216842509)));
+  assertEquals(51218520, x >>>= ((tmp = -1534539302.6990812, tmp)<<x));
+  assertEquals(-2147483648, x <<= (-292608644));
+  assertEquals(-2147483648, x |= ((((((x<<((-2981292735)-x))>>((tmp = 2540545320.96558, tmp)&(tmp = -2343790880, tmp)))>>>((((((x^((-172697043.94487858)/((2627260337)>>(2879112814.1247935))))&(tmp = 3000943191, tmp))<<(tmp = 1094830905, tmp))-x)>>>x)>>((((tmp = 3095796200, tmp)^(x|(tmp = 1460377694, tmp)))<<(x^(tmp = -357546193, tmp)))/((2729539495)>>x))))%(tmp = 268894171.74961245, tmp))|(x>>(tmp = 2735650924, tmp)))/(-2197885357.09768)));
+  assertEquals(-2147483648, x |= x);
+  assertEquals(-1967162776824578000, x *= (tmp = 916031551, tmp));
+  assertEquals(-2147483648, x &= x);
+  assertEquals(-457743917756973060, x *= (tmp = 213153622, tmp));
+  assertEquals(0, x >>>= ((((tmp = 2930076928.480559, tmp)+(x^x))<<(tmp = -1349755597.1280541, tmp))|(x+(2865632849))));
+  assertEquals(0, x <<= ((x>>x)-(x>>(-2629977861))));
+  assertEquals(0, x <<= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x |= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(749327478, x |= ((tmp = 749327478, tmp)^(x>>(tmp = 881107862, tmp))));
+  assertEquals(1897869364, x += (1148541886));
+  assertEquals(463347, x >>>= (tmp = -726431220, tmp));
+  assertEquals(-395990542, x += (-396453889));
+  assertEquals(-2824792585.1675367, x -= (2428802043.1675367));
+  assertEquals(-2147483648, x <<= (tmp = -1420072385.9175675, tmp));
+  assertEquals(8388608, x >>>= (-2211390680.488455));
+  assertEquals(8388608, x >>= (((x/(x|(((x^(((tmp = -2175960170.8055067, tmp)|((tmp = -1964957385.9669886, tmp)/(tmp = -475033330, tmp)))&((x|((tmp = 1386597019.2014387, tmp)>>((tmp = -2406589229.8801174, tmp)+x)))<<(tmp = -844032843.8415492, tmp))))>>(x^x))|x)))-((x&((tmp = 1858138856, tmp)*(-3156357504)))%x))<<(((2046448340)+x)/(-2645926916))));
+  assertEquals(8359470765396279, x *= ((tmp = 871437183.7888144, tmp)-(-125089387.17460155)));
+  assertEquals(0, x ^= x);
+  assertEquals(-303039014, x += ((tmp = -2475713214, tmp)|(-372871718.2343409)));
+  assertEquals(2655126577, x -= (-2958165591));
+  assertEquals(1830332793, x ^= (tmp = -212161208, tmp));
+  assertEquals(1830332793, x ^= (((2352454407.0126333)<<((((tmp = 3083552367, tmp)/x)-(-1243111279))-((tmp = -1669093976, tmp)%(((-757485455)-(tmp = -116051602, tmp))<<x))))>>(((((-2235071915.9536905)>>(tmp = -1284656185, tmp))-x)>>((-1807028069.7202528)>>>((x%((tmp = -3070857953.311804, tmp)+((tmp = 2759633693.441942, tmp)%((169489938)*(-1582267384)))))<<(x^((tmp = -787578860, tmp)<<x)))))>>((x/(x|(409464362)))-(tmp = -64033017, tmp)))));
+  assertEquals(397605933.90319204, x %= (tmp = 716363429.548404, tmp));
+  assertEquals(186400, x &= (((x%(-1745754586))>>>x)<<(x&(x&((-2163627752)-((1784050895)+(((-2864781121.899456)>>>x)&x)))))));
+  assertEquals(186400, x %= (tmp = -423209729, tmp));
+  assertEquals(186400, x <<= ((x<<(x+(1232575114.4447284)))*x));
+  assertEquals(1386299, x ^= ((tmp = -1074209615, tmp)>>>(x>>>((tmp = -1456741008.2654872, tmp)>>((1724761067)>>(-2016103779.9084842))))));
+  assertEquals(347302967.20758367, x -= (-345916668.20758367));
+  assertEquals(1.9325619389304094, x /= (179711170.03359854));
+  assertEquals(-3703324711.628227, x *= (tmp = -1916277371, tmp));
+  assertEquals(-920980517031624800, x *= (tmp = 248690187.53332615, tmp));
+  assertEquals(0, x &= (((tmp = -2753945953.082594, tmp)*x)-(172907186)));
+  assertEquals(-0, x /= (((((-2744323543.187253)>>((tmp = 2663112845, tmp)>>(((-121791600)+(x^x))*(2758944252.4214177))))|x)/(tmp = -2746716631.6805267, tmp))-x));
+  assertEquals(0, x ^= ((tmp = 983113117, tmp)&((2638307333)+((((tmp = 3076361304.56189, tmp)<<(-2663410588.5895214))%((-1109962112)-(tmp = -2381021732, tmp)))%((tmp = 410559095, tmp)&x)))));
+  assertEquals(0, x <<= (tmp = 1510895336.5111506, tmp));
+  assertEquals(0, x <<= (tmp = -1688348296.2730422, tmp));
+  assertEquals(2269471424, x -= (-2269471424));
+  assertEquals(-2022580224, x ^= (x%((tmp = 160999480.21415842, tmp)&x)));
+  assertEquals(-2077171712, x &= (tmp = 3032415014.3817654, tmp));
+  assertEquals(270727, x >>>= (2973489165.1553965));
+  assertEquals(270727, x |= x);
+  assertEquals(-1895894537, x |= ((tmp = -1895903118.129186, tmp)|x));
+  assertEquals(-1895894537, x -= ((((((((3143124509)>>>(-2866190144.8724117))*((x>>((961021882)*(tmp = 2363055833.8634424, tmp)))/((2032785518)+((2713643671.3420825)>>((-447782997.0173557)*((tmp = 1174918125.3178625, tmp)*((((tmp = -541539365.548115, tmp)%(-359633101))|(1765169562.2880063))+(tmp = -2512371966.374508, tmp))))))))/x)>>(x*((((-847238927.6399388)&(857288850))%(-2427015402))^((2221426567)%(x+x)))))>>>x)<<((tmp = 2009453564.2808268, tmp)>>((2924411494)<<(x>>(tmp = -1240031020.8711805, tmp)))))%(tmp = 3118159353, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x %= (-30151583));
+  assertEquals(-1035186736, x ^= ((tmp = -517593368, tmp)<<(tmp = 3216155585, tmp)));
+  assertEquals(49740, x >>>= x);
+  assertEquals(49740, x %= (640223506));
+  assertEquals(388, x >>>= ((x>>(tmp = 3161620923.50496, tmp))+(2605183207)));
+  assertEquals(776, x += x);
+  assertEquals(-97905, x ^= ((((((tmp = 145447047.8783008, tmp)^(((x>>>(tmp = 3014858214.2409887, tmp))>>>(629911626.132971))>>(((x+((369309637.229408)-x))<<(-2661038814.9204755))*(x+(x%(3025191323.4780884))))))+x)*(-482550691))|(-632782135))/x));
+  assertEquals(-97905, x %= ((((-492914681)-((-2508632959.269368)&(tmp = 1209318291, tmp)))>>(-723512989.459533))>>>(((-528429623.985692)&(x^(tmp = -925044503, tmp)))-(-1696531234))));
+  assertEquals(9585389025, x *= x);
+  assertEquals(-715425728, x <<= ((583763091)<<(-1223615295)));
+  assertEquals(-520093696, x <<= ((tmp = -1891357699.671592, tmp)*(((tmp = 3206095739.5163193, tmp)+(-2908596651.798733))>>>((tmp = -2820415686, tmp)>>(x|((((tmp = -566367675.6250327, tmp)*(-959117054))>>((((-187457085.89686918)*x)*(tmp = -2394776877.5373516, tmp))>>>x))|(((tmp = 80478970.46290505, tmp)<<(tmp = 2173570349.493097, tmp))-(x/((-2896765964)-((x/((tmp = 198741535.7034216, tmp)%(436741457)))%(tmp = 2936044280.0587225, tmp)))))))))));
+  assertEquals(-2520.5909527086624, x /= ((211290893.06029093)>>(663265322)));
+  assertEquals(-2520.5909527086624, x %= (x^((1057915688)<<(tmp = 1914820571.1142511, tmp))));
+  assertEquals(1, x >>>= (((894963408.7746166)+(tmp = -2888351666, tmp))|x));
+  assertEquals(-1989841636629996300, x += ((1424670316.224575)*((-2144149843.0876865)|((((421479301.0983993)|((3082651798)^(tmp = -271906497, tmp)))>>x)+((tmp = -178372083, tmp)%x)))));
+  assertEquals(17935384255.088326, x /= (((((((tmp = 1168194849.2361898, tmp)>>>(-107316520.53815603))>>>(x^(((x%((x>>>(((-2456622387)/x)&((2124689803)|(((-1130151701)^(2796315158))>>x))))-((-884686033.5491502)>>>((-2371185318.5358763)&x))))+(tmp = 558422989, tmp))|((tmp = -420359120.0596726, tmp)/((-1820568437.0587764)&(2298602280.266465))))))>>(x-((tmp = -1164568978, tmp)^x)))^x)-x)+x));
+  assertEquals(134233150, x &= ((x>>(((tmp = 98498118.13041973, tmp)-(804574397))/(tmp = -1564490985.7904541, tmp)))+x));
+  assertEquals(4, x >>= (449610809));
+  assertEquals(1912543790, x |= (1912543790));
+  assertEquals(2487274263, x += (tmp = 574730473, tmp));
+  assertEquals(-2140759118, x ^= (tmp = 338055333.9701035, tmp));
+  assertEquals(311607367, x += (2452366485));
+  assertEquals(9509, x >>= (372113647.84365284));
+  assertEquals(-2001075684.1562128, x += (-2001085193.1562128));
+  assertEquals(-638703280, x ^= (((tmp = 1096152237, tmp)&x)|((2707404245.0966487)-(((tmp = 1550233654.9691348, tmp)+(tmp = 2008619647, tmp))&((tmp = -2653266325, tmp)+(tmp = -280936332, tmp))))));
+  assertEquals(-101811850, x |= (-2250090202));
+  assertEquals(-13, x >>= ((-561312810.0218933)|(tmp = 79838949.86521482, tmp)));
+  assertEquals(-13, x >>= ((tmp = -936543584, tmp)/(1180727664.1746705)));
+  assertEquals(-1547, x *= (((tmp = 1005197689, tmp)>>>x)>>>(tmp = 34607588, tmp)));
+  assertEquals(2393209, x *= x);
+  assertEquals(2393209, x |= x);
+  assertEquals(0, x >>= (-2691279235.1215696));
+  assertEquals(0, x *= (((896175510.4920144)*((((tmp = 1770236555.7788959, tmp)%(537168585.7310632))/x)&(tmp = 1094337576, tmp)))&(((x-x)-x)>>x)));
+  assertEquals(-1922620126, x ^= (-1922620126));
+  assertEquals(3.43481396325761, x /= (tmp = -559745053.6088333, tmp));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>>= (tmp = 2106956255.6602135, tmp));
+  assertEquals(-1339003770, x ^= ((tmp = 2955963526.960022, tmp)+x));
+  assertEquals(-0, x *= ((((tmp = 368669994, tmp)>>>(x*x))<<(tmp = 2355889375, tmp))&(tmp = -2267550563.9174895, tmp)));
+  assertEquals(0, x >>= (753848520.8946902));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x %= ((tmp = -2872753234.2257266, tmp)|x));
+  assertEquals(NaN, x %= (x>>>(tmp = 890474186.0898918, tmp)));
+  assertEquals(NaN, x %= ((tmp = 1341133992.284471, tmp)&(tmp = -2979219283.794898, tmp)));
+  assertEquals(NaN, x += (-2865467651.1743298));
+  assertEquals(NaN, x += ((-1424445677)%(x^(tmp = 1150366884, tmp))));
+  assertEquals(0, x &= (x+((tmp = 1499426534, tmp)+x)));
+  assertEquals(0, x |= (((((tmp = -2413914642, tmp)<<((x>>>x)^(1218748804)))+((((-1085643932.2642736)-(-1199134221.533854))>>(tmp = 2148778719, tmp))-((tmp = 1589158782.0040946, tmp)/(tmp = -2485474016.1575155, tmp))))>>>(x>>x))/(2230919719)));
+  assertEquals(0, x %= ((tmp = -2576387170.517563, tmp)>>>((tmp = -2362334915.919525, tmp)>>>(((3096453582)-(700067891.4834484))^(2396394772.9253683)))));
+  assertEquals(-1798103432, x ^= (((((tmp = 2396144191, tmp)*(x>>>(1512158325)))&(((-1256228298.5444434)&(((-2963136043.434966)&((tmp = 2472984854, tmp)+(tmp = -454900927, tmp)))%(tmp = 484255852.65332687, tmp)))>>((x%x)-x)))&(tmp = 929723984, tmp))^(tmp = -1798103432.5838807, tmp)));
+  assertEquals(-2137913344, x &= ((((x|(-2970116473))&(((x/x)/((tmp = 2853070005, tmp)>>>x))%(((tmp = -3123344846, tmp)/((2224296621.6742916)-(tmp = -2246403296.455411, tmp)))+((x&(((x^(x*(2829687641)))+x)&(tmp = 988992521, tmp)))^x))))<<((((-820608336)^(tmp = 2851897085, tmp))>>(tmp = -402427624, tmp))>>>x))-(((x*(((-2287402266.4821453)%(tmp = -520664172.1831205, tmp))^(x/(1875488837))))<<(tmp = 402393637, tmp))&(tmp = 1576638746.3047547, tmp))));
+  assertEquals(-2827557853031924000, x *= (tmp = 1322578326.6507945, tmp));
+  assertEquals(6.424459501778244e+27, x *= (tmp = -2272087729.3065624, tmp));
+  assertEquals(-1586887483, x |= (-1586887483));
+  assertEquals(-567868980691736100, x *= (tmp = 357850816, tmp));
+  assertEquals(1489101591, x ^= (x%(x|(421921075))));
+  assertEquals(-801213804822328000, x *= (x|(-672326904.6888077)));
+  assertEquals(612257233.6612054, x /= (((tmp = -350127617, tmp)>>>(-1140467595.9752212))<<((x^x)+(-3117914887))));
+  assertEquals(19097.231243331422, x /= ((x^(tmp = -570012517, tmp))>>>x));
+  assertEquals(0, x >>= ((x%(((-2347648358)%((x-(tmp = -456496327, tmp))|(x^(-1977407615.4582832))))<<(x/(tmp = -2021394626.214082, tmp))))%(tmp = -949323000.2442119, tmp)));
+  assertEquals(0, x <<= x);
+  assertEquals(NaN, x %= (x^(x>>(((tmp = 597147546.7701412, tmp)&(((((-972400689.6267757)|(tmp = -2390675341.6367044, tmp))|(tmp = 1890069123.9831812, tmp))<<(((1606974563)-(tmp = -2211617255.8450356, tmp))&((((x+((2433096953)&(-2527357746.681596)))*(tmp = -313956807.55609417, tmp))|((tmp = -2146031047.968496, tmp)/(tmp = 2851650714.68952, tmp)))>>(((tmp = 2630692376.6265225, tmp)-(tmp = -3162222598, tmp))>>((tmp = 1915552466, tmp)*(x>>>(-2413248225.7536864)))))))&(x%((((1218471556)|x)+(tmp = -849693122.6355379, tmp))+x))))>>>(x/((tmp = 689889363, tmp)/x))))));
+  assertEquals(0, x >>>= (45649573.23297));
+  assertEquals(0, x >>>= (tmp = 1084439432.771266, tmp));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x *= (tmp = 1642750077, tmp));
+  assertEquals(0, x >>>= (tmp = -1944001182.0778434, tmp));
+  assertEquals(1682573000, x |= (tmp = -2612394296.2858696, tmp));
+  assertEquals(3041823595, x -= (((tmp = 720576773, tmp)|(x^(-1068335724.2253149)))>>(x*(-2501017061))));
+  assertEquals(6083647190, x += x);
+  assertEquals(-6536258988089986000, x *= ((tmp = 632312939.6147232, tmp)|((-1621821634)+(((tmp = -2281369913.562131, tmp)&((tmp = -381226774, tmp)|x))&(664399051)))));
+  assertEquals(4.272268155938712e+37, x *= x);
+  assertEquals(733271152, x %= (-1345127171));
+  assertEquals(847089925, x ^= (tmp = 432620917.57699084, tmp));
+  assertEquals(1337073824, x <<= x);
+  assertEquals(-25810602, x ^= (tmp = 2982414838, tmp));
+  assertEquals(-25282209, x |= ((tmp = -2927596922, tmp)>>>(-2404046645.01413)));
+  assertEquals(639190091919681, x *= x);
+  assertEquals(173568320, x &= ((((tmp = -718515534.4119437, tmp)&(tmp = 2989263401, tmp))<<x)|((tmp = 537073030.5331153, tmp)-(tmp = 883595389.314624, tmp))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>>= (tmp = -1844717424.917882, tmp));
+  assertEquals(0, x >>= (tmp = -462881544.2225325, tmp));
+  assertEquals(0, x >>= x);
+  assertEquals(-1868450038, x ^= (2426517258.6111603));
+  assertEquals(1, x /= x);
+  assertEquals(1175936039.4202638, x += (tmp = 1175936038.4202638, tmp));
+  assertEquals(-127916015, x ^= ((x/(1841969600.3012052))-(tmp = 1099467723, tmp)));
+  assertEquals(395713785658171900, x *= (-3093543726));
+  assertEquals(395713787128560900, x += (((((-717204758)*(tmp = -588182129.6898501, tmp))-x)+(tmp = 20638023, tmp))^x));
+  assertEquals(-962609355, x |= ((x^(-3118556619.912983))<<((tmp = 876126864, tmp)&x)));
+  assertEquals(-962609355, x %= (tmp = -2079049990, tmp));
+  assertEquals(-114583755, x -= (((-2806715240)&(((1961136061.0329285)>>>((2087162059)*x))+((tmp = -1890084022.7631018, tmp)%(tmp = 2137514142.358262, tmp))))+(x<<(tmp = 2991240918, tmp))));
+  assertEquals(-425721856, x <<= x);
+  assertEquals(3778560, x >>>= ((x|(3198503572))>>(1158434541.1099558)));
+  assertEquals(3778560, x %= (tmp = -2592585378.9592104, tmp));
+  assertEquals(624640, x &= (tmp = 2261638192.9864054, tmp));
+  assertEquals(1249280, x += x);
+  assertEquals(1048576, x &= ((tmp = -2144301819.9892588, tmp)^((x-x)<<x)));
+  assertEquals(2097152, x <<= (x/x));
+  assertEquals(5069061551149729, x *= (tmp = 2417116904.8069615, tmp));
+  assertEquals(1.4836296666029616e+25, x += ((tmp = 2926833006.7121572, tmp)*x));
+  assertEquals(-256, x >>= ((-469330345.3589895)%((x^(((2554170843.4978285)/(2495676674.815263))>>>x))*(-918892963))));
+  assertEquals(-134217728, x <<= (x|(((((1687450853.1321645)+(tmp = 2369533014.5803776, tmp))+(tmp = -2613779445, tmp))+(tmp = -2488826226.3733397, tmp))>>(tmp = -220646936.41245174, tmp))));
+  assertEquals(704164545131708400, x *= ((-2632786741)+(-2613647956)));
+  assertEquals(9216, x >>>= (-1925405359.657349));
+  assertEquals(4491403261551.008, x *= (tmp = 487348444.1787118, tmp));
+  assertEquals(4490606381829.008, x -= (tmp = 796879722, tmp));
+  assertEquals(-60294056, x >>= x);
+  assertEquals(-3193966580.494005, x += (tmp = -3133672524.494005, tmp));
+  assertEquals(550500358, x >>>= ((tmp = -2779637628.390116, tmp)-((tmp = 29230786.984039664, tmp)%(tmp = -310649504.7704866, tmp))));
+  assertEquals(68812544, x >>= (-1347584797));
+  assertEquals(1.2120221595741834e-11, x /= ((2791020260)*((((1964870148.6358237)^x)|(-3082869417))-((x^x)&((1234292117.8790703)<<(-1792461937.2469518))))));
+  assertEquals(1.2120221595741834e-11, x %= (x-(2780439348)));
+  assertEquals(-1421552183, x |= (tmp = -1421552183.5930738, tmp));
+  assertEquals(-1420954119, x |= ((((-2547788562.5735893)<<x)%(435385623))>>(x|x)));
+  assertEquals(1, x /= x);
+  assertEquals(1, x >>= (x>>>(((2975715011.501709)-(tmp = -1473273552.981069, tmp))/(1654883913.042487))));
+  assertEquals(-65382, x ^= ((x/((tmp = -2780026200, tmp)<<x))^(((-2683084424)<<x)>>(-1716245874))));
+  assertEquals(1530921106, x &= (1530940914));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>= x);
+  assertEquals(0, x /= (tmp = 773741434.1972584, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(0, x <<= (-67977514.99888301));
+  assertEquals(0, x %= (2496550482.524729));
+  assertEquals(-0, x /= (tmp = -515040417, tmp));
+  assertEquals(0, x <<= (-1673460935.2858837));
+  assertEquals(-2638209488, x += (-2638209488));
+  assertEquals(-2400951839498683400, x *= (910068685));
+  assertEquals(1600582036, x ^= (((-1247602308.4812562)>>(((-2393714444.179732)>>>x)%(-778140635.7165127)))+(-1933914727.2268424)));
+  assertEquals(0, x *= ((x-x)>>(-1270234575)));
+  assertEquals(0, x >>>= (tmp = 3193676327.493656, tmp));
+  assertEquals(0, x ^= (x>>>(1148676785.389884)));
+  assertEquals(0, x >>= (tmp = -2269181763.8663893, tmp));
+  assertEquals(0, x >>= (3149450221));
+  assertEquals(0, x >>= (1069630750));
+  assertEquals(-625009654, x ^= ((-2143499112)%(-759244728.6214335)));
+  assertEquals(3583943, x >>>= (-2942645558.1204453));
+  assertEquals(1791971, x >>= (x/x));
+  assertEquals(223996, x >>= x);
+  assertEquals(6999, x >>= (tmp = -1051883611.9443719, tmp));
+  assertEquals(1459617792, x <<= (-1572314984));
+  assertEquals(2622356453.269262, x -= (tmp = -1162738661.2692618, tmp));
+  assertEquals(5103676461.269262, x += (2481320008));
+  assertEquals(823989684.2692623, x %= (x^(((((1048362966)*((tmp = -2423040747.6233954, tmp)>>>x))*((tmp = 2330818588.4081, tmp)>>(tmp = 103312020.98346841, tmp)))+(tmp = 2264492857.144133, tmp))>>>((tmp = 2523442834, tmp)<<x))));
+  assertEquals(0, x >>>= (tmp = -2018700898.531027, tmp));
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x <<= (tmp = -2489442223, tmp));
+  assertEquals(0, x >>= ((3045836220)>>>x));
+  assertEquals(-1156905149, x ^= (3138062147));
+  assertEquals(-0, x %= x);
+  assertEquals(-3118433907.512866, x -= ((tmp = 1338611238, tmp)-(-1779822669.5128663)));
+  assertEquals(100679693, x &= (1040565279));
+  assertEquals(10136400582574248, x *= x);
+  assertEquals(0, x %= x);
+  assertEquals(2400318405, x += (2400318405));
+  assertEquals(1.0036190808578471, x /= (((tmp = -2313492253.9889445, tmp)|(x-((tmp = -205459123, tmp)>>x)))+x));
+  assertEquals(0, x >>>= (tmp = 882343227.1675215, tmp));
+  assertEquals(0, x &= ((tmp = 2307828832.2706165, tmp)^((((((1404388047)<<((807879382)-(-2862921873)))-x)*(tmp = -1897734732, tmp))>>(tmp = 1981888881.2306776, tmp))%x)));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x *= (((x*x)*((((2764801384.171454)%(x>>>x))&(384818815))+(x>>(tmp = -1481683516, tmp))))&x));
+  assertEquals(0, x >>= (tmp = -2202536436, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x &= (tmp = 15161124, tmp));
+  assertEquals(-1586110900, x ^= (-1586110900));
+  assertEquals(-1586127952, x -= ((tmp = 560737212, tmp)%((1349529668)>>>(tmp = -1956656528, tmp))));
+  assertEquals(-1174945870, x -= ((1178456190)|x));
+  assertEquals(1335167624.3422346, x -= (tmp = -2510113494.3422346, tmp));
+  assertEquals(1329952126.3422346, x -= (x>>x));
+  assertEquals(1, x >>= x);
+  assertEquals(3, x |= (x<<x));
+  assertEquals(3, x -= (x-x));
+  assertEquals(-1938525669, x |= (tmp = 2356441625.5128202, tmp));
+  assertEquals(-1938525669, x ^= ((tmp = -197149141.3622346, tmp)/(2833823156)));
+  assertEquals(-2.6292393147661324, x /= (737295254.2254335));
+  assertEquals(2925975987.370761, x -= (-2925975990));
+  assertEquals(2925975987.370761, x %= (tmp = 3041184582.8197603, tmp));
+  assertEquals(-1908068660, x ^= ((tmp = -1380575181, tmp)-(2375164084.8366547)));
+  assertEquals(-477017165, x >>= (tmp = 2420877826.353099, tmp));
+  assertEquals(-477017165, x %= ((tmp = -2919204062.3683634, tmp)-(tmp = -2263328990, tmp)));
+  assertEquals(-2105539936, x &= ((tmp = -1630795440, tmp)-(x&((933423833)>>(-475069901)))));
+  assertEquals(-4979480720, x -= (tmp = 2873940784, tmp));
+  assertEquals(-4190953472, x -= (x&(tmp = -645918862.9001305, tmp)));
+  assertEquals(17564091004468855000, x *= x);
+  assertEquals(-857277134, x |= (tmp = 2363948338, tmp));
+  assertEquals(1015632515, x -= (-1872909649));
+  assertEquals(-1150380043, x ^= (tmp = -2014853770, tmp));
+  assertEquals(1607729152, x <<= ((2194449589)+(x|(tmp = -1470075256.4605722, tmp))));
+  assertEquals(1608356496, x |= ((((x|(670426524))<<((-2415862218)>>(tmp = 1572561529.9213061, tmp)))^((-1989566800.3681061)|x))&(2170270618.3401785)));
+  assertEquals(-1836056576, x <<= (tmp = 2906301296.540217, tmp));
+  assertEquals(-2952415961567723500, x *= (tmp = 1608020145, tmp));
+  assertEquals(1435500544, x <<= x);
+  assertEquals(700928, x >>>= (tmp = 2924829771.1804566, tmp));
+  assertEquals(0, x <<= ((x^(2410009094))|(((-164334714.18698573)%(x*x))|(tmp = 2182431441.2575436, tmp))));
+  assertEquals(-143321285, x ^= (tmp = -143321285, tmp));
+  assertEquals(-2, x >>= x);
+  assertEquals(-1, x >>= (x&(1109737404)));
+  assertEquals(1, x >>>= x);
+  assertEquals(0, x ^= x);
+  assertEquals(-2463707358.165766, x += (-2463707358.165766));
+  assertEquals(1831259938, x >>= (((((x-(tmp = 1359448920.5452857, tmp))%(tmp = -104541523, tmp))/((3133289055.9780197)*x))>>x)%x));
+  assertEquals(1858895646, x ^= ((tmp = 131424376, tmp)>>(tmp = -396761023, tmp)));
+  assertEquals(1, x >>= x);
+  assertEquals(-1888369021, x |= ((tmp = -2038869285.046599, tmp)^((tmp = -1318286592.4250565, tmp)-(tmp = 2825123496, tmp))));
+  assertEquals(1036458508, x <<= ((tmp = 2722401450, tmp)/((tmp = 1090712291, tmp)>>((tmp = -2155694696.9755683, tmp)*(tmp = 1661107340, tmp)))));
+  assertEquals(1, x /= (x%((tmp = -1716050484, tmp)+(tmp = -1683833551.797319, tmp))));
+  assertEquals(0, x >>= (tmp = -2899315628, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x <<= x);
+  assertEquals(1546062911, x |= (1546062911));
+  assertEquals(1546195271, x += ((tmp = -3210667091, tmp)>>(tmp = 1323121165, tmp)));
+  assertEquals(3092390542, x += x);
+  assertEquals(-1199626354, x |= (406783756));
+  assertEquals(-3650317194584908300, x *= (tmp = 3042878461.625484, tmp));
+  assertEquals(-7.650495675092354e+27, x *= (2095844078));
+  assertEquals(0, x >>= (tmp = 342617880.3384919, tmp));
+  assertEquals(22, x ^= (((tmp = 381409558.9104688, tmp)>>((2823172888.974557)>>x))>>x));
+  assertEquals(736383550, x += (736383528));
+  assertEquals(0, x %= x);
+  assertEquals(0, x += x);
+  assertEquals(-1553157831, x -= (1553157831));
+  assertEquals(1838556960, x <<= (3158944357.262641));
+  assertEquals(5503285699.188747, x *= ((tmp = 2437440276, tmp)/(814308583.8128904)));
+  assertEquals(5824889900.188747, x -= (((tmp = 1171445694, tmp)-(tmp = -1584666956, tmp))^(tmp = 1217545373, tmp)));
+  assertEquals(747032, x >>>= (-89332085));
+  assertEquals(747032, x |= (x^(x^(x>>>x))));
+  assertEquals(747032, x >>>= ((-1558482440)*((tmp = -2413907480, tmp)+(3003996862.384156))));
+  assertEquals(7.747761349084291e+23, x += ((tmp = 518064022.64624584, tmp)*((tmp = 2001951702, tmp)*x)));
+  assertEquals(0, x <<= (2769324707.5640426));
+  assertEquals(NaN, x %= (((((((-2458056470.7717686)&x)>>(tmp = -361831232.42602444, tmp))*(2611108609.6727047))>>>x)/(-1713747021.8431413))*(-1143281532)));
+  assertEquals(NaN, x %= ((x^((-613836813)*(tmp = -3180432597.0601435, tmp)))%x));
+  assertEquals(NaN, x /= ((-1607092857)^x));
+  assertEquals(0, x &= (-1190719534));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x += (x>>(642177579.1580218)));
+  assertEquals(-3129552333, x += (-3129552333));
+  assertEquals(1165414963, x &= x);
+  assertEquals(2222, x >>= (((tmp = 2606317568, tmp)|x)+(tmp = 1844107136, tmp)));
+  assertEquals(NaN, x %= ((x^x)<<(x/(((tmp = -1362148700, tmp)&((tmp = 76371048, tmp)<<x))>>>((x^(-2605741153))>>(((tmp = -2131608159.7634726, tmp)|(((2827792229.8004875)|(((-848439251)+(-2576768890.123433))|((tmp = -2617711776, tmp)-((-199980264)&((tmp = -46967951.76266599, tmp)/(-733253537))))))*(tmp = 1820087608, tmp)))>>>(tmp = -3118359396.4298744, tmp)))))));
+  assertEquals(NaN, x /= ((2144871731)*x));
+  assertEquals(NaN, x *= x);
+  assertEquals(NaN, x %= (tmp = 234811462.08692443, tmp));
+  assertEquals(0, x >>>= ((1121416685)|(x^(((tmp = -2905413334, tmp)<<(tmp = -3091554324.030834, tmp))<<x))));
+  assertEquals(-55938048, x |= ((tmp = -55938048, tmp)+(x*(tmp = -1518809027.2695136, tmp))));
+  assertEquals(-3.3234995678333864e-10, x /= (x*(tmp = -3008876576, tmp)));
+  assertEquals(0, x <<= (x/((((((-2168824234.2418427)>>(((tmp = 1976810951, tmp)%x)<<(x*(x>>(x%(3146266192))))))%(tmp = 1756971968.122397, tmp))>>>(-2859440157.8352804))/(-1001406.1919288635))>>>(-1358031926))));
+  assertEquals(-0, x *= (tmp = -1756000533, tmp));
+  assertEquals(-0, x %= (2522761446.869926));
+  assertEquals(0, x >>>= (((1087690535)>>>(2741387979))^x));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>= (-819422694.2188396));
+  assertEquals(0, x ^= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x &= (tmp = 86627723, tmp));
+  assertEquals(0, x += x);
+  assertEquals(0, x %= (tmp = -2317915475, tmp));
+  assertEquals(Infinity, x += (((-3072799584)^(-2487458319))/(((tmp = -3050692353, tmp)&x)>>(-777977292.8500206))));
+  assertEquals(Infinity, x += x);
+  assertEquals(Infinity, x -= (tmp = 484428269, tmp));
+  assertEquals(Infinity, x *= x);
+  assertEquals(Infinity, x /= (2059586218.2278104));
+  assertEquals(Infinity, x *= (tmp = 415918523.8350445, tmp));
+  assertEquals(-1800869091, x |= (((-1800869091)>>>(x>>>(tmp = -2832575051, tmp)))>>>x));
+  assertEquals(6196126991451132000, x *= ((-1467292383.8458765)+(-1973339154.7911158)));
+  assertEquals(6196126992684649000, x += (1233517421));
+  assertEquals(1, x /= x);
+  assertEquals(-7153809722216516000, x -= (((-2984550787.146106)<<(tmp = 743743974, tmp))*((3155151275)/((-1771412568.8965073)%x))));
+  assertEquals(-7153809721471491000, x -= (-745024056));
+  assertEquals(5.117699353102001e+37, x *= x);
+  assertEquals(0, x >>= x);
+  assertEquals(-0, x *= ((-2651785447.666973)<<(-1124902998)));
+  assertEquals(-0, x /= (2119202944));
+  assertEquals(1042673805.5205957, x -= ((x<<x)-(tmp = 1042673805.5205957, tmp)));
+  assertEquals(62, x >>>= (tmp = 2769597912.977452, tmp));
+  assertEquals(34, x &= ((tmp = -61541150, tmp)%(x^(-943160469))));
+  assertEquals(34, x ^= ((-2625482224.4605474)<<(-2277806338.3461556)));
+  assertEquals(536870912, x <<= ((-2373927426.4757633)^x));
+  assertEquals(536870912, x &= x);
+  assertEquals(512, x >>>= ((-1626769708.310139)<<((tmp = 641796314, tmp)/(721629637.3215691))));
+  assertEquals(0, x <<= (-113973033));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x += (-1602711788.2390788));
+  assertEquals(NaN, x *= (x%x));
+  assertEquals(0, x &= (x<<(x|(x>>((x>>>(x%((1182960050)^(((-220896609)-((((tmp = 1518275435.360103, tmp)/(tmp = -88234820, tmp))^x)/x))>>(3169930777.548236)))))-(tmp = -2912668817.662395, tmp))))));
+  assertEquals(0, x *= ((2323969408.7524366)/(((tmp = -3089229853, tmp)>>>((((tmp = -1012580544.5631487, tmp)>>(1138049418.9023373))>>x)&x))*(tmp = 626912001, tmp))));
+  assertEquals(0, x >>>= x);
+  assertEquals(NaN, x /= (x%(-868024322)));
+  assertEquals(NaN, x /= (tmp = -1749532322, tmp));
+  assertEquals(1861918711, x |= (-2433048585.853014));
+  assertEquals(1861918711, x >>= (((102451747)>>>((((241651917.47259736)/((((((((1759022236)^(tmp = -2592022722, tmp))+((-1748044969)>>>(704597925)))/(-1639604842))%((1349846853.7345295)<<(-729695861)))/(x>>((tmp = -2654474404.7365866, tmp)>>x)))>>>(((-480356478)|(x%((tmp = -1668269244.6979945, tmp)+(tmp = -2441424458.565183, tmp))))^((1634981212.7598324)>>>(tmp = 122455570.22000062, tmp))))<<x))*((tmp = -1058636137.5037816, tmp)+((2794083757.138838)&((x/(50081370))&x))))/x))/((tmp = -243106636, tmp)<<((x*((tmp = -648475219.5971704, tmp)>>((tmp = -1568913034, tmp)-((tmp = 911458615, tmp)|x))))>>>(tmp = 2714767933.920696, tmp)))));
+  assertEquals(0, x ^= x);
+  assertEquals(-2080484602, x |= (((1544771831.4758213)|x)^(-538113039)));
+  assertEquals(696451072, x <<= (tmp = -1587032689, tmp));
+  assertEquals(-162595645, x += (tmp = -859046717, tmp));
+  assertEquals(516546456, x >>>= x);
+  assertEquals(623083588, x += ((-1371850352)^(tmp = -1469933252, tmp)));
+  assertEquals(92342412, x %= (tmp = -132685294, tmp));
+  assertEquals(500272110, x |= ((tmp = 1616032506, tmp)%((tmp = 1589569590.4269853, tmp)|(-972791738.1829333))));
+  assertEquals(3247086, x %= (((tmp = 1372216208, tmp)|(-638950076.3387425))&((-2619249161.849716)&(73957896))));
+  assertEquals(0, x >>>= (tmp = -1482343462.6911879, tmp));
+  assertEquals(1265125662, x ^= (tmp = -3029841634, tmp));
+  assertEquals(4941897, x >>>= (-2039728632));
+  assertEquals(206857, x &= (tmp = 226962365.45571184, tmp));
+  assertEquals(1.0925018562586405e+24, x += ((tmp = 2687424146, tmp)*(((-1998020319)%x)*(-2080331363))));
+  assertEquals(-1.755270751212437e+32, x *= (-160665242));
+  assertEquals(0, x <<= (3152796521.6427975));
+  assertEquals(0, x ^= ((((((tmp = -855001595, tmp)<<(2007525777))-(x-(x-x)))/(3036585090.9701214))&(1827983388))*((tmp = -915604789.0515733, tmp)&(((((tmp = -806628722.7820358, tmp)%x)/(tmp = -2773117447, tmp))|x)<<(((tmp = -2902300974.7300634, tmp)|x)/(-1608133440))))));
+  assertEquals(0, x |= ((((((119024954)*(((x^(tmp = 2939514414, tmp))|x)^(x-(tmp = -1597415597.6795669, tmp))))+(((tmp = -182277816.14547157, tmp)<<(((-2983451324.3908825)^(tmp = 1572568307, tmp))+(-1165604960.8619013)))/(x>>((tmp = -2127699399, tmp)>>((x^(((((tmp = -1968667383, tmp)^(tmp = 3120052415.9964113, tmp))|(((x|(((x^((tmp = 2831505153, tmp)<<((-3150506831.547093)+((x%(tmp = 383761651, tmp))%(2856803457)))))+(((tmp = -2426953997, tmp)^(tmp = -2667954801.1010714, tmp))*(tmp = -2707801631, tmp)))&(tmp = 2082935238.794707, tmp)))^((tmp = 697573323.5349133, tmp)-x))%(tmp = 661936357, tmp)))/(-1717944600.261446))>>>((2423776015.0968056)^((-1410322010)|((x<<(tmp = 2935993226, tmp))/(tmp = -1533896392, tmp))))))*(tmp = -596675330, tmp))))))>>>(((2944268153)^(x&(144579050.93126357)))/(-2123810677.2619643)))>>>(1473040195.9009588))*x));
+  assertEquals(0, x /= (2877666495));
+  assertEquals(2174852514, x -= (tmp = -2174852514, tmp));
+  assertEquals(543713128, x >>>= x);
+  assertEquals(2978128878.939105, x += (tmp = 2434415750.939105, tmp));
+  assertEquals(3529591145844655600, x *= (tmp = 1185170719.3753138, tmp));
+  assertEquals(659, x >>>= ((((((x<<(((((-425423078)/(((tmp = 160617689.20550323, tmp)&(-1524740325.5003028))%(tmp = -1869426475, tmp)))<<(((x^(-487449247))>>>(tmp = -1962893666.7754712, tmp))%x))*x)>>((tmp = 623413085, tmp)&(x+(((((-2200726309.083274)-(x-x))+x)&(-1304849509))|((((tmp = -431896184, tmp)>>>(x>>(-1932126133)))<<((1078543321.2196498)*(-10761352)))>>(tmp = -2681391737.5003796, tmp)))))))/x)-(tmp = -1768629117, tmp))/(((((tmp = -2320718566.0664535, tmp)%x)+(-2831503351.995921))>>>(-2695416841.3578796))*(943979723)))<<x)|((652520546.7651662)>>(1045534827.6806792))));
+  assertEquals(531, x &= (tmp = -293707149, tmp));
+  assertEquals(0, x >>= (tmp = -678056747.5701449, tmp));
+  assertEquals(1184651529.8021393, x += (tmp = 1184651529.8021393, tmp));
+  assertEquals(1721719611, x |= (tmp = 1645413178, tmp));
+  assertEquals(-406880257, x |= (tmp = 2268544460, tmp));
+  assertEquals(-4194304, x <<= (tmp = -109701322.43455839, tmp));
+  assertEquals(17592186044416, x *= x);
+  assertEquals(0, x ^= (x&x));
+  assertEquals(0, x <<= (tmp = 1715401127, tmp));
+  assertEquals(-1793087394, x |= (tmp = -1793087394.730585, tmp));
+  assertEquals(-2, x >>= x);
+  assertEquals(263607360.10747814, x += (tmp = 263607362.10747814, tmp));
+  assertEquals(1073214955, x |= (893759979.3631718));
+  assertEquals(703953930, x -= ((2738450011)%(x^(tmp = 679402836, tmp))));
+  assertEquals(1, x >>= (tmp = 2262515165.6670284, tmp));
+  assertEquals(0, x >>= (((tmp = 747896494, tmp)^((tmp = -1005070319, tmp)+x))|x));
+  assertEquals(0, x >>= ((953612771)>>>(tmp = 3066170923.3875694, tmp)));
+  assertEquals(-314941454, x -= (x+(((314941454)%(((tmp = 2200222912.9440064, tmp)>>>(2534128736.805429))>>>(x|((747716234)%(((tmp = -252254528, tmp)%(-1553513480.1875453))&x)))))<<x)));
+  assertEquals(-535686958, x &= (-522809126));
+  assertEquals(0.5480312086215239, x /= (tmp = -977475278, tmp));
+  assertEquals(-1199953459.6090598, x *= ((-2189571393)+((3186862741.37774)>>(tmp = -2193090564.5026345, tmp))));
+  assertEquals(-1199953459.6090598, x %= ((tmp = 2986532440, tmp)*(2685122845)));
+  assertEquals(-1199953459.6090598, x %= (1951182743.7399902));
+  assertEquals(51262285383887820, x *= (-42720228));
+  assertEquals(-424776752, x |= x);
+  assertEquals(166221344210236600, x *= (tmp = -391314598.6158786, tmp));
+  assertEquals(-1883425600, x >>= (((tmp = -1020679296, tmp)^((-1416867718)+(-1412351617)))<<(-2743753169)));
+  assertEquals(0, x &= (x/(-2250026610)));
+  assertEquals(-1111956501, x ^= (tmp = 3183010795, tmp));
+  assertEquals(2012059503, x ^= (tmp = -900369276, tmp));
+  assertEquals(15719214, x >>>= (tmp = -3196277049, tmp));
+  assertEquals(15719214, x |= x);
+  assertEquals(100779035, x -= ((-1245802025)^(-2964289852)));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x &= (((x<<((2361941389.708063)%x))>>((328256762.09842086)>>>((((tmp = 3094192285, tmp)-(((x>>(tmp = -2920437464, tmp))<<(tmp = -2693021467, tmp))-(x>>>((2410065554)%(x%(tmp = 2487056196.689908, tmp))))))-(tmp = -866314146, tmp))^((1754098471)-((((((-2450740191)-(tmp = 1977885539.6785035, tmp))*((tmp = -1205431332, tmp)>>>x))>>(-870601854))>>(tmp = -301859264, tmp))|((tmp = -2308971516.8301244, tmp)/x))))))&((2307007357)-((tmp = -1518812934, tmp)+(2562270162)))));
+  assertEquals(0, x <<= x);
+  assertEquals(-1802124619, x |= (-1802124619));
+  assertEquals(-1802124619, x %= ((1617132364.306333)+((1678465962.079633)|((516698570)%(((569813606)*(-1800804098.6270027))%((tmp = 1976706935, tmp)-((tmp = -1830228989.5488424, tmp)>>(((x^((tmp = 1015246068.3791624, tmp)>>x))^((-2171682812.246772)-(tmp = -398330350, tmp)))&x))))))));
+  assertEquals(904564673.6237984, x -= (tmp = -2706689292.6237984, tmp));
+  assertEquals(818237248768128900, x *= x);
+  assertEquals(254842325.2585001, x %= (1550087667.9657679));
+  assertEquals(-1163919360, x <<= x);
+  assertEquals(-3.4644526843674166, x /= ((-446801454)+(x>>>(tmp = -2025151870, tmp))));
+  assertEquals(0, x &= ((((((((-1739617728)&(x&(((tmp = -2946470036.552597, tmp)/x)*x)))^(-1130501404))>>>x)/((1870230831)>>>(840301398)))%x)/x)/(-2927537567)));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>>= (x&(x&x)));
+  assertEquals(0, x &= ((-579614044)-(-756012505.4048488)));
+  assertEquals(-2970367642, x -= (tmp = 2970367642, tmp));
+  assertEquals(-415129376, x ^= (tmp = 2847041926.060355, tmp));
+  assertEquals(-1505681312, x &= (tmp = -1225184902.9215767, tmp));
+  assertEquals(-3174471329.5807734, x += (-1668790017.5807734));
+  assertEquals(-Infinity, x /= (x>>x));
+  assertEquals(NaN, x -= x);
+  assertEquals(0, x ^= (x^(((-1407936301.5682082)<<((x^(((tmp = 3213446217.307076, tmp)|x)|((tmp = 3219810777.3171635, tmp)/(tmp = 1561807400, tmp))))>>>((tmp = 2449910203.0949173, tmp)|((((1954662538.7453175)>>(tmp = -1711636239.9916713, tmp))>>>(tmp = 406219731.214718, tmp))<<(((-907908634.4609842)^((((((tmp = 2408712345, tmp)*(tmp = 1740346634.5154347, tmp))>>(tmp = 715783991, tmp))^(tmp = -655628853.2821262, tmp))%(tmp = 2819143280.434571, tmp))/(-1240412852)))*x)))))/x)));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>>= (((-3198075268.8543105)>>(((((x+((tmp = -133461401.50823164, tmp)-((x&(((((tmp = 2617977319, tmp)>>((tmp = -2704719576.8734636, tmp)|((tmp = -977362542.2423751, tmp)<<(x<<(tmp = 3054487697.1441813, tmp)))))>>>((-1635655471)%x))/(-2079513672))%(tmp = 1993563806, tmp)))<<(tmp = -1310524200.6106496, tmp))))%((((-2558804500.7722936)+(tmp = -1641265491, tmp))<<((tmp = -1309608349, tmp)>>>x))/((tmp = -2306644272, tmp)<<x)))*(-2009396162.3063657))+(267343314.3720045))-(-2212612983.661479)))|x));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x *= x);
+  assertEquals(-824822309, x |= (-824822309));
+  assertEquals(-807944741, x |= (((-598067403)*((x&(tmp = 2897778389, tmp))>>>(-1322468310.3699632)))|x));
+  assertEquals(90004223.44097246, x /= (((tmp = -481122620, tmp)&x)%((tmp = 1109368524, tmp)/(((-3150568522.633032)<<(tmp = 2923396776, tmp))^(x-((x/x)&(x/(-287976185.1049104))))))));
+  assertEquals(0.4521931751193329, x /= (tmp = 199039323, tmp));
+  assertEquals(1.8110466604491368e-10, x /= (2496860986.492693));
+  assertEquals(0, x |= x);
+  assertEquals(-1225944576, x += ((tmp = -807700791.631221, tmp)<<((-700782615.4781106)-((((-2954619897)>>>x)<<((tmp = 997657844, tmp)>>>(1227994596)))/((-1234591654.8495834)*((tmp = -191189053.70693636, tmp)+(tmp = -3027659304, tmp)))))));
+  assertEquals(-1225811383, x |= (-1866233271));
+  assertEquals(3069155913, x >>>= (((x/(-99524153.40911508))%(x>>>((((tmp = 2985975640, tmp)/(tmp = 2781516546.2494454, tmp))&(((2234114508)|(((x/(tmp = -1224195047, tmp))<<x)^(x>>>((537884375.5698513)+x))))^((tmp = -2144817497.5089426, tmp)|(-498079183.8178189))))>>>((x+x)&(-3086080103.6460695)))))<<(((tmp = 2151157136, tmp)*x)/(((x/x)>>>(-1149734628.4364533))-((3025445835.654089)+(tmp = 530902725.91127443, tmp))))));
+  assertEquals(-1733702568, x ^= (tmp = 776361489.423534, tmp));
+  assertEquals(8981504, x &= ((tmp = 2902581847, tmp)*(x-(-2697760560))));
+  assertEquals(1153166.8526612986, x -= ((x/(tmp = -1375025594.5027463, tmp))+((3043576689.1538706)%(x+x))));
+  assertEquals(3389855, x |= (x+x));
+  assertEquals(-488458393.17759943, x += (-491848248.17759943));
+  assertEquals(40982867145206920, x *= ((3132857155)|(tmp = -218356553, tmp)));
+  assertEquals(688, x >>= (((((tmp = 403321821, tmp)+((tmp = 2536984658, tmp)%((tmp = 2759309029.8753624, tmp)|(((tmp = 1994203554.7417293, tmp)^((704660500.434877)*(tmp = 1536292958.2691746, tmp)))+(-164139788)))))/((1205950994.1255205)+x))^((((tmp = 975272146.0133443, tmp)-(150107797))/(-1764309514))^((x>>>(x^(x^x)))+(203250124))))>>>(tmp = 1864959239.512323, tmp)));
+  assertEquals(10, x >>= ((tmp = 1631996431.9620514, tmp)>>x));
+  assertEquals(10, x %= (tmp = 2678904916, tmp));
+  assertEquals(335544320, x <<= (tmp = -2759037415.6811256, tmp));
+  assertEquals(-153389967, x |= ((tmp = -2411636565, tmp)+(tmp = -2305156154, tmp)));
+  assertEquals(-1171, x >>= x);
+  assertEquals(813080576, x &= (((tmp = -65428547, tmp)&(tmp = 3163266999, tmp))<<x));
+  assertEquals(4346532303, x += ((tmp = -761515569.0707853, tmp)>>>(((tmp = 143240971.0661509, tmp)<<x)*(x^((tmp = -271697192.8471005, tmp)&x)))));
+  assertEquals(-863299035, x ^= ((((2663001827.1492147)>>>((x/(((tmp = 482665912, tmp)-(x>>(tmp = 354425840.784659, tmp)))>>((-2012932893)>>>x)))/((tmp = -1354385830.6042836, tmp)>>>(-2149023857))))^((tmp = 585746520, tmp)+(tmp = 756104608, tmp)))^(517529841.184085)));
+  assertEquals(-997654012, x &= (((tmp = -404836025.15326166, tmp)+((tmp = 3035650114.0402126, tmp)<<((-1308209196)>>(tmp = 693748480, tmp))))<<(((465774671.4458921)<<x)/(1971108057))));
+  assertEquals(-320581507110848260, x *= ((x-(tmp = -2266777911.7123194, tmp))^(tmp = -2810021113.304348, tmp)));
+  assertEquals(-320581508271196300, x += ((-1195215841.5355926)|(x-((2715907107.4276557)+(((-843426980)>>(x&(x%(tmp = -1139279208.34768, tmp))))^x)))));
+  assertEquals(368031616, x &= x);
+  assertEquals(368031616, x %= (tmp = 1211767328, tmp));
+  assertEquals(-67505614939510744, x *= (tmp = -183423412.56766033, tmp));
+  assertEquals(959424552, x >>= ((tmp = -171120122.5083747, tmp)/x));
+  assertEquals(30949179.096774194, x /= (((x-((((x&(tmp = -180770090, tmp))<<(((tmp = -2061363045.419958, tmp)*((655711531)^((1205768703)-(tmp = 2468523718.8679857, tmp))))+(-2746704581)))+((-853685888)*(tmp = -2299124234, tmp)))|(tmp = 2429502966, tmp)))|(((-985794986.0232368)>>>(2890862426))%x))>>(tmp = 1005542138.8415397, tmp)));
+  assertEquals(30949179, x |= x);
+  assertEquals(30949179, x %= (810126097.6814196));
+  assertEquals(120895, x >>= (tmp = 3065886056.1873975, tmp));
+  assertEquals(1934320, x <<= (1478650660.7445493));
+  assertEquals(0, x >>= (1069658046.2191329));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x %= (x*x));
+  assertEquals(NaN, x *= ((((2148513916)+(tmp = -210070225.85489202, tmp))>>(975470028))+((-3060642402)>>x)));
+  assertEquals(NaN, x *= (2888778384));
+  assertEquals(NaN, x -= (294531300.16350067));
+  assertEquals(-465620423, x ^= (tmp = -465620423.5891335, tmp));
+  assertEquals(1613303808, x &= (-2530649850.1952305));
+  assertEquals(2045458658, x |= (tmp = 432158946.5708574, tmp));
+  assertEquals(0, x >>>= (2277328255.770018));
+  assertEquals(0, x &= (-64904722.41319156));
+  assertEquals(0, x >>= x);
+  assertEquals(3109394857.361766, x += (3109394857.361766));
+  assertEquals(1519021650, x ^= ((tmp = -2632472653, tmp)|(tmp = 2161964921.8225584, tmp)));
+  assertEquals(370854, x >>>= ((1486892931.4564312)-((tmp = 3017755741.9547133, tmp)>>>x)));
+  assertEquals(1333145110.39802, x -= ((-1051580495.39802)-(tmp = 281193761, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x |= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(799202788.1455135, x -= (tmp = -799202788.1455135, tmp));
+  assertEquals(1539080192, x <<= (x%(((((x-x)|(((((x%(959993901))+(tmp = -2647575570.092733, tmp))/(tmp = -2040600976.5104427, tmp))*(x*(tmp = 2785252760, tmp)))>>(-377867259)))/((x&(1549738240.013423))>>>(tmp = -1502185618, tmp)))*x)%(1159283801.0002391))));
+  assertEquals(0, x >>= (-268660225));
+  assertEquals(-0, x /= (-2795206270.635887));
+  assertEquals(0, x >>>= (1869556260.2489955));
+  assertEquals(64202212, x ^= ((((tmp = -942983515.5386059, tmp)*(((1057759788)-x)*(tmp = 2038041858, tmp)))>>x)+(tmp = 64202212, tmp)));
+  assertEquals(2021126977, x -= ((tmp = -2009912898, tmp)^((2240062309)%x)));
+  assertEquals(4332348265459724000, x *= (tmp = 2143530968, tmp));
+  assertEquals(1472, x >>>= ((283380755)<<x));
+  assertEquals(-1672370407872, x *= (tmp = -1136121201, tmp));
+  assertEquals(338573318, x ^= (tmp = 2329579078.4832354, tmp));
+  assertEquals(2377388772.1662374, x -= (tmp = -2038815454.1662374, tmp));
+  assertEquals(-1.264761712403516, x /= ((((tmp = -2106209534, tmp)>>((((((tmp = 626190172, tmp)/x)>>>(-824270996.8545206))/((1258369810.9498723)-(tmp = -2947556209, tmp)))^((((366784589.24711144)|(1462064104.828938))-(1571045395.777879))<<(444685689.60103726)))>>(tmp = -2757110357.410516, tmp)))/(x>>>((tmp = 829226010, tmp)>>>(629512715))))|x));
+  assertEquals(-2905481691.264762, x -= (2905481690));
+  assertEquals(-1710543566.1481905, x -= (-1194938125.1165714));
+  assertEquals(-3421087132.296381, x += x);
+  assertEquals(-884178944, x <<= ((-1820881235)|x));
+  assertEquals(-884178944, x &= (x%(tmp = -2298828530, tmp)));
+  assertEquals(1516503040, x <<= ((tmp = -3039882653, tmp)+((tmp = 1956034508, tmp)<<(x>>(tmp = 280388051, tmp)))));
+  assertEquals(3033006080, x += x);
+  assertEquals(846431222.321887, x %= (x+(-1939718651.1609435)));
+  assertEquals(-846431224, x ^= ((-1742116766.54132)/x));
+  assertEquals(1157918728, x &= (tmp = 1966568030, tmp));
+  assertEquals(1157918728, x >>>= ((((((tmp = -2392096728.184257, tmp)*(x&(-3051259597.301086)))>>>(((tmp = 1712991918.071982, tmp)*(tmp = -714525951, tmp))-((-1784801647)>>((-1270567991)%(((214272558)/(((-3110194570)|(tmp = 2558910020, tmp))&(-1266294955.717899)))*((2654922400.609189)>>>(tmp = 370485018, tmp)))))))*(((tmp = -2621203138.1838865, tmp)%(858913517))*((tmp = -1564229442.2596471, tmp)>>((tmp = 1898557618, tmp)|(-1282356275)))))*(tmp = -1253508468, tmp))+((-361964404.75944185)|x)));
+  assertEquals(961668975, x += (-196249753));
+  assertEquals(1, x >>= (tmp = 890453053, tmp));
+  assertEquals(1, x >>= (((((tmp = 871309275, tmp)/(x>>>((tmp = 2033022083, tmp)&(tmp = -1393761939, tmp))))%((437488665.104565)^(tmp = 2808776860.4572067, tmp)))-((tmp = -359283111.49483967, tmp)<<((tmp = 2985855945, tmp)%(tmp = -596479825.9114966, tmp))))/(-1965528507)));
+  assertEquals(0, x >>= ((tmp = -1753776989, tmp)%(tmp = 322622654, tmp)));
+  assertEquals(84411424, x ^= (((x|(x|(tmp = -1617122265, tmp)))&(tmp = -313813263, tmp))&(1472888112.0258927)));
+  assertEquals(67633184, x &= ((1556833131.0776267)<<(x<<(1501219716.5575724))));
+  assertEquals(68002293, x |= (((tmp = 188984203.0350548, tmp)>>>(tmp = 1356052777, tmp))%(x*(tmp = -2944960865, tmp))));
+  assertEquals(67108864, x &= (((1046644783.9042064)<<x)+((-2796345632)>>>(((-1913290350.3687286)<<(((((tmp = -2223692353, tmp)>>x)&(x<<(x>>((((tmp = -976850020, tmp)%(tmp = 1379692507, tmp))>>>(1120103052.2077985))>>(tmp = 5592070.612784743, tmp)))))<<(x+((tmp = -3154037212.9764376, tmp)%(((x-(-1961060483.6965141))+(((1920670676)-(2852444470.7530622))/(((1445954602)>>((1353665887)>>(tmp = 111411560.64111042, tmp)))<<x)))+x))))<<((-1773130852.6651905)^((1216129132)>>(1511187313.2680469)))))|((tmp = -1107142147, tmp)|(tmp = -768165441.4956136, tmp))))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x %= (tmp = -1655707538.0778136, tmp));
+  assertEquals(-184120712930843900, x += (x+((tmp = -3174410166, tmp)+((tmp = -301807453, tmp)*(tmp = 610060182.1666535, tmp)))));
+  assertEquals(-54598560, x >>= (-1365351357));
+  assertEquals(-6763.94449950446, x /= (((-1953016847)<<((673287269.7002038)%(-558739761)))>>>(tmp = 1607754129, tmp)));
+  assertEquals(-1, x >>= x);
+  assertEquals(1, x >>>= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>= ((-384747983)+((((tmp = -949058352.381772, tmp)>>>(-1920744986))-(-882729639))^((x^((tmp = 2351364046, tmp)<<(((tmp = -3110165747, tmp)^(-1266489735))-((tmp = -371614326, tmp)>>((tmp = -2064968414, tmp)&(-2075036504.617934))))))&(((-2616501739)&(tmp = 2591437335.4029164, tmp))>>x)))));
+  assertEquals(0, x >>>= ((tmp = 2946468282, tmp)&((-2741453019)>>x)));
+  assertEquals(0, x -= ((x%(-134700915))&(-1955768279)));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x /= (x^(((((((tmp = 3185669685.772061, tmp)>>(tmp = -1973500738, tmp))-(tmp = -87401348.93002152, tmp))>>(tmp = -2813508730, tmp))&(tmp = -778957225, tmp))<<(x-(x&((-2821756608)+(((((tmp = 2475456548, tmp)/(tmp = 997998362, tmp))<<((tmp = -83043634, tmp)|x))%(636120329))%(tmp = -1910213427.7556462, tmp))))))%x)));
+  assertEquals(0, x &= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>>= (x%x));
+  assertEquals(0, x %= (745221113));
+  assertEquals(0, x >>>= ((1467615554.7672596)|x));
+  assertEquals(0, x /= (tmp = 735317995, tmp));
+  assertEquals(-1513001460, x |= (2781965836));
+  assertEquals(-1513001460, x |= (x%(1970577124.3780568)));
+  assertEquals(-0, x %= x);
+  assertEquals(1864972269, x ^= (-2429995027.840316));
+  assertEquals(1226843341, x &= (tmp = -639621923.5135081, tmp));
+  assertEquals(1226843339.3171186, x += ((1297620268.272113)/(-771070549)));
+  assertEquals(76677708, x >>>= (1009134980));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x ^= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(716040787, x |= ((1851586229)-(1135545441.3502865)));
+  assertEquals(1385693184, x <<= x);
+  assertEquals(1321, x >>= (x^((tmp = -1576632297.0860603, tmp)>>>(405218605))));
+  assertEquals(-1319012931, x |= (-1319014243));
+  assertEquals(-1319012931, x >>= ((((1689898279.3580785)<<((((x^(x>>>((((tmp = 2635260332, tmp)*(tmp = 2053357650, tmp))*x)*(2856480122.339903))))>>x)&(-2382703000.077593))%(1183918594)))*(tmp = -1670081449, tmp))<<x));
+  assertEquals(-528327581.7646315, x %= (tmp = -790685349.2353685, tmp));
+  assertEquals(2073431790, x ^= (tmp = 2601800333, tmp));
+  assertEquals(-6514722684180, x -= (((tmp = 824141806.0668694, tmp)>>>(((-1865885282.8723454)&(x&(x|((900188006.3757659)>>>(x&x)))))+(2227126244.0526423)))*x));
+  assertEquals(1450593, x >>>= ((2157053647)>>(x+(-2934071355.418474))));
+  assertEquals(576782336, x <<= ((1054640368.827202)&((tmp = -3182236876.434615, tmp)>>(tmp = 2129856634.0328193, tmp))));
+  assertEquals(2950754326, x -= (tmp = -2373971990, tmp));
+  assertEquals(738197504, x <<= (1188157369.5988827));
+  assertEquals(0, x <<= (x+((tmp = -839533141, tmp)&((((((tmp = -1148768474.7306862, tmp)|(172650299))+(tmp = -2739838654, tmp))/(3132557129))%x)>>>(tmp = -1229961746.2466633, tmp)))));
+  assertEquals(0, x %= (tmp = -2974207636, tmp));
+  assertEquals(0, x %= ((2323482163)>>>x));
+  assertEquals(0, x &= (((x/(x+(x>>((tmp = 55935149, tmp)%x))))|((3109182235)>>>(tmp = 1217127738.8831062, tmp)))+((((tmp = -385114910, tmp)*((((((tmp = -2535158574.634239, tmp)&(x+x))<<(-2821692922.43476))&(-776804130.9457026))>>((-1374832535)^(tmp = 2175402162.701251, tmp)))%(-1646995095)))-(x*(tmp = -921556123, tmp)))^(79224621))));
+  assertEquals(128935435, x |= ((tmp = 2279459038, tmp)%(tmp = -537630900.5271742, tmp)));
+  assertEquals(128935435, x /= ((((((x<<(2750024311))-((-1332480769.4784315)&(1418160003)))&(1551783357))<<(((((-2870460218.55027)|((-1958752193.7746758)&(2551525625)))>>>((((tmp = -1698256471, tmp)^(((((((((tmp = -830799466, tmp)+x)-(-111590590))+(tmp = -1105568112.3921182, tmp))/((tmp = -3058577907, tmp)|(((-1944923240.2965696)%(-2884545285))<<(tmp = -1993196044.1645615, tmp))))^(x>>(tmp = -2961488181.3795304, tmp)))&x)*x)|(((tmp = 97259132.88922262, tmp)<<((1601451019.343733)&x))*(x|x))))+((((x>>x)<<x)+(-868409202.2512136))/(((tmp = -2893170791, tmp)-((x|(-853641616))%(((tmp = 549313922, tmp)&(-768036601.6759064))%(tmp = -543862220.9338839, tmp))))-((tmp = 1639851636, tmp)+((2164412959)/(-273028039.941242))))))>>>((((-2382311775.753495)^(-2062191030.2406163))>>>(tmp = -1054563031, tmp))/(-862111938.7009578))))%x)+(-3103170117.625942)))%((tmp = -1144062234, tmp)>>x))>>>(tmp = 1216332814.00042, tmp)));
+  assertEquals(41.631074722901715, x /= (x&(-2542806180.962227)));
+  assertEquals(41.631074722901715, x %= (-14003386.556780577));
+  assertEquals(8, x &= (x&((-2231622948)%(tmp = 488279963.9445952, tmp))));
+  assertEquals(9.002961614252625e-9, x /= ((53802728.56204891)<<(((867697152.3709695)-(538719895.5707034))&(-631307825.4491808))));
+  assertEquals(0, x >>= x);
+  assertEquals(-0, x *= (tmp = -785674989, tmp));
+  assertEquals(-0, x += x);
+  assertEquals(0, x /= (-250703244));
+  assertEquals(0, x <<= ((tmp = -661062581.5511999, tmp)|x));
+  assertEquals(0, x &= (-1299482308));
+  assertEquals(0, x &= ((-399690060)>>>(2448074202.385213)));
+  assertEquals(0, x &= (2574341201));
+  assertEquals(0, x <<= ((x|(((tmp = 2458873162.645012, tmp)+(tmp = -1999705422.8188977, tmp))<<((x^(tmp = -392530472, tmp))>>>x)))&(((tmp = 2463000826.7781224, tmp)|(tmp = 3020656037, tmp))-x)));
+  assertEquals(1397603760, x += ((tmp = -1359413071, tmp)-(tmp = -2757016831, tmp)));
+  assertEquals(513823851, x -= (883779909));
+  assertEquals(-1765712747, x ^= (2288060670.6797976));
+  assertEquals(3117741504918286000, x *= x);
+  assertEquals(3117741506284045300, x += (1365759456));
+  assertEquals(6035555595.597267, x /= (tmp = 516562470, tmp));
+  assertEquals(104203275, x &= (tmp = 376835755.32434213, tmp));
+  assertEquals(10858322520725624, x *= x);
+  assertEquals(59458951, x >>>= (153765028));
+  assertEquals(49370856, x += ((tmp = -1291276092, tmp)>>x));
+  assertEquals(0, x %= x);
+  assertEquals(0, x += x);
+  assertEquals(-1494589645, x -= (1494589645));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x <<= (x&((2730708043.467806)<<x)));
+  assertEquals(0, x /= ((tmp = -1483912394.153527, tmp)>>>((tmp = 1800568769, tmp)^((((((tmp = 1351568510, tmp)>>(tmp = -1337992543.2562337, tmp))>>>(tmp = 2602239360.40513, tmp))*x)%x)+(-2095840128.0700707)))));
+  assertEquals(-0, x /= ((2363946613)^(tmp = -2227868069, tmp)));
+  assertEquals(0, x &= ((((2634933507)<<(2798775374.140882))>>>x)>>>(((tmp = 1135200853.6396222, tmp)-(tmp = -1529829490.7007523, tmp))-(((((((((x^((x|(2135742668.591568))-(924230444.8390535)))%(tmp = -2459525610.51898, tmp))+(x&((tmp = 1177231743.809653, tmp)/(tmp = 1743270357.2735395, tmp))))|(((tmp = -1894305017, tmp)^((tmp = 1791704240, tmp)&x))%(-1569751461)))>>>(tmp = -2078321944, tmp))|x)*(((x*(tmp = -163239354, tmp))<<((tmp = 2859087562.694203, tmp)&(-657988325.9410558)))^(2508013840)))-((-243572350)+(x%((-1095206140)+((tmp = 3213566608.942816, tmp)*((2256442613)%((tmp = 1723751298, tmp)^(x-((-1145710681.2693722)|x)))))))))+(1556870627)))));
+  assertEquals(130883024.97423434, x -= (-130883024.97423434));
+  assertEquals(0.046720352789736276, x /= (tmp = 2801413456, tmp));
+  assertEquals(1806558189, x |= (tmp = 1806558189.157823, tmp));
+  assertEquals(72.40475060062144, x /= (x%((1932591076.531628)>>(1982030182))));
+  assertEquals(-1077558321.5975945, x += (tmp = -1077558394.002345, tmp));
+  assertEquals(98187, x >>>= x);
+  assertEquals(97792, x &= (tmp = -1032487404, tmp));
+  assertEquals(709197609, x |= (x^(709179177)));
+  assertEquals(11081212, x >>>= (tmp = 1412940006.169063, tmp));
+  assertEquals(11081212, x &= x);
+  assertEquals(-1920311203, x -= ((tmp = 1931392415, tmp)<<((x%(tmp = -2873576383, tmp))%x)));
+  assertEquals(-1920311203, x |= (x&(-993884718.2172024)));
+  assertEquals(-4, x >>= (1409411613.0051966));
+  assertEquals(-7947632484, x *= ((-2856731734)^((-1181032235.9132767)-((tmp = 780101930, tmp)+((tmp = -1732707132.6253016, tmp)^x)))));
+  assertEquals(-2016362769, x ^= (tmp = 2711125619.2455907, tmp));
+  assertEquals(-61535, x >>= x);
+  assertEquals(-124771649, x ^= (tmp = 124726558, tmp));
+  assertEquals(-1, x >>= x);
+  assertEquals(-0, x %= (x*x));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x /= (2444628112));
+  assertEquals(0, x <<= ((-38968517.72504854)<<x));
+  assertEquals(-1504619917, x |= (tmp = 2790347379, tmp));
+  assertEquals(-1504619917, x &= x);
+  assertEquals(2790347379, x >>>= ((1825218368)<<(-1843582593.2843356)));
+  assertEquals(7786038495492170000, x *= x);
+  assertEquals(-11011696, x |= (((tmp = 2931644407.4936504, tmp)-(3077095016.001658))%(tmp = -1731851949, tmp)));
+  assertEquals(-107866, x %= ((-697845074.1661191)>>(772708134)));
+  assertEquals(356779149, x ^= (-356884949.503757));
+  assertEquals(0, x %= x);
+  assertEquals(0, x *= ((tmp = 1542291783, tmp)^x));
+  assertEquals(0, x += ((tmp = 1105314644.002441, tmp)&x));
+  assertEquals(-1005882993, x ^= (-1005882993.0899806));
+  assertEquals(-1301065066, x += (tmp = -295182073, tmp));
+  assertEquals(-1454702592, x <<= ((-2440858737.390277)&(-1363565201.7888322)));
+  assertEquals(-201539012492525570, x *= ((((tmp = -1416268089, tmp)|x)-(tmp = 1669129769, tmp))&(x<<((x/(-2614041678.7423654))%x))));
+  assertEquals(-2.1995276811535986e+25, x *= (x/(-1846667987.154371)));
+  assertEquals(0, x |= ((x*(((x>>>((tmp = 1044173034, tmp)>>>((x<<((tmp = -2906412863, tmp)%((tmp = -437401503, tmp)<<(((((x|(2167319070))<<((tmp = 2766179640.1840167, tmp)&(-2372076054)))*(tmp = -241617431.06416297, tmp))*((((((tmp = 2570465382.5574293, tmp)>>>(x/((-2851324509.354545)%x)))>>(((x+((tmp = -614687945, tmp)^x))^((((tmp = 1653437743, tmp)>>x)/(tmp = 3072995069, tmp))>>x))*(((((-290508242)>>((tmp = 2969511554, tmp)<<(tmp = 158176292.95642304, tmp)))<<(32376015))+(tmp = 2391895870.4562025, tmp))*x)))&((((x/(tmp = 365292078.53605413, tmp))>>x)/(1167322811.0008812))|(((tmp = 2487970377.365221, tmp)^x)<<((tmp = 2342607988.711308, tmp)/(((2276081555.340126)-(((tmp = -2571071930, tmp)>>(tmp = -248468735.76550984, tmp))>>>(tmp = -2862254985.608489, tmp)))^(-1312017395))))))<<x)&(2762717852.949236)))+((((-2492896493)&x)<<(-2756272781.4642315))/x)))))*(2405395452))))>>((-1433975206)/((tmp = -2064757738.6740267, tmp)<<((((tmp = -1563531255, tmp)-(-589277532.2110934))<<x)^(2249328237.0923448)))))-x))-(-225624231)));
+  assertEquals(0, x *= (tmp = 1657982666.2188392, tmp));
+  assertEquals(86443387, x |= (tmp = 86443387.25165462, tmp));
+  assertEquals(86443387, x %= (-1341731981.702294));
+  assertEquals(172886774, x <<= ((-1799840391)&(1011948481.310498)));
+  assertEquals(-1115684864, x <<= x);
+  assertEquals(-2098253702059525600, x *= (1880686715.1865616));
+  assertEquals(-2098253700213206300, x -= (tmp = -1846319435.0583687, tmp));
+  assertEquals(570692096, x &= (((tmp = -1572055366.64332, tmp)%(tmp = 1720120910, tmp))%((x-(912386952.5959761))*(tmp = -1146251719.4027123, tmp))));
+  assertEquals(603979776, x <<= ((-329752233.8144052)&(tmp = -368636559, tmp)));
+  assertEquals(603979776, x <<= x);
+  assertEquals(364791569817010200, x *= x);
+  assertEquals(0, x &= ((2074587775.983799)/(tmp = 438856632.76449287, tmp)));
+  assertEquals(0, x &= (((1509671758)*(tmp = -935801537.7325008, tmp))>>>(((tmp = -1752877566, tmp)<<x)%(tmp = -517163766, tmp))));
+  assertEquals(-2031730599, x ^= ((2264285273)&(tmp = -1762662949.014101, tmp)));
+  assertEquals(-843578945, x %= (-1188151654));
+  assertEquals(-2147483648, x <<= x);
+  assertEquals(-2147483648, x >>= (tmp = -3165079200.229641, tmp));
+  assertEquals(-44086313.1323726, x %= ((x%(-254466243.48728585))-((x>>(-457411829.1063688))-((-2606923436.9333453)/x))));
+  assertEquals(-44086313, x |= x);
+  assertEquals(1037812, x >>>= ((tmp = 342497258.9786743, tmp)+(1652928385.8150895)));
+  assertEquals(-2371695599678100, x *= (tmp = -2285284425, tmp));
+  assertEquals(-2371697387004653, x += (tmp = -1787326553.0542095, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>= ((x^(tmp = 544039787, tmp))>>>x));
+  assertEquals(0, x &= ((x%(((((((tmp = -424572417.1088555, tmp)|(-2381863189))/(tmp = -2007482475.1809125, tmp))&(((((tmp = 311016073, tmp)>>(tmp = -1548839845, tmp))+((-2557740399.7947464)<<(2399113209)))&x)>>>x))%(-297180308.7721617))-(tmp = 860906293, tmp))^x))%(-2740622304)));
+  assertEquals(4971841192462909000, x += ((tmp = -2723203837.572612, tmp)+((((-2909100706)+(-951999374))|(-3116735764))*(3087123539.422669))));
+  assertEquals(-460, x >>= (1081807537.557404));
+  assertEquals(2354165127.3906384, x += (tmp = 2354165587.3906384, tmp));
+  assertEquals(357.8680960002211, x /= ((((x<<(((x&x)+(1113841407))|((x/(tmp = 384533564, tmp))>>>(-605853882))))%x)&((tmp = 2050375842, tmp)>>>x))>>(((2745147573)^x)<<(x-(900043292)))));
+  assertEquals(0, x *= (x>>>(-295974954.5058532)));
+  assertEquals(0, x *= ((-2448592125.815531)*(tmp = -94957474.8986013, tmp)));
+  assertEquals(0, x &= ((x>>x)^(tmp = -1335129180, tmp)));
+  assertEquals(395092065, x |= ((3081659156)^(tmp = -1608334475, tmp)));
+  assertEquals(395092065, x &= x);
+  assertEquals(-413337639, x += (x^(tmp = -664996071.3641524, tmp)));
+  assertEquals(-1604423637896759800, x *= (x>>>(tmp = 1242912352.955432, tmp)));
+  assertEquals(0, x &= ((((((tmp = 651293313, tmp)|(((2541604468.635497)>>>(tmp = 758815817.7145422, tmp))>>>((-1948795647)/x)))&x)/((tmp = -3161497100, tmp)+(782910972.3648237)))>>>x)%(834206255.5560443)));
+  assertEquals(0, x >>>= (tmp = 125945571, tmp));
+  assertEquals(NaN, x -= (x%x));
+  assertEquals(NaN, x %= (tmp = 282259853, tmp));
+  assertEquals(NaN, x += (tmp = -2081332383, tmp));
+  assertEquals(0, x >>>= (((x>>(-2298589097.7522116))|((((x>>>(x-(tmp = 755218194, tmp)))|x)%x)-(tmp = 2206031927, tmp)))>>>((((x&(x-x))^(tmp = 2836686653, tmp))*((x<<(tmp = -1624140906.4099245, tmp))>>>((2942895486)|((x>>>x)>>>(-1586571476)))))|((781668993)+(-1857786909)))));
+  assertEquals(0, x &= (tmp = -708084218.9248881, tmp));
+  assertEquals(0, x %= (1645913394.5625715));
+  assertEquals(0, x <<= ((x^((tmp = 1185413900, tmp)*((-2441179733.997965)*(tmp = 2554099020.066989, tmp))))%((1704286567.29923)/x)));
+  assertEquals(0, x += x);
+  assertEquals(0, x *= x);
+  assertEquals(0, x |= (x>>>(139138112.141927)));
+  assertEquals(0, x >>>= (tmp = 2142326564, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(-0, x /= ((((x+(2817799428))|x)%((1050079768)-(x>>>((1452893834.8981247)|((((tmp = -1737187310.889149, tmp)/(tmp = -362842139, tmp))%(1234225406))%(((x|x)*((-1055695643.739629)-((x-x)*(945954197.676585))))-(tmp = 786185315.346615, tmp)))))))<<(-173891691)));
+  assertEquals(0, x &= (-2842855092.319309));
+  assertEquals(0, x &= ((-3188403836.570895)/x));
+  assertEquals(0, x *= (x+x));
+  assertEquals(NaN, x /= (x>>>(((tmp = 391037497.68871593, tmp)/((192754032)*(1382659402.5745282)))/((((-2187364928)>>>x)>>(tmp = 2563448665.7594023, tmp))^(tmp = 1500866009.7632217, tmp)))));
+  assertEquals(NaN, x /= ((tmp = -935036555.2500343, tmp)-(x/(((x&(x^(tmp = -3001352832.5034075, tmp)))^x)/((1122547613)>>x)))));
+  assertEquals(0, x >>= (tmp = -2951766379.0809536, tmp));
+  assertEquals(-632945188, x ^= (-632945188.7188203));
+  assertEquals(-632945188, x %= ((((((tmp = -3181527314.82724, tmp)&(2280175415))>>(x^(x|x)))^(tmp = -524233678.52970886, tmp))*x)|((tmp = 1782882786, tmp)>>>(tmp = -592607219, tmp))));
+  assertEquals(404189184, x <<= ((tmp = -2761472127, tmp)^(36616299.88780403)));
+  assertEquals(872651572, x ^= (tmp = 739568436.6252247, tmp));
+  assertEquals(13, x >>>= ((tmp = -1033843418.865577, tmp)%(x%(1247263629.0445533))));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>= (3189175317));
+  assertEquals(0, x &= (((2391973519.6142406)^((-2950058736.191456)|(x*x)))>>(tmp = 343822384.294345, tmp)));
+  assertEquals(0, x >>>= (tmp = -2306246544, tmp));
+  assertEquals(-1572339598, x ^= ((tmp = 2991380083.337327, tmp)&(tmp = -1361507970, tmp)));
+  assertEquals(649, x >>>= ((1961407923.4950056)>>(x-(-872821523.7513013))));
+  assertEquals(649, x ^= (((x&(tmp = -702931788, tmp))^(((x>>x)|(((tmp = 2710759269, tmp)/(x>>(x*((((((tmp = -2428445134.9555864, tmp)+(-1859938743))%(x<<x))*((236868604)+((tmp = -3066688385, tmp)/(787503572.8839133))))/(tmp = 3215629315, tmp))>>(-1315823020)))))%(1461368627.1293125)))>>>(tmp = -2921804417.5735087, tmp)))/(x>>>(((tmp = 2175260691.824617, tmp)/((-582958935.7628009)-((((((x>>x)|(2590503723.4810824))^(tmp = -1994324549, tmp))-(-684683327))/(tmp = -3133419531, tmp))|(tmp = -328974092.05095506, tmp))))>>(-447624639.4518213)))));
+  assertEquals(649, x %= ((((1854382717)|(((x+(tmp = 2568081234, tmp))-x)+((tmp = 1043086140, tmp)<<((tmp = 2979118595.0496006, tmp)+((x&(2669577199.852803))/(-2567808445.101112))))))<<((((tmp = -1471092047, tmp)&((-3099138855.21041)-((tmp = -798574377.526715, tmp)&((2255586141)<<(-1069867774)))))>>>(((x*(tmp = -2810255707.781517, tmp))/x)*(2706435744.054121)))^(394262253)))^((844325548.0612085)/(tmp = 1434691648, tmp))));
+  assertEquals(823215943.1924392, x += (tmp = 823215294.1924392, tmp));
+  assertEquals(536872706, x &= ((-334612686)%((1303605874)|x)));
+  assertEquals(-30666374.413486242, x += ((tmp = -567539080.4134862, tmp)%(tmp = -1655555936.3195171, tmp)));
+  assertEquals(-56438727096752984, x *= (tmp = 1840410814, tmp));
+  assertEquals(-33200107.984488487, x %= (((tmp = 3007206509, tmp)-(3079337725.6659536))%(1819565202.5011497)));
+  assertEquals(-1214493182, x ^= (-3060193769));
+  assertEquals(-1214493179.1335113, x -= ((-3218099496.595745)/(1122662554)));
+  assertEquals(-1214493179, x >>= ((-375364195)<<(((tmp = 619439637.8754326, tmp)>>(-1830023279.9486575))&(tmp = -1106180387.2448823, tmp))));
+  assertEquals(-303623295, x >>= (-2109241374.3349872));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x |= x);
+  assertEquals(1917126206, x -= (-1917126206));
+  assertEquals(2659779928, x -= (tmp = -742653722, tmp));
+  assertEquals(-1635187368, x >>= ((tmp = -674385169, tmp)*((9848362.783326745)|(x*(55220544.00989556)))));
+  assertEquals(-1981113695, x ^= ((tmp = 392404985, tmp)>>(((x<<((2006207061)<<(tmp = 2558988218, tmp)))*((((tmp = 1789304307.1153054, tmp)/(2538061546))<<(tmp = 556026116, tmp))&((tmp = 1076457999.6424632, tmp)*(tmp = -1822378633.2489474, tmp))))%(((((-1117046924)&((-69013651)%(x&(((-2320327696)/(x&x))-(tmp = 2458222544, tmp)))))>>((-3092360983.0037227)/(-3171415636)))*(((tmp = 2520431213, tmp)<<(1066492762.6149663))+((tmp = 1272200889, tmp)^((1687693123.2295754)+x))))-(-1096823395)))));
+  assertEquals(-990556848, x >>= x);
+  assertEquals(981202869119695100, x *= x);
+  assertEquals(981202869119695100, x -= (x/x));
+  assertEquals(0, x ^= (x>>x));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x *= ((((2980512718)>>>x)<<((x^(-1111233869))>>((2531466092.6036797)>>>(((tmp = -1791229364, tmp)*(-2210950307.206208))%((tmp = -806645443, tmp)<<((((((((tmp = 112334634.26187229, tmp)%(x|((((2154021796.1166573)+x)&((-1047293079.9686966)^(tmp = -1894127139, tmp)))+(tmp = 1910946653.2314827, tmp))))^(293142672.5016146))-x)<<(-1593533039.8718698))+x)>>(x<<(((46359706.50393462)&(tmp = 272146661, tmp))|(tmp = 2117690168, tmp))))%(tmp = -1784737092.4924843, tmp)))))))-(1465796246)));
+  assertEquals(0, x &= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x &= (x+(-1612418456)));
+  assertEquals(0, x &= ((tmp = -843964311, tmp)/x));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x *= x);
+  assertEquals(NaN, x += (x>>>(54020240)));
+  assertEquals(489206868, x |= (489206868));
+  assertEquals(489206868, x &= x);
+  assertEquals(489206848, x &= ((tmp = -1699133906.2361684, tmp)>>(tmp = 2658633814, tmp)));
+  assertEquals(489206848, x |= x);
+  assertEquals(1910559006, x -= (tmp = -1421352158, tmp));
+  assertEquals(1, x >>= x);
+  assertEquals(0, x -= x);
+  assertEquals(0, x %= (x^(tmp = 2745376003.2927403, tmp)));
+  assertEquals(0, x %= (((tmp = 3199743302.1063356, tmp)^((-1905944176)&(x>>>(187247029.5209098))))<<((x*((-1394648387)*(1252234289)))-(3140049815))));
+  assertEquals(0, x <<= (-2567872355));
+  assertEquals(0, x %= (tmp = 1057707555.8604916, tmp));
+  assertEquals(0, x %= ((tmp = -1877857405.0228279, tmp)>>>(((tmp = 423831184, tmp)*((tmp = -2106757468.324615, tmp)%(tmp = -1197717524.6540637, tmp)))>>(tmp = -93746263.46774769, tmp))));
+  assertEquals(0, x |= x);
+  assertEquals(-0, x *= ((tmp = 1317609776.6323466, tmp)*(tmp = -26959885.89325118, tmp)));
+  assertEquals(0, x >>= (-1288116122.0091262));
+  assertEquals(0, x &= ((370818172.92511404)%((tmp = -528319853.54781747, tmp)*(x/((tmp = -2839758076, tmp)^(x+(((-1258213460.041857)<<(tmp = 302017800.72064054, tmp))|((((tmp = -624254210, tmp)^((-338165065.97507)|((623392964)-x)))>>>x)%(tmp = 2767629843.0643625, tmp)))))))));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x |= ((-2001549164.1988192)*x));
+  assertEquals(0, x -= x);
+  assertEquals(0, x *= (((((165836842.14390492)*(tmp = -3220002961, tmp))|(-2840620221.747431))%((x/(tmp = 3153915610, tmp))>>>(tmp = 2018941558, tmp)))>>>x));
+  assertEquals(-0, x *= (-231994402.93764925));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x %= (tmp = 2702385056.1149964, tmp));
+  assertEquals(0, x <<= (tmp = 378459323, tmp));
+  assertEquals(0, x >>>= ((x&(x&(((-1014963013)<<(x&((tmp = -3110294840, tmp)|(x+(x<<(1129643420))))))+(1093795819.1853619))))+((((tmp = -2295103369.697398, tmp)&(((370501313.43019223)>>>(2465439579))/x))-x)>>x)));
+  assertEquals(0, x /= ((tmp = 1779625847, tmp)+(tmp = -662459654.6908865, tmp)));
+  assertEquals(0, x -= x);
+  assertEquals(0, x %= ((tmp = 2723291421, tmp)|(277246502.4027958)));
+  assertEquals(0, x ^= (((-2936270162)>>>((((tmp = -2019015609.1648235, tmp)|(47218153))*(-823685284))+x))&(x<<(x*(x|(((tmp = -941955398, tmp)^(tmp = -2365238993.5300865, tmp))-(778674685)))))));
+  assertEquals(0, x >>>= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x &= (-175235975.8858137));
+  assertEquals(-2684493800.1062117, x += (tmp = -2684493800.1062117, tmp));
+  assertEquals(-1290806265.6063132, x -= (-1393687534.4998984));
+  assertEquals(-1290806265, x >>= (((x>>(tmp = -1710112056.4935386, tmp))*(586227650.2860553))<<(tmp = -2918251533.6052856, tmp)));
+  assertEquals(23470008, x >>>= x);
+  assertEquals(1668734969, x |= ((-295560682.9663689)^(x|((((tmp = -1183847364, tmp)&(3135327694))+(1679127747.1406744))-((-1895825528)%((tmp = -3180115006, tmp)+((tmp = 2373812187, tmp)|x)))))));
+  assertEquals(1744306169, x |= (1188503928.5009093));
+  assertEquals(1744306169, x %= (tmp = -2723982401.4997177, tmp));
+  assertEquals(3488612338, x += x);
+  assertEquals(3488612337, x += (((x/(-325849204))>>x)|(-1820624550.9149108)));
+  assertEquals(-1511119305, x ^= (tmp = 1778506182.2952862, tmp));
+  assertEquals(-12211415, x %= (x^(tmp = -54943035, tmp)));
+  assertEquals(-12211415, x %= ((-1267051884)%(-643566443.0122576)));
+  assertEquals(-30.84976063258681, x /= (((1052047194)>>>x)&(1495698235.5117269)));
+  assertEquals(-61.69952126517362, x += x);
+  assertEquals(-244, x <<= (x^(x+(tmp = -2822258210.076373, tmp))));
+  assertEquals(-6652, x &= ((tmp = 2593685093, tmp)>>((((2047688852.4609032)<<((x*(-611076291))*x))^(-2665364024.817528))>>>(165267874))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x /= (2454186758));
+  assertEquals(0, x &= (tmp = -2226895206, tmp));
+  assertEquals(0, x += x);
+  assertEquals(-21390701, x += ((-1369004846.0816503)>>(tmp = -2661552634.039692, tmp)));
+  assertEquals(-0.012568536912921919, x /= (1701924507.856429));
+  assertEquals(7.09517966608176e-11, x /= (tmp = -177141911.8955555, tmp));
+  assertEquals(0, x >>= (tmp = 231535697, tmp));
+  assertEquals(1383687797, x ^= (tmp = -2911279499.568808, tmp));
+  assertEquals(1383687797, x %= (tmp = -2258636646.5294995, tmp));
+  assertEquals(1319, x >>= ((tmp = -2549411892.8426056, tmp)/(((((1532476676)^(153720871.82640445))+x)/(((2988190456.3206205)&(tmp = -2920873674, tmp))-(((((tmp = -1044518167.0581458, tmp)>>x)-((((tmp = -194701879.13505793, tmp)&(498352051))&((tmp = -2167339635.6529818, tmp)^(((x>>(tmp = 700159851, tmp))*(tmp = 2874921158, tmp))/x)))-((2856128689)|((-1876321441)>>>(2110732915)))))^((((tmp = -193379494.18825436, tmp)/(-3055182489.533142))<<x)+((tmp = -2286109605, tmp)>>(tmp = 698475484.3987849, tmp))))^(3182231653.500364))))|(((tmp = -194670835, tmp)>>>((786780139)%(((2114171416.2305853)^(1703145352.8143656))/x)))>>>((tmp = -3029462067, tmp)>>((67647572.02624655)&(x*(-2394283060))))))));
+  assertEquals(13903855, x |= ((tmp = -2515306586, tmp)>>>x));
+  assertEquals(54311, x >>>= ((-2413722658)-((tmp = -2159787584, tmp)^(tmp = 949937622.9744623, tmp))));
+  assertEquals(108622, x += x);
+  assertEquals(1250717187, x ^= ((tmp = 842692148, tmp)+(((2649331689.694273)<<x)-(tmp = -2992181273, tmp))));
+  assertEquals(4536777, x %= (tmp = 73304730, tmp));
+  assertEquals(0, x -= x);
+  assertEquals(-580081499, x ^= ((tmp = -580081499.0170684, tmp)^(x%(tmp = -1542730817.88261, tmp))));
+  assertEquals(-1382738784, x <<= x);
+  assertEquals(-1382738784, x <<= x);
+  assertEquals(2912228512, x >>>= (x*(x>>>x)));
+  assertEquals(-1076374105, x |= (2589443367));
+  assertEquals(-0.2818750938197037, x /= (((tmp = -1559525732.9603848, tmp)|(-477068917.5483327))>>>((-688616257)*((((tmp = -1192490153.1226473, tmp)*(-502280624.0265591))<<(-442688727.4881985))%(x+(((((tmp = -2948836853.831935, tmp)-(tmp = -2850398330.910424, tmp))>>>(x>>>(-1947835558)))^x)+(x*x)))))));
+  assertEquals(2032826546, x |= (tmp = 2032826546.819327, tmp));
+  assertEquals(3408404827.14316, x += (tmp = 1375578281.1431599, tmp));
+  assertEquals(258183922.14315987, x %= (tmp = 350024545, tmp));
+  assertEquals(479694848, x <<= (tmp = -481187157, tmp));
+  assertEquals(-2147483648, x <<= (((tmp = -2956588045.472398, tmp)>>>(((tmp = -1838455399.1775856, tmp)&(((((tmp = -637547, tmp)/x)&(x^((-44876328.1767962)+(((-2059598286)-(1071496688))%(tmp = -1492254402, tmp)))))-(x%x))*(x|x)))>>(1226250760)))<<x));
+  assertEquals(-2288163338.9020815, x -= (140679690.9020816));
+  assertEquals(4954833118513997000, x *= (-2165419327.4906025));
+  assertEquals(1578331238, x ^= (-2410854298.2270393));
+  assertEquals(-810627292, x += (-2388958530));
+  assertEquals(-810627292, x ^= ((1495296640.4087524)/(tmp = 1561790291, tmp)));
+  assertEquals(657116606535253200, x *= x);
+  assertEquals(0.675840332689047, x %= (((-1816548473)^(((tmp = -151918689.19451094, tmp)|(1819911186.535233))/((((((1514297447)+(tmp = 856485190.9684253, tmp))&(((1809369464.4363992)<<(493538496))*x))+((x*(x>>(x&(tmp = 222293461, tmp))))>>>(((784519621)|x)^((-580766922)>>(tmp = -947264116, tmp)))))>>>((((2794210354.22964)>>>(((2896952532.0183973)*((x+(tmp = -1813175940, tmp))<<(tmp = -1302618293, tmp)))&x))>>(x-(((x|((1456466890.1952953)*x))^(-169979758.19158387))-(x-x))))>>x))&(tmp = 2671604078.3026733, tmp))))/(-1701675745)));
+  assertEquals(0.675840332689047, x %= ((tmp = 2421871143, tmp)^x));
+  assertEquals(NaN, x %= ((((tmp = 1175526323.433271, tmp)+(tmp = 2813009575.952405, tmp))%((tmp = -3112133516.3303423, tmp)&x))&((((((-424329392)^(tmp = 1430146361, tmp))+x)-(1533557337.268306))%((tmp = -3117619446, tmp)-(-3127129232)))>>>x)));
+  assertEquals(NaN, x += x);
+  assertEquals(0, x >>>= ((1710641057.7325037)%(104961723.56541145)));
+  assertEquals(0, x <<= (tmp = -970072906, tmp));
+  assertEquals(0, x *= (87768668));
+  assertEquals(-1464968122, x ^= (tmp = -1464968122, tmp));
+  assertEquals(-1467983895, x ^= ((tmp = -1204896021, tmp)>>>(((91792661)&(x>>>(((-2364345606)>>>x)*x)))+x)));
+  assertEquals(2.991581508270506, x /= (-490704963.5591147));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>= ((tmp = 639854873, tmp)%(tmp = 743486160.3597239, tmp)));
+  assertEquals(0, x <<= (tmp = 1045577245.3403939, tmp));
+  assertEquals(0, x >>= ((tmp = -1932462290, tmp)|(tmp = 1629217987, tmp)));
+  assertEquals(517617438, x ^= ((tmp = 2737789043, tmp)%(tmp = -2220171604.135681, tmp)));
+  assertEquals(126371, x >>>= ((tmp = 205210223.69909227, tmp)-(tmp = 598118404, tmp)));
+  assertEquals(918548455, x |= ((918228734.8363427)+(x+x)));
+  assertEquals(918548455, x |= ((tmp = 599828198, tmp)>>((tmp = -851081330, tmp)|(tmp = -1152596996.8443217, tmp))));
+  assertEquals(918548443.7739062, x -= ((tmp = 1497642976.2260938, tmp)%(x>>(tmp = -548469702.5849569, tmp))));
+  assertEquals(0.7739062309265137, x %= (x&x));
+  assertEquals(2317939163.8239403, x *= (tmp = 2995116296, tmp));
+  assertEquals(1014415360, x <<= (-279972114));
+  assertEquals(0, x &= ((296810932)/(x*(tmp = -2750499950, tmp))));
+  assertEquals(0, x *= (x%((126285451.05086231)>>>(x*(tmp = -2789790532, tmp)))));
+  assertEquals(0, x >>>= ((975695102.5771483)%(x-((-1011726540)-((tmp = 2223194882, tmp)/x)))));
+  assertEquals(-1747794584, x |= (-1747794584.3839395));
+  assertEquals(-543544679, x %= (tmp = -1204249905, tmp));
+  assertEquals(-543544679, x %= (-881024001));
+  assertEquals(1, x /= x);
+  assertEquals(-1879376393, x |= ((tmp = 161643764, tmp)|(tmp = 2281346499.9084272, tmp)));
+  assertEquals(1.321124264431369, x /= (-1422558379.7061746));
+  assertEquals(1, x >>>= (x&(tmp = -963118950.4710281, tmp)));
+  assertEquals(3, x ^= ((x+x)/x));
+  assertEquals(1, x /= x);
+  assertEquals(1, x &= (2090796073));
+  assertEquals(-1284301873, x ^= (((-11041168.146357536)+(tmp = -1273260707.8134556, tmp))+x));
+  assertEquals(292559045, x &= (x&((-2401110739)^((tmp = 630802904, tmp)^(((1012634447.0346229)+x)%((tmp = -1240091095, tmp)%(x/(-1483936527))))))));
+  assertEquals(0, x %= x);
+  assertEquals(0, x /= (tmp = 613145428.3653506, tmp));
+  assertEquals(0, x /= ((x-(tmp = 3116638456, tmp))*(-973300716)));
+  assertEquals(0, x %= (tmp = -1794741286.0464535, tmp));
+  assertEquals(0, x &= x);
+  assertEquals(0, x >>= (-551370105.0746605));
+  assertEquals(-1471996874, x ^= ((2822970422.2331414)-x));
+  assertEquals(-277914313, x |= (tmp = -818980601.2544096, tmp));
+  assertEquals(-34, x >>= x);
+  assertEquals(305422768, x -= (-305422802));
+  assertEquals(-2406146240, x += (tmp = -2711569008, tmp));
+  assertEquals(1073745408, x &= (tmp = -3046625618, tmp));
+  assertEquals(1073745408, x <<= ((-1234108306.7646303)<<((-233519302)|x)));
+  assertEquals(1073745408, x %= (tmp = 1898831268, tmp));
+  assertEquals(1073745408, x <<= (((tmp = 3089406038, tmp)/x)&(-2960027680)));
+  assertEquals(65536, x >>>= (2858188366));
+  assertEquals(128, x >>>= ((-2640257239.857275)%((tmp = -3185405235.3177376, tmp)*x)));
+  assertEquals(128, x >>>= x);
+  assertEquals(128, x -= (x&(x-(tmp = -247588018, tmp))));
+  assertEquals(81616906825.07776, x *= (tmp = 637632084.57092, tmp));
+  assertEquals(78860097686.07776, x -= (((1507215684)^((709254783)+(((x<<x)*((-2890828152.667641)%(2537817529.2041526)))^x)))+(3114024487)));
+  assertEquals(-2920545695.721283, x += (((tmp = -2555437435, tmp)>>>x)-((2920546109.72129)+x)));
+  assertEquals(-2879412281.721283, x += ((-1662428756)>>>(tmp = -1928491386.6926208, tmp)));
+  assertEquals(67403845, x &= (tmp = 2921644117, tmp));
+  assertEquals(16850961, x >>>= (((-1039328365)>>>(tmp = -768615112, tmp))<<((1037261855)*(tmp = -2906902831.4797926, tmp))));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x *= ((-2729056530)/((-1776175111)%(1493002300.4604707))));
+  assertEquals(0, x *= (tmp = 370696035.22912216, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x |= ((((((tmp = -1541196993, tmp)^x)/(854730380.1799632))/(2879117705.492209))+((((-2892068577)^(-2460614446.1044483))>>>((743413943)<<(-1285280084.4220598)))/(tmp = -1719994579.5141463, tmp)))%(((((tmp = 2522797851.088227, tmp)<<(tmp = 2257160597.1538725, tmp))/(-680406007))&((x>>>(tmp = -260350730, tmp))^(tmp = 1920522110.852598, tmp)))>>(-697620442))));
+  assertEquals(0, x &= x);
+  assertEquals(-591399642.958673, x += (x-(tmp = 591399642.958673, tmp)));
+  assertEquals(27, x >>>= (tmp = -726721317.2109983, tmp));
+  assertEquals(-2043736843, x -= (2043736870));
+  assertEquals(-3991674, x >>= (tmp = 1098126089, tmp));
+  assertEquals(-997919, x >>= ((x%(((x*(((-1497329257.1781685)%(2334511329.2690516))/(-3072526140.6635056)))+(-1843998852))-(tmp = 240300314.34070587, tmp)))+(714080860.6032693)));
+  assertEquals(-0, x %= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x >>= (tmp = 538348328.5363884, tmp));
+  assertEquals(0, x *= (800317515));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>= (984205514));
+  assertEquals(857282491, x += (tmp = 857282491, tmp));
+  assertEquals(587792897, x &= (tmp = 2951307845.164059, tmp));
+  assertEquals(595301269, x |= (tmp = 24285588.90314555, tmp));
+  assertEquals(1190602538, x += x);
+  assertEquals(0, x -= x);
+  assertEquals(-442423060, x |= ((x^((x-(tmp = 2342497475.637024, tmp))%(-1900074414.7678084)))|((tmp = 1932380130, tmp)%(x%(2291727569.817062)))));
+  assertEquals(-442423060, x %= (((tmp = 703479475.545413, tmp)>>(x-x))<<(2435723056.753845)));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>= x);
+  assertEquals(-1265317851, x |= (tmp = -1265317851, tmp));
+  assertEquals(-2, x >>= (-2015895906.8256726));
+  assertEquals(-0, x %= x);
+  assertEquals(-0, x %= (((1219237746)+(284683029))*(((tmp = 2288119628, tmp)|(-404658161.2563329))*(-265228691.74142504))));
+  assertEquals(1039509109, x -= (-1039509109));
+  assertEquals(2079018218, x += x);
+  assertEquals(-1979.9362673719077, x /= ((3219723500)>>x));
+  assertEquals(-62, x >>= ((x/(326466691))*(tmp = -607654070, tmp)));
+  assertEquals(-45, x |= (tmp = -2954888429.549882, tmp));
+  assertEquals(-1180929712, x &= (3114037588.570232));
+  assertEquals(815550480, x &= (-2302684143.3378315));
+  assertEquals(815550480, x %= (-2177479570));
+  assertEquals(815550480, x %= (tmp = 2895822167, tmp));
+  assertEquals(815550480, x %= (-1247621230.5438688));
+  assertEquals(283929811, x -= ((tmp = 251831053.17096448, tmp)|((tmp = 1140463506.004994, tmp)+(tmp = -743224673.546309, tmp))));
+  assertEquals(1825767424, x <<= (((tmp = 1732353599, tmp)^(tmp = 658726044, tmp))>>>((-2827889370.932477)%(tmp = 1950139204.3291233, tmp))));
+  assertEquals(1828450414, x |= (tmp = 1618538606, tmp));
+  assertEquals(0, x <<= (-2411670689.045702));
+  assertEquals(0, x <<= (-27744888.428537607));
+  assertEquals(-0, x /= (tmp = -1597552450, tmp));
+  assertEquals(0, x >>>= (((2165722776.7220936)>>>(tmp = 1233069931, tmp))>>>(-1120420811)));
+  assertEquals(-0, x *= ((tmp = -1505252656, tmp)>>((((3035637099.6156535)&((467761577.7669761)>>(-361034537)))^(tmp = -2347994840.6541123, tmp))*(tmp = -2191739821, tmp))));
+  assertEquals(0, x &= (795727404.0738752));
+  assertEquals(-0, x *= (tmp = -3125944685.3991394, tmp));
+  assertEquals(-0, x *= (x&x));
+  assertEquals(0, x >>= ((tmp = -2045709233, tmp)^x));
+  assertEquals(NaN, x /= (x>>(x/(3102894071))));
+  assertEquals(NaN, x += ((tmp = 2149079756.8941655, tmp)-(tmp = 810121645.305179, tmp)));
+  assertEquals(0, x >>>= (-859842989));
+  assertEquals(0, x >>>= (tmp = 2530531143.9369526, tmp));
+  assertEquals(0, x >>= (((-932981419.6254237)|(tmp = 1591591715, tmp))>>>(x+((3149795006)>>>(tmp = 613352154, tmp)))));
+  assertEquals(-4294967295, x -= ((((-2289331668)%(-282648480.0078714))>>(-1373720705.5142756))>>>((tmp = 15511563.517014384, tmp)/(360279080))));
+  assertEquals(1, x &= x);
+  assertEquals(0, x >>= (x^(-2791872557.5190563)));
+  assertEquals(0, x &= ((tmp = 336466956.7847167, tmp)>>((1235728252.053619)|(x<<((1828176636.13488)%x)))));
+  assertEquals(-0, x *= (-364042830.8894656));
+  assertEquals(0, x >>>= x);
+  assertEquals(-1675298680, x |= ((2323049541.321387)+(296619075)));
+  assertEquals(-0, x %= x);
+  assertEquals(-1583048579.4420977, x += (-1583048579.4420977));
+  assertEquals(0, x -= x);
+  assertEquals(-2, x ^= ((603171992.0545617)/(((-271888695.718297)%(tmp = -400159585, tmp))^((((tmp = 1536123971, tmp)-(tmp = -2310418666.6243773, tmp))|((tmp = 2242779597.1219435, tmp)<<(tmp = 1758127684.4745512, tmp)))/x))));
+  assertEquals(-2, x &= (x&x));
+  assertEquals(0, x &= ((tmp = -1098806007.4049063, tmp)/(((2862384059.3229523)/((((tmp = -92960842, tmp)-(x>>(tmp = 1244068344.2269042, tmp)))&x)*(tmp = -1919148313, tmp)))<<(-2486665929))));
+  assertEquals(0, x &= x);
+  assertEquals(-1441272634.582818, x -= (1441272634.582818));
+  assertEquals(-3, x >>= (tmp = 3186393693.7727594, tmp));
+  assertEquals(-1206855850, x ^= (((tmp = 607979495.303539, tmp)-(tmp = -2480131951, tmp))^(x*((tmp = 1324153477, tmp)/((1248126288)+(x|(1917331780.0741704)))))));
+  assertEquals(-1206855853, x ^= (x>>>(653288765.1749961)));
+  assertEquals(-1206857725, x &= (3149461539.6019173));
+  assertEquals(3088109571, x >>>= (x*(x<<(tmp = 1543540084, tmp))));
+  assertEquals(536903680, x &= (tmp = 644851760, tmp));
+  assertEquals(536903674.312194, x += (((-3183290076)-((tmp = 40738191.12097299, tmp)-x))/((x>>>(3151371851.9408646))^(tmp = 472698205.22445416, tmp))));
+  assertEquals(2127424750.0506563, x -= (tmp = -1590521075.7384624, tmp));
+  assertEquals(2127424750.0506563, x %= (tmp = 3027273433.361373, tmp));
+  assertEquals(0, x >>= (x>>(1445204441.702043)));
+  assertEquals(NaN, x %= (x<<x));
+  assertEquals(0, x ^= ((tmp = -2903841152.136344, tmp)-(x%(2938662860))));
+  assertEquals(0, x <<= (x<<x));
+  assertEquals(0, x >>>= (tmp = -979481631.33442, tmp));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x &= (((x%((((((tmp = 1657446354.6820035, tmp)>>(-1916527001.2992697))/x)>>(tmp = 1450467955, tmp))&(277676820))+(x/(-945587805))))/((tmp = -690095354, tmp)^x))+(tmp = -2651195021, tmp)));
+  assertEquals(0, x <<= (752343428.2934296));
+  assertEquals(0, x /= (tmp = 3022310299, tmp));
+  assertEquals(0, x >>= (x%((388245402)>>>x)));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x %= ((tmp = 1205123529.8649468, tmp)>>>(-2848300932)));
+  assertEquals(0, x >>= ((x>>>x)<<(tmp = 487841938, tmp)));
+  assertEquals(0, x *= (((273436000.9463471)|(tmp = 141134074.27978027, tmp))^(tmp = 1220326800.7885802, tmp)));
+  assertEquals(1525600768, x |= (((x^(-2674777396))-(tmp = 1966360716.3434916, tmp))<<(794782595.9340223)));
+  assertEquals(761927595, x %= (tmp = -763673173, tmp));
+  assertEquals(1.1353588586934338, x /= ((x&((-1897159300.4789193)*(-348338328.0939896)))&(978680905.6470605)));
+  assertEquals(8.631173314966319e-10, x /= (1315416592));
+  assertEquals(0, x >>= ((tmp = -2581239435, tmp)-((-628818404.1122074)<<x)));
+  assertEquals(0, x -= x);
+  assertEquals(0, x *= (2925158236));
+  assertEquals(0, x /= (x+(tmp = 1405531594.0181243, tmp)));
+  assertEquals(0, x *= (2712022631.230831));
+  assertEquals(0, x >>= (tmp = 80518779.81608999, tmp));
+  assertEquals(1953477932.8046472, x += (tmp = 1953477932.8046472, tmp));
+  assertEquals(1953477932, x >>= (tmp = 3025539936, tmp));
+  assertEquals(1953477932, x -= ((-2675119685.8812313)>>(x/(-1808264410.9754841))));
+  assertEquals(1292620430, x += ((-660857502)%((((tmp = -698782819, tmp)%(tmp = 2847304199, tmp))<<(-2423443217.1315413))+x)));
+  assertEquals(78895, x >>>= x);
+  assertEquals(2, x >>= x);
+  assertEquals(2, x <<= (tmp = 1313641888.8301702, tmp));
+  assertEquals(1857416935.2532766, x += (tmp = 1857416933.2532766, tmp));
+  assertEquals(-1677721600, x <<= (tmp = -2482476902, tmp));
+  assertEquals(309226853.62854385, x -= (tmp = -1986948453.6285439, tmp));
+  assertEquals(33965156, x &= (2409088742));
+  assertEquals(Infinity, x /= (x-(x<<((x/(tmp = -3106546671.536726, tmp))/((tmp = 2695710176, tmp)-((((-2102442864)&(857636911.7079853))/x)%(-65640292)))))));
+  assertEquals(1270005091, x |= (tmp = 1270005091.0081215, tmp));
+  assertEquals(1270005091, x %= (tmp = -1833876598.2761571, tmp));
+  assertEquals(158750636, x >>>= x);
+  assertEquals(-1000809106.0879555, x -= (tmp = 1159559742.0879555, tmp));
+  assertEquals(72400936, x &= ((2448271389.3097963)%(tmp = 1517733861, tmp)));
+  assertEquals(282816, x >>= x);
+  assertEquals(282816, x %= (tmp = 3192677386, tmp));
+  assertEquals(0.00021521351827207216, x /= (1314118194.2040696));
+  assertEquals(Infinity, x /= (((tmp = 2822091386.1977024, tmp)&x)%(tmp = -3155658210, tmp)));
+  assertEquals(NaN, x %= (-359319199));
+  assertEquals(0, x >>>= (((tmp = -2651558483, tmp)-(x<<(tmp = 2537675226.941645, tmp)))<<(tmp = 667468049.0240343, tmp)));
+  assertEquals(-0, x *= (tmp = -2827980482.12998, tmp));
+  assertEquals(-0, x %= (((tmp = -689972329.3533998, tmp)>>>x)|(tmp = -7488144, tmp)));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x |= x);
+  assertEquals(-2410373675.2262926, x -= (2410373675.2262926));
+  assertEquals(1840423, x >>= ((-1081642113)^x));
+  assertEquals(-4829451429403412, x *= (-2624098606.35485));
+  assertEquals(-94552231, x %= (tmp = -97015883, tmp));
+  assertEquals(-94433287, x ^= (((tmp = -2297735280, tmp)&(((tmp = 2261074987.7072973, tmp)%((((2565078998)^(-2573247878))|x)|(((tmp = -2120919004.7239416, tmp)>>(tmp = -579224101, tmp))>>>(1905808441))))*(x|(3149383322))))>>(542664972)));
+  assertEquals(0, x ^= (x<<(tmp = -3112569312, tmp)));
+  assertEquals(0, x <<= (-2141934818.7052917));
+  assertEquals(0, x >>= (tmp = -2539525922, tmp));
+  assertEquals(-434467613, x ^= (tmp = -434467613, tmp));
+  assertEquals(-274792709, x |= (1233452601.462551));
+  assertEquals(-274726917, x |= (-2130333750));
+  assertEquals(-272629761, x |= (-1516071602.5622227));
+  assertEquals(-272629761, x |= ((tmp = 3012131694, tmp)&((tmp = -2595342375.8674774, tmp)-((tmp = -2710765792, tmp)>>>((x-(tmp = 2397845540, tmp))+(2496667307))))));
+  assertEquals(-4194305, x |= (1343705633.165825));
+  assertEquals(4190207, x >>>= ((tmp = 276587830, tmp)*((tmp = -1517753936, tmp)>>x)));
+  assertEquals(0, x >>= (x|((2247486919)-((-1664642412.4710495)*((((tmp = -358185292.17083216, tmp)-(tmp = -1472193444, tmp))*(tmp = 2699733752, tmp))&((x|(x<<(1137610148.1318119)))>>(((375089690.8764564)*x)&(tmp = 859788933.9560187, tmp))))))));
+  assertEquals(0, x %= (3080673960));
+  assertEquals(0, x >>>= (1328846190.1963305));
+  assertEquals(1249447579, x |= (-3045519717.580775));
+  assertEquals(-0.8743931060971377, x /= (-1428931187));
+  assertEquals(1, x |= ((tmp = -1756877535.7557893, tmp)/((-142900015.93200803)<<(1414557031.347334))));
+  assertEquals(759627265, x ^= (759627264.0514802));
+  assertEquals(741823, x >>= (1106391210));
+  assertEquals(610451, x &= ((x>>>((919849416)+((tmp = -427708986, tmp)^((x%x)|(tmp = -2853100288.932063, tmp)))))*x));
+  assertEquals(372650423401, x *= x);
+  assertEquals(410404493, x >>>= ((((-1425086765)>>>x)>>((2813118707.914771)>>(-424850240)))^x));
+  assertEquals(120511585729013, x *= ((tmp = -1889454669, tmp)>>>x));
+  assertEquals(120513295294304.22, x -= (tmp = -1709565291.2115698, tmp));
+  assertEquals(6164, x >>>= ((2244715719.397763)^(tmp = -741235818.6903033, tmp)));
+  assertEquals(937572790.468221, x -= (tmp = -937566626.468221, tmp));
+  assertEquals(937572790, x |= ((2129102867.156146)*(x%x)));
+  assertEquals(32, x &= ((2700124055.3712993)>>>((1977241506)>>>(-2915605511))));
+  assertEquals(32, x %= (tmp = -2513825862, tmp));
+  assertEquals(0, x <<= (-1379604802));
+  assertEquals(0, x >>>= (tmp = -1033248759, tmp));
+  assertEquals(-1151517050, x ^= (3143450246));
+  assertEquals(-180577, x |= ((738373819.4081701)^(-357134176)));
+  assertEquals(-0, x %= x);
+  assertEquals(-2086887759, x |= (tmp = 2208079537, tmp));
+  assertEquals(-2, x >>= (1460216478.7305799));
+  assertEquals(-2, x %= ((-1979700249.0593133)^(-3156454032.4790583)));
+  assertEquals(-256, x <<= ((1810316926)>>>(tmp = 414362256, tmp)));
+  assertEquals(-1, x >>= (((((((-1616428585.595561)*((tmp = 2574896242.9045777, tmp)|(86659152.37838173)))>>(((tmp = 2476869361, tmp)&((x+((tmp = -2445847462.1974697, tmp)>>(tmp = -1960643509.5255682, tmp)))+(x|(((((2231574372.778028)|(tmp = 1824767560, tmp))>>>((1108035230.2692142)|(tmp = 2354035815, tmp)))/((tmp = -2602922032, tmp)>>(-925080304.7681987)))-x))))-(x>>x)))>>>((tmp = 751425805.8402164, tmp)|(tmp = 1165240270.3437088, tmp)))-x)*(2870745939))-(x>>>((tmp = 2986532631.405425, tmp)>>>(((tmp = 2547448699, tmp)+(((((x<<(((((-2756908638.4197435)>>>(3134770084))-(-1147872642.3756688))%(x*(tmp = -282198341.6600039, tmp)))+(-770969864.2055655)))+((-2725270341)^x))/(-3093925722))>>(x&x))>>((tmp = -2705768192, tmp)>>>(((tmp = 577253091.6042917, tmp)/(((x&(((((x+x)>>>(-1000588972))/(x&(717414336)))^(tmp = 428782104.21504414, tmp))>>>(1084724288.953223)))%(tmp = -2130932217.4562194, tmp))&x))-(-286367389)))))+((x>>(tmp = 2001277117, tmp))>>((tmp = 1028512592, tmp)^((tmp = 2055148650, tmp)+((tmp = 1490798399, tmp)/(tmp = -2077566434.2678986, tmp))))))))));
+  assertEquals(-1, x |= (tmp = 1542129482, tmp));
+  assertEquals(-671816743, x &= (tmp = -671816743.9111726, tmp));
+  assertEquals(-1840333080, x -= (1168516337));
+  assertEquals(-1755382023, x |= ((((tmp = 2625163636.0142937, tmp)>>>((tmp = 1534304735, tmp)^x))-(tmp = -1959666777.9995313, tmp))%x));
+  assertEquals(-1750421896, x += (x>>>(tmp = -1364828055.1003118, tmp)));
+  assertEquals(-72864007, x %= (tmp = 239651127, tmp));
+  assertEquals(-72863956, x -= (((tmp = -1103261657.626319, tmp)*((tmp = 2789506613, tmp)+((tmp = 2294239314, tmp)>>>(2588428607.5454817))))>>x));
+  assertEquals(-170337477, x -= (tmp = 97473521, tmp));
+  assertEquals(-170337477, x |= (((tmp = 246292300.58998203, tmp)/(((tmp = -2664407492, tmp)|((-2416228818)^(tmp = 909802077, tmp)))%(tmp = 532643021.68109465, tmp)))/(tmp = 1015597843.8295637, tmp)));
+  assertEquals(1, x >>>= (((tmp = -2247554641.7422867, tmp)/(1186555294))%(tmp = -785511772.3124621, tmp)));
+  assertEquals(1188939891.668705, x -= (tmp = -1188939890.668705, tmp));
+  assertEquals(1188939891, x &= x);
+  assertEquals(1188413555, x &= (((tmp = -372965330.5709038, tmp)%(((tmp = 3108909487, tmp)|(x^(-1056955571.9951684)))^(-1549217484.009048)))/(x>>>(1403428437.9368362))));
+  assertEquals(-0.7343692094664643, x /= (-1618278026.4758227));
+  assertEquals(0, x -= x);
+  assertEquals(0, x &= (-2701762139.7500515));
+  assertEquals(0, x >>>= (((-1692761485.2299166)^x)+(tmp = -1221349575.938864, tmp)));
+  assertEquals(0, x <<= ((2148160230)<<x));
+  assertEquals(0, x <<= (((x<<(-740907931.38363))&(tmp = -930960051.6095045, tmp))>>(x/((tmp = -1921545150.1239789, tmp)/(-3015379806)))));
+  assertEquals(0, x <<= x);
+  assertEquals(NaN, x /= (x|x));
+  assertEquals(0, x >>= (tmp = -2265988773, tmp));
+  assertEquals(-0, x *= (((x<<(-928153614))<<(-989694208))^(2544757713.481016)));
+  assertEquals(0, x >>= ((tmp = 578009959.5299993, tmp)>>x));
+  assertEquals(0, x /= ((((tmp = 412689800.0431709, tmp)&(1630886276))*(tmp = 2028783080.7296097, tmp))/x));
+  assertEquals(0, x |= ((((x*(-2197198786))>>((2719887264.761987)<<(tmp = 2253246512, tmp)))-(tmp = -150703768.07045603, tmp))/(((-3160098146)%(((((1486098047.843547)>>(((tmp = -593773744.1144242, tmp)&(x<<(2651087978)))|((-680492758.930413)>>(tmp = 88363052.13662052, tmp))))<<x)<<(tmp = 2232672341, tmp))/((x<<x)&(((((348589117.64135563)<<(-1010050456.3097556))^(x/(tmp = -2282328795, tmp)))-(tmp = 1653716293, tmp))-((3157124731)/((tmp = 3007369535.341745, tmp)%(tmp = -2246556917, tmp)))))))+x)));
+  assertEquals(0, x >>= ((1935211663.5568764)>>(x-(tmp = 2116580032, tmp))));
+  assertEquals(-1725272693, x ^= (tmp = -1725272693, tmp));
+  assertEquals(313683, x >>>= (-1782632531.2877684));
+  assertEquals(0.009772287443565642, x /= (tmp = 32099240, tmp));
+  assertEquals(-647945916.9902277, x += (-647945917));
+  assertEquals(3647021380, x >>>= ((((((((2470411371.688199)<<x)>>x)-(x>>>((tmp = 1750747780, tmp)/x)))-x)<<(tmp = -2666186351.695101, tmp))^(((tmp = 2749205312.6666174, tmp)%x)&(2069802830.360536)))<<(tmp = 6051917.9244532585, tmp)));
+  assertEquals(-647939220, x |= ((x>>>((tmp = -2980404582.794245, tmp)>>>(-996846982)))^x));
+  assertEquals(-572178450, x |= ((-800571300.3277931)+(tmp = 2084365671, tmp)));
+  assertEquals(1172311208, x &= (x&((tmp = -1207487657.8953774, tmp)^x)));
+  assertEquals(12176516458994, x += ((((tmp = -1534997221, tmp)%(412142731))*((tmp = 2958726303, tmp)>>(1489169839)))+(((-574726407.2051775)>>>(((1772885017)<<(947804536.9958035))>>(-2406844737)))>>x)));
+  assertEquals(-1480065024, x <<= x);
+  assertEquals(-1736999042.227129, x += (tmp = -256934018.22712898, tmp));
+  assertEquals(-1338699394, x ^= ((((((x%(((tmp = -2551168455.222048, tmp)|(3213507293.930222))/((-1559278033)>>((tmp = 3107774495.3698573, tmp)-(2456375180.8660913)))))*((x*(tmp = 1088820004.8562922, tmp))+((tmp = 1850986704.9836102, tmp)%(tmp = -1226590364, tmp))))*(1786192008))&(((2193303940.310299)%(tmp = 1041726867.0602217, tmp))|((2210722848)/((-1293401295.6714435)&((tmp = 3052430315, tmp)|x)))))>>>(tmp = -2028014470.1524236, tmp))+(((1695818039.0383925)<<((1669068145)*(-2746592133.899276)))<<(tmp = 519092169, tmp))));
+  assertEquals(-334674849, x >>= (1170377794));
+  assertEquals(-10214, x >>= ((tmp = 1074704264.3712895, tmp)>>>((tmp = -1200860192, tmp)^((tmp = 539325023.4101218, tmp)*((tmp = -588989295, tmp)|x)))));
+  assertEquals(1384169472, x &= (1384171140));
+  assertEquals(1384169472, x >>>= ((tmp = -2161405973.830981, tmp)*(tmp = 2054628644, tmp)));
+  assertEquals(1610140972, x |= (527961388));
+  assertEquals(1073273198, x += ((tmp = -259650225.71344328, tmp)&(tmp = -344359694, tmp)));
+  assertEquals(65507, x >>= ((x<<((tmp = 2925070713.5245204, tmp)%(x+((tmp = -1229447799, tmp)/(((x/(x|(((-2337139694)|((((((2996268529.7965417)&x)%(((tmp = -1088587413, tmp)>>(-1384104418.90339))>>((tmp = -1643984822.3946526, tmp)+x)))%(((1118125268.4540217)-((((-1975051668.6652594)-(-704573232))+((tmp = 1674952373, tmp)/(tmp = 1321895696.0062659, tmp)))*(tmp = 1820002533.2021284, tmp)))>>>(tmp = -583960746.9993203, tmp)))|((tmp = -2577675508.550925, tmp)&x))/(tmp = 1459790066, tmp)))/(((((1051712301.7804044)&(tmp = -2726396354, tmp))^(tmp = 263937254.18934345, tmp))+(((x^x)*(((tmp = -2289491571, tmp)+x)%(-2239181148)))&x))>>(tmp = -1743418186.3030887, tmp)))))/(tmp = 1475718622, tmp))<<x)))))|(x&((((tmp = -2934707420, tmp)<<x)/x)^(1022527598.7386684)))));
+  assertEquals(2047, x >>= (x-(tmp = 2300626270, tmp)));
+  assertEquals(8384512, x <<= (tmp = -1917680820, tmp));
+  assertEquals(0, x <<= (2393691134));
+  assertEquals(0, x >>= x);
+  assertEquals(649995936.5853252, x -= (tmp = -649995936.5853252, tmp));
+  assertEquals(649995936, x &= x);
+  assertEquals(-0.33672017582945424, x /= (tmp = -1930374188, tmp));
+  assertEquals(-0.33672017582945424, x += (x&((1208055031)^(-2761287670.968586))));
+  assertEquals(0, x |= x);
+  assertEquals(0, x <<= ((-2038368978)/x));
+  assertEquals(0, x >>= (x&((tmp = 2481378057.738218, tmp)&(x+(1172701643)))));
+  assertEquals(0, x <<= ((x*(((((((tmp = 70690601.3046323, tmp)&(((((((((((x+(x+(x^(3118107461))))<<(264682213.41888392))&(tmp = -709415381.8623683, tmp))%(((((-1840054964)>>>(tmp = -405893120.89603686, tmp))|((-625507229)^(3128979265)))>>(x>>((tmp = -2480442390, tmp)*((x>>(tmp = -421414980.88330936, tmp))>>>((tmp = 1850868592, tmp)&(-2948543832.879225))))))|((2986545185)&((tmp = -1947550706, tmp)%(((tmp = 2590238422.1414256, tmp)/(((tmp = -361038812, tmp)>>x)|(((tmp = 1798444068, tmp)|((x&((tmp = -3104542069, tmp)-x))*((tmp = -1158658918, tmp)+((tmp = 2777031040.5552707, tmp)<<(-2816019335.9008327)))))<<x)))/(((2287795988.231702)/x)/(((-2588712925)>>>(2521189250))*((tmp = -2533527920, tmp)+(tmp = 1762281307.2162101, tmp)))))))))/x)/(tmp = 1047121955.5357032, tmp))|(((-121292251)<<(x^(x-(tmp = 1420006180, tmp))))%((-2278606219)>>>(((tmp = -1412487726, tmp)&(((((tmp = 253596554.16016424, tmp)/(tmp = 2083376247.0079951, tmp))^(x^((1549116789.8449988)>>>((((-1844170084)^(tmp = 1886066422, tmp))&x)<<(34918329)))))^(tmp = -440805555.3369155, tmp))-x))%(-1936512969)))))+(2911511178.4035435))|(1012059391))|(x>>>(tmp = -2551794626.158037, tmp)))+((2926596072.210515)/(tmp = -280299595.0450909, tmp))))&((tmp = 1501086971, tmp)^(tmp = 2114076983, tmp)))-((-1679390574.1466925)-(941349044)))-((x>>x)>>((-2600539474.2033434)+(tmp = 2567056503.9079475, tmp))))*(tmp = 1285896052, tmp))%(((tmp = 1191465410.7595167, tmp)>>((tmp = -2857472754, tmp)%x))>>>(((tmp = 1960819627.6552541, tmp)&(-2651207221.127376))*((((-687312743)+((x>>x)<<x))|((((((1549588195)*((tmp = 2733091019, tmp)^((527322540)<<(x>>x))))%(tmp = -2063962943, tmp))*x)*(734060600))&(-3049417708)))+(((((1084267726)+((x|x)^((tmp = -1917070472.4858549, tmp)%((690016078.9375831)*x))))%((((((tmp = -2091172769, tmp)%(2532365378))>>>(-871354260))/(tmp = 254167019.07825458, tmp))&(1330216175.9871218))>>(tmp = 1931099207, tmp)))^(-1116448185.2618852))>>((961660080.8135855)/x)))))))>>>(-1486048007.7053368)));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x %= (tmp = -1202200444.6506357, tmp));
+  assertEquals(-0, x *= (-527500796.4145117));
+  assertEquals(0, x >>= (tmp = -2082822707, tmp));
+  assertEquals(0, x *= ((-1882398459.290778)>>>x));
+  assertEquals(0, x &= (x/(tmp = -1569332286.392817, tmp)));
+  assertEquals(-390169607, x |= (-390169607.11600184));
+  assertEquals(-780339214, x += x);
+  assertEquals(-780339214, x %= (2765959073));
+  assertEquals(-5954, x >>= (tmp = -1900007055, tmp));
+  assertEquals(743563420, x &= ((((-1520146483.5367205)|(-2075330284.3762321))-(tmp = -2263151872, tmp))%(-1264641939.957402)));
+  assertEquals(1487126840, x += (x>>>(((x+((tmp = -1263274491, tmp)>>>x))&(470419048.0490037))%(tmp = -2642587112, tmp))));
+  assertEquals(Infinity, x /= (x^x));
+  assertEquals(0, x ^= ((tmp = -1436368543, tmp)+(x/(tmp = -1125415374.3297129, tmp))));
+  assertEquals(0, x += x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x &= (tmp = 3101147204.2905564, tmp));
+  assertEquals(0, x &= (tmp = 2914487586.606511, tmp));
+  assertEquals(0, x += x);
+  assertEquals(0, x -= (((-1738542908.6138556)&(((x+x)-(tmp = -2801153969, tmp))%(tmp = -1206684064.1477358, tmp)))>>((-2575546469.271897)|(tmp = -2573119106, tmp))));
+  assertEquals(-1468808707, x ^= (tmp = -1468808707, tmp));
+  assertEquals(1357349882, x <<= (tmp = -2808501087.7003627, tmp));
+  assertEquals(-572025862, x |= ((((tmp = -2415486246.573399, tmp)/((tmp = -707895732.4593301, tmp)&x))%((-1960091005.0425267)*(972618070.9166157)))-(1649962343)));
+  assertEquals(327213586796843100, x *= (x%(1337884626)));
+  assertEquals(42991616, x &= (-2905576654.1280055));
+  assertEquals(-26049289585042860, x *= (-605915571.6557121));
+  assertEquals(597809748, x >>= ((362850791.077795)/(tmp = 1222777657.4401796, tmp)));
+  assertEquals(597809748, x |= x);
+  assertEquals(770065246, x -= ((-711227660)|(tmp = -508554506, tmp)));
+  assertEquals(593000483097040500, x *= x);
+  assertEquals(0, x %= x);
+  assertEquals(0, x <<= (317862995.456813));
+  assertEquals(0, x >>= ((tmp = 2518385735, tmp)+((-2973864605.267604)/(-930953312.718833))));
+  assertEquals(1227822411, x ^= (x^(1227822411.8553264)));
+  assertEquals(1090520320, x &= (x+((((-2100097959)>>(x/(tmp = -2002285068, tmp)))/(-364207954.9242482))-((tmp = 2771293106.7927113, tmp)-(tmp = -847237774, tmp)))));
+  assertEquals(1090520320, x >>= (((((2439492849)<<((-2932672756.2578926)*((743648426.7224461)+((2942284935)<<((x/(((tmp = 886289462.6565771, tmp)+(-459458622.7475352))>>(tmp = -785521448.4979162, tmp)))|(tmp = -11630282.877367258, tmp))))))-(tmp = -647511106.9602091, tmp))^x)&x));
+  assertEquals(115944291.48829031, x %= (243644007.12792742));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>>= ((tmp = -819782567, tmp)%(tmp = 2774793208.1994505, tmp)));
+  assertEquals(0, x >>= (tmp = 721096000.2409859, tmp));
+  assertEquals(0, x &= ((x%x)%x));
+  assertEquals(-0, x *= ((-1670466344)<<x));
+  assertEquals(0, x >>= (-677240844.904707));
+  assertEquals(NaN, x %= (((((-1575993236.6126876)/(-2846264078.9581823))^((((-2220459664)-(((-1809496020)>>>(tmp = -3015964803.4566207, tmp))&x))/(tmp = -3081895596.0486784, tmp))>>>(x&x)))%(x^(-1338943139)))^(x-((((2074140963.2841332)^(tmp = 1878485274, tmp))%(((x/(-2568856967.6491556))^x)<<((x+x)^((((2139002721)|(x<<(-1356174045.840464)))>>x)-(tmp = 2305062176, tmp)))))>>>(((((x<<(tmp = -1663280319.078543, tmp))-((1498355849.4158854)-((-1321681257)>>>(tmp = -1321415088.6152222, tmp))))^(-2266278142.1584673))+(858538943))&((((x-((x|(((tmp = -1576599651, tmp)+((tmp = 1595319586, tmp)&(-2736785205.9203863)))>>((x+((-1856237826)+x))<<(tmp = -1590561854.3540869, tmp))))^(((-41283672.55606127)&(tmp = 2971132248, tmp))+x)))/(-849371349.1667476))%(x*((-1705070934.6892798)>>>x)))<<((2418200640)*x)))))));
+  assertEquals(0, x >>>= (tmp = 664214199.5283061, tmp));
+  assertEquals(0, x <<= ((-2827299151)<<(1815817649)));
+  assertEquals(1405772596, x |= (tmp = 1405772596, tmp));
+  assertEquals(-1483422104, x <<= (-2791499935.6822596));
+  assertEquals(-45271, x >>= (1740128943.4254808));
+  assertEquals(-45271, x <<= ((2072269957)-((tmp = -2553664811.4472017, tmp)*(tmp = -2502730352, tmp))));
+  assertEquals(1192951471.6745887, x -= (-1192996742.6745887));
+  assertEquals(-353370112, x <<= (tmp = -1410280844, tmp));
+  assertEquals(0, x ^= (x%((2754092728)*(-1017564599.1094015))));
+  assertEquals(-2662096003.2397957, x -= (tmp = 2662096003.2397957, tmp));
+  assertEquals(-2587094028.50764, x -= (tmp = -75001974.7321558, tmp));
+  assertEquals(6693055512339889000, x *= x);
+  assertEquals(897526784, x %= (x-((tmp = 897526813, tmp)%(-1525574090))));
+  assertEquals(7011928, x >>= ((-440899641.344357)%x));
+  assertEquals(8382047686388683, x += (x*(1195398423.8538609)));
+  assertEquals(16764095372777366, x += x);
+  assertEquals(16764096859576696, x -= (tmp = -1486799329.7207344, tmp));
+  assertEquals(16764099774187724, x += (2914611029));
+  assertEquals(16764102926624664, x -= (-3152436939.724612));
+  assertEquals(-538220648, x |= x);
+  assertEquals(269110324, x /= (((-2114698894.6014318)/(tmp = 767687453, tmp))>>(623601568.1558858)));
+  assertEquals(256, x >>= x);
+  assertEquals(-293446891, x += (x+(-293447403)));
+  assertEquals(119, x >>>= ((1759400753)>>(2481263470.4489403)));
+  assertEquals(14, x >>= (762849027.89693));
+  assertEquals(16, x += (x&(x>>(1104537666.1510491))));
+  assertEquals(-12499808227.980995, x *= (tmp = -781238014.2488122, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(1, x &= x);
+  assertEquals(0, x >>>= ((tmp = 1513381008, tmp)|(tmp = 1593208075.7259543, tmp)));
+  assertEquals(0, x &= (-788154636.2843091));
+  assertEquals(-0, x /= (tmp = -2124830879, tmp));
+  assertEquals(0, x &= (934237436));
+  assertEquals(0, x |= x);
+  assertEquals(-79370942.97651315, x += (-79370942.97651315));
+  assertEquals(-79370942.97651315, x %= ((tmp = -2683255523, tmp)<<(tmp = 2323123280.287587, tmp)));
+  assertEquals(-79370942, x |= x);
+  assertEquals(0.05861647801688159, x /= (-1354072177.061561));
+  assertEquals(0, x <<= (((((((tmp = 1989257036, tmp)&(tmp = 1565496213.6578887, tmp))&x)&(tmp = -2798643735.905287, tmp))&(2354854813.43784))%(tmp = 1118124748, tmp))<<((tmp = 2453617740, tmp)*(((tmp = 1762604500.492329, tmp)<<(-2865619363))%(((2474193854.640994)|((tmp = 1425847419.6256948, tmp)|(((-1271669386)%((x|((tmp = -2059795445.3607287, tmp)+x))*(x*x)))>>>(tmp = -2997360849.0750895, tmp))))/(tmp = 2326894252, tmp))))));
+  assertEquals(0, x >>>= ((-671325215)/((-727408755.8793397)>>(tmp = 315457854, tmp))));
+  assertEquals(0, x >>= (x&x));
+  assertEquals(0, x <<= ((x/x)>>>(((((x&x)-((x*(((tmp = -2689062497.0087833, tmp)^x)/((-1465906334.9701924)<<(tmp = -349000262, tmp))))*x))%(1630399442.5429945))*x)+((tmp = 605234630, tmp)%(tmp = 2325750892.5065155, tmp)))));
+  assertEquals(0, x |= (x%((x>>(((((tmp = 1622100459, tmp)<<x)&((((((tmp = 2411490075, tmp)<<x)|x)>>((x<<x)-(-2133780459)))/x)&(x+x)))%(x/((((tmp = 580125125.5035453, tmp)>>>(-470336002.1246581))|((tmp = 871348531, tmp)*x))>>(2866448831.23781))))-((2352334552)-(-562797641.6467373))))-(x^(tmp = -681731388, tmp)))));
+  assertEquals(0, x <<= (tmp = -1358347010.3729038, tmp));
+  assertEquals(-260967814, x |= ((tmp = -260967814.45976686, tmp)%(tmp = 1126020255.1772437, tmp)));
+  assertEquals(NaN, x %= ((((tmp = 3176388281, tmp)<<(tmp = 611228283.2600244, tmp))>>>((tmp = 3068009824, tmp)+(tmp = 2482705111, tmp)))>>>((tmp = -750778285.2580311, tmp)>>>x)));
+  assertEquals(0, x <<= (x>>>x));
+  assertEquals(0, x /= (1238919162));
+  assertEquals(0, x >>= (x^x));
+  assertEquals(0, x &= (-2137844801));
+  assertEquals(0, x >>>= (x^(x*(-1774217252))));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x |= x);
+  assertEquals(0, x &= (x<<(tmp = 2791377560, tmp)));
+  assertEquals(-1330674638.8117397, x += (tmp = -1330674638.8117397, tmp));
+  assertEquals(353, x >>>= (-212202857.4320326));
+  assertEquals(353, x ^= ((((x+(tmp = 1448262278, tmp))-(-3141272537))>>(tmp = 1116596587.7832575, tmp))>>>((x-(((tmp = 303953098, tmp)>>>((tmp = 691514425, tmp)/((176223098)*(((2876180016)%(-1805235275.892374))|x))))<<(((tmp = 528736141.838547, tmp)^(2556817082))*(2898381286.2846575))))|((-1445518239)&(tmp = 389789481.9604758, tmp)))));
+  assertEquals(0, x >>>= (-227376461.14343977));
+  assertEquals(0, x <<= (tmp = -2575967504, tmp));
+  assertEquals(0, x <<= (x^((-2668391896)>>((x+(tmp = 598697235.9205595, tmp))+((((-2105306785)|((-1174912319.794015)>>>(x-((148979923)%((((tmp = -2459140558.4436393, tmp)|(1265905916.494016))^(tmp = 1213922357.2230597, tmp))|(1028030636))))))%x)+(((tmp = 1393280827.0135512, tmp)^((tmp = 1210906638, tmp)+(-1572777641.1396031)))<<x))))));
+  assertEquals(0, x *= (tmp = 2134187165, tmp));
+  assertEquals(-1084549964, x -= (tmp = 1084549964, tmp));
+  assertEquals(-2045706240, x &= ((tmp = -1250758905.7889671, tmp)*(x+(((x<<(x/(tmp = -738983664.845448, tmp)))>>>x)&(tmp = 2197525295, tmp)))));
+  assertEquals(-2045706240, x ^= (((522049712.14743733)>>(tmp = -2695628092, tmp))>>>(tmp = -2603972068, tmp)));
+  assertEquals(2249261056, x >>>= x);
+  assertEquals(-33291, x |= ((((1891467762)<<(184547486.213719))-((458875403.50689447)^(((x&(x*x))|x)%(-3127945140))))|(-100765232)));
+  assertEquals(-33291, x %= (1460486884.1367688));
+  assertEquals(-1, x >>= (tmp = -2667341441, tmp));
+  assertEquals(-3.6289151568259606e-10, x /= (tmp = 2755644474.4072013, tmp));
+  assertEquals(-3.6289151568259606e-10, x %= (tmp = 1186700893.0751028, tmp));
+  assertEquals(0, x <<= (tmp = -1199872107.9612694, tmp));
+  assertEquals(371216449, x ^= ((tmp = 371324611.1357789, tmp)&(x-(x|((tmp = -518410357, tmp)>>((tmp = 687379733, tmp)/x))))));
+  assertEquals(0.3561383159088311, x /= (((((x%(((((-2293101242)%((((495316779)/x)-((-3198854939.8857965)>>>((tmp = -288916023, tmp)-(x^(tmp = -2504080119.431858, tmp)))))^(-1201674989)))-((2965433901)*(405932927)))/((1974547923)|(tmp = 534069372, tmp)))-(x-((x+(-1258297330))%x))))<<(((-2648166176.4947824)^(-3043930615))&(1550481610)))<<(tmp = -3118264986.743822, tmp))<<x)|x));
+  assertEquals(-46272499.15029934, x -= (tmp = 46272499.50643766, tmp));
+  assertEquals(-6, x >>= ((tmp = -731454087.0621192, tmp)>>>x));
+  assertEquals(-2.7207928474520667e-9, x /= (((x<<(x|((tmp = -1650731700.9540024, tmp)/(tmp = -677823292, tmp))))^((((((1972576122.928667)>>x)%(2952412902.115453))<<((-2888879343)+(tmp = -425663504, tmp)))>>>(((((tmp = 1089969932, tmp)>>>(x|((-2088509661)/(1131470551))))>>>x)+x)|(tmp = 955695979.7982506, tmp)))|(((((tmp = 826954002.6188571, tmp)^(2016485728))|((x/(((x<<(tmp = 2493217141, tmp))/(-2259979800.997408))-(tmp = -427592173.41389966, tmp)))%(((-471172918)/x)>>>((383234436.16425097)&(tmp = 1664411146.5308032, tmp)))))*(tmp = 1863669754.7545495, tmp))*(x>>(2062197604)))))>>>((x-(2624545856))*(tmp = 1025803102, tmp))));
+  assertEquals(0, x >>= ((tmp = 1068702028, tmp)*(296106770)));
+  assertEquals(0, x ^= (x/x));
+  assertEquals(85359536, x ^= (((x|(((tmp = 740629227, tmp)<<(-1107397366))%((tmp = 2315368172, tmp)>>(((-2269513683)|(-2698795048))+(-396757976)))))*(929482738.803125))^(((-1415213955.4198723)-(tmp = -2885808324, tmp))>>>((tmp = -472842353.85736656, tmp)&(tmp = 1684231312.4497018, tmp)))));
+  assertEquals(2075131904, x <<= x);
+  assertEquals(123, x >>>= (x>>>(tmp = 754093009, tmp)));
+  assertEquals(0, x >>= ((-2690948145)/((1988638799)+x)));
+  assertEquals(0, x >>>= (tmp = -798849903.2467625, tmp));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x *= (2431863540.4609756));
+  assertEquals(484934656, x |= ((-2322193663)*(tmp = -2754666771, tmp)));
+  assertEquals(-82505091404694530, x *= (tmp = -170136513, tmp));
+  assertEquals(-82505090515370620, x += ((-148762237)&(tmp = 889417717, tmp)));
+  assertEquals(-908221124, x %= (tmp = -2346393300, tmp));
+  assertEquals(-1242515799, x ^= (2083328917));
+  assertEquals(-1126056310271520600, x *= ((((tmp = -3065605442, tmp)<<(-3012703413))|x)^(-2081329316.4781387)));
+  assertEquals(-1126056309941068000, x += ((((tmp = 1886925157, tmp)&((tmp = -163003119.31722307, tmp)/((tmp = 2094816076, tmp)>>((tmp = -706947027, tmp)^x))))^((1819889650.5261197)<<(-1641091933)))>>x));
+  assertEquals(-1864360191, x |= (((x/x)|x)|x));
+  assertEquals(-1864360191, x &= x);
+  assertEquals(-3728720382, x += x);
+  assertEquals(1042663165, x ^= (535165183.4230335));
+  assertEquals(2644530017.8833704, x += (1601866852.8833704));
+  assertEquals(-574949401, x |= ((tmp = 943193254.5210983, tmp)^((x%(tmp = -2645213497, tmp))*(-1904818769))));
+  assertEquals(1763223578, x ^= ((x^(tmp = -2244359016, tmp))^(tmp = 320955522, tmp)));
+  assertEquals(-1.9640961474334235, x /= (tmp = -897727731.0502782, tmp));
+  assertEquals(1, x >>>= (x-(-3183031393.8967886)));
+  assertEquals(1, x &= (tmp = 1732572051.4196641, tmp));
+  assertEquals(1, x >>= (-1642797568));
+  assertEquals(-2339115203.3140306, x += (-2339115204.3140306));
+  assertEquals(1955852093, x ^= (((((-1469402389)/(-2648643333.1454573))>>>x)<<(x/x))>>x));
+  assertEquals(-965322519, x ^= (3001399252));
+  assertEquals(-2139727840, x &= (tmp = 2298411812.964484, tmp));
+  assertEquals(2103328, x &= (tmp = -2488723009, tmp));
+  assertEquals(1799011007, x |= (tmp = -2498057537.226923, tmp));
+  assertEquals(1799011007, x |= ((-308193085)>>>x));
+  assertEquals(1799011007, x |= x);
+  assertEquals(818879107, x ^= (1542823996.423564));
+  assertEquals(-2601416919234843600, x *= ((-2357923057.076759)-x));
+  assertEquals(-2601416920481796600, x -= (x|(tmp = -3048039765, tmp)));
+  assertEquals(-33690112, x <<= x);
+  assertEquals(1039491072, x &= (tmp = 1039491474.3389125, tmp));
+  assertEquals(126891, x >>= (-3079837011.6151257));
+  assertEquals(-163191923097543, x *= (((tmp = -2847221258.4048786, tmp)*(x-(tmp = 1527622853.5925639, tmp)))^x));
+  assertEquals(753616551, x ^= (-946895202));
+  assertEquals(-347691264, x <<= (tmp = -433184408.33790135, tmp));
+  assertEquals(0, x <<= (x|(tmp = -1911731462.6835637, tmp)));
+  assertEquals(-0, x *= (tmp = -2616154415.1662617, tmp));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x -= x);
+  assertEquals(0, x *= (2272504250.501526));
+  assertEquals(0, x ^= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x >>>= (2475346113));
+  assertEquals(NaN, x /= (((x+(-2646140897))&(((tmp = 1039073714.142481, tmp)-x)*x))|(x*(((-1277822905.773948)>>(tmp = 2035512354.2400663, tmp))*(77938193.80013895)))));
+  assertEquals(0, x ^= (x<<(tmp = 2491934268, tmp)));
+  assertEquals(0, x &= (tmp = 569878335.4607931, tmp));
+  assertEquals(-88575883, x ^= ((453890820.8012209)-((1569189876)%((-1280613677.7083852)^(-1902514249.29567)))));
+  assertEquals(-88575883, x %= (tmp = 257947563.19206762, tmp));
+  assertEquals(-88575881.7863678, x -= ((tmp = 1257547359.029678, tmp)/(x^(tmp = 948265672.821815, tmp))));
+  assertEquals(-169, x >>= (tmp = -2530523309.6703596, tmp));
+  assertEquals(-1, x >>= x);
+  assertEquals(-1, x |= x);
+  assertEquals(131071, x >>>= (-673590289));
+  assertEquals(1117196836, x -= (-1117065765));
+  assertEquals(3092236000.7125187, x -= (-1975039164.7125185));
+  assertEquals(1, x /= x);
+  assertEquals(-1599945863, x ^= (tmp = 2695021432.453696, tmp));
+  assertEquals(940543782, x ^= (tmp = 2561494111, tmp));
+  assertEquals(891400321673221800, x *= (tmp = 947749949.2662871, tmp));
+  assertEquals(-1509927296, x >>= ((tmp = 1113290009, tmp)-x));
+  assertEquals(-23, x >>= (tmp = 3216989626.7370152, tmp));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x <<= (431687857.15246475));
+  assertEquals(-0, x /= (tmp = -1924652745.081665, tmp));
+  assertEquals(0, x <<= (1312950547.2179976));
+  assertEquals(0, x %= ((tmp = 2110842937.8580878, tmp)|(x<<x)));
+  assertEquals(0, x >>>= ((((-386879000)-((tmp = -2334036143.9396124, tmp)/((tmp = 965101904.2841234, tmp)<<(((3029227182.8426695)<<((tmp = -464466927, tmp)>>((((((tmp = 849594477.4111787, tmp)^(x&((513950657.6663146)%(x>>>x))))-((2898589263)|x))+(tmp = 2842171258.621288, tmp))>>>(tmp = -3158746843, tmp))<<(tmp = -2891369879, tmp))))-(x-(x&(tmp = -1707413686.2706504, tmp)))))))-(-2860419051))*(-1708418923)));
+  assertEquals(-328055783, x += ((((2857010474.8010874)|((tmp = -1415997622.320347, tmp)-(-1706423374)))%(tmp = 824357977.1339042, tmp))^(x>>(x|x))));
+  assertEquals(-168539902503779140, x *= ((tmp = -1057687018, tmp)<<((1408752963)-(2030056734))));
+  assertEquals(-Infinity, x /= ((x-(2232683614.320658))*(((tmp = 195551174, tmp)*((((739595970)>>>(tmp = -2218890946.8788786, tmp))>>>(((tmp = -240716255.22407627, tmp)&(((((1598029916.3478878)|((tmp = -881749732, tmp)+(x>>x)))^(4443059))<<(((tmp = 2453020763, tmp)+((x>>>(tmp = -1904203813, tmp))&(-355424604.49235344)))<<(tmp = 2814696070, tmp)))%((tmp = -250266444, tmp)>>>(((((2710614972)&(((tmp = 910572052.6994087, tmp)^(tmp = -1028443184.3220406, tmp))/((-2718010521)^(tmp = 676361106, tmp))))|x)^(-1326539884))>>(-1573782639.7129154)))))/(tmp = 1923172768, tmp)))>>>(tmp = -2858780232.4886074, tmp)))/((((((-2060319376.353397)%x)>>(tmp = -3122570085.9065285, tmp))/(tmp = -1499018723.8064275, tmp))*((-655257391)<<x))>>x))));
+  assertEquals(NaN, x += ((3059633304)%((((tmp = 2538190083, tmp)*((tmp = -2386800763.356364, tmp)/x))&(1341370996))%(-2929765076.078223))));
+  assertEquals(NaN, x %= ((x&(347774821))>>>(462318570.2578629)));
+  assertEquals(NaN, x *= ((2829810152.071517)*(tmp = 768565684.6892327, tmp)));
+  assertEquals(NaN, x -= x);
+  assertEquals(0, x >>>= (x&(tmp = 1786182552, tmp)));
+  assertEquals(973967377, x ^= ((tmp = 2115869489.836838, tmp)&(994956497)));
+  assertEquals(985246427.4230617, x += (11279050.423061728));
+  assertEquals(985246427, x &= x);
+  assertEquals(0, x >>= ((tmp = 1090502660.1867907, tmp)>>((-1599370623.5747645)-(tmp = -1321550958, tmp))));
+  assertEquals(0, x %= (tmp = -2386531950.018572, tmp));
+  assertEquals(0, x >>>= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x >>>= (tmp = -1535987507.682257, tmp));
+  assertEquals(-0, x /= (-2570639987));
+  assertEquals(-542895632, x |= (tmp = -542895632, tmp));
+  assertEquals(-33930977, x >>= (tmp = -861198108.1147206, tmp));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x ^= (x*(-608154714.1872904)));
+  assertEquals(-140011520, x |= ((tmp = 377418995, tmp)<<((1989575902)>>(tmp = -2558458031.066773, tmp))));
+  assertEquals(-140026048, x -= ((((tmp = 1465272774.7540011, tmp)<<((2164701398)<<(tmp = -818119264, tmp)))>>((tmp = -1490486001, tmp)>>(664410099.6412607)))>>(x>>>(((tmp = -2438272073.2205153, tmp)%(tmp = 2142162105.4572072, tmp))-(tmp = 2259040711.6543813, tmp)))));
+  assertEquals(39214588236996610, x *= (x<<(-401696127.06632423)));
+  assertEquals(1, x /= x);
+  assertEquals(0, x %= x);
+  assertEquals(0, x *= ((tmp = -1709874807.176726, tmp)&(-2786424611)));
+  assertEquals(-1320474063.3408537, x += (tmp = -1320474063.3408537, tmp));
+  assertEquals(88, x >>>= (tmp = -3179247911.7094674, tmp));
+  assertEquals(1606348131, x += ((tmp = 1555621121.5726175, tmp)|(-3026277110.9493155)));
+  assertEquals(200793516, x >>>= x);
+  assertEquals(-2952688672.1074514, x -= (tmp = 3153482188.1074514, tmp));
+  assertEquals(1342278624, x >>>= ((x>>>((tmp = 1264475713, tmp)-(-913041544)))>>>((tmp = 2008379930, tmp)%(tmp = 3105129336, tmp))));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x /= (tmp = 788363717, tmp));
+  assertEquals(430466213, x -= (tmp = -430466213, tmp));
+  assertEquals(164757385222499550, x *= (tmp = 382741735, tmp));
+  assertEquals(164757385222499550, x %= (((tmp = 1974063648, tmp)%((806015603)>>>x))*((tmp = 2836795324, tmp)<<(tmp = -1785878767, tmp))));
+  assertEquals(-190957725.86956096, x /= (x^((-2939333300.066044)-(x|(-2085991826)))));
+  assertEquals(-190957725.86956096, x %= (tmp = -948386352, tmp));
+  assertEquals(0.6457336106922105, x /= (-295722141));
+  assertEquals(0, x |= ((415991250)&((x>>(tmp = -3188277823, tmp))<<(511898664.1008285))));
+  assertEquals(0, x &= ((793238922)|x));
+  assertEquals(-1576701979, x ^= (2718265317));
+  assertEquals(-49271937, x >>= x);
+  assertEquals(-49271937, x |= x);
+  assertEquals(-49271937, x &= x);
+  assertEquals(775316382, x -= (-824588319));
+  assertEquals(912498176, x <<= (tmp = -2223542776.836312, tmp));
+  assertEquals(0, x -= (x&((tmp = 1999412385.1074471, tmp)/(-1628205254))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>= (-768730139.7749677));
+  assertEquals(-1861304245, x |= (((5128483)^(((tmp = -1768372004, tmp)/(x^(tmp = 1310002444.757094, tmp)))*((tmp = 188242683.09898067, tmp)^(tmp = -2263757432, tmp))))^((tmp = 2223246327, tmp)*((tmp = -2360528979, tmp)-((tmp = 2442334308, tmp)>>(458302081))))));
+  assertEquals(1, x /= x);
+  assertEquals(2, x += x);
+  assertEquals(1, x /= x);
+  assertEquals(0, x ^= x);
+  assertEquals(-0, x *= (-1852374359.3930533));
+  assertEquals(0, x <<= (tmp = 1223645195.148961, tmp));
+  assertEquals(1789655087, x |= ((-2505312209.770559)>>x));
+  assertEquals(-65568768, x <<= x);
+  assertEquals(4229398528, x >>>= x);
+  assertEquals(-8408187, x |= (-3029781627));
+  assertEquals(-8408187, x |= (((2322165037)-((tmp = -1424506897.362995, tmp)%x))&x));
+  assertEquals(-7884926, x += (x>>>(x|(2738095820))));
+  assertEquals(-7884926, x %= (576507013));
+  assertEquals(751801768, x ^= (tmp = -750241238, tmp));
+  assertEquals(-1986010067668600800, x *= (tmp = -2641667195, tmp));
+  assertEquals(1921196240, x ^= (x%(-1954178308)));
+  assertEquals(847388880, x ^= ((tmp = 1632856124, tmp)&((tmp = -1536309755, tmp)<<(tmp = -3158362800, tmp))));
+  assertEquals(-469662000.6651099, x += (tmp = -1317050880.6651099, tmp));
+  assertEquals(-812358332, x ^= ((-2832480471)>>>(2016495937)));
+  assertEquals(21, x ^= (((tmp = 1815603134.2513008, tmp)/((tmp = 147415927, tmp)%(-1059701742)))+x));
+  assertEquals(-2844409139.792712, x += (tmp = -2844409160.792712, tmp));
+  assertEquals(177070, x >>>= x);
+  assertEquals(0, x %= x);
+  assertEquals(0, x >>= x);
+  assertEquals(1459126376, x ^= (tmp = -2835840920, tmp));
+  assertEquals(1459126376, x %= (-1462864282));
+  assertEquals(0, x >>>= (tmp = 2922724319, tmp));
+  assertEquals(338995506, x ^= (338995506.6411549));
+  assertEquals(336896258, x &= (2635904967));
+  assertEquals(336634112, x -= (x&(tmp = 1659656287, tmp)));
+  assertEquals(NaN, x %= (x-x));
+  assertEquals(NaN, x /= (tmp = -674606200, tmp));
+  assertEquals(NaN, x %= ((x|(2788108542))/(x+(tmp = 600941473, tmp))));
+  assertEquals(0, x >>>= ((-1858251597.3970242)>>>x));
+  assertEquals(1951294747, x |= (tmp = 1951294747, tmp));
+  assertEquals(1951294747, x &= x);
+  assertEquals(-153190625, x |= (-1500095737));
+  assertEquals(23467367587890624, x *= x);
+  assertEquals(346531290.1813514, x /= (((((-513617734.11148167)|x)/((tmp = -2042982150.1170752, tmp)%((x%((x%x)>>>(((-1369980151)&(((922678983)%(x&(tmp = -855337708, tmp)))-((tmp = -2717183760, tmp)>>>((1939904985.4701347)%(((tmp = -2473316858, tmp)&((tmp = -599556221.9046664, tmp)>>((tmp = -6352213, tmp)/x)))&x)))))%x)))/((tmp = -1842773812.8648412, tmp)>>>(((x>>>(tmp = 499774063, tmp))<<(((tmp = -1353532660.5755146, tmp)*(-3070956509))>>(((-905883994.0188017)>>(tmp = -16637173, tmp))<<((tmp = 471668537, tmp)*((tmp = -232036004.26637793, tmp)/x)))))&(tmp = 85227224, tmp))))))>>>(x|(-2528471983)))-((tmp = 1531574803, tmp)+((x>>>x)-(2889291290.158888)))));
+  assertEquals(-94.42225749399837, x /= (((tmp = 2381634642.1432824, tmp)>>(tmp = -2637618935, tmp))|(2307200473)));
+  assertEquals(-47, x >>= (1524333345.141235));
+  assertEquals(-2.8699253616435082e-8, x /= (1637673252));
+  assertEquals(0, x |= x);
+  assertEquals(1083427040, x += ((-2012055268)<<(tmp = -2192382589.6911573, tmp)));
+  assertEquals(1083427040, x %= (x*x));
+  assertEquals(2694039776, x += ((((-1740065704.9004602)<<(-736392934))%(2781638048.424092))>>>(x&x)));
+  assertEquals(-1600927520, x |= ((tmp = 2904430054.869525, tmp)*(((1054051883.4751332)*x)*((-939020743)-(tmp = 1636935481.1834455, tmp)))));
+  assertEquals(-1600927520, x -= (x%x));
+  assertEquals(3037584978216498700, x *= (tmp = -1897390694, tmp));
+  assertEquals(372598954.1823988, x %= (tmp = 1553763703.5082102, tmp));
+  assertEquals(-1476395008, x <<= ((x>>((tmp = 282496335.49494267, tmp)^((-1948623419.6947453)|((((((tmp = -1203306995, tmp)-(-5554612.355098486))>>>(1867254951.4836824))>>x)|(-695777865))/((-59122652.19377303)<<(-609999229.7448442))))))>>(x/(tmp = -1207010654.9993455, tmp))));
+  assertEquals(-2.2540185787941605, x /= (((tmp = 1364159859.9199843, tmp)*x)>>x));
+  assertEquals(-2, x |= x);
+  assertEquals(2241824008, x *= ((3174055292.962967)>>(((-2379151623.602476)>>(tmp = -1423760236, tmp))>>(tmp = -522536019.2225733, tmp))));
+  assertEquals(-2138158385, x ^= ((x>>((((1316131966.9180691)-((x*x)>>x))>>>x)>>((-2712430284)|(((((x<<(-616185937.6090865))-(((x-(tmp = 2957048661, tmp))<<(tmp = 617564839.888214, tmp))/(x%((tmp = -447175647.9393475, tmp)<<(2203298493.460617)))))-((x&((x<<(914944265))^(((-1294901094)*((tmp = 2512344795, tmp)+((((tmp = -1227572518, tmp)%(1831277766.4920158))*((x|x)^(tmp = 2515415182.6718826, tmp)))*x)))-(961485129))))>>>(tmp = 2079018304, tmp)))>>(tmp = 734028202, tmp))^(554858721.6149715)))))-((tmp = 1312985279.5114603, tmp)^(tmp = 2450817476.179955, tmp))));
+  assertEquals(2.759030298237921, x /= (x|(tmp = -775901745.3688724, tmp)));
+  assertEquals(8, x <<= x);
+  assertEquals(NaN, x %= (((x&((1792031228.831834)>>(-1174912501)))%(((-2351757750)+(tmp = -2610099430, tmp))*(-2811655968)))*(x&(tmp = -1881632878, tmp))));
+  assertEquals(0, x &= ((x*(616116645.7508612))^(2789436828.536846)));
+  assertEquals(0, x *= x);
+  assertEquals(35097452, x ^= ((tmp = 1023684579, tmp)%(((x|((tmp = -757953041, tmp)+(772988909)))+(tmp = -2934577578, tmp))>>>((tmp = -1973224283, tmp)>>>((x*(2244818063.270375))|(x-(-716709285)))))));
+  assertEquals(0.015207441433418992, x /= (2307913014.4056892));
+  assertEquals(-5865042.942076175, x -= (5865042.957283616));
+  assertEquals(-67719.94207617454, x %= (((1464126615.2493973)+(398302030.0108756))>>>x));
+  assertEquals(4294899577, x >>>= (x<<x));
+  assertEquals(-1, x >>= (tmp = 607447902, tmp));
+  assertEquals(-1, x >>= (3081219749.9119744));
+  assertEquals(6.53694303504065e-10, x /= (tmp = -1529767040.4034374, tmp));
+  assertEquals(6.53694303504065e-10, x %= ((tmp = 899070650.7190754, tmp)&(tmp = -1101166301, tmp)));
+  assertEquals(6.53694303504065e-10, x %= (tmp = -2207346460, tmp));
+  assertEquals(NaN, x %= (((x&x)>>x)%(((-10980184)+x)&(tmp = -1473044870.4729445, tmp))));
+  assertEquals(NaN, x -= x);
+  assertEquals(-1755985426, x ^= (tmp = 2538981870, tmp));
+  assertEquals(-13842, x %= ((((-2258237411.3816605)+(-1325704332.0531585))<<((tmp = -877665450.1877053, tmp)>>(((((2420989037)+(2084279990.6278818))*(-327869571.9348242))+x)^x)))>>>x));
+  assertEquals(1, x /= x);
+  assertEquals(1, x >>= ((2241312290)^(2859250114)));
+  assertEquals(0, x >>= x);
+  assertEquals(-1615631756, x |= (-1615631756.1469975));
+  assertEquals(-1615631756, x |= x);
+  assertEquals(-627245056, x <<= ((x*(tmp = -1308330685.5971081, tmp))|(tmp = 1479586158, tmp)));
+  assertEquals(-627245056, x |= x);
+  assertEquals(1786953888, x ^= (-1340096352.1839824));
+  assertEquals(1668014353, x -= (tmp = 118939535, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(-645681, x ^= ((-1322356629)>>(tmp = 1829870283, tmp)));
+  assertEquals(-1322354688, x <<= (-794779253));
+  assertEquals(-4310084378.672725, x += (-2987729690.6727247));
+  assertEquals(-8620168757.34545, x += x);
+  assertEquals(-8720421, x |= (tmp = -748107877.6417065, tmp));
+  assertEquals(-1508858270, x ^= (1500137913));
+  assertEquals(-0.825735756765112, x /= (1827289490.1767085));
+  assertEquals(1253449509.1742642, x += (((tmp = 1253449509.9576545, tmp)-(((tmp = 2860243975, tmp)+(367947569.85976696))>>(((((530960315)>>>((((x%(tmp = -2203199228, tmp))<<(x*(((tmp = -117302283, tmp)/(x-((2579576936)%(-1225024012))))&(tmp = -2857767500.1967726, tmp))))/((x/((tmp = -166066119, tmp)<<x))|x))>>>x))|(((2771852372)>>(((tmp = -3103692094.1463976, tmp)-(tmp = 2867208546.069278, tmp))>>>(702718610.1963737)))|(tmp = 2680447361, tmp)))>>x)>>(-2006613979.051014))))^((-1665626277.9339101)/(x<<(tmp = 342268763, tmp)))));
+  assertEquals(1693336701.1742642, x += (tmp = 439887192, tmp));
+  assertEquals(0.8479581831275719, x /= ((1171383583)+(((x&x)>>>(51482548.618915915))-(tmp = -825572595.1031849, tmp))));
+  assertEquals(28, x |= ((tmp = -2355932919.6737213, tmp)>>(tmp = -2395605638, tmp)));
+  assertEquals(0, x %= x);
+  assertEquals(0, x -= x);
+  assertEquals(0, x <<= (x^((tmp = 2793423893.484949, tmp)*(1585074754.3250475))));
+  assertEquals(0, x >>= (x/(x-((957719861.9175875)&(1288527195)))));
+  assertEquals(0, x >>>= ((-1429196921.4432657)/x));
+  assertEquals(-852424225.734199, x -= (tmp = 852424225.734199, tmp));
+  assertEquals(-46674433, x |= ((tmp = -2335242963, tmp)*((2135206646.2614377)>>(tmp = 505649511.8292929, tmp))));
+  assertEquals(2944662357, x += (tmp = 2991336790, tmp));
+  assertEquals(1404, x >>>= (849155189.1503456));
+  assertEquals(-846755170, x ^= (tmp = -846753822.4471285, tmp));
+  assertEquals(52615, x >>>= ((-517068110)+x));
+  assertEquals(1475021859.9916897, x += (tmp = 1474969244.9916897, tmp));
+  assertEquals(0, x %= x);
+  assertEquals(0, x %= ((539583595.8244679)*(tmp = 1469751690.9193692, tmp)));
+  assertEquals(0, x &= (807524227.2057163));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x -= (x^((tmp = -362481588, tmp)%(2611296227))));
+  assertEquals(NaN, x *= x);
+  assertEquals(0, x >>= ((-2519875630.999908)<<x));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x += (((tmp = 2485209575, tmp)>>(tmp = 2326979823, tmp))%(x-(((-1296334640.7476478)&x)<<x))));
+  assertEquals(0, x >>= (((tmp = 1370704131, tmp)^((((tmp = 793217372.7587746, tmp)>>(((-1455696484.109328)|(((((-2186284424.5379324)<<(tmp = 3052914152.254852, tmp))-(x>>(tmp = 3121403408, tmp)))+((778194280)-(((((tmp = 2398957652, tmp)-(x+(((-2592019996.937958)>>((tmp = 1648537981, tmp)>>x))<<(-677436594))))<<(39366669.09012544))|((tmp = 3133808408.9582872, tmp)-(-2987527245.010673)))*x)))|((tmp = -2178662629, tmp)<<x)))^(((tmp = 909652440.3570575, tmp)%(-2572839902.6852217))%(-1879408081))))*(tmp = -2910988598, tmp))&(((x^x)>>(2822040993))|((x*x)^(((1072489842.6785052)|(x-(((464054192.7390214)^x)<<(tmp = -2754448095, tmp))))*((tmp = -1544182396, tmp)/(tmp = -3198554481, tmp)))))))^(tmp = 1946162396.9841106, tmp)));
+  assertEquals(371272192, x |= (((x^((x-(x/x))&(tmp = 2370429394, tmp)))-(tmp = -403692829, tmp))*(tmp = 2808636109, tmp)));
+  assertEquals(929786482, x |= ((729966239.8987448)^(x-((tmp = 120127779, tmp)^((tmp = -3088531385, tmp)>>>((x+((tmp = 2364833601, tmp)>>>(((599149090.6666714)>>(tmp = 2838821032, tmp))%(tmp = -662846011, tmp))))-(tmp = 1168491221.1813436, tmp)))))));
+  assertEquals(-681121542, x += ((-1610909505.998718)^((tmp = -957338882, tmp)>>>(tmp = 1935594133.6531684, tmp))));
+  assertEquals(-2147483648, x <<= ((tmp = 15161708, tmp)|(2453975670)));
+  assertEquals(-2147483648, x >>= x);
+  assertEquals(0, x <<= (2080486058));
+  assertEquals(0, x &= (((x&(tmp = -767821326, tmp))/((tmp = 1877040536, tmp)>>>(tmp = 2378603217.75597, tmp)))*(-1601799835)));
+  assertEquals(0, x %= (-1820240383));
+  assertEquals(1621233920, x ^= ((tmp = 820230232, tmp)*(1727283900)));
+  assertEquals(1621233920, x |= (x>>>x));
+  assertEquals(1621233931, x += ((tmp = 794966194.9011587, tmp)>>(tmp = -597737830.5450518, tmp)));
+  assertEquals(1621276543, x |= (((x^((2354444886)+(tmp = 685142845.4708651, tmp)))-(tmp = 790204976.9120214, tmp))>>>((((tmp = -2792921939, tmp)/(((((tmp = -80705524, tmp)<<x)-(((((((tmp = 1951577216.379527, tmp)>>>x)%((-529882150)>>>(tmp = -1682409624, tmp)))<<((-42043756.29025769)-(-1803729173.6855814)))/(2937202170.118023))*(tmp = -1998098798.5722106, tmp))*(tmp = -2996229463.904228, tmp)))&x)>>>(-301330643)))/(-2858859382.0050273))-(tmp = 1571854256.0740635, tmp))));
+  assertEquals(810638271, x >>>= (x/(1553632833)));
+  assertEquals(810638271, x <<= (tmp = -1467397440, tmp));
+  assertEquals(-2147483648, x <<= x);
+  assertEquals(871068871, x ^= (tmp = 3018552519, tmp));
+  assertEquals(-1073743881, x |= ((tmp = 2294122324.020989, tmp)|(tmp = -1799706842.4493146, tmp)));
+  assertEquals(-77816868, x += (((-2225296403)&x)>>(tmp = -2667103424.445239, tmp)));
+  assertEquals(-1215889, x >>= (tmp = 1876107590.8391647, tmp));
+  assertEquals(-2431778, x += x);
+  assertEquals(4292535518, x >>>= (((x>>(-1825580683))/x)%x));
+  assertEquals(4292802560, x -= (x|(1492864090)));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>= x);
+  assertEquals(0, x %= (tmp = 2173121205, tmp));
+  assertEquals(0, x *= (x>>x));
+  assertEquals(1565261471, x |= ((1565261471.323931)>>>x));
+  assertEquals(0, x -= x);
+  assertEquals(-86980804, x |= (-86980804));
+  assertEquals(-698956484, x -= (((((2754713793.1746016)*(((((-1514587465.0698888)>>(tmp = -1307050817, tmp))/(tmp = 2368054667.438519, tmp))*(-1908125943.5714772))<<(x>>>(-357164827.4932244))))+(1257487617))<<(2954979945))&(612330472)));
+  assertEquals(-1073741824, x <<= x);
+  assertEquals(54497747, x ^= (-1019244077.098908));
+  assertEquals(54501375, x |= (((tmp = 1944912427, tmp)>>>x)%x));
+  assertEquals(0, x -= x);
+  assertEquals(0, x -= x);
+  assertEquals(-0, x *= (-1748215388));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>>= (((tmp = 988769112, tmp)%(tmp = -3133658477, tmp))<<x));
+  assertEquals(0, x %= (1685221089.2950323));
+  assertEquals(0, x >>>= (x+((793467168)-(tmp = 135877882, tmp))));
+  assertEquals(0, x %= ((tmp = -2406801984, tmp)%(tmp = -987618172, tmp)));
+  assertEquals(0, x *= ((-2943444887.953456)|(tmp = -2327469738.4544783, tmp)));
+  assertEquals(0, x >>= x);
+  assertEquals(-145484729.70167828, x += (tmp = -145484729.70167828, tmp));
+  assertEquals(1140855872, x &= (x^(tmp = 3151437967.965556, tmp)));
+  assertEquals(1486808408, x += (tmp = 345952536, tmp));
+  assertEquals(107846582.36594129, x %= (-1378961825.6340587));
+  assertEquals(-642031616, x <<= (x+x));
+  assertEquals(151747770.95108718, x *= (x/(tmp = 2716379907, tmp)));
+  assertEquals(192723456, x <<= (tmp = -1731167384, tmp));
+  assertEquals(2151208003, x -= ((-2151208003)+x));
+  assertEquals(1, x /= x);
+  assertEquals(1, x |= x);
+  assertEquals(1996766603, x |= (1996766602));
+  assertEquals(895606123, x ^= (tmp = 1113972960.966081, tmp));
+  assertEquals(-1500036886, x ^= (tmp = 2482412929, tmp));
+  assertEquals(-1542644247, x ^= (x>>>((tmp = 51449105, tmp)>>>(((-2057313176)*x)/(-1768119916)))));
+  assertEquals(-1496074063273093600, x *= ((tmp = 786152274, tmp)^(387292498)));
+  assertEquals(-794329073, x %= (((tmp = -2314637675.617696, tmp)*((((x*(411053423.29070306))-(2889448433.4240828))/((-970630131)/(tmp = -2886607600.7423067, tmp)))<<(tmp = 1263617112.9362245, tmp)))|(2816980223.8209996)));
+  assertEquals(2468008436047106600, x *= (tmp = -3107035257.725115, tmp));
+  assertEquals(3040956928, x >>>= ((tmp = 1514372119.1787262, tmp)*(3169809008)));
+  assertEquals(-19, x >>= (tmp = -266966022.10604453, tmp));
+  assertEquals(-1.6505580654964654e-8, x /= ((-3143841480)>>(x-x)));
+  assertEquals(-2.2420284729165577e-7, x *= (x*((((703414102.2523813)%(tmp = 2989948152, tmp))-((-1583401827.2949386)^((tmp = -1916731338, tmp)%((331500653.3566053)|(((tmp = 29865940, tmp)+((tmp = -2294889418.6764183, tmp)<<(tmp = -1558629267.255229, tmp)))>>>(x*(x+x)))))))|((988977957)&(-2986790281)))));
+  assertEquals(0, x ^= (x/(tmp = 781117823.345541, tmp)));
+  assertEquals(NaN, x *= (((x^((((tmp = -2969290335, tmp)+(((((tmp = -175387021, tmp)&(tmp = -1080807973, tmp))<<(tmp = -2395571076.6876855, tmp))|((tmp = -1775289899.4106793, tmp)^x))|(-2963463918)))*(tmp = -1761443911, tmp))^(tmp = 847135725, tmp)))<<((146689636)<<x))%x));
+  assertEquals(0, x ^= x);
+  assertEquals(1720182184, x -= (((tmp = 3184020508, tmp)|((-489485703)+(tmp = -2644503573, tmp)))&(tmp = 2575055579.6375213, tmp)));
+  assertEquals(1720182184, x >>= (x<<(-45408034)));
+  assertEquals(5.759243187540471e+27, x *= (((x&(1456298805))+(x<<(106573181)))*((566861317.2877743)+(2262937360.3733215))));
+  assertEquals(5.759243187540471e+27, x -= (tmp = -1365873935, tmp));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>= (1960073319.3465362));
+  assertEquals(0, x <<= x);
+  assertEquals(560463904, x += ((tmp = 1844076589.9286406, tmp)&((((((-691675777.5800121)|(-745631201))|x)+(tmp = 1504458593.2843904, tmp))-x)<<x)));
+  assertEquals(-513210271, x -= (x|(1052702623.7761713)));
+  assertEquals(3781757025, x >>>= ((-1346666404.362477)*(tmp = 2798191459, tmp)));
+  assertEquals(1080100929, x &= (1122097879.882534));
+  assertEquals(1276833905.8093092, x *= ((1276833905.8093092)/x));
+  assertEquals(1276833905.8093092, x %= (1796226525.7152414));
+  assertEquals(1276833905, x <<= (((tmp = -491205007.83412814, tmp)*(tmp = 1496201476.496839, tmp))>>(x+((tmp = -854043282.114594, tmp)-((x|(tmp = -807842056, tmp))*x)))));
+  assertEquals(1276833905, x %= (((-1870099318)>>>(((tmp = -2689717222, tmp)/(248095232))/(tmp = 1036728800.5566598, tmp)))&(((((857866837)>>(tmp = 3034825801.740485, tmp))|(-1676371984))>>>(x<<x))%((-3035366571.0221004)*(1578324367.8819473)))));
+  assertEquals(1, x /= x);
+  assertEquals(2819223656.189109, x += (2819223655.189109));
+  assertEquals(-1475743640, x >>= (((tmp = 2586723314.38089, tmp)/(x&(tmp = -697978283.9961061, tmp)))<<(x%((-1167534676)>>(x^((tmp = -284763535, tmp)*((x%x)&((((tmp = 2916973220.726839, tmp)%x)/(tmp = -1338421209.0621986, tmp))|((tmp = -834710536.803335, tmp)%x)))))))));
+  assertEquals(-3267683406, x -= (tmp = 1791939766, tmp));
+  assertEquals(-2090420900700614100, x *= (639725653));
+  assertEquals(-1540353536, x %= ((-1800269105)<<((((x&(((tmp = 1135087416.3945065, tmp)^(613708290))>>x))>>(tmp = -1234604858.7683473, tmp))^(2404822882.7666225))>>>((tmp = -287205516, tmp)-((1648853730.1462333)^((x+(x%((tmp = 359176339, tmp)%((2856479172)<<(tmp = -1995209313, tmp)))))^(((tmp = 2857919171.839304, tmp)>>>(tmp = 2779498870, tmp))>>x)))))));
+  assertEquals(-2093767030, x ^= (654554250.498078));
+  assertEquals(1, x >>>= ((tmp = -166296226.12181997, tmp)^(x/x)));
+  assertEquals(-1487427474, x -= ((x<<x)|(1487427475.4063978)));
+  assertEquals(-1487427470.562726, x += ((-1226399959.8267038)/((tmp = 2172365551, tmp)<<x)));
+  assertEquals(-3457859227618939400, x *= (tmp = 2324724597.3686075, tmp));
+  assertEquals(396221312, x >>= (-1354035390));
+  assertEquals(0, x %= x);
+  assertEquals(0, x &= (tmp = 2733387603, tmp));
+  assertEquals(1485905453, x |= ((((tmp = -1321532329.304437, tmp)&((((tmp = 1817382709.4180388, tmp)%(((tmp = 2089156555.7749293, tmp)-(-1555460267))|(tmp = 717392475.9986715, tmp)))%(tmp = 1976713214, tmp))^x))>>>x)+(tmp = -2812404197.002721, tmp)));
+  assertEquals(1485905453, x |= x);
+  assertEquals(-997658264, x <<= (-1409757949.6038744));
+  assertEquals(-997657290, x -= ((-2041106361)>>(tmp = -2014750507, tmp)));
+  assertEquals(-2138512124, x &= (tmp = 2565597060, tmp));
+  assertEquals(8422400, x &= ((-2819342693.5172367)*(tmp = 1441722560, tmp)));
+  assertEquals(111816531.81703067, x -= (-103394131.81703067));
+  assertEquals(59606682.673836395, x *= ((tmp = -1451690098, tmp)/(x-(2835050651.717734))));
+  assertEquals(-119213365.34767279, x *= (x|((-2656365050)/((-66180492)+(tmp = 284225706.32323086, tmp)))));
+  assertEquals(-232839, x >>= (1694344809.435083));
+  assertEquals(-1, x >>= x);
+  assertEquals(1, x *= x);
+  assertEquals(1, x |= x);
+  assertEquals(0, x >>= (tmp = 397239268, tmp));
+  assertEquals(-1525784563, x -= (tmp = 1525784563, tmp));
+  assertEquals(-153.62740888512675, x /= (((tmp = -2040622579.5354173, tmp)*(tmp = -1149025861.549324, tmp))%(((tmp = 2981701364.0073133, tmp)*(tmp = 2993366361, tmp))|(x|(tmp = 1800299979, tmp)))));
+  assertEquals(-1671795135, x &= (-1671795135.6173766));
+  assertEquals(-4253, x |= ((((x*((1533721762.8796673)<<((tmp = 1026164775.0081646, tmp)<<x)))<<(((x-((((x>>((((((tmp = -481536070.7067797, tmp)&(tmp = 1663121016, tmp))>>>(-2974733313.5449667))+(tmp = -493019653, tmp))>>x)&(tmp = 879307404.8600142, tmp)))>>>x)%(x-(tmp = -1806412445.788453, tmp)))%x))<<(x<<(x+x)))+x))>>((tmp = -332473688.28477216, tmp)<<((tmp = 1701065928, tmp)+(((((tmp = -2407330783, tmp)+x)-((tmp = 584100783, tmp)%(tmp = -3077106506, tmp)))^x)>>x))))<<x));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>>= (1578470476.6074834));
+  assertEquals(0, x >>>= (974609751));
+  assertEquals(-120, x += (x-((tmp = -245718438.0842378, tmp)>>>(tmp = -1870354951, tmp))));
+  assertEquals(-6.134465505515781e-8, x /= (1956160645));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x *= (tmp = -399718472.70049024, tmp));
+  assertEquals(-1803198769.8413258, x += (-1803198769.8413258));
+  assertEquals(988624943, x ^= ((((tmp = 320776739.5608537, tmp)*(((tmp = -983452570.3150327, tmp)^x)&(tmp = -3181597938, tmp)))-(tmp = -1367913740.9036021, tmp))/(((tmp = -535854933.2943456, tmp)-(717666905.8122432))>>>(((((x^(tmp = 380453258.60062766, tmp))^(tmp = -1242333929, tmp))/((tmp = 1072416261, tmp)+(((2090466933)*(x*(tmp = -386283072, tmp)))|((tmp = 789259942, tmp)<<(tmp = -1475723636.1901488, tmp)))))>>>x)%((x>>(tmp = -1243048658.3818703, tmp))|((((((tmp = -619553509, tmp)|x)/(878117279.285609))|((x<<(x>>>(tmp = -749568437.7390883, tmp)))*x))/(tmp = 1674804407, tmp))-(x*(tmp = 1528620873, tmp))))))));
+  assertEquals(988625135, x |= (x>>>(tmp = 2402222006, tmp)));
+  assertEquals(988625135, x %= (-2691094165.990094));
+  assertEquals(0, x %= x);
+  assertEquals(-0, x *= (tmp = -1409904262, tmp));
+  assertEquals(-0, x /= ((1176483512.8626208)<<x));
+  assertEquals(0, x &= ((((1677892713.6240005)^(tmp = 2575724881, tmp))^(tmp = -2935655281.208194, tmp))*(216675668)));
+  assertEquals(0, x >>= (tmp = -1296960457, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x <<= (x>>(-3127984289.9112387)));
+  assertEquals(0, x %= ((tmp = 190018725.45957255, tmp)<<((x>>>x)/x)));
+  assertEquals(0, x /= (1185681972));
+  assertEquals(0, x &= ((tmp = -1285574617, tmp)>>x));
+  assertEquals(0, x >>>= ((tmp = 2498246277.2054763, tmp)+(((tmp = 924534435, tmp)&x)>>(tmp = 1379755429, tmp))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x /= (3093439341));
+  assertEquals(0, x *= (x>>>x));
+  assertEquals(0, x &= (tmp = 551328367, tmp));
+  assertEquals(-0, x /= (-3153411714.834353));
+  assertEquals(1217585288, x ^= (tmp = -3077382008.637764, tmp));
+  assertEquals(-639702017, x |= ((tmp = -640922633, tmp)%(tmp = -879654762, tmp)));
+  assertEquals(-1645297680, x <<= (tmp = 1418982820.8182912, tmp));
+  assertEquals(-1.4059558868398736, x /= (1170234212.4674253));
+  assertEquals(-2650856935.66554, x *= (1885448157));
+  assertEquals(1326259953.26931, x *= (((x>>(x|(-496195134.78045774)))+((2029515886)%(tmp = 1148955580, tmp)))/(tmp = -1760016519, tmp)));
+  assertEquals(0, x &= (((((-273334205)+(tmp = 797224093.682485, tmp))/x)>>>((((tmp = -887577414, tmp)/x)+x)%(tmp = 720417467, tmp)))^(((x-(tmp = -309071035, tmp))>>(-3123114729.33889))/x)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x %= ((tmp = -2243857462, tmp)/((((((2642220700.6673346)&x)*(tmp = 1454878837, tmp))|((-25825087.30002737)%(851535616.3479034)))<<(tmp = -697581582, tmp))%(tmp = 2248990486, tmp))));
+  assertEquals(0, x >>= (((x|(((tmp = -220437911, tmp)&((((255690498)*(((2993252642)>>>(tmp = 300426048.0338713, tmp))>>x))&((-364232989)+(x<<(-1824069275))))%(x+(tmp = 2696406059.026349, tmp))))+((tmp = 2911683270, tmp)/(tmp = 2718991915, tmp))))*(x/(((tmp = -982851060.0744538, tmp)^((-2903383954)<<((-85365803.80553412)^x)))%(1489258330.5730634))))>>>x));
+  assertEquals(0.7805921633088815, x += (((-1886920875)/(-2417294156.5304217))%(tmp = -1176793645.8923106, tmp)));
+  assertEquals(0, x <<= x);
+  assertEquals(-2215008905, x -= (2215008905));
+  assertEquals(1931542900, x &= (-215923724.72133207));
+  assertEquals(907191462, x ^= (-3133954606.357727));
+  assertEquals(453595731, x >>>= (((tmp = 2726241550, tmp)/(tmp = -332682163, tmp))*((((tmp = 2500467531, tmp)>>>(((x<<(tmp = -1847200310.4863105, tmp))/x)^x))+x)<<(191688342.22953415))));
+  assertEquals(-0.21671182880645923, x /= ((((-1169180683.1316955)%x)>>>(1650525418))^((2198033206.797462)&((-6913973.910871983)%(1758398541.8440342)))));
+  assertEquals(-375102237.1603561, x += (tmp = -375102236.9436443, tmp));
+  assertEquals(1, x &= (((84374105.89811504)|((tmp = -2480295008.926951, tmp)>>((605043461)>>(tmp = -2495122811, tmp))))>>(-2129266088)));
+  assertEquals(1, x |= x);
+  assertEquals(0.0000024171579540208214, x /= (((-2600416098)>>(-2076954196))^x));
+  assertEquals(0.0000024171579540208214, x %= (tmp = -2632420148.815531, tmp));
+  assertEquals(1809220936.0126908, x -= (-1809220936.0126884));
+  assertEquals(1682452118.2686126, x += (((2358977542)<<(x/(tmp = -2862107929, tmp)))+(x+(x%((-3101674407)/(((x*((x>>(tmp = 630458691.3736696, tmp))>>>(tmp = -852137742, tmp)))/x)-((-1875892391.1022017)&(tmp = -1027359748.9533749, tmp))))))));
+  assertEquals(1682452118, x <<= (((tmp = -80832958.07816291, tmp)>>x)%(x-((x^(x<<(tmp = -156565345, tmp)))|((tmp = -1208807363.727137, tmp)/(tmp = 2614737513.304538, tmp))))));
+  assertEquals(6572078, x >>= (-1573364824));
+  assertEquals(13144156, x += x);
+  assertEquals(1731678184, x ^= ((tmp = 593370804.9985657, tmp)|(-3124896848.53273)));
+  assertEquals(845545, x >>>= (tmp = -605637621.2299933, tmp));
+  assertEquals(-1383361088, x ^= (tmp = -1383632087, tmp));
+  assertEquals(-82545896480031520, x += ((x+(1023183845.7316296))*((((tmp = 576673669, tmp)>>(((-584800080.1625061)/(2388147521.9174623))+((((x>>>(-905032341.5830328))^(tmp = -2170356357, tmp))-x)+((136459319)+(-1799824119.689473)))))|x)&(tmp = -2688743506.0257063, tmp))));
+  assertEquals(-895206176, x |= x);
+  assertEquals(-0, x %= x);
+  assertEquals(1791306023, x ^= ((tmp = -3219480856, tmp)+(tmp = 715819582.0181161, tmp)));
+  assertEquals(1791306023, x &= x);
+  assertEquals(2725167636753240600, x *= (1521330025));
+  assertEquals(-281190679, x |= (tmp = -1422045975.798171, tmp));
+  assertEquals(-281190679, x += (x%x));
+  assertEquals(-2342097426.906673, x -= (tmp = 2060906747.906673, tmp));
+  assertEquals(-4651462701.906673, x -= (2309365275));
+  assertEquals(1878, x >>>= (2544974549.345834));
+  assertEquals(1964, x += (x&((1067649861)>>(182139255.7513579))));
+  assertEquals(2209, x += (x>>(tmp = -1775039165, tmp)));
+  assertEquals(0, x -= x);
+  assertEquals(-0, x /= (tmp = -1634697185, tmp));
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x >>>= ((tmp = 3075747652, tmp)&(tmp = 819236484, tmp)));
+  assertEquals(0, x /= ((1276203810.476657)%(-2434960500.784484)));
+  assertEquals(0, x >>>= (tmp = -503633649, tmp));
+  assertEquals(-982731931, x |= (-982731931));
+  assertEquals(-1965463862, x += x);
+  assertEquals(-0.221469672913716, x %= ((tmp = -1742292120, tmp)/x));
+  assertEquals(-0.221469672913716, x %= (-2021391941.1839576));
+  assertEquals(0, x <<= (((((tmp = -2802447851, tmp)>>((2534456072.6518855)&x))%(tmp = 2841162496.610816, tmp))<<((89341820)/(2565367990.0552235)))>>(tmp = 2700250984.4830647, tmp)));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>= ((tmp = -636189745, tmp)>>>(x/(((tmp = 2634252476, tmp)%(2026595795))>>(tmp = -2048078394.743723, tmp)))));
+  assertEquals(NaN, x %= ((x%((((x%((tmp = -2583207106, tmp)&x))|(190357769))<<(tmp = 595856931.2599536, tmp))%x))*((-2433186614.6715775)<<((2856869562.1088696)^(tmp = 1112328003, tmp)))));
+  assertEquals(1621713910, x |= (tmp = 1621713910.0282416, tmp));
+  assertEquals(3243427820, x += x);
+  assertEquals(0, x *= (x&(x-x)));
+  assertEquals(0, x >>>= (((2871235439)<<((x+((tmp = -1319445828.9659343, tmp)+(tmp = 1595655077.959171, tmp)))>>(tmp = -86333903, tmp)))-(x/(2907174373.268768))));
+  assertEquals(0, x >>= (-1091774077.2173789));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x *= (tmp = 1976023677.7015994, tmp));
+  assertEquals(NaN, x -= (-3013707698));
+  assertEquals(NaN, x += ((x+(((tmp = -3119865782.9691515, tmp)<<(1327383504.0158405))^(((-143382411.7239611)>>>((-2157016781)+(((-335815848)/x)<<(tmp = 1953515427, tmp))))&(-2715729178))))/(413738158.2334299)));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x += (-845480493));
+  assertEquals(-789816013, x |= (tmp = -789816013.129916, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= (3032573320));
+  assertEquals(47630, x ^= ((1086705488)%((x^(tmp = -1610832418, tmp))>>>(tmp = 1136352558, tmp))));
+  assertEquals(47630, x >>= (tmp = 1035320352.4269229, tmp));
+  assertEquals(47630, x >>= ((((x^x)<<(x*((((x&((-1657468419)*((tmp = -674435523, tmp)&((tmp = 2992300334, tmp)|x))))*((tmp = -489509378.31950426, tmp)*(tmp = 2276316053, tmp)))>>>x)<<x)))%(tmp = -1209988989, tmp))/(tmp = -2080515253.3541622, tmp)));
+  assertEquals(3192518951.8129544, x += (3192471321.8129544));
+  assertEquals(648116457.8129544, x %= (-2544402494));
+  assertEquals(0, x -= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>= x);
+  assertEquals(0, x *= (tmp = 30051865, tmp));
+  assertEquals(0, x ^= ((x&(((x&x)>>>(((((((x+(2319551861.0414495))>>>(tmp = -3099624461, tmp))^((((tmp = 1574312763, tmp)|x)>>>((-2723797246)&(tmp = -1993956152, tmp)))|(-1830179045)))|(((((((-2545698704.3662167)>>>x)-(((-79478653)|x)%(x+(x>>((tmp = 2386405508.2180576, tmp)/x)))))>>((((-1947911815.2808042)*((x+(368522081.2884482))-(tmp = 2452991210, tmp)))>>(343556643.1123545))>>((((tmp = 1869261547.537739, tmp)>>(3193214755))|x)&(x*(2027025120)))))<<((-1149196187)>>>(814378291.8374172)))+((((((((-160721403)/(2079201480.2186408))+((x|((((tmp = -299595483.16805863, tmp)>>>((x|((x+x)/(-2359032023.9366207)))<<(tmp = -3095108545, tmp)))>>((tmp = -1547963617.9087071, tmp)*(x>>x)))&((tmp = -1568186648.7499216, tmp)+(((2646528453)^(-2004832723.0506048))>>>(tmp = -3188715603.921877, tmp)))))+(tmp = 1578824724, tmp)))^x)^x)/(tmp = -985331362, tmp))|(tmp = 445135036, tmp))<<(tmp = -73386074.43413758, tmp)))+(((-1674995105.9837937)-(tmp = 1392915573, tmp))>>x)))%(tmp = 1215953864, tmp))&((tmp = -439264643.5238693, tmp)>>>x))+(((tmp = 2311895902, tmp)|(1604405793.6399229))&(tmp = -565192829, tmp))))-x))>>(-2455985321)));
+  assertEquals(0, x %= ((1177798817)>>(tmp = 2081394163.5420477, tmp)));
+  assertEquals(0, x >>>= ((x^(tmp = -41947528.33954811, tmp))>>(x>>>((tmp = 1367644771, tmp)+x))));
+  assertEquals(0, x %= ((x+((tmp = 163275724, tmp)<<((tmp = -514460883.3040788, tmp)+x)))|(tmp = -287112073.2482593, tmp)));
+  assertEquals(0, x &= (3067975906));
+  assertEquals(201342051, x |= (tmp = 201342051, tmp));
+  assertEquals(0, x %= (((((-2580351108.8990865)<<(tmp = 2675329316, tmp))&((1338398946)%((-1548041558)+((x>>(-1568233868.7366815))|((x>>((tmp = -1064582207, tmp)/(-1062237014)))>>(tmp = 854123209, tmp))))))<<(((989032887)*(1842748656))%(tmp = -1566983130, tmp)))-x));
+  assertEquals(-0, x /= (tmp = -828519512.617768, tmp));
+  assertEquals(0, x &= ((((1449608518)+(-1829731972))*(1828894311))*(((tmp = -1121326205.614264, tmp)^(-2057547855))<<(tmp = -2758835896, tmp))));
+  assertEquals(NaN, x %= ((tmp = -2138671333, tmp)%x));
+  assertEquals(0, x &= x);
+  assertEquals(665568613.0328879, x += (665568613.0328879));
+  assertEquals(317, x >>= (2627267349.735873));
+  assertEquals(0, x -= x);
+  assertEquals(0, x &= (((tmp = 3030611035, tmp)*(((tmp = 476143340.933007, tmp)>>(x-(2238302130.2331467)))|(x|x)))%(tmp = 320526262, tmp)));
+  assertEquals(0, x <<= (tmp = 729401206, tmp));
+  assertEquals(0, x >>>= (1721412276));
+  assertEquals(217629949.3530736, x += ((tmp = 217629949.3530736, tmp)%((-931931100.601475)%(x^(tmp = -2149340123.548764, tmp)))));
+  assertEquals(217629949.3530736, x %= (tmp = 2275384959.4243402, tmp));
+  assertEquals(0, x >>>= (1112677437.5524077));
+  assertEquals(0, x *= (500256656.7476063));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x -= x);
+  assertEquals(0, x -= x);
+  assertEquals(0, x &= (-1076968794));
+  assertEquals(0, x /= (tmp = 1774420931.0082943, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(0, x >>= x);
+  assertEquals(0, x %= (-2978890122.943079));
+  assertEquals(-0, x /= (tmp = -2954608787, tmp));
+  assertEquals(-800048201, x ^= ((tmp = -800048201.7227018, tmp)>>>((-2016227566.1480863)/(tmp = -2263395521, tmp))));
+  assertEquals(3333, x >>>= (-2038839052));
+  assertEquals(487957736.625432, x += (487954403.625432));
+  assertEquals(-1650983426, x |= (2643918270));
+  assertEquals(-1861867448, x &= (tmp = -251254199.12813115, tmp));
+  assertEquals(-7.934314690172143e-18, x %= ((((x^(-703896560.6519544))>>(tmp = -1853262409, tmp))/(tmp = -1168012152.177894, tmp))/(tmp = 837616075.1097361, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x &= (tmp = -2328150260.5399947, tmp));
+  assertEquals(-1954860020, x |= (tmp = 2340107276, tmp));
+  assertEquals(-1954860020, x >>= ((tmp = 159177341, tmp)*(x&(-705832619))));
+  assertEquals(-1954895727, x -= (x>>>((-1443742544.7183702)^((((tmp = 869581714.0137681, tmp)+x)^((x%(tmp = -1036566362.5189383, tmp))^(x%x)))>>x))));
+  assertEquals(1.0241361338078498, x /= (tmp = -1908824093.2692068, tmp));
+  assertEquals(16777216, x <<= (x*(((-1925197281)^(tmp = -1392300089.4750946, tmp))|x)));
+  assertEquals(-225882765524992, x *= (tmp = -13463662, tmp));
+  assertEquals(-1845493760, x |= x);
+  assertEquals(-1845493760, x %= (tmp = 3181618519.786825, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(NaN, x /= (x>>>x));
+  assertEquals(NaN, x %= (((((tmp = -521176477, tmp)>>(((tmp = 370693623, tmp)/(((tmp = -1181033022.4136918, tmp)>>(x|(x*(2601660441))))+(tmp = -1696992780, tmp)))|(x|(-1197454193.198036))))>>>(((2512453418.3855605)+((((((tmp = 799501914, tmp)&(((1788580469.7069902)*(((((1476778529.5109258)<<(tmp = -1873387738.3541565, tmp))-((tmp = -521988584.7945764, tmp)*(-1598785351.3914914)))&(-1899161721.8061454))&((x/x)*(690506460))))>>>((tmp = 2255896398.840741, tmp)>>((tmp = -1331486014.6180065, tmp)+(-1159698058.534132)))))*((1112115365.2633948)&((x>>((x>>(-784426389.4693215))&(-492064338.97227573)))>>x)))^((x-((tmp = 2986028023, tmp)>>(tmp = 2347380320.00517, tmp)))*(tmp = -1463851121, tmp)))*(tmp = -1059437133, tmp))%(x-(tmp = 1238739493.7636225, tmp))))^(2029235174)))*(-1923899530))>>>x));
+  assertEquals(0, x >>>= (2848792983.510682));
+  assertEquals(0, x >>= (((tmp = 3042817032.705198, tmp)>>>x)&((((tmp = -829389221, tmp)-((2669682285.8576303)+(tmp = 1812236814.3082042, tmp)))^x)%((tmp = -2401726554, tmp)^((tmp = 2464685683, tmp)|(-2685039620.224061))))));
+  assertEquals(2069649722, x |= (2069649722.311271));
+  assertEquals(NaN, x %= (((((-68757739.39282179)&(-1382816369))/(3122326124))<<(x-(-507995800.3369653)))<<(((-1962768567.343907)+((tmp = 1357057125, tmp)/x))^(tmp = 1997617124, tmp))));
+  assertEquals(NaN, x += x);
+  assertEquals(0, x >>= (26895919));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x %= (tmp = 1092448030, tmp));
+  assertEquals(0, x <<= (tmp = -477672441.46258235, tmp));
+  assertEquals(0, x /= (2113701907));
+  assertEquals(0, x >>>= x);
+  assertEquals(NaN, x /= x);
+  assertEquals(1341078673, x |= (-2953888623));
+  assertEquals(1341078673, x &= x);
+  assertEquals(0, x %= x);
+  assertEquals(414817852.151006, x -= (-414817852.151006));
+  assertEquals(1006632960, x <<= ((((((126465614.8316778)+(x-(2511803375)))+(tmp = 1620717148.352402, tmp))*x)/(tmp = -3013745105.5275207, tmp))-((tmp = -418034061.6865432, tmp)/(-300492911))));
+  assertEquals(1055624813, x |= (tmp = 921407085, tmp));
+  assertEquals(-3, x |= ((((tmp = 1382397819.7507677, tmp)+(tmp = -111851147.7289567, tmp))+x)/((tmp = 247980405.7238742, tmp)^(tmp = -592156399.8577058, tmp))));
+  assertEquals(35161, x &= (((((((-2973570544.725141)*(tmp = -1244715638, tmp))+x)<<(x/((x>>>(-2143371615.073137))/(226072236))))%((x-(tmp = 1971392936, tmp))^(tmp = 2653103658, tmp)))%((tmp = 2828319571.7066674, tmp)>>((1528970502)^((tmp = -55869558, tmp)%x))))>>(889380585.6738582)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x *= (2749718750));
+  assertEquals(0, x >>>= ((((-1633495402.6252813)*(tmp = 2943656739.1108646, tmp))+(tmp = 977432165, tmp))&((tmp = -2338132019, tmp)*(408176349.8061733))));
+  assertEquals(-1778794752, x -= (((tmp = -1391412154.5199084, tmp)-((-3172342474)|x))&(1854366052)));
+  assertEquals(-1778794752, x %= (tmp = 2024807296.6901965, tmp));
+  assertEquals(-1114410.466337204, x %= ((tmp = -240344444.24487805, tmp)%(-47661164)));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x >>= (x>>x));
+  assertEquals(0, x *= x);
+  assertEquals(0, x /= ((-3134902611)|(tmp = -3131158951, tmp)));
+  assertEquals(-0, x /= (((tmp = 1430247610.634234, tmp)&x)+((tmp = -2047191110.8623483, tmp)-((((x%((((x/(tmp = -2599234213, tmp))|(tmp = 2650380060, tmp))|x)+x))>>>x)&(-1961373866))<<x))));
+  assertEquals(-718394682, x -= ((x|(tmp = 1764417670.8577194, tmp))%(1046022988)));
+  assertEquals(3576572614, x >>>= (((tmp = 2480472883.078992, tmp)<<x)>>((2035208402.8039393)&(tmp = 492980449, tmp))));
+  assertEquals(434034142, x %= (x&((x>>>(311110449.48751545))|(-243530647))));
+  assertEquals(524703439.3065736, x += (((tmp = 1392771723.3065736, tmp)%(x&x))%(tmp = -2199704930, tmp)));
+  assertEquals(373686272, x &= (x<<((tmp = 2103372351.9456532, tmp)%(tmp = -1367109519, tmp))));
+  assertEquals(373686272, x >>= x);
+  assertEquals(-0.12245430020241108, x /= (tmp = -3051638622.5907507, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(1, x %= (3095983855));
+  assertEquals(-1454736871, x ^= (x*(tmp = -1454736872, tmp)));
+  assertEquals(-1454736866, x ^= (((724989405.7338341)|(tmp = -2834298786.384371, tmp))>>>(tmp = -2029602148.1758833, tmp)));
+  assertEquals(-1454736866, x &= x);
+  assertEquals(-197394432, x <<= (tmp = -1562128975, tmp));
+  assertEquals(251658240, x <<= (tmp = 2126510950, tmp));
+  assertEquals(3295700610.703306, x -= (tmp = -3044042370.703306, tmp));
+  assertEquals(-51152917, x |= ((949179883.1784958)|(((tmp = -2046168220, tmp)>>(x/x))/(((835064313)*(tmp = 2197600689, tmp))^(((tmp = 2717104216, tmp)&x)<<(-1402661995.3845913))))));
+  assertEquals(-1549204421, x ^= ((((tmp = -481013711, tmp)>>>((tmp = 119589341.80209589, tmp)%(-995489985.2905662)))-(635717011))^(x+(x*x))));
+  assertEquals(-1078356672.3999934, x += (470847748.6000067));
+  assertEquals(1484987268.4638166, x += (tmp = 2563343940.86381, tmp));
+  assertEquals(277020804, x &= (tmp = 2532819117, tmp));
+  assertEquals(-2097118208, x <<= (x>>>x));
+  assertEquals(-2147483648, x <<= (tmp = 761285045, tmp));
+  assertEquals(2147483648, x >>>= x);
+  assertEquals(-935909870282997800, x *= ((-2583300643)|x));
+  assertEquals(-370753566.54721737, x %= (-1084543510.4524941));
+  assertEquals(-177, x >>= (-946264747.6588805));
+  assertEquals(-416077682, x ^= (tmp = 416077761, tmp));
+  assertEquals(NaN, x %= ((((tmp = 779607408, tmp)*(((tmp = -3007128117, tmp)*(851442866.6153773))+x))&(1283388806))/(-876363553)));
+  assertEquals(NaN, x %= (x/(tmp = -1668413939.652408, tmp)));
+  assertEquals(-1726405921, x ^= (tmp = -1726405921, tmp));
+  assertEquals(-1, x >>= ((3031008213.807012)>>x));
+  assertEquals(4294967295, x >>>= ((x>>>x)&(tmp = 2788082290, tmp)));
+  assertEquals(8544111670008449000, x *= (tmp = 1989331020.0417833, tmp));
+  assertEquals(268435456, x <<= (tmp = 3121736017.2098465, tmp));
+  assertEquals(-2.1011176170964474e+26, x -= (((tmp = 1392503299, tmp)*(tmp = 1446108825.1572113, tmp))*(x^(tmp = 372776014.213725, tmp))));
+  assertEquals(0, x |= x);
+  assertEquals(0, x >>= ((-112413907.70074797)*(-702798603)));
+  assertEquals(1829518838, x |= (tmp = -2465448458, tmp));
+  assertEquals(57172463, x >>= ((tmp = 2979642955.241792, tmp)%(tmp = -2464398693.291434, tmp)));
+  assertEquals(114344926, x += x);
+  assertEquals(113279134, x &= (2397742238.6877637));
+  assertEquals(54, x >>= (1908522709.6377516));
+  assertEquals(-2.966982919573829e-7, x /= (tmp = -182003070, tmp));
+  assertEquals(0, x <<= (-1078417156));
+  assertEquals(-147831390, x ^= (((-147831390)>>>x)+x));
+  assertEquals(0, x -= x);
+  assertEquals(-242221450.44696307, x -= (tmp = 242221450.44696307, tmp));
+  assertEquals(-484442900, x <<= (((tmp = -2033947265.088614, tmp)&x)/(x^(tmp = -2893953848, tmp))));
+  assertEquals(-3227648, x <<= (x<<((tmp = -193993010, tmp)*((983187830)|(3146465242.2783365)))));
+  assertEquals(-6455296, x += x);
+  assertEquals(-1771542585, x -= (x^(tmp = -1767335879, tmp)));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x >>>= ((((tmp = -1612864670.4532743, tmp)*(tmp = 786265765.210487, tmp))*((((tmp = -893735877.3250401, tmp)*((x^(tmp = -2804782464.233885, tmp))<<x))&(x-x))^x))<<x));
+  assertEquals(0, x -= (x>>>(-1648118674.380736)));
+  assertEquals(0, x >>= ((tmp = -2706058813.0028524, tmp)>>(2745047169)));
+  assertEquals(0, x += x);
+  assertEquals(0, x %= (-898267735.137356));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>= ((265527509)/((tmp = 2190845136.7048635, tmp)+((x>>x)>>>((x%(x-x))&((((-2080184609.8989801)&((-327231633)>>>((tmp = 864849136, tmp)%(((-524363239)*(((((tmp = 2245852565.3713694, tmp)&(1918365.8978698254))>>>(tmp = -2463081769, tmp))-(((2438244059.471446)|((((-135303645.38470244)*(-861663832.2253196))%(tmp = 1273185196.0261836, tmp))|((2261539338.832875)%((320267076.2363237)+x))))>>(tmp = -2731398821, tmp)))/(tmp = -1947938611, tmp)))^x))))>>(tmp = 833666235, tmp))|x))))));
+  assertEquals(-1116704570, x ^= (-1116704570));
+  assertEquals(1379561710, x ^= (tmp = -280362968.19654894, tmp));
+  assertEquals(-1673822208, x <<= x);
+  assertEquals(-1673822208, x |= (x<<(tmp = 1389479193.9038138, tmp)));
+  assertEquals(2559712, x >>>= (-2703763734.0354066));
+  assertEquals(2593499, x ^= (x>>>((tmp = 148668150.03291285, tmp)^(tmp = -1580360304, tmp))));
+  assertEquals(2070393855, x |= (tmp = -2227002907, tmp));
+  assertEquals(304197770, x &= (tmp = 2453257354, tmp));
+  assertEquals(304197770, x <<= ((-669331453.8814087)-(x^(x^(tmp = 33804899.98928583, tmp)))));
+  assertEquals(297068, x >>= x);
+  assertEquals(Infinity, x /= (x-x));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x %= ((tmp = 1723087085, tmp)%(2859382131.304421)));
+  assertEquals(0, x %= (((tmp = 2935439763, tmp)<<(-3163992768.637094))%(tmp = 67176733, tmp)));
+  assertEquals(0, x &= (tmp = 2480771277, tmp));
+  assertEquals(0, x >>>= (x+(tmp = -3168690063, tmp)));
+  assertEquals(0, x *= ((tmp = -1915275449.1806245, tmp)>>>((tmp = -1644482094.1822858, tmp)/(tmp = -432927173, tmp))));
+  assertEquals(0, x += (((2766509428.071809)/(x/((942453848.5423365)/(((tmp = -1284574492, tmp)&((tmp = 760186450.7301528, tmp)-(2464974117.358138)))/((x/(x|(672536969)))*(x>>(-1272232579)))))))>>(x*(-3175565978))));
+  assertEquals(-1277710521, x -= (1277710521));
+  assertEquals(-1277710521, x >>= (((tmp = -2349135858, tmp)-x)-x));
+  assertEquals(-1277710521, x >>= ((tmp = 2135645051, tmp)*(tmp = -2468555366, tmp)));
+  assertEquals(-155971, x >>= (-1294859507));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x >>>= (((861078292.6597499)|(-268063679))-(((((-221864206.9494424)-(-3186868203.2201176))&(tmp = 1287132927, tmp))<<(((tmp = 1964887915, tmp)<<((25908382)^(tmp = -688293519.875164, tmp)))*(2075946055)))&(x-((x>>x)&(1395338223.7954774))))));
+  assertEquals(788002218, x -= (-788002218));
+  assertEquals(716399906, x &= (-1145868506));
+  assertEquals(145776674, x &= (-1661931477.360386));
+  assertEquals(145776674, x |= x);
+  assertEquals(-0.05255700469257692, x /= (tmp = -2773686873, tmp));
+  assertEquals(-660918434, x |= (-660918434.2915542));
+  assertEquals(1223537346, x ^= (tmp = -1871274596, tmp));
+  assertEquals(305884336, x >>= (x&x));
+  assertEquals(-1.1123775647978218e-8, x *= ((tmp = -793393031.4229445, tmp)/((tmp = -503919284, tmp)*(((((tmp = 429810625, tmp)>>>x)-((2091544148.870375)<<(((((x^x)%x)|x)/(-260773261))<<((tmp = -1323834653, tmp)&x))))*((-1231800099.3724015)+x))*((x+((-559726167)^x))>>>((-549148877)<<((((tmp = 1196115201, tmp)/((tmp = -2654658968.390111, tmp)%(tmp = -1044419580, tmp)))*(((((x>>>(733571228))+(2919762692.511447))/(-2718451983.570547))^x)+((2891533060.1804514)^((tmp = -2514488663, tmp)&x))))<<(tmp = -2526139641.6733007, tmp))))))));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x *= x);
+  assertEquals(0, x |= x);
+  assertEquals(3076984066.336236, x -= ((tmp = -3076984066.336236, tmp)+((tmp = -446575828.5155368, tmp)&x)));
+  assertEquals(1, x /= x);
+  assertEquals(1513281647.839972, x *= (1513281647.839972));
+  assertEquals(1251138155, x ^= ((tmp = 2124481052, tmp)&(2431937351.4392214)));
+  assertEquals(1, x /= x);
+  assertEquals(0, x &= (tmp = 627050040, tmp));
+  assertEquals(497153016, x ^= (497153016));
+  assertEquals(-1112801283, x |= (tmp = 2752196557, tmp));
+  assertEquals(0.5735447276296568, x /= ((((tmp = -500878794, tmp)%(tmp = -2559962372.2930336, tmp))%(2661010102))+(tmp = -1439338297, tmp)));
+  assertEquals(1.0244795995097235e-9, x /= (559840067));
+  assertEquals(0.43468811912309857, x *= (424301391));
+  assertEquals(-1972757928, x ^= (tmp = -1972757928.9227014, tmp));
+  assertEquals(-606757265, x ^= (tmp = -2923461577.264596, tmp));
+  assertEquals(-37, x >>= (((-2736561559.7474318)%(tmp = -27668972.662741184, tmp))*(2774711606)));
+  assertEquals(-1923785671, x += ((-1923785597)+x));
+  assertEquals(-3877639176, x += (tmp = -1953853505, tmp));
+  assertEquals(-4688259242, x -= ((810620066.4394455)>>(((-1474285107.459875)>>x)/(((((-570672326.4007359)>>(tmp = -3086802075, tmp))%x)>>>(((tmp = 286938819.28193486, tmp)>>>((1712478502)>>(tmp = 3045149117.796816, tmp)))<<(tmp = 750463263.292952, tmp)))&(tmp = 2055350255.5669963, tmp)))));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x <<= (1037856162.5105649));
+  assertEquals(0, x *= x);
+  assertEquals(0, x &= (997845077.4917375));
+  assertEquals(0, x *= x);
+  assertEquals(0, x *= x);
+  assertEquals(0, x <<= (((x<<x)&(57691805))>>(786927663)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x += x);
+  assertEquals(0, x &= (-2131910624.1429484));
+  assertEquals(0, x >>>= (-43787814));
+  assertEquals(-2415062021, x += (tmp = -2415062021, tmp));
+  assertEquals(-4830124042, x += x);
+  assertEquals(-186683401, x |= (tmp = 1960135383, tmp));
+  assertEquals(NaN, x *= ((tmp = -1674740173.9864025, tmp)%(((((((-432895485.7261934)-x)^x)>>>(((-1627743078.3383338)>>(179992151))<<((tmp = 911484278.0555259, tmp)|(((tmp = -3042492703, tmp)>>(((-663866035.302746)>>(((x-((440661929.50030375)>>>(tmp = 263692082, tmp)))*x)+x))/((1546004407)^(((tmp = 2023662889.1594632, tmp)*(tmp = -2456602312, tmp))+(tmp = 755602286.1810379, tmp)))))%((tmp = -336449961, tmp)|(tmp = 206780145, tmp))))))/(1068005219.1508512))<<(tmp = -474008862.6864624, tmp))/(((((((1518711056.5437899)>>>(tmp = 287418286.63085747, tmp))<<(tmp = 2823048707, tmp))^(((x<<(x^(-1600970311)))&(x>>(((tmp = 157300110.7636031, tmp)*(tmp = -3047000529, tmp))&(1743024951.3535223))))>>x))-(tmp = -2895435807, tmp))*((tmp = -314120704, tmp)&(tmp = 1759205369, tmp)))>>(tmp = 1833555960.046526, tmp)))));
+  assertEquals(NaN, x -= (tmp = 694955369, tmp));
+  assertEquals(NaN, x *= (x%x));
+  assertEquals(0, x |= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x &= x);
+  assertEquals(NaN, x /= (x+x));
+  assertEquals(NaN, x %= ((tmp = -1595988845, tmp)*((1754043345)>>>(-601631332))));
+  assertEquals(0, x >>>= (tmp = 862768754.5445609, tmp));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x *= (tmp = -1774545519, tmp));
+  assertEquals(0, x >>>= (tmp = -2492937784, tmp));
+  assertEquals(0, x %= ((((x<<(-1657262788.2028513))&((x^(tmp = -671811451, tmp))<<(-2984124996)))^(1455422699.7504625))-((-340550620)>>x)));
+  assertEquals(918278025, x ^= ((tmp = -918278027, tmp)^((tmp = 2889422870, tmp)/(tmp = -657306935.7725658, tmp))));
+  assertEquals(918278025, x %= (2603186571.0582614));
+  assertEquals(107034679.32509923, x %= (tmp = -811243345.6749008, tmp));
+  assertEquals(53517339, x >>= (x%((((x*((tmp = -983766424, tmp)^(-1881545357.8686862)))|(tmp = -1429937087, tmp))>>((x<<x)>>((((tmp = -2347470476, tmp)&x)+((x&x)<<(396061331.6476157)))*(tmp = -3136296453.209073, tmp))))>>>(((tmp = 908427836, tmp)|(tmp = 207737064, tmp))|(((1253036041)-(tmp = 2705074182, tmp))+(-431215157.82083917))))));
+  assertEquals(53477378, x &= ((((-1128036654.165636)*x)+x)>>(x>>(3080099059))));
+  assertEquals(0, x >>= (-590692293));
+  assertEquals(0, x %= (-2395850570.9700127));
+  assertEquals(0, x *= ((tmp = 1377485272, tmp)&(1129370608)));
+  assertEquals(0, x += (x>>>(x%(((((tmp = -1746827236, tmp)+((tmp = -326913490, tmp)&((-58256967)&x)))*(tmp = -1176487022.001651, tmp))>>>(-2089147643))-x))));
+  assertEquals(0, x <<= (tmp = 1073298160.2914447, tmp));
+  assertEquals(-837811832, x ^= (-837811832));
+  assertEquals(102760448, x <<= (tmp = 2833582450.4544373, tmp));
+  assertEquals(0, x &= (((((((tmp = 2595641175, tmp)*x)+(tmp = -2049260172.1025927, tmp))%((2986747823)>>(tmp = -2120598518, tmp)))&((tmp = -2742408622, tmp)&x))>>x)*((1043474247.9601482)&(tmp = 1686365779.9885998, tmp))));
+  assertEquals(0, x >>= ((tmp = 1717862848, tmp)-(tmp = 1077024446.4160957, tmp)));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x /= (-1669429787.975099));
+  assertEquals(NaN, x -= (-2299895633.4807186));
+  assertEquals(138173970, x ^= (138173970.56627905));
+  assertEquals(-2084183776, x <<= (3073345316));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x >>= (-3080556066.068573));
+  assertEquals(0, x &= ((tmp = -2587514820, tmp)*(x-((x^(1995672257))*(1125326747.2339358)))));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x >>= (tmp = 2139186585, tmp));
+  assertEquals(-1904096640, x |= ((-602301360.1919911)*(-1270444810)));
+  assertEquals(1073741824, x <<= (tmp = -1069467849, tmp));
+  assertEquals(1073741824, x ^= (x-x));
+  assertEquals(536870912, x >>>= (-1579466367.160293));
+  assertEquals(512, x >>= (972402804.3890183));
+  assertEquals(512, x &= (tmp = 2664796831, tmp));
+  assertEquals(16777216, x <<= (-2738292561));
+  assertEquals(0, x >>>= ((((1397663615.3889246)|(1117420260.6730964))-(-1173734560))<<((tmp = 1007006104.0172879, tmp)<<((tmp = -623002097, tmp)%(tmp = -35829654.379403114, tmp)))));
+  assertEquals(1200191544, x ^= (tmp = -3094775752, tmp));
+  assertEquals(71, x >>>= x);
+  assertEquals(71, x |= x);
+  assertEquals(1394763772, x += (1394763701));
+  assertEquals(-1.492717171027427, x /= ((x&(tmp = 1243787435, tmp))-(2043911970.26752)));
+  assertEquals(-1.1002448961224718e-8, x /= ((((835185744)*(((tmp = 2165818437, tmp)^(tmp = 2567417009.1166553, tmp))/x))/x)/(((63485842.39971793)^(2668248282.597389))/x)));
+  assertEquals(0, x <<= (tmp = 1598238578.637568, tmp));
+  assertEquals(0, x |= (x&((tmp = -1812945547.5373957, tmp)>>>x)));
+  assertEquals(0, x >>>= (x+(-1969679729.7299538)));
+  assertEquals(1582033662, x += (tmp = 1582033662, tmp));
+  assertEquals(1, x >>>= x);
+  assertEquals(-550748739, x += ((tmp = -550748740, tmp)/(x&((2537822642.235506)^((-2167656297)%(tmp = 1161201210, tmp))))));
+  assertEquals(-268921, x >>= (tmp = 1916069547.7381654, tmp));
+  assertEquals(-0.00021776939364231114, x /= (tmp = 1234888868, tmp));
+  assertEquals(0, x <<= (-1036375023));
+  assertEquals(0, x &= ((((x/(2398886792.27443))&(x|((-1813057854.1797302)-x)))&(x/(((tmp = 3091133731.4967556, tmp)|(3013139691.823039))<<x)))>>>(2542784636.963599)));
+  assertEquals(0, x += ((x*x)/(tmp = 347079383, tmp)));
+  assertEquals(788347904, x |= ((1462257124.6374629)*((3180592147.4065146)-(x&(1922244678)))));
+  assertEquals(2130672735, x |= (tmp = -2846986145, tmp));
+  assertEquals(-1331327970, x ^= ((656251304)-(tmp = 1489152359, tmp)));
+  assertEquals(-0.14377179742889856, x %= (((2889747597.813753)-(1730428996))/(((tmp = -1378710998, tmp)&x)|x)));
+  assertEquals(-1754612583.143772, x += ((-1754725729)^((-2285838408)>>>(1434074349))));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x &= (tmp = -1031961332, tmp));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x /= (3059476325));
+  assertEquals(NaN, x *= ((x*((((tmp = 13529540.462185979, tmp)&x)^((x<<(-1312696238.1628869))&(-2029766712.3852897)))>>x))/x));
+  assertEquals(1657339940, x ^= ((tmp = -488956817.1491232, tmp)&(tmp = -2352413900.1983714, tmp)));
+  assertEquals(-530683621952432200, x *= (tmp = -320202035.2882054, tmp));
+  assertEquals(229226258, x ^= ((tmp = -1263410990.026416, tmp)+(((-808046349)&(tmp = -1294442506, tmp))&((tmp = 1147437219, tmp)<<((tmp = -820299900, tmp)-(tmp = -1947748943.3443851, tmp))))));
+  assertEquals(7163320, x >>= (-2631307131));
+  assertEquals(-68, x |= (((-1271721343)>>x)%x));
+  assertEquals(-39956523818.38862, x *= (587595938.505715));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>>= ((x^(x+x))<<(tmp = 265212367, tmp)));
+  assertEquals(0, x |= (((x>>((tmp = 2294761023, tmp)/(x>>(2125624288))))&((-2125650113)|(tmp = 1014409884, tmp)))%(tmp = -527324757, tmp)));
+  assertEquals(0, x >>= ((tmp = 2267075595, tmp)*(-1681569641.8304193)));
+  assertEquals(0, x >>>= x);
+  assertEquals(0.5738410949707031, x -= ((tmp = -1846572645.573841, tmp)%((((((x^(((-156613905.64173532)/x)<<x))+((x|((2405109060)>>>x))^x))/(570585894.8542807))+(x&(-2544708558)))^((((tmp = -2539082152.490635, tmp)+((((-657138283)/(2204743293))-((tmp = -1422552246.565012, tmp)+x))<<(x-x)))>>(x/(x>>>(tmp = -3027022305.484394, tmp))))<<x))&((-2066650303.3258202)/(tmp = -1666842593.0050385, tmp)))));
+  assertEquals(0, x >>>= ((((tmp = 2473451837.613817, tmp)>>((2526373359.1434193)>>(x<<x)))+((tmp = -579162065, tmp)+((tmp = -3115798169.551487, tmp)-(tmp = 933004398.9618305, tmp))))&(tmp = 131167062, tmp)));
+  assertEquals(-2067675316, x ^= (-2067675316.6300585));
+  assertEquals(543772, x >>>= x);
+  assertEquals(-1073741824, x <<= x);
+  assertEquals(3221225472, x >>>= ((x*(1478586441.081221))&(tmp = -3050416829.2279186, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x *= x);
+  assertEquals(-1017771903.0298333, x -= (1017771903.0298333));
+  assertEquals(0.6404112721149928, x /= ((tmp = -144667370, tmp)^(-2849599562)));
+  assertEquals(-2410517638773644000, x -= (((tmp = 1759631550, tmp)*x)*((((tmp = -2949481475, tmp)>>>x)*x)|(tmp = -2977983804, tmp))));
+  assertEquals(-0, x %= (x+((((tmp = -1307866327.7569134, tmp)<<((x&((tmp = -2380043169.8405933, tmp)|x))>>(472992789.7639668)))|(((((x<<(tmp = -1416427232.7298179, tmp))%(-1404989679.409946))*((x/(tmp = -992416608, tmp))/(tmp = 524646495, tmp)))-(tmp = 734405570, tmp))>>x))/(1079256317.7325506))));
+  assertEquals(0, x <<= (tmp = 2459834668, tmp));
+  assertEquals(-0, x /= (tmp = -1892164840.5719755, tmp));
+  assertEquals(0, x >>= (x|(((1299844244)>>>(((tmp = -2422924469.9824634, tmp)|x)-((((1914590293.2194016)+(-3033885853.8243046))-((tmp = -1720088308, tmp)%x))<<(tmp = 2210817619, tmp))))<<x)));
+  assertEquals(0, x <<= (((tmp = 3192483902.841396, tmp)>>>(((x^(2944537154))|(tmp = -1334426566, tmp))*(((((((-2705218389)&x)+(1987320749))+(tmp = -111851605, tmp))|(2894234323))-(265580345))&x)))%(((tmp = 1431928204.6987057, tmp)&(tmp = 914901046, tmp))&(x>>>x))));
+  assertEquals(0, x >>>= (tmp = 1941940941, tmp));
+  assertEquals(0, x %= (3089014384));
+  assertEquals(0, x += ((tmp = 2948646615, tmp)*x));
+  assertEquals(-0, x /= (tmp = -1480146895, tmp));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x %= (-2995257724.158043));
+  assertEquals(NaN, x %= (tmp = 2714835455, tmp));
+  assertEquals(NaN, x /= (tmp = -311440765.98078775, tmp));
+  assertEquals(NaN, x -= (-1600234513.697098));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x <<= (-1499045929));
+  assertEquals(-0, x *= (-2491783113));
+  assertEquals(0, x ^= (x%((x>>(((1234398704.3681123)>>>x)%(x+x)))>>(402257223.4673699))));
+  assertEquals(-643225204, x ^= (((-55960194.698637486)+((((721411198)-(((tmp = 1308676208.7953796, tmp)%(2242904895))-x))>>((((tmp = 332791012, tmp)&((tmp = -2094787948, tmp)/((x/(2427791092))^(2444944499.6414557))))%(((x+(1253986263.5049214))+(((((3135584075.248715)+((tmp = -2569819028.5414333, tmp)%(440908176.1619092)))>>>(x<<((3061615025)-x)))%x)%(x+((2369612016)*((((tmp = 1173615806, tmp)*(-1910894327))&(2428053015.077821))*(-55668334.70082307))))))<<(tmp = -2129259989.0307562, tmp)))+(1579400360)))%((-3053590451.8996153)>>x)))+(x>>(x%(x^((-1772493876)^x))))));
+  assertEquals(413738663060841600, x *= x);
+  assertEquals(1581062538.4501781, x %= ((tmp = -1298397672.0300272, tmp)-((2237197923)+(tmp = -1385478459, tmp))));
+  assertEquals(755644566.8709538, x %= (tmp = -825417971.5792243, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>>= ((89330582)%(-1012731642.4855506)));
+  assertEquals(0, x >>>= x);
+  assertEquals(NaN, x %= ((x>>>((x/(tmp = -1848848941.2352903, tmp))>>>(tmp = -71862893, tmp)))&(-2385996598.2015553)));
+  assertEquals(NaN, x += (-2292484503.318904));
+  assertEquals(NaN, x *= (2961064461));
+  assertEquals(NaN, x += (x<<((2076798243.6442)/((tmp = -81541044.75366282, tmp)^((3041366498.551101)+((2126874365)/(tmp = -177610359, tmp)))))));
+  assertEquals(NaN, x %= ((x/((x/x)+x))>>>x));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x += (1171761980.678));
+  assertEquals(NaN, x += ((2355675823)<<(-390497521)));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x &= (tmp = -658428225.56619, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= (1643310725.5713737));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x <<= (-397005335.3712895));
+  assertEquals(0, x >>>= (tmp = -2804713458.166788, tmp));
+  assertEquals(0, x <<= (((((((tmp = 1879988501, tmp)%(1528081313.9360204))+(1376936736))*((((x>>>((1736268617.339198)>>>(-2598735297.4277673)))<<((((((((-2742982036)/(231867353.4549594))-(875335564))<<x)|((2241386341.742653)<<((-22024910.828409433)&(x<<x))))*(-756987803.5693252))+x)^(tmp = 1084498737, tmp)))<<(1920373881.8464394))&(2370827451.82652)))&(x^(tmp = -891503574, tmp)))<<x)>>>((-1519588625.2332087)^(483024636.2600144))));
+  assertEquals(52193878.40997505, x -= ((tmp = -341753803.40997505, tmp)%(tmp = -96519975, tmp)));
+  assertEquals(-1665844168.938803, x -= (1718038047.348778));
+  assertEquals(3.6962232549405003e-19, x /= (((((-809583468.5507183)>>>((tmp = 286797763, tmp)%((1579183142.7321532)/(1853824036.001172))))<<x)>>(((x|x)^((tmp = -2641304815, tmp)<<(x<<x)))>>(((((268338128.8300134)&(-1778318362.8509881))*(751081373.346478))<<(((525066612)>>(-1139761212))*(2949167563.299916)))<<x)))+((tmp = 664905121, tmp)*((-2208280205)*(3069462420)))));
+  assertEquals(4710721795.110161, x += (((217604832)+((1307891481.781326)-x))+(tmp = 3185225481.328835, tmp)));
+  assertEquals(0, x %= x);
+  assertEquals(0, x -= (((x>>>(x/(tmp = 46977522.46204984, tmp)))>>(-2466993199.615269))&(tmp = 14524430.287991166, tmp)));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x /= (tmp = 578120637, tmp));
+  assertEquals(-17267104, x -= (((tmp = 1515285919.495792, tmp)+(((tmp = -1364790286.7057304, tmp)+((954599071)>>((897770243.1509961)*x)))^x))>>>(566027942.1732262)));
+  assertEquals(-17267104, x &= x);
+  assertEquals(189138241, x ^= ((tmp = 1565742675.9503145, tmp)-((tmp = 1737806643, tmp)|((x*(tmp = -1382435297.5955122, tmp))*(-2820516692.153056)))));
+  assertEquals(189138241, x %= (x*(tmp = -1670678493, tmp)));
+  assertEquals(1693, x %= ((-2328713314)>>>(1623637325)));
+  assertEquals(1693, x %= ((-1019394014)*(x|x)));
+  assertEquals(3386, x += x);
+  assertEquals(9268970871604, x *= (2737439714));
+  assertEquals(-4720.120483643183, x /= (tmp = -1963714889, tmp));
+  assertEquals(-1, x >>= ((x^(((-2404688047.455056)|((1439590234.6203847)<<(tmp = -2496557617, tmp)))/((x<<((tmp = 1865549512.282249, tmp)/(((360384191.55661833)>>(tmp = -1225297117.344188, tmp))>>>(2703264010.4122753))))*(1521960888.0071676))))%(tmp = 2834001448.0508294, tmp)));
+  assertEquals(63, x >>>= (x&(-3079339174.6490154)));
+  assertEquals(0, x >>>= (1039770956.6196513));
+  assertEquals(0, x >>>= (-1074820214));
+  assertEquals(0, x >>>= (x/x));
+  assertEquals(0, x >>= ((tmp = -449117604.2811785, tmp)&x));
+  assertEquals(-0, x /= (tmp = -118266935.1241343, tmp));
+  assertEquals(2226140134, x += (tmp = 2226140134, tmp));
+  assertEquals(2068827161, x ^= ((tmp = -1950744808.846384, tmp)>>((2258661151)^((tmp = -1118176421.8650177, tmp)<<(2828634014)))));
+  assertEquals(123, x >>>= (-1779624840.0515127));
+  assertEquals(0, x >>>= (x|((tmp = -239082904, tmp)<<(tmp = 1404827607, tmp))));
+  assertEquals(0, x >>>= x);
+  assertEquals(1793109749, x ^= (tmp = -2501857547.710491, tmp));
+  assertEquals(855, x >>>= x);
+  assertEquals(0, x >>>= (-847289833));
+  assertEquals(0, x %= (-2271241045));
+  assertEquals(169648072, x ^= (((tmp = 169648072.66759944, tmp)^x)|x));
+  assertEquals(176025927479164930, x *= ((tmp = 1111997198.8803885, tmp)<<(tmp = 2913623691, tmp)));
+  assertEquals(176025926613281700, x += ((tmp = -865883245, tmp)<<(x+(-2624661650))));
+  assertEquals(3406506912, x >>>= ((x|(tmp = 2436016535, tmp))*(((tmp = -1222337225, tmp)<<((1765930268)&x))*(tmp = 1600702938, tmp))));
+  assertEquals(1.694694170868292, x %= (x/(-1597121830.794548)));
+  assertEquals(0, x >>= (tmp = -2443203089, tmp));
+  assertEquals(0, x >>>= (1323174858.2229874));
+  assertEquals(0, x &= ((tmp = 846556929.2764134, tmp)|(((1483000635.0020065)|(-3151225553))|(tmp = -229028309, tmp))));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>= ((((((-2677334787)>>>x)>>((tmp = 496077992, tmp)&((((x<<(x*(tmp = 1095163344.2352686, tmp)))+(-952017952))%((x<<((x*x)/(tmp = 2983152477, tmp)))^((tmp = -939521852.1514642, tmp)^(tmp = 143967625.83755958, tmp))))*((tmp = 551827709.8366535, tmp)>>>x))))^((-1552681253.69869)-(-1874069995)))>>>(x>>(x%(tmp = -2554673215, tmp))))|(tmp = -190693051.77664518, tmp)));
+  assertEquals(0, x /= (tmp = 427402761.37668264, tmp));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x |= (x>>>(((((-543326164.0673618)>>>(-2344090136.707964))>>>((((-563350246.6026886)/x)/(1525481037.3332934))&(tmp = -2917983401.88958, tmp)))^(-1094667845.1208413))^x)));
+  assertEquals(0, x &= (1080322749.897747));
+  assertEquals(0, x %= (tmp = -1572157280, tmp));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x %= ((377280936)|x));
+  assertEquals(708335912, x -= (tmp = -708335912, tmp));
+  assertEquals(2766937, x >>>= x);
+  assertEquals(547342779, x += (tmp = 544575842, tmp));
+  assertEquals(546273751, x -= ((x>>>(472833385.9560914))|((tmp = -1164832103.9970903, tmp)/(3147856452.1699758))));
+  assertEquals(546273751, x &= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>>= (tmp = -3181805175, tmp));
+  assertEquals(-375546685, x |= (-375546685.08261824));
+  assertEquals(1089992785780217200, x *= (tmp = -2902416209, tmp));
+  assertEquals(0, x %= x);
+  assertEquals(-1854981526, x -= ((x-x)-(-1854981526)));
+  assertEquals(-3709963052, x += x);
+  assertEquals(-316772482, x %= (tmp = -1696595285, tmp));
+  assertEquals(-316772482, x |= x);
+  assertEquals(1, x /= x);
+  assertEquals(0, x -= x);
+  assertEquals(-1418375842, x ^= (-1418375842));
+  assertEquals(-2, x >>= x);
+  assertEquals(-4, x += x);
+  assertEquals(-8388608, x &= (x<<(-350555339.30086184)));
+  assertEquals(-16777216, x += x);
+  assertEquals(-0, x %= x);
+  assertEquals(1083355129, x += (tmp = 1083355129, tmp));
+  assertEquals(0, x &= (((tmp = 389729053, tmp)-(tmp = 2944192190.0939536, tmp))/(x-(2081712461.2657034))));
+  assertEquals(0, x += x);
+  assertEquals(-3, x += ((3147270119.5831738)>>((2455837253.1801558)%((-2100649096)>>(((290236808.01408327)|(x&((2661741230.3235292)|((tmp = 1686874589.4690177, tmp)<<x))))*(x+(tmp = 2327674670, tmp)))))));
+  assertEquals(-3, x %= ((x>>(((-2962686431)%x)>>((((2438370783)-(tmp = 2667305770.4839745, tmp))>>>x)>>>x)))<<((x&(tmp = 1428498616, tmp))|((tmp = 2621728539.102742, tmp)/(-204559901)))));
+  assertEquals(2, x ^= (x|((((tmp = 1751230118.6865973, tmp)/(-867465831.207304))>>((-808143600.0912395)+(-2882191493.0506454)))^x)));
+  assertEquals(2, x %= (-2015954220.2250996));
+  assertEquals(0, x >>>= (tmp = 401373999, tmp));
+  assertEquals(0, x >>= (2371830723));
+  assertEquals(0, x >>>= ((((tmp = 2765919396, tmp)-x)-(530310269.7131671))|(tmp = -615761207.9006102, tmp)));
+  assertEquals(-145389011, x ^= (tmp = -145389011, tmp));
+  assertEquals(-145389011, x |= x);
+  assertEquals(1632929832, x &= (-2518898392));
+  assertEquals(4190540017.751949, x += (tmp = 2557610185.751949, tmp));
+  assertEquals(4980024282.153588, x += ((1841304364.1177452)%(tmp = 1051820099.7161053, tmp)));
+  assertEquals(0, x >>>= (((((1379314342.4233718)>>((-2782805860)^((x%(tmp = 1328845288, tmp))>>>(tmp = 901403219.858733, tmp))))+(x/((tmp = -3078904299, tmp)/x)))/x)|(x|(1399702815))));
+  assertEquals(-1820494882, x ^= (tmp = -1820494882.407127, tmp));
+  assertEquals(-305870376, x %= (tmp = -757312253, tmp));
+  assertEquals(-577530443, x += (x|(tmp = -1958083619.6653333, tmp)));
+  assertEquals(333541412591776260, x *= x);
+  assertEquals(-949341696, x >>= ((((1550069663)<<((x>>>(tmp = 2406565178.902887, tmp))>>>((1844746612.632984)/((tmp = 2233757197, tmp)*((-1524891464.1028347)>>(tmp = 2498623474.5616803, tmp))))))&x)<<(x&(tmp = -370379833.3884752, tmp))));
+  assertEquals(-277202090, x |= ((-762200848.8405354)-(tmp = 1749136282, tmp)));
+  assertEquals(0.13704539927239265, x /= (tmp = -2022702633.373563, tmp));
+  assertEquals(0, x -= x);
+  assertEquals(0, x %= ((132951580.19304836)-((427623236.27544415)-(1212242858))));
+  assertEquals(0, x &= ((449148576)&(-1609588210.249217)));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x -= x);
+  assertEquals(-0, x /= (tmp = -1640777090.9694843, tmp));
+  assertEquals(0, x &= (((tmp = -1923412153, tmp)>>>((x>>(tmp = 3027958119.0651507, tmp))+(60243350)))>>(tmp = -2610106062, tmp)));
+  assertEquals(0, x ^= (((-186998676)/(tmp = 2697937056, tmp))-x));
+  assertEquals(-1147950080, x |= ((2425449461)*(tmp = -2525854833, tmp)));
+  assertEquals(457688198, x ^= (2698274950.660941));
+  assertEquals(8724, x %= ((1174351031)>>>((371599047.36048746)+(3025292010))));
+  assertEquals(0, x <<= (tmp = -710011617, tmp));
+  assertEquals(0, x >>>= (1693410026));
+  assertEquals(1443005362, x ^= ((tmp = -2851961934, tmp)+((((x%x)-(tmp = 547622400, tmp))<<(((tmp = 722396486.5553623, tmp)|x)>>>((((tmp = -542268973.5080287, tmp)<<(tmp = 1347854903.771954, tmp))>>>(tmp = -889664427.7115686, tmp))&((tmp = 1549560114, tmp)*(tmp = 964918035, tmp)))))&(-2422502602.920377))));
+  assertEquals(3986573462, x -= (-2543568100));
+  assertEquals(7973146924, x += x);
+  assertEquals(-1, x >>= (-75987297));
+  assertEquals(-12, x += ((2940824338.64834)>>(tmp = 3061467355, tmp)));
+  assertEquals(-3.8229398525977614e-8, x /= (313894554));
+  assertEquals(-2.890709270374084e-17, x /= (tmp = 1322491989, tmp));
+  assertEquals(0, x |= (x-x));
+  assertEquals(0, x >>>= (tmp = -1205300664, tmp));
+  assertEquals(-0, x /= (((2869505187.6914144)>>(tmp = 1541407065, tmp))/(((-571132581)>>>(x>>x))/((x^(170373762.8793683))>>>((((tmp = -363073421.05897164, tmp)|(((tmp = -1591421637, tmp)>>(1095719702.8838692))&(636687681.9145031)))^x)^(x|x))))));
+  assertEquals(-1487828433, x ^= (-1487828433.3462324));
+  assertEquals(-0, x %= x);
+  assertEquals(1716342498, x -= ((tmp = 2578624798, tmp)^x));
+  assertEquals(1636, x >>= ((264194540)>>>(-801900756)));
+  assertEquals(0, x >>>= ((tmp = 2502688876, tmp)+((x<<(x|((-628272226.0338528)|((x<<(-2083074091))>>>(tmp = 1692123246.8418589, tmp)))))>>(1594759826.990993))));
+  assertEquals(0, x <<= (tmp = -904399643, tmp));
+  assertEquals(NaN, x /= ((x^(x-x))%((tmp = 1744962024.4882128, tmp)%x)));
+  assertEquals(NaN, x /= (-1013142883.1845908));
+  assertEquals(NaN, x /= ((tmp = 793633198, tmp)^(-2993598490.8659954)));
+  assertEquals(0, x &= (x>>((tmp = 1200937851, tmp)<<(((tmp = -2807378465, tmp)&(tmp = -143778237, tmp))|(tmp = -1200772223, tmp)))));
+  assertEquals(0, x <<= x);
+  assertEquals(88144, x |= (((((tmp = 3002723937.8560686, tmp)*(tmp = -3171720774.2612267, tmp))%(((tmp = -2586705978.7271833, tmp)%((x+(-1553704278))&(2405085526.501994)))>>((-240842053)>>>(((((tmp = -1886367228.4794896, tmp)>>>x)^(tmp = 2604098316, tmp))^(tmp = 1362808529, tmp))<<((tmp = -1062263918, tmp)|((-172718753)%(tmp = -1910172365.4882073, tmp)))))))^((1444153362)>>((x&((-1205465523.2604182)^(tmp = -2062463383, tmp)))>>(tmp = 956712476, tmp))))>>((((-1004215312)^((((-1707378612.5424936)^(tmp = 2372161553, tmp))/((tmp = 1802586581, tmp)*((2082257.1896460056)&((tmp = -1270773477, tmp)^(tmp = 942517360.3447798, tmp)))))+x))%((((666494127)^(x^x))>>>(tmp = -2592829775, tmp))+((-1601528223)+((x+(tmp = -2417034771.7409983, tmp))>>>((tmp = -730673817, tmp)*x)))))>>x)));
+  assertEquals(-2603179111.7557006, x -= ((2603267255.755627)+(x/(1200979191.2823262))));
+  assertEquals(1691788185, x >>= (tmp = 3088840032, tmp));
+  assertEquals(-168382533, x |= (tmp = -780750941.4590135, tmp));
+  assertEquals(-168382533, x >>= (60741120.48285198));
+  assertEquals(-134287365, x |= (x*(tmp = 834637940.7151251, tmp)));
+  assertEquals(-1481917089, x -= (tmp = 1347629724, tmp));
+  assertEquals(1, x >>>= x);
+  assertEquals(262144, x <<= (2680216914));
+  assertEquals(1075132032, x ^= (x-((tmp = 3220359552.3398685, tmp)^(((-434474746.6039338)|((((((((tmp = 1945689314.9683735, tmp)>>(1300022273))>>>(333705550))&x)%(588357521))-(x+(x^(((tmp = -134560382, tmp)+x)-((((994246147.7195556)-(-1506599689.7383268))%(x<<x))>>((1256426985.5269494)+(tmp = 1860295952.8232574, tmp)))))))^(((tmp = 917333220.2226384, tmp)>>x)>>>(tmp = 865898066, tmp)))%((x|(x%((tmp = -2660580370, tmp)&(tmp = 2966426022, tmp))))*x)))/(((tmp = 682585452, tmp)&(-3219368609))+((tmp = -1330253964, tmp)+((x&(2857161427))/x)))))));
+  assertEquals(274944, x &= ((2606953028.1319966)-(-1707165702)));
+  assertEquals(266752, x &= ((x<<((x+(x+(x^(-1570175484))))^x))^(x+(x<<(tmp = 90330700.84649956, tmp)))));
+  assertEquals(266752, x &= ((((x*(tmp = 2033225408, tmp))-(x-((tmp = 1507658653, tmp)/(-3016036094))))>>>((1497480588)>>(2784070758)))|(tmp = -3025904401.93921, tmp)));
+  assertEquals(-1680442631, x |= ((x/(445284843))|((tmp = 2614520057.2723284, tmp)<<x)));
+  assertEquals(40851947, x >>>= (tmp = -1577031386.938616, tmp));
+  assertEquals(2493, x >>= ((3044630989.3662357)-(-2670572992.8580284)));
+  assertEquals(-0.0000017317105653562252, x /= (-1439617017.9207587));
+  assertEquals(0, x &= (2359806567));
+  assertEquals(623768541, x ^= (623768541));
+  assertEquals(1028567149.0716183, x += (((tmp = 1307794561, tmp)%(x>>x))-(-404798608.0716183)));
+  assertEquals(-1.2971762489811298, x /= (tmp = -792927830.6471529, tmp));
+  assertEquals(-1.2971762489811298, x %= ((-2426421701.2490773)/(-689566815.3393874)));
+  assertEquals(-2147483648, x <<= x);
+  assertEquals(-2147483648, x &= (tmp = -869991477, tmp));
+  assertEquals(-268435456, x >>= (1383186659));
+  assertEquals(0, x -= x);
+  assertEquals(-2009742037, x |= (-2009742037.5389993));
+  assertEquals(-1386630820, x ^= (627864695));
+  assertEquals(-1033479103975173600, x *= (tmp = 745316697.9046186, tmp));
+  assertEquals(-1628048487, x |= (2662654361));
+  assertEquals(325551, x >>>= (340874477));
+  assertEquals(-1235730537, x ^= (tmp = 3059533880.0725217, tmp));
+  assertEquals(-1235730537, x %= (2247137328));
+  assertEquals(-220200960, x <<= ((x>>x)-x));
+  assertEquals(0, x <<= ((tmp = 337220439.90653336, tmp)|(tmp = 2901619168.375105, tmp)));
+  assertEquals(0, x >>>= ((-2114406183)/x));
+  assertEquals(0, x %= ((1425828626.3896675)/x));
+  assertEquals(0, x >>>= ((3213757494)>>>(2595550834.3436537)));
+  assertEquals(0, x <<= x);
+  assertEquals(-0, x /= ((1544519069.5634403)/((tmp = -1332146306, tmp)&(-762835430.0022461))));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>= (x|((((x*((-786272700)+x))<<x)+((tmp = -1868484904, tmp)-(tmp = -1692200376, tmp)))+(-1010450257.6674457))));
+  assertEquals(0, x -= x);
+  assertEquals(0, x ^= (x>>>(706010741)));
+  assertEquals(-964928697, x |= (-964928697));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>= ((((tmp = 1778003555.3780043, tmp)>>(x%((tmp = -766158535, tmp)^((-2681449292.8257303)%((x-(x|(tmp = 1966478387.2443752, tmp)))^(((tmp = -1848398085, tmp)&x)>>>(tmp = -2860470842, tmp)))))))%(tmp = 2315077030, tmp))^x));
+  assertEquals(0, x ^= x);
+  assertEquals(-288007757, x ^= ((tmp = 183607156.1803962, tmp)-(tmp = 471614914, tmp)));
+  assertEquals(-270573581, x |= (tmp = -849475741.9424644, tmp));
+  assertEquals(-2129929, x |= (((((1942852445)&(tmp = 1280372312, tmp))*(x*(tmp = -1601900291, tmp)))^((509080002.81080174)-(tmp = 2699498226.9164257, tmp)))>>(((-335361221)>>(tmp = 843134832, tmp))%(-35532542))));
+  assertEquals(-232622355, x ^= ((-3060885134.5375547)-(((tmp = 1965966723, tmp)-((tmp = 1248630129.6970558, tmp)<<(tmp = 1859637857.5027392, tmp)))*x)));
+  assertEquals(-52149658093200070, x *= (224181627.31264615));
+  assertEquals(-697122968, x ^= (x-(x+(tmp = 2747211186.407712, tmp))));
+  assertEquals(-2146269688, x &= ((tmp = -1466710519, tmp)^(x/(1419998975))));
+  assertEquals(-536567422, x >>= (((((tmp = -1760701688.999274, tmp)>>(-1821976334))/(((tmp = -1660849531, tmp)>>>x)-((x+((tmp = -2489545009.4327965, tmp)>>>((tmp = -267360771.39148235, tmp)^x)))*(((-1453528661)%x)>>>(((243967010.3118453)/((((((2977476024)>>>((-1630798246)<<x))&(591563895.2506002))*(((2668543723.9720144)>>>x)|(1600638279)))^x)>>(x<<(tmp = -152589389, tmp))))>>>(x|(2821305924.9225664)))))))+(618968002.8307843))%(tmp = -1005408074.368274, tmp)));
+  assertEquals(40962, x &= (114403906));
+  assertEquals(19741977727890, x *= ((-2367133915.963945)>>>(-3119344126)));
+  assertEquals(1313341440, x <<= x);
+  assertEquals(626, x >>>= ((((-333992843)%(tmp = -2742280618.6046286, tmp))>>>x)|x));
+  assertEquals(0, x <<= (2598188575));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x ^= (x%((2507288229.3233204)&(tmp = -1714553169.9276752, tmp))));
+  assertEquals(0, x /= ((633436914.3859445)>>>(tmp = 1579804050.6442273, tmp)));
+  assertEquals(0, x *= ((tmp = 1172218326, tmp)<<((tmp = -2491306095.8456626, tmp)*(((tmp = 1305371897.9753594, tmp)>>((x^(((3077992060)*x)<<(492815553.904796)))>>((652151523)|x)))%x))));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x %= (1118131711));
+  assertEquals(0, x &= ((tmp = 2734673884, tmp)|(x-((tmp = 2694578672.8975897, tmp)*(((x>>(2350811280.974167))*(1052548515))&(x^(x*(tmp = -1336287059.0982835, tmp))))))));
+  assertEquals(-2632782867.1256156, x += ((tmp = -2743992725.1256156, tmp)+(tmp = 111209858, tmp)));
+  assertEquals(-0, x %= x);
+  assertEquals(0, x >>>= (((tmp = -2050519887, tmp)^(106865302.74529803))>>(1642851915.2909596)));
+  assertEquals(-171964826, x |= (tmp = -171964826.6087358, tmp));
+  assertEquals(-2.113405951193522, x /= (tmp = 81368572.80206144, tmp));
+  assertEquals(3, x >>>= x);
+  assertEquals(0, x %= x);
+  assertEquals(-1717345907.837667, x += (-1717345907.837667));
+  assertEquals(-100964883, x |= (tmp = -109574931.80629134, tmp));
+  assertEquals(-33849857, x |= (-974111718.2433801));
+  assertEquals(1, x >>>= (tmp = -2556222849.005595, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>>= (-1796630999.4739401));
+  assertEquals(0, x >>>= x);
+  assertEquals(2031695758, x += (((x/(((tmp = -2364918403, tmp)%(x^((tmp = 277767803.6375599, tmp)>>((((tmp = 540036080, tmp)/(x|(2665298931)))/(x|((x>>(-2035456216.6165116))<<(2143184420.5651584))))^x))))&(tmp = 927798419.8784283, tmp)))-(-2031695758))>>>x));
+  assertEquals(2031695758, x |= x);
+  assertEquals(2031695758, x <<= (((x>>(x%x))|(tmp = -1164531232.7384055, tmp))*x));
+  assertEquals(124004, x >>>= x);
+  assertEquals(529846352, x += ((529722348)%((2417645298.865121)|(x>>(x>>>(x+x))))));
+  assertEquals(60067920, x &= (((tmp = -3166008541.8486233, tmp)-x)|(x%x)));
+  assertEquals(1415594240755200, x *= ((-2786707452.873729)>>(((tmp = -2369315809, tmp)*((1559868465)|(1011218835.1735028)))>>>x)));
+  assertEquals(1415595182259140, x += (941503939.9023957));
+  assertEquals(0, x <<= ((tmp = 2887184784.265529, tmp)/(-2575891671.0881453)));
+  assertEquals(0, x &= ((tmp = -1546339583, tmp)>>>(tmp = -587433830, tmp)));
+  assertEquals(0, x *= (((tmp = 1356991166.5990682, tmp)%(tmp = -284401292, tmp))*(1869973719.9757812)));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x ^= (((tmp = 92575404.43720293, tmp)>>>(263475358.17717505))%x));
+  assertEquals(0, x <<= (((561514358)*(tmp = -439584969, tmp))%((((-3005411368.7172136)+x)|(-2230472917))&x)));
+  assertEquals(0, x >>= ((x>>>x)-((x-(1630649280.510933))+x)));
+  assertEquals(0, x >>= (tmp = -1772403084.7012017, tmp));
+  assertEquals(0, x *= x);
+  assertEquals(0, x += x);
+  assertEquals(0, x &= x);
+  assertEquals(0, x >>= (tmp = 1622680387, tmp));
+  assertEquals(1033887633558225200, x -= ((-510616337)*(tmp = 2024783695, tmp)));
+  assertEquals(-2.8073538539158063e+27, x *= (tmp = -2715337492, tmp));
+  assertEquals(-2.8073538539158063e+27, x -= ((tmp = -1664804757, tmp)&((tmp = -226616419, tmp)>>>(1006711498))));
+  assertEquals(1894539615, x |= (tmp = -2400427681.1831083, tmp));
+  assertEquals(7400545, x >>= (774629608.4463601));
+  assertEquals(456756268, x += (449355723));
+  assertEquals(285771784, x &= (-1316427366));
+  assertEquals(17, x >>= ((tmp = -220509931.20787525, tmp)*(((tmp = 2518859292, tmp)+(-1477543005.1586645))>>(tmp = 3172820250.687789, tmp))));
+  assertEquals(85924262443, x *= (x*((tmp = -2856669745.965829, tmp)&(((tmp = 401420695, tmp)^(tmp = 2355371132, tmp))|(tmp = 590645330.021911, tmp)))));
+  assertEquals(1703875715, x ^= ((-2576394029.7843904)-x));
+  assertEquals(1703875715, x %= (tmp = 2234144310, tmp));
+  assertEquals(271405807, x ^= (1973569132));
+  assertEquals(1060178, x >>>= (tmp = -84823096, tmp));
+  assertEquals(8, x >>>= (tmp = 2246120561.905554, tmp));
+  assertEquals(-2846791089, x += (-2846791097));
+  assertEquals(104933962, x &= (x-(-2969030955.99584)));
+  assertEquals(489215611.96215343, x -= (-384281649.96215343));
+  assertEquals(489215611, x |= x);
+  assertEquals(1186191360, x <<= ((tmp = 774407142.993727, tmp)%x));
+  assertEquals(1186191360, x %= (1555004022));
+  assertEquals(-1697134080, x ^= (tmp = -597421568, tmp));
+  assertEquals(-1102053376, x <<= ((-927370769.4059179)^((tmp = 1093490918, tmp)>>(((-2522227493.3821955)%x)+(-2657319903)))));
+  assertEquals(1086450058, x ^= (-23991926.187098265));
+  assertEquals(1086450058, x |= x);
+  assertEquals(-1.6554590588410778, x /= (x|(x<<(x+x))));
+  assertEquals(67108863, x >>>= ((-926530233)+x));
+  assertEquals(494553310, x ^= (tmp = 512079649, tmp));
+  assertEquals(207751168, x &= (2892146720.6261826));
+  assertEquals(207751168, x &= x);
+  assertEquals(207751168, x |= x);
+  assertEquals(6340, x >>>= (((((x<<(x-((-2819638321)*((x<<x)+x))))>>x)+(tmp = 2016170261, tmp))+(tmp = 2755496043.772017, tmp))+(-841368625.1402085)));
+  assertEquals(6340, x ^= ((x/(tmp = -192734784, tmp))>>>(((-140306239)&x)-x)));
+  assertEquals(1, x /= x);
+  assertEquals(0, x >>= x);
+  assertEquals(26786600, x ^= (tmp = 26786600, tmp));
+  assertEquals(-0.014657576899542954, x /= ((-1454855938.0338)+(-372635753.3681567)));
+  assertEquals(0, x &= ((tmp = 2480635933, tmp)&(-2986584704.9165974)));
+  assertEquals(-2108639122, x += ((tmp = 2108639123.8683565, tmp)^((-881296055)/(((x<<(2026200582))|(tmp = -862495245.138771, tmp))-(-1111596494.892467)))));
+  assertEquals(1893466112, x <<= (tmp = 607974481, tmp));
+  assertEquals(1893466112, x |= x);
+  assertEquals(1133122783.997418, x += ((tmp = -760343332, tmp)-((x-(tmp = -878561823.4218843, tmp))/(tmp = -693454632.596637, tmp))));
+  assertEquals(8, x >>>= (tmp = 700339003.3919828, tmp));
+  assertEquals(4.605305035175536e-9, x /= (1737127060.8343256));
+  assertEquals(4.605305035175536e-9, x -= ((x%(897221779))>>>x));
+  assertEquals(-1864423625.5704088, x += (tmp = -1864423625.5704088, tmp));
+  assertEquals(1132240092, x <<= (1304417186.1193643));
+  assertEquals(-2088985380, x ^= (x<<x));
+  assertEquals(-4, x >>= ((tmp = 1959823884.0935726, tmp)%(-1679792398.569136)));
+  assertEquals(-268435456, x <<= ((tmp = 2586838136, tmp)|((tmp = -481716750.718518, tmp)>>>((1485826674.882607)/(tmp = -2826294011, tmp)))));
+  assertEquals(-32768, x >>= (2060648973));
+  assertEquals(1, x /= x);
+  assertEquals(-2838976297, x -= (tmp = 2838976298, tmp));
+  assertEquals(-1382985298, x <<= ((tmp = -2104305023, tmp)&x));
+  assertEquals(10, x >>>= (x+x));
+  assertEquals(10, x -= (x>>>(361588901.70779836)));
+  assertEquals(854603510, x -= (-854603500));
+  assertEquals(-557842432, x <<= (tmp = 1212985813.6094751, tmp));
+  assertEquals(-459390188241943040, x *= (tmp = 823512450.6304014, tmp));
+  assertEquals(-232800033621957060, x /= ((((((686635689)/(tmp = 2013252543, tmp))*(tmp = -1591617746.8678951, tmp))|(((tmp = -1777454093.5611362, tmp)>>>((tmp = 2680809394, tmp)^(((x>>((((((tmp = -265022244, tmp)%((tmp = -3075004537, tmp)>>(((((1427784269.5686688)^((tmp = -1095171528.911587, tmp)^(-942424985.7979553)))>>(-1279441481.1987405))*((2493620394)>>(-2769016043)))/(x&((tmp = 2059033657, tmp)%(((tmp = 1948606940.1488457, tmp)-(tmp = -2645984114.13219, tmp))^x))))))^x)^x)%(x%((((tmp = 3209433446.4551353, tmp)%(tmp = 1364430104.0424738, tmp))/(tmp = -2103044578.349498, tmp))+(tmp = -2613222750, tmp))))*(2099218034)))&(((tmp = -378500985.49700975, tmp)>>(((x+x)|(x%(((-1841907486)<<(-1220613546.194021))<<(tmp = -1260884176, tmp))))^(tmp = 1858784116, tmp)))>>>((x%x)%((x>>>(tmp = -2540799113.7667685, tmp))|x))))/((((tmp = 642072894.6455215, tmp)-(-324951103.6679399))*(tmp = 1424524615, tmp))+((x<<(tmp = -904578863.5945344, tmp))*(tmp = 49233475.435349464, tmp))))))<<(tmp = 1680210257, tmp)))+((tmp = -1516431503, tmp)>>>(-1105406695.3068116)))/(-275019361.6764543)));
+  assertEquals(192359387.42913792, x /= (-1210234846));
+  assertEquals(192359387.42913792, x %= (-2920206625.0154076));
+  assertEquals(192359387.42913803, x -= (((((((tmp = -1263203016.3258834, tmp)-(2432034005.6011124))&x)<<(1479434294))>>((tmp = -1695856315.523002, tmp)>>>(tmp = 557391345, tmp)))/(tmp = -1280240246.2501266, tmp))%((tmp = -2196489823.034029, tmp)>>(((x&((912221637.1101809)+((tmp = -3003677979.652423, tmp)>>(tmp = -716129460.1668484, tmp))))-((x+(x-(-2780610859)))>>>(-2445608016)))<<((x*(x+(x+(((-2124412727.9007604)%(tmp = -593539041.5539455, tmp))&(tmp = 2404054468.768749, tmp)))))%(x>>(tmp = -2913066344.404591, tmp)))))));
+  assertEquals(11740, x >>= (688848398.7228824));
+  assertEquals(11740, x >>= ((1545765912)*(307650529.9764147)));
+  assertEquals(23480, x += x);
+  assertEquals(0, x >>>= ((tmp = 1313078391, tmp)|x));
+  assertEquals(1726251264, x -= ((1939413887)<<(1004888744.2840619)));
+  assertEquals(765324793.5278986, x %= (960926470.4721014));
+  assertEquals(747387, x >>= ((2483010044)-(tmp = -413698190, tmp)));
+  assertEquals(1, x /= x);
+  assertEquals(3016811624, x *= (3016811624));
+  assertEquals(17408, x &= (((tmp = -991624868, tmp)<<(((63107932)/(tmp = 2659939199, tmp))|(tmp = -1968768911.3575773, tmp)))>>(((-2876822038.9910746)|(tmp = 2550230179.243425, tmp))<<((x*(x<<((x<<((tmp = -1627718523.616604, tmp)|((2154120561.254636)-(x%(x<<(1484563622.1791654))))))<<((((x^(tmp = 3016524169, tmp))<<(((x+(tmp = 1887816698.2455955, tmp))+x)-x))-(-3023329069))-x))))+x))));
+  assertEquals(0, x <<= (((1247441062.177967)/(-1717276234))+x));
+  assertEquals(0, x |= ((x%((-1648299429.4520087)>>(-137511052)))>>(tmp = 221301016.4926411, tmp)));
+  assertEquals(0, x /= ((-2598501544.913707)>>>(-2177037696)));
+  assertEquals(NaN, x %= (x>>x));
+  assertEquals(0, x &= (tmp = 1852419158, tmp));
+  assertEquals(-829029120, x |= (((2122339180)*((((((tmp = 768748914, tmp)<<((1008490427)&((1937367899.957056)-(((635094486)>>(((tmp = -795046025, tmp)*(2665104134.4455256))^(tmp = 706594584.2462804, tmp)))/(504397522)))))/(-556057788))>>((x/(tmp = -2732280594, tmp))-x))+(-1989667473))+(tmp = 2766802447.789895, tmp)))<<(((tmp = -2969169096, tmp)-x)+(tmp = 2093593159.0942125, tmp))));
+  assertEquals(0.6451933462602606, x /= ((-1284931292)<<(x<<(tmp = 1294716764, tmp))));
+  assertEquals(1515416866.520901, x *= (2348779440));
+  assertEquals(-1620606242886682600, x *= ((-993898625.5357854)&(((tmp = -571100481, tmp)/x)*((2428590177.311031)%(tmp = -2671379453, tmp)))));
+  assertEquals(-1137472828, x %= (tmp = -1195183004, tmp));
+  assertEquals(-3096634005473250000, x *= (tmp = 2722380640, tmp));
+  assertEquals(-3096634003996758500, x -= (-1476491033.833419));
+  assertEquals(-3096634000805538000, x += (3191220521.978341));
+  assertEquals(-3096634000805468000, x += ((((tmp = -3024976741, tmp)&(952616360))|((x*(-1547952311))+(x*x)))>>>(tmp = 981373323, tmp)));
+  assertEquals(-3096633998655594000, x += (2149873927));
+  assertEquals(-118812224101.54297, x %= (((2641881276.9898443)*(((502159480)^x)<<x))%((tmp = -2840045365.547772, tmp)*(((((-2297661528)>>>(x>>(-229103883.94961858)))&(((-1285047374.6746495)<<((-360045084)>>>((x-(tmp = -956123411.1260898, tmp))%x)))>>((tmp = -2375660287.5213504, tmp)+((((tmp = -2753478891, tmp)>>>(((tmp = 101438098, tmp)>>(((tmp = -2736502951, tmp)<<((tmp = -3084561882.368902, tmp)&(tmp = 1491700884, tmp)))|x))&(tmp = 1627412882.6404104, tmp)))>>>(tmp = 1039002116.6784904, tmp))<<((tmp = -2840130800, tmp)-(tmp = -740035567, tmp))))))&(tmp = -416316142, tmp))>>x))));
+  assertEquals(86, x >>>= (tmp = -293489896.5572462, tmp));
+  assertEquals(172, x += (x%((((-2635082487.364155)|((-2361650420.634912)&(-2147095650.7451198)))<<((tmp = 2258905145.9231243, tmp)%((((tmp = -1365987098.5130103, tmp)*(((((((932437391)/x)/(289270413.0780891))%(x-x))+((((2194986374.917528)>>(((((tmp = -1553805025, tmp)|x)^(((x>>(-564400586.0780811))^(tmp = 1738428582.0238137, tmp))>>(tmp = 1717774140, tmp)))&(tmp = -2789427438, tmp))%(((tmp = -1386118057, tmp)*(-2333221237.7915535))*(x>>>(((((41346648.46438944)&x)%(-478973697.6792319))|(tmp = 2108106738, tmp))/x)))))-(tmp = -133437701.64136505, tmp))>>>x))+(tmp = -1567210003, tmp))*(x+((x&x)-(2942851671)))))>>>(tmp = -446377136, tmp))*((((((tmp = 1597203255, tmp)>>>(619157171))|(-2766246629.005985))>>((tmp = 3130227370, tmp)%x))*(tmp = 2072227901.6101904, tmp))|((tmp = 1369019520, tmp)^(759659487))))))>>>x)));
+  assertEquals(1996475731, x ^= ((1456327892.2281098)|(1728022827)));
+  assertEquals(0, x %= x);
+  assertEquals(0, x &= (1323847974));
+  assertEquals(3076829073.8848357, x += (3076829073.8848357));
+  assertEquals(9569842648396755000, x *= (3110293883.2782717));
+  assertEquals(9569842646260304000, x -= (2136450372.9038036));
+  assertEquals(9.158188827418242e+37, x *= x);
+  assertEquals(0, x <<= ((x&(tmp = -2241179286, tmp))+((tmp = 2553144081, tmp)&((tmp = -1914709694, tmp)^(tmp = -1469651409.0651562, tmp)))));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x /= (2177840666.276347));
+  assertEquals(0, x %= (-690827104));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x ^= x);
+  assertEquals(-0, x /= (tmp = -803415280, tmp));
+  assertEquals(-2355576914.316743, x += (-2355576914.316743));
+  assertEquals(-833671722514674000, x *= ((3053388806.692315)-(tmp = 2699474775.081724, tmp)));
+  assertEquals(1, x /= x);
+  assertEquals(1898147684, x += ((tmp = 1898147683, tmp)|(x<<x)));
+  assertEquals(2.192324660388075, x %= ((tmp = 2630187518, tmp)/((2868794982.790862)|(490860748))));
+  assertEquals(0, x >>>= ((2751021779)/(-952522559)));
+  assertEquals(321040461, x ^= ((321040461.153594)-x));
+  assertEquals(-2.3814602031636922, x /= ((tmp = -170472190, tmp)|x));
+  assertEquals(-1, x >>= (2200125174.177402));
+  assertEquals(-2964432647.9379396, x += (-2964432646.9379396));
+  assertEquals(-370116502.93793964, x %= (tmp = -518863229, tmp));
+  assertEquals(777927355.2283959, x -= (-1148043858.1663356));
+  assertEquals(0, x *= ((tmp = 1134913539, tmp)&(((x>>>((tmp = -989822787, tmp)>>>x))%x)&(tmp = 1078636160.7313156, tmp))));
+  assertEquals(-1089245637, x ^= (3205721659.3548856));
+  assertEquals(-1192493056, x <<= (-1173291054));
+  assertEquals(78013832, x += ((tmp = 2462999944, tmp)+x));
+  assertEquals(0, x %= x);
+  assertEquals(0, x >>>= (1794908927.7409873));
+  assertEquals(1708338504, x += ((-2586628792.3484306)<<x));
+  assertEquals(12, x >>= (-545794789.3827574));
+  assertEquals(0, x &= ((2753207225)<<(((-1776581207.557251)+((tmp = -2414140402, tmp)*x))+(x<<(x|(tmp = 772358560.3022032, tmp))))));
+  assertEquals(0, x <<= ((tmp = -2755724712.152605, tmp)/((x>>(-732875466))&x)));
+  assertEquals(NaN, x *= (((tmp = 2617815318.1134562, tmp)/x)%(x|((((((-851659337.194871)<<(tmp = 2072294700, tmp))%((x+(2193880878.5566335))^((tmp = 3005338026, tmp)-(2947963290))))/x)/(x+(2091745239.4210382)))-(x>>x)))));
+  assertEquals(NaN, x /= (tmp = -427684595.0278094, tmp));
+  assertEquals(NaN, x /= (tmp = -263945678, tmp));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x -= (((x>>((x&x)-(tmp = -673697315, tmp)))>>(((1575095242.2330558)/(x-(-1816886266)))%(-1580195729)))>>>x));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x >>= (-2815518206));
+  assertEquals(0, x -= (x/(1795634670.692437)));
+  assertEquals(-2753579891, x += (tmp = -2753579891, tmp));
+  assertEquals(2.7773776150171776, x /= (tmp = -991431585, tmp));
+  assertEquals(5.554755230034355, x += x);
+  assertEquals(3.362161997528237e-9, x /= (1652137890.4758453));
+  assertEquals(3.362161997528237e-9, x %= (tmp = -10848734.527020693, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(-2978012493, x -= (x+(2978012493)));
+  assertEquals(-5.158905851797543, x /= (((x+((tmp = -2548840164, tmp)>>x))<<(x^((tmp = -533281232.7294345, tmp)&x)))&(tmp = -1502692171, tmp)));
+  assertEquals(-5.158905851797543, x %= (-3009435255.5612025));
+  assertEquals(-20971520, x <<= ((tmp = -2728812464, tmp)%(2619809573.672677)));
+  assertEquals(-1900019712, x &= (2398099552));
+  assertEquals(-1991377, x %= ((tmp = 1562364373.7334614, tmp)>>>(((x-(-946283217))<<(-2044590694))^(((tmp = 1681238509, tmp)>>(-2801649769))-x))));
+  assertEquals(1, x /= x);
+  assertEquals(1, x %= (x/(x-x)));
+  assertEquals(1.3525631913093335e-9, x /= (739336991));
+  assertEquals(0, x &= ((x&(x|(-1530424204)))<<((((tmp = -295143065.9115021, tmp)>>x)+x)<<x)));
+  assertEquals(0, x <<= (-1311017801));
+  assertEquals(-0, x /= (-667133339.1918633));
+  assertEquals(1038307283, x += (1038307283));
+  assertEquals(506985, x >>>= ((tmp = 1550624472.9157984, tmp)^x));
+  assertEquals(506985, x >>>= ((254646626)<<(tmp = 1572845412.744642, tmp)));
+  assertEquals(32447040, x <<= (tmp = -2427326042, tmp));
+  assertEquals(0, x -= (x<<((x|x)>>>x)));
+  assertEquals(0, x &= x);
+  assertEquals(0, x &= ((-484420357)|((tmp = 807540590.6132902, tmp)/(x/x))));
+  assertEquals(-890607324, x ^= ((tmp = -890607324, tmp)>>((((-2876826295)>>x)<<((tmp = 2351495148.117994, tmp)>>(tmp = 1368611893.274765, tmp)))*(tmp = 1531795251, tmp))));
+  assertEquals(-729075363, x += (x+(tmp = 1052139285, tmp)));
+  assertEquals(531550884933581760, x *= x);
+  assertEquals(1980836332, x ^= ((-746269795.2320724)-((2400458512)>>((1290672548)>>>((((1536843439.5629003)&(3185059975.158061))*(tmp = -1339249276.2667086, tmp))&x)))));
+  assertEquals(941373096, x %= ((x+(-451098412))^(tmp = 1725497732, tmp)));
+  assertEquals(-1766019323, x += (tmp = -2707392419, tmp));
+  assertEquals(2528947973, x >>>= (x^(-896237435.3809054)));
+  assertEquals(-263192576, x <<= (-866361580));
+  assertEquals(-2008, x >>= (-2608071791));
+  assertEquals(-88, x %= (((-1076807218.4792447)&((tmp = 601044863, tmp)>>((tmp = 1228976729, tmp)+((((-2711426325)*x)|x)|(x%(-2700007330.3266068))))))&(tmp = 3147972836.778858, tmp)));
+  assertEquals(1762886843, x ^= (tmp = 2532080403, tmp));
+  assertEquals(1762886843, x %= ((((((tmp = -2059247788, tmp)>>x)/x)+(x<<x))^x)>>>(-1969283040.3683646)));
+  assertEquals(4812334726.587896, x += (tmp = 3049447883.587897, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(1, x *= x);
+  assertEquals(-2150507334, x -= ((tmp = 1578221999, tmp)+(tmp = 572285336, tmp)));
+  assertEquals(-4546475858941548500, x *= ((tmp = -931533139.5546813, tmp)^(tmp = 3061503275, tmp)));
+  assertEquals(-269064192, x |= ((207217276.91936445)<<(tmp = -957353678.4997551, tmp)));
+  assertEquals(1, x /= x);
+  assertEquals(1, x <<= (((1463856021.8616743)%(x*(tmp = -2286419102, tmp)))/(-2852887593)));
+  assertEquals(2223868564.8383617, x *= (tmp = 2223868564.8383617, tmp));
+  assertEquals(918797189.9033995, x -= ((1305071374.9349623)%(x+(2211992629))));
+  assertEquals(-2212004787.4668465, x -= (tmp = 3130801977.370246, tmp));
+  assertEquals(31783, x >>= (2951958960));
+  assertEquals(31783, x ^= ((((tmp = -2441511566, tmp)&((tmp = 91427553.90168321, tmp)+((tmp = 3001737720.327718, tmp)%x)))>>>(-2263859841))>>>((2109161329)>>(tmp = -2816295136.7443414, tmp))));
+  assertEquals(4068224, x <<= (x%((tmp = -682576250.4464607, tmp)*(x/(((x-x)>>>(x&((((x<<(x<<x))>>>((((2243036981.528562)/(((-1839328916.9411087)>>(-1907748022.162144))<<(x+x)))+((tmp = 2362574171, tmp)<<(tmp = 1987834539, tmp)))|(-444329240)))|(399451601.1717081))>>x)))&(968363335.6089249))))));
+  assertEquals(0.0030991932898194294, x /= ((tmp = 1067316540.5529796, tmp)^(-2388640366)));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>>= (tmp = -393433349.1636851, tmp));
+  assertEquals(0, x *= (((x^(((1806955787.471396)<<x)^((517668047.55566347)>>>(x%(x<<(tmp = -276586733.4844558, tmp))))))%(1661242196.1472542))|x));
+  assertEquals(0, x |= (x>>x));
+  assertEquals(-155236210, x |= (tmp = -155236210.19366312, tmp));
+  assertEquals(-606392, x >>= ((tmp = -1533446042.97781, tmp)^x));
+  assertEquals(-1, x >>= (936126810));
+  assertEquals(2325115611, x -= (-2325115612));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>= (tmp = -354826623, tmp));
+  assertEquals(-0, x *= (-1232528947.7321298));
+  assertEquals(0, x |= x);
+  assertEquals(0, x <<= (((tmp = 187758893.4254812, tmp)&(x-(tmp = 648201576, tmp)))&(385106597)));
+  assertEquals(0, x >>= (tmp = 2554891961, tmp));
+  assertEquals(-1311492611.2970417, x += (-1311492611.2970417));
+  assertEquals(-688179220.3221785, x += (623313390.9748632));
+  assertEquals(1416835528, x &= (tmp = 1953739224, tmp));
+  assertEquals(-11.04719252755072, x /= (-128252995));
+  assertEquals(-6.287413042114223e-9, x /= (tmp = 1757033052.1558928, tmp));
+  assertEquals(-4231171, x |= (((((2022730885.7773404)*((-2495777565.221855)|(tmp = 274627292, tmp)))<<(-3072596920.4902725))>>>((-2215057529)+(-1134713759.4247034)))^((tmp = -1888181788, tmp)/(572025985.2748461))));
+  assertEquals(-4194305, x |= ((tmp = 167328318.038759, tmp)>>>(153800904.34551537)));
+  assertEquals(-1316525687, x -= (1312331382));
+  assertEquals(1448723245.7863903, x += (2765248932.7863903));
+  assertEquals(1.7219707102205526, x /= (tmp = 841317008, tmp));
+  assertEquals(1872027792.5217001, x *= (x|(tmp = 1087142645.6665378, tmp)));
+  assertEquals(3504488055973669400, x *= x);
+  assertEquals(-1075254784, x |= x);
+  assertEquals(-5, x >>= (((844461331.8957539)-((x&x)<<((tmp = 1443904777, tmp)+(tmp = 736164505.3670597, tmp))))-(((tmp = 1348422110, tmp)>>((tmp = -2878252514, tmp)/(-1175443113)))|((-2138724317)%(2057081133)))));
+  assertEquals(-3.038875804165675e-9, x /= (1645345292.8698258));
+  assertEquals(1.25204541454491e-18, x /= (-2427129055.274914));
+  assertEquals(-1.7151576137235622e-9, x *= (-1369884505.6247284));
+  assertEquals(1590804618, x ^= (1590804618.4910607));
+  assertEquals(5061318665300252000, x *= (x+x));
+  assertEquals(5061318665300252000, x %= ((tmp = 1102144242, tmp)*x));
+  assertEquals(-7, x >>= (2772167516.624264));
+  assertEquals(16383, x >>>= (-2979259214.5855684));
+  assertEquals(47108415435, x *= ((2944456517.839616)>>>(1041288554.5330646)));
+  assertEquals(61, x >>>= (x^(((-1305163705)<<((948566605)-x))-x)));
+  assertEquals(0, x %= x);
+  assertEquals(0, x ^= (((tmp = 1918861879.3521824, tmp)/((x%(tmp = 945292773.7188392, tmp))%(x|x)))>>x));
+  assertEquals(-0, x *= ((((x|((2810775287)|(tmp = 1265530406, tmp)))^((tmp = 3198912504.175658, tmp)-(((tmp = 1422607729.281712, tmp)<<(tmp = 2969836271.8682737, tmp))&x)))<<((tmp = 844656612, tmp)*(((((tmp = -828311659, tmp)%(((-2083870654)>>>(x^(((((933133782)-(tmp = 1033670745, tmp))-(629026895.4391923))%((-605095673.8097742)*((((-227510375.38460112)*x)+x)&(((((tmp = 472873752.68609154, tmp)^(tmp = 2815407038.712165, tmp))+((x>>>((tmp = -1331030665.3510115, tmp)>>>(2281234581)))-(x>>>x)))&(tmp = -2160840573.325921, tmp))&x))))<<(tmp = 1411888595, tmp))))|(((tmp = -915703839.0444739, tmp)/((x+(418836101.8158506))%(-1112605325.4404268)))&((-3098311830.6721926)-x))))-((49446671.477988124)*(-2522433127)))+((tmp = 443068797, tmp)>>(tmp = 418030554.97275746, tmp)))*((tmp = 38931296.738208175, tmp)+(1842742215.3282685)))))-((tmp = 1325672181.205841, tmp)^(tmp = 669284428, tmp))));
+  assertEquals(-0, x *= (tmp = 93843030, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>>= (x%((((((tmp = -107458601, tmp)>>(x*((x|((tmp = 2117286494, tmp)>>((x^(tmp = 114214295.42048478, tmp))>>>(tmp = 1032826615, tmp))))&((x*x)&(-225386977.67686415)))))^((-780566702.5911419)+(-1113319771)))|(((x^x)<<(1288064444))>>(-2292704291.619477)))>>(365125945))-((tmp = -1986270727.235776, tmp)/x))));
+  assertEquals(-0, x *= (((-18925517.67125845)|((((-1975220517)+(tmp = -1250070128.296064, tmp))+(1085931410.5895243))<<(((x|(((x*(tmp = 160207581.50536323, tmp))|(tmp = 1798744469.7958293, tmp))-x))>>>(((x+((x%x)&((((x^x)<<((tmp = 2538012074.623554, tmp)^x))*x)&x)))/(x+(tmp = -2563837407, tmp)))/(tmp = 2189564730, tmp)))/(((-1703793330.5770798)<<((176432492)|x))<<(1347017755.345185)))))<<(((tmp = -577100582.7258489, tmp)&x)/(-31246973))));
+  assertEquals(0, x >>>= x);
+  assertEquals(NaN, x %= ((x*(tmp = 1167625971, tmp))&(((tmp = -770445060, tmp)>>((339248786)^((2058689781.2387645)-((-2381162024)*(660448066)))))&x)));
+  assertEquals(NaN, x += ((3088519732.515986)-(-267270786.06493092)));
+  assertEquals(0, x &= (tmp = 2748768426.3393354, tmp));
+  assertEquals(-1109969306, x ^= ((-1109969306)>>>x));
+  assertEquals(-1109969306, x %= (tmp = 1150376563.581773, tmp));
+  assertEquals(-2058145178, x &= (-2057586057));
+  assertEquals(-850185626, x |= ((x^(tmp = 1223093422, tmp))&((-589909669)<<(2299786170))));
+  assertEquals(1489215443, x += (2339401069));
+  assertEquals(-23592960, x <<= x);
+  assertEquals(2063937322, x ^= (-2053296342.2317986));
+  assertEquals(12922122, x %= (x^((-2259987830)>>(x*(((tmp = -799867804.7716949, tmp)&(tmp = -1068744142, tmp))*(((((1091932754.8596292)-((tmp = -1778727010, tmp)>>(((tmp = 1207737073.2689717, tmp)-(x-(tmp = -1191958946, tmp)))+(-631801383.7488799))))-(-618332177))>>>(-156558558))>>>(3032101547.6262517)))))));
+  assertEquals(12922122, x &= x);
+  assertEquals(Infinity, x /= (x%x));
+  assertEquals(0, x &= (x*(-227800722.62070823)));
+  assertEquals(-865648691, x ^= (-865648691));
+  assertEquals(1, x /= (x%(tmp = 1524739353.8907173, tmp)));
+  assertEquals(16, x <<= (x<<(2335214658.789205)));
+  assertEquals(0, x &= ((tmp = 570332368.1239192, tmp)^(-2278439501)));
+  assertEquals(1881145344, x -= (((-569715735.8853142)+(2093355159))<<(tmp = 2788920949, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(NaN, x -= ((tmp = -1427789954, tmp)%((((((411038329.49866784)-x)-(x<<((-1330832247)+x)))/x)^((x*(845763550.2134092))>>(tmp = 1427987604.5938706, tmp)))>>>(1857667535))));
+  assertEquals(NaN, x /= (-313793473));
+  assertEquals(0, x >>>= (x/x));
+  assertEquals(1869358566, x -= (-1869358566));
+  assertEquals(-1901664519209545200, x += ((tmp = 944729941.3936644, tmp)*(-2012918653)));
+  assertEquals(-1901664519209545200, x += ((tmp = 1348246793, tmp)/(x&x)));
+  assertEquals(-1576791552, x &= (tmp = 2719250966.739456, tmp));
+  assertEquals(-305087899, x ^= (-2955630491.030272));
+  assertEquals(0, x ^= (x%(1575252839.559443)));
+  assertEquals(4184604407, x += ((((tmp = -244720076.17657042, tmp)|(2819320515))^((((tmp = 1222623743.9184055, tmp)*(-95662379.577173))/(x/(x+(((x-(tmp = -3024718107.6310973, tmp))^(-1494390781))&(tmp = 2284054218.8323536, tmp)))))>>>(tmp = 2090069761, tmp)))>>>(x%x)));
+  assertEquals(3148907440, x -= (((tmp = -332379100.7695112, tmp)-(-1145399547))^(((((((tmp = 3133792677.785844, tmp)+x)<<(2306999139.5799255))>>((tmp = -2051266106, tmp)*(((((x+(((-728654312.8954825)>>(x>>>(((x%x)&(-1587152364))|(((((-2114138294)&x)&(1547554688))^x)-(-1856094268)))))*(((-1135018784)&((x+(tmp = -1444020289, tmp))|x))+x)))>>x)&x)/(2449005489))<<((131073798.64314616)%(x>>>((-2592101383.2205048)^(tmp = -757096673.0381112, tmp)))))))^(2766467316.8307915))-(-2465892914.515834))-((((tmp = 234064056, tmp)^((x>>>(1622627548.7944543))+(-1750474146)))|(-1959662039.4687617))^((-1222880974)&(-2794536175.906498))))));
+  assertEquals(-1157627488, x &= (-1156639323));
+  assertEquals(-1342170624, x <<= ((x/((((1829945345.0613894)/(x*((tmp = 1278865203.0854595, tmp)/(((tmp = -2298274086.519347, tmp)+(tmp = -545203761, tmp))-(tmp = 2712195820, tmp)))))>>>((tmp = 240870798.9384452, tmp)-(tmp = -3188865300.4768195, tmp)))>>>(x%((648799266)>>>(tmp = 24460403.864815235, tmp)))))|((tmp = 232533924, tmp)|x)));
+  assertEquals(-2684341248, x += x);
+  assertEquals(1073755136, x &= (((-662718514.9245079)>>(tmp = -1915462105, tmp))+(tmp = 1478850441.8689613, tmp)));
+  assertEquals(-1073755136, x /= (x|((tmp = -1767915185, tmp)|((325827419.1430224)|(((-1343423676)|(tmp = -1929549501, tmp))|(-866933068.9585254))))));
+  assertEquals(-1073755136, x %= ((tmp = 547342356, tmp)-((tmp = 2213249646.7047653, tmp)-((((((-2463314705)^(tmp = -993331620, tmp))^(((x%x)>>(tmp = 1798026491.3658786, tmp))-(((1024072781)/(tmp = -2407354455, tmp))%(1973295010))))<<(-1966787233))^x)|(-1787730004)))));
+  assertEquals(-1073754452, x |= (tmp = 3099823788.077907, tmp));
+  assertEquals(-1540683096, x &= (-1540674632.7013893));
+  assertEquals(-1540683052, x ^= ((tmp = -126183090, tmp)>>>((-622437575.5788481)|((((tmp = -2947914022, tmp)%(((tmp = 2512586745, tmp)>>x)>>>((27238232.23677671)/(tmp = 3203958551, tmp))))/(tmp = 2906005721.402535, tmp))^((((tmp = 1763897860.737334, tmp)^(1445562340.2485332))/x)+(-2393501217.716533))))));
+  assertEquals(-1258599433, x |= (tmp = 351291767.59661686, tmp));
+  assertEquals(-1241560065, x |= (626346046.5083935));
+  assertEquals(-1241560065, x ^= ((2263372092)/((tmp = -2868907862, tmp)>>>x)));
+  assertEquals(-893685228, x -= (tmp = -347874837, tmp));
+  assertEquals(3401282068, x >>>= (x*x));
+  assertEquals(0, x %= x);
+  assertEquals(0, x >>>= x);
+  assertEquals(-2079237393, x ^= (tmp = 2215729903, tmp));
+  assertEquals(NaN, x %= ((((tmp = 3203450436, tmp)/(2867575150.6528325))&(1864945829))&((x&((((tmp = -1927086741.3438427, tmp)|x)|(-1783290909.3240588))*((-1074778499.0697656)*(x-((tmp = -848983542.8456669, tmp)^(tmp = -1324673961, tmp))))))>>(tmp = -2144580304.245896, tmp))));
+  assertEquals(-43334009, x |= (x^(-43334009.72683525)));
+  assertEquals(-43334009, x &= x);
+  assertEquals(-43334009, x %= (tmp = 1252450645.060542, tmp));
+  assertEquals(-43334009, x |= (((((((tmp = 968062202, tmp)/(x|(tmp = 2766801984, tmp)))*((2173353793.938968)>>(((tmp = -2459317247, tmp)<<(tmp = -2333601397, tmp))>>>((tmp = -578254251.8969193, tmp)*(tmp = 839964110.7893236, tmp)))))&(((1675305119)&(tmp = -929153707, tmp))*((x*x)*x)))/x)|(x/(tmp = 384740559.43867135, tmp)))%(1657362591)));
+  assertEquals(0, x -= x);
+  assertEquals(0, x %= (-1334758781.1087842));
+  assertEquals(0, x -= x);
+  assertEquals(-54, x += ((tmp = -1787151355.470972, tmp)>>((tmp = 237028977, tmp)>>(((2829473542)<<(x>>>(((((((x-(-1950724753))*(((x>>>(2807353513.6283565))<<((-583810779.1155353)>>(x*x)))>>(-1068513265)))^(x^(-696263908.5131407)))%(((tmp = -1325619399, tmp)<<((tmp = -1030194450, tmp)-x))^x))+((-2852768585.3718724)>>(tmp = -3160022361, tmp)))%(x&x))>>(tmp = 2667222702.5454206, tmp))))+((804998368.8915854)<<x)))));
+  assertEquals(-54, x %= (-1601267268.4306633));
+  assertEquals(1, x >>>= (tmp = -543199585.579128, tmp));
+  assertEquals(4.732914708226396e-10, x /= (tmp = 2112862922, tmp));
+  assertEquals(-4266932650, x -= ((((x^((((tmp = 2784618443, tmp)^(tmp = -2271260297.9010153, tmp))|((((tmp = -599752639.7516592, tmp)*(2751967680.3680997))^(tmp = -1478450055.578217, tmp))*x))-x))&((tmp = -520061982, tmp)-((tmp = 1400176711.9637299, tmp)^(((2100417541)|(x+(tmp = -674592897.0420957, tmp)))>>x))))^(tmp = -365650686.7947228, tmp))>>>((-2943521813)&(((tmp = -1888789582, tmp)>>(tmp = 700459655.488978, tmp))+(tmp = -1725725703.655931, tmp)))));
+  assertEquals(224277168, x <<= (tmp = 2885115011.8229475, tmp));
+  assertEquals(224277168, x %= (tmp = -2655345206.442777, tmp));
+  assertEquals(850395136, x <<= (x-(((((-769868538.1729524)/((tmp = -298603579, tmp)%(x^x)))+((2691475692)|(((x>>>(628995710.4745524))^(x<<(((tmp = -1046054749, tmp)|(919868171))-x)))^((-1377678789.8170452)&((3065147797)%(tmp = 2638804433, tmp))))))^(tmp = -2036295169, tmp))&(((tmp = -157844758.08476114, tmp)*(tmp = -2819601496, tmp))&((((tmp = 78921441, tmp)<<(653551762.5197772))/(1801316098))*(-1479268961.8276927))))));
+  assertEquals(1645565728, x ^= (tmp = 1353013024, tmp));
+  assertEquals(1645565728, x >>>= x);
+  assertEquals(3020513544, x += (1374947816));
+  assertEquals(0, x %= x);
+  assertEquals(0, x %= ((((((tmp = -304228072.4115715, tmp)>>>((-90523260.45975709)-(tmp = -3013349171.084838, tmp)))%((-1640997281)*((tmp = -1600634553, tmp)%((tmp = 557387864, tmp)<<((888796080.766409)|(x^((((x%(((((tmp = 1164377954.1041703, tmp)*x)|(2742407432.192806))&((tmp = 1707928950, tmp)<<(1279554132.4481683)))+(tmp = -2108725405.7752397, tmp)))%(tmp = -465060827, tmp))^((tmp = 2422773793, tmp)+x))^((((((((tmp = -1755376249, tmp)^((-267446806)^x))/(((tmp = -1808578662.4939392, tmp)+((tmp = -1997100217, tmp)+x))+(((tmp = -2469853122.411479, tmp)/x)>>(tmp = 660624616.7956645, tmp))))%((x<<((((((tmp = -1701946558, tmp)-(tmp = 133302235, tmp))>>>x)/(738231394))<<(-1060468151.4959564))&(((((-1877380837.4678264)|(tmp = 2366186363, tmp))%x)>>>(-2382914822.1745577))>>((-1874291848.9775913)<<(tmp = 2522973186, tmp)))))<<(-2672141993)))|(tmp = 732379966, tmp))%x)^x)^x))))))))%(tmp = 2385998902.7287374, tmp))*x)+(tmp = -2195749866.017106, tmp)));
+  assertEquals(401488, x ^= (((-320896627)>>>(tmp = 2812780333.9572906, tmp))&(tmp = -2088849328, tmp)));
+  assertEquals(-1661116571.0046256, x += (tmp = -1661518059.0046256, tmp));
+  assertEquals(-1616122720, x <<= x);
+  assertEquals(-1616122720, x >>= x);
+  assertEquals(-390439413, x %= (tmp = -1225683307, tmp));
+  assertEquals(-84189205, x |= ((x|(2054757858))^(((x<<(((x|x)|(((x>>>((-2938303938.1397676)<<((2993545056)^((tmp = -643895708.5427527, tmp)/((1371449825.5345795)-(1896270238.695752))))))-(tmp = 1061837650, tmp))+(x+(tmp = 3072396681, tmp))))>>(x-((((tmp = -1877865355.1550744, tmp)&x)%(-2766344937))>>>(2055121782)))))-((x<<x)|(tmp = -2742351880.1974454, tmp)))<<((-2600270279.219802)>>(-1625612979)))));
+  assertEquals(-168378410, x += x);
+  assertEquals(-168378410, x &= x);
+  assertEquals(-1534983792, x &= (-1501412943));
+  assertEquals(-1821543761, x ^= (938439487));
+  assertEquals(-1821543761, x &= (x^(((tmp = -4237854, tmp)>>x)/x)));
+  assertEquals(2358, x >>>= (2954252724.620632));
+  assertEquals(4716, x <<= ((-75522382.8757689)/((tmp = 1074334479, tmp)|((tmp = -720387522, tmp)>>(x>>>(-3085295162.6877327))))));
+  assertEquals(-1313079316, x |= (2981887904.020387));
+  assertEquals(-1957790646, x -= (644711330));
+  assertEquals(17831, x >>>= ((tmp = -2550108342, tmp)-(((tmp = 454671414.0146706, tmp)+(-661129693.9333956))>>(x>>>(((tmp = 1752959432.3473055, tmp)*(-2619510342.1812334))%(tmp = -456773274.2411971, tmp))))));
+  assertEquals(689287937.6879716, x -= ((tmp = -397126863.6879716, tmp)-(((x>>x)^(x/(-1387467129.6278908)))|((x>>((tmp = -2361114214.8413954, tmp)<<(tmp = -805670024.4717407, tmp)))<<(-2724018098)))));
+  assertEquals(1378575875.3759432, x += x);
+  assertEquals(84112428460187.8, x *= (((((2681425112.3513584)%(tmp = -1757945333, tmp))|x)>>(-1793353713.0003397))%x));
+  assertEquals(-3221, x >>= (-1976874128));
+  assertEquals(-3221, x %= (((tmp = 2318583056.834932, tmp)|((tmp = -1016115125, tmp)+((-472566636.32567954)+x)))|(tmp = 3135899138.065598, tmp)));
+  assertEquals(-6596608, x <<= x);
+  assertEquals(-1249902592, x <<= (((tmp = -2025951709.5051148, tmp)/((-465639441)<<(-2273423897.9682302)))*((tmp = -2408892408.0294642, tmp)-(tmp = 1017739741, tmp))));
+  assertEquals(73802092170444800, x *= (tmp = -59046275, tmp));
+  assertEquals(-1619001344, x <<= x);
+  assertEquals(0, x <<= (tmp = 1610670303, tmp));
+  assertEquals(-0, x *= ((((x+(tmp = 2039867675, tmp))|(tmp = 399355061, tmp))<<(1552355369.313559))^x));
+  assertEquals(0, x *= x);
+  assertEquals(0, x >>>= (((2875576018.0610805)>>x)%(tmp = -2600467554, tmp)));
+  assertEquals(2290405226.139538, x -= (-2290405226.139538));
+  assertEquals(0, x %= x);
+  assertEquals(0, x ^= (((tmp = 2542309844.485515, tmp)-x)%((-2950029429.0027323)/(tmp = 2943628481, tmp))));
+  assertEquals(0, x += x);
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>>= (tmp = 2337330038, tmp));
+  assertEquals(0, x += (x/(((292272669.0808271)&(tmp = 2923699026.224247, tmp))^(tmp = 367745855, tmp))));
+  assertEquals(0, x &= x);
+  assertEquals(0, x %= ((tmp = 1565155613.3644123, tmp)<<(-308403859.5844681)));
+  assertEquals(-1845345399.3731332, x += (tmp = -1845345399.3731332, tmp));
+  assertEquals(5158590659731951000, x *= (-2795460763.8680177));
+  assertEquals(-364664, x >>= (1837745292.5701954));
+  assertEquals(1, x /= x);
+  assertEquals(-860616114.8182092, x += ((tmp = 2076961323.1817908, tmp)+(-2937577439)));
+  assertEquals(-860616115, x ^= ((x*(tmp = 2841422442.583121, tmp))>>>((tmp = 1929082917.9039137, tmp)>>(-2602087246.7521305))));
+  assertEquals(-38387843, x |= (3114677624));
+  assertEquals(2927507837, x += (tmp = 2965895680, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(-1792887531, x *= (-1792887531));
+  assertEquals(-0, x %= ((x^x)+x));
+  assertEquals(-0, x %= (tmp = 2800752702.562547, tmp));
+  assertEquals(1384510548, x ^= (tmp = 1384510548, tmp));
+  assertEquals(42251, x >>= (1645421551.363844));
+  assertEquals(0, x >>>= (17537561));
+  assertEquals(-2076742862, x ^= (tmp = 2218224434, tmp));
+  assertEquals(-2.790313825067623, x /= (744268563.3934636));
+  assertEquals(5313538, x &= (((((tmp = -2406579239.0691676, tmp)+((-1470174628)+(((tmp = -783981599, tmp)<<(tmp = -1789801141.272646, tmp))^(((((((tmp = -844643189.5616491, tmp)&(tmp = -252337862, tmp))&(x|x))%((-3159642145.7728815)+(tmp = 2149920003.9525595, tmp)))&(x>>(1737589807.9431858)))-((((((((1610161800)<<(497024994))>>x)<<x)/x)>>>x)&x)-(757420763.2141517)))-(tmp = -3061016994.9596977, tmp)))))/(tmp = 1810041920.4089384, tmp))&(tmp = 5887654.786785364, tmp))&((tmp = 1626414403.2432103, tmp)+(x%x))));
+  assertEquals(-2147483648, x <<= (tmp = 1304102366.8011155, tmp));
+  assertEquals(-208418816, x %= (((((-2850404799)*(x+(3158771063.226051)))*(-2017465205))/(x>>x))>>(x%(tmp = 2760203322, tmp))));
+  assertEquals(-2189223477, x -= (1980804661));
+  assertEquals(-859239912, x ^= (tmp = 2974421971.3544703, tmp));
+  assertEquals(-1599850415, x ^= (tmp = -2475871671.140151, tmp));
+  assertEquals(-1600636847, x += ((((tmp = -1311002944, tmp)<<((tmp = -1137871342, tmp)<<(tmp = 115719116, tmp)))/(413107255.6242596))<<(x>>((((-1908022173)&(((-1519897333)^((x>>(x*(tmp = -2886087774.426503, tmp)))*(tmp = 530910975, tmp)))+(-2579617265.889692)))+((2518127437.127563)>>>((tmp = 481642471.56441486, tmp)>>>(792447239))))^(x<<(248857393.6819017))))));
+  assertEquals(-191, x >>= (-1591265193));
+  assertEquals(-192.27421813247196, x += ((tmp = 2627329028.207775, tmp)/(tmp = -2061914644.9523563, tmp)));
+  assertEquals(1230613220, x ^= (tmp = 3064354212.307105, tmp));
+  assertEquals(1230613220, x &= x);
+  assertEquals(1230613220, x %= (1833479205.1064768));
+  assertEquals(1230613220, x >>>= ((((1559450742.1425748)|((2151905260.956583)*(1213275165)))%(514723483.12764716))>>>x));
+  assertEquals(1230613493, x |= ((((3004939197.578903)*(tmp = -576274956, tmp))+((tmp = 1037832416.2243971, tmp)^x))>>>(tmp = 2273969109.7735467, tmp)));
+  assertEquals(2461226986, x += x);
+  assertEquals(-27981, x >>= ((692831755.8048055)^((tmp = -1593598757, tmp)%(x-((((-1470536513.882593)|((tmp = -2716394020.466401, tmp)|(tmp = 2399097686, tmp)))&x)%x)))));
+  assertEquals(-1.4660454948034359e+23, x *= (((x>>>((((((tmp = -3056016696, tmp)<<(-2882888332))*(2041143608.321916))&(((tmp = -634710040, tmp)|(tmp = -2559412457, tmp))>>(1916553549.7552106)))%((-2150969350.3643866)*x))<<((x*(tmp = 2657960438.247278, tmp))|x)))%((tmp = 526041379, tmp)*(tmp = 2514771352.4509397, tmp)))*(1219908294.8107886)));
+  assertEquals(-1.4660454948034359e+23, x -= ((1709004428)>>(((x|(-422745730.626189))%x)>>x)));
+  assertEquals(-2247766068, x %= (-3105435508));
+  assertEquals(-386845856.0649812, x -= (-1860920211.9350188));
+  assertEquals(-386846803.0649812, x -= ((((-3214465921)|((tmp = -1326329034, tmp)+(((tmp = -1203188938.9833462, tmp)%((((((-1318276502)+(x+x))^((x<<x)%(x>>>x)))+(tmp = -439689881, tmp))+((-1455448168.695214)^(x-((-388589993)>>((((940252202)^(-2218777278))|x)/(tmp = -1007511556, tmp))))))&(-140407706.28176737)))-(x/((888903270.7746506)-((tmp = -2885938478.632409, tmp)<<(((((tmp = -1750518830.270917, tmp)>>(((((((tmp = 868557365.7908674, tmp)/(tmp = -2805687195.5172157, tmp))*x)|((((((-1342484550)-((tmp = 1089284576, tmp)^(tmp = 120651272, tmp)))<<(tmp = 2230578669.4642825, tmp))-(x*x))%(x^(((tmp = -3177941534, tmp)+(x>>(-1595660968)))/(-1738933247))))>>>(tmp = 2860175623, tmp)))-(((2392690115.8475947)>>>(tmp = -1754609670.2068992, tmp))>>>(tmp = 2615573062, tmp)))-(tmp = 2590387730, tmp))^((x+((((x-(tmp = -2823664112.4548965, tmp))*(200070977))>>>(((x|((((tmp = 1361398, tmp)>>((tmp = 1649209268, tmp)%x))+x)+(x>>>(tmp = -2379989262.1245675, tmp))))|(x^((tmp = -647953298.7526417, tmp)-x)))&(tmp = -1881232501.1945808, tmp)))>>>x))%(x^(tmp = -1737853471.005935, tmp)))))>>>(427363558))>>>((tmp = -3076726422.0846386, tmp)^(-1518782569.1853383)))/x)))))))|x)>>>(1854299126)));
+  assertEquals(-386846803.0649812, x -= (x%x));
+  assertEquals(238532, x >>>= (-448890706.10774803));
+  assertEquals(232, x >>>= (-791593878));
+  assertEquals(232, x <<= (((x^((x-x)&(tmp = 1219114201, tmp)))/(tmp = -427332955, tmp))%(tmp = 1076283154, tmp)));
+  assertEquals(210, x ^= (x>>>((2975097430)>>>x)));
+  assertEquals(1, x /= x);
+  assertEquals(2317899531, x *= (2317899531));
+  assertEquals(1131786, x >>>= x);
+  assertEquals(2301667519.6379366, x += ((tmp = 193109669.63793683, tmp)+(tmp = 2107426064, tmp)));
+  assertEquals(3842614963.6379366, x += (((-1676516834)>>>(tmp = -1817478916.5658965, tmp))^(((tmp = 1122659711, tmp)>>>(tmp = -2190796437, tmp))|(tmp = -2754023244, tmp))));
+  assertEquals(-452352333, x &= x);
+  assertEquals(-863, x >>= x);
+  assertEquals(-3.777863669459606e-7, x /= (2284359827.424491));
+  assertEquals(-3.777863669459606e-7, x %= ((tmp = -2509759238, tmp)>>>x));
+  assertEquals(0, x <<= (-814314066.6614306));
+  assertEquals(0, x %= (tmp = 190720260, tmp));
+  assertEquals(2301702913, x += (2301702913));
+  assertEquals(-249158048, x >>= (tmp = -2392013853.302008, tmp));
+  assertEquals(-249158048, x >>= x);
+  assertEquals(-498316096, x += x);
+  assertEquals(-498316096, x %= (tmp = 2981330372.914731, tmp));
+  assertEquals(106616.2199211318, x *= (((((tmp = 1020104482.2766557, tmp)^((tmp = -416114189.96786, tmp)>>>(1844055704)))|(tmp = 1665418123, tmp))>>(1826111980.6564898))/(-2446724367)));
+  assertEquals(106616, x |= x);
+  assertEquals(1094927345, x -= (((-1229759420)|(741260479.7854375))-x));
+  assertEquals(8353, x >>= x);
+  assertEquals(0, x >>>= (tmp = -327942828, tmp));
+  assertEquals(-953397616.8888416, x += (tmp = -953397616.8888416, tmp));
+  assertEquals(-1906641240.7776833, x += (x+((-3033450184.9106326)>>>(tmp = 2090901325.5617187, tmp))));
+  assertEquals(-1906641240.7776833, x %= (tmp = 2584965124.3953505, tmp));
+  assertEquals(-1098907671, x |= (tmp = -1272590495, tmp));
+  assertEquals(-1.8305258600334393, x /= (600323489));
+  assertEquals(-1, x &= x);
+  assertEquals(-1, x |= ((x+x)-x));
+  assertEquals(1, x *= x);
+  assertEquals(867473898, x ^= (tmp = 867473899.0274491, tmp));
+  assertEquals(6, x >>>= (tmp = 1174763611.341228, tmp));
+  assertEquals(0, x >>= ((689882795)^(2250084531)));
+  assertEquals(0, x /= (tmp = 2545625607, tmp));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x += x);
+  assertEquals(0, x -= (x*(-1098372339.5157008)));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x -= (tmp = -1797344676.375759, tmp));
+  assertEquals(1121476698, x |= (tmp = 1121476698, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(1, x &= (-191233693));
+  assertEquals(330137888.92595553, x += (330137887.92595553));
+  assertEquals(-1792236714, x ^= (tmp = 2256609910, tmp));
+  assertEquals(269000724, x &= (316405813.62093115));
+  assertEquals(256, x >>= x);
+  assertEquals(256, x %= ((2556320341.54669)|(1066176021.2344948)));
+  assertEquals(256, x |= x);
+  assertEquals(131072, x <<= ((-1650561175.8467631)|x));
+  assertEquals(-286761951, x -= ((tmp = 287024095, tmp)-((-2293511421)&(x|x))));
+  assertEquals(-1561852927, x &= (3002663949.0989227));
+  assertEquals(-460778761, x %= (tmp = -550537083, tmp));
+  assertEquals(-3023749308.0492287, x += (tmp = -2562970547.0492287, tmp));
+  assertEquals(-481313332.04922867, x %= ((x|((tmp = -855929299, tmp)%((2181641323)%(x|(220607471.33018696)))))&x));
+  assertEquals(17510668, x &= (tmp = 363557663, tmp));
+  assertEquals(12552, x &= (3020225307));
+  assertEquals(1814655896, x |= ((x<<(((-1475967464)*(-3122830185))*x))+(x^(-2480340864.2661023))));
+  assertEquals(-3209124403525266400, x -= ((1146847590)*(tmp = 2798213497, tmp)));
+  assertEquals(-6418248807050533000, x += x);
+  assertEquals(1.1856589432073933e+28, x *= (-1847324681.313275));
+  assertEquals(-1238853292, x ^= (-1238853292));
+  assertEquals(-77428331, x >>= (x&((((2043976651.8514216)>>>x)^(x>>>(((tmp = -1785122464.9720652, tmp)%x)<<(1570073474.271266))))*x)));
+  assertEquals(2011, x >>>= x);
+  assertEquals(2011, x &= x);
+  assertEquals(0, x >>= (-2682377538));
+  assertEquals(-1.1367252770299785, x -= (((tmp = 2704334195.566802, tmp)/(2379056972))%((((-1764065164)*((((468315142.8822602)>>((x%(((tmp = 2537190513.506641, tmp)+((x&(x|((tmp = -947458639, tmp)^(2653736677.417406))))*((x<<((1243371170.1759553)>>>(((tmp = 1572208816, tmp)<<((tmp = 963855806.1090456, tmp)>>>x))%((-3078281718.7743487)*x))))^(-1154518374))))^(-2839738226.6314087)))^((-2865141241.190915)*(-2400659423.8207664))))>>((tmp = 32940590, tmp)/(tmp = 2917024064.570817, tmp)))+(((27601850)/(tmp = 3168834986, tmp))>>x)))+(tmp = 2528181032.600125, tmp))/(3162473952))));
+  assertEquals(-1697395408.7948515, x -= (1697395407.6581264));
+  assertEquals(1536992607912062500, x *= (tmp = -905500627.5781817, tmp));
+  assertEquals(102759872, x >>= (tmp = -707887133.4484048, tmp));
+  assertEquals(102759872, x %= (tmp = -1764067619.7913327, tmp));
+  assertEquals(12543, x >>>= (-144142995.1469829));
+  assertEquals(-2059555229.2592103, x += ((-2059555229.2592103)-x));
+  assertEquals(-537022593, x |= (tmp = -2770761410.407701, tmp));
+  assertEquals(23777505, x ^= (-560496738.6854918));
+  assertEquals(-64329014115772310, x *= ((tmp = -2729234369.198843, tmp)+x));
+  assertEquals(189083830, x ^= (tmp = 933619934, tmp));
+  assertEquals(189083830, x %= ((tmp = -2918083254, tmp)-(x|(x^(-2481479224.0329475)))));
+  assertEquals(378167660, x += x);
+  assertEquals(-0.45833387791900504, x /= ((tmp = 2727991875.241294, tmp)<<(tmp = 2570034571.9084663, tmp)));
+  assertEquals(0, x <<= x);
+  assertEquals(-0, x /= (tmp = -67528553.30662966, tmp));
+  assertEquals(0, x <<= (938440044.3983492));
+  assertEquals(-945479171, x ^= (tmp = -945479171, tmp));
+  assertEquals(-225632619284361200, x *= (238643670.00884593));
+  assertEquals(-0, x %= x);
+  assertEquals(-585826304, x ^= ((-1256265560)<<(tmp = 1144713549, tmp)));
+  assertEquals(-671583855, x ^= (183333265.1468178));
+  assertEquals(-484311040, x <<= x);
+  assertEquals(-3969762.62295082, x /= ((((tmp = -1164308668.931008, tmp)-x)%x)>>>(((397816647)>>(-1605343671.4070785))<<x)));
+  assertEquals(758097879, x ^= ((tmp = -2871307491, tmp)^(-2043176492.646442)));
+  assertEquals(0, x *= ((x>>(tmp = 1983292927, tmp))&(tmp = -860505131.4484091, tmp)));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x &= x);
+  assertEquals(0, x %= ((3132981707)-(-2832016477)));
+  assertEquals(0, x >>= (x<<((1830195133.0342631)>>>(tmp = -1003969250, tmp))));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x += (tmp = 273271019.87603223, tmp));
+  assertEquals(NaN, x += (625749326.1155348));
+  assertEquals(0, x >>= (tmp = -531039433.3702333, tmp));
+  assertEquals(0, x -= (((tmp = 2029464099, tmp)-(x-(tmp = -329058111.411458, tmp)))*(x<<x)));
+  assertEquals(-0, x *= ((-1112957170.5613296)|((tmp = 847344494, tmp)>>>(tmp = 2735119927, tmp))));
+  assertEquals(-0, x /= (tmp = 544636506, tmp));
+  assertEquals(0, x >>>= (x^(545093699)));
+  assertEquals(0, x %= (((tmp = -2208409647.5052004, tmp)+(3083455385.374988))+(((-482178732.7077277)*x)>>>((2661060565)*(-2125201239)))));
+  assertEquals(0, x >>>= (-212334007.34016395));
+  assertEquals(0.7004300865203454, x -= ((2032883941)/(-2902336693.0154715)));
+  assertEquals(0, x <<= (x<<((265868133.50175047)>>>(1162631094))));
+  assertEquals(604920272.4394834, x -= (-604920272.4394834));
+  assertEquals(604920272, x &= x);
+  assertEquals(0, x <<= (((-1961880051.1127694)%(tmp = 1715021796, tmp))|((tmp = 2474759639.4587016, tmp)|(243416152.55635))));
+  assertEquals(-46419074, x |= (((tmp = -518945938.5238774, tmp)%((x+(tmp = 242636408, tmp))+(-1974062910)))|(1546269242.0259726)));
+  assertEquals(-46419074, x += ((-629802130)*((tmp = -658144149, tmp)%((-905005358.5370393)>>>x))));
+  assertEquals(-46419074, x |= (x%(-1103652494)));
+  assertEquals(7892881050983985, x *= (-170035297.36469936));
+  assertEquals(1105701997.4273424, x %= ((((-490612260.0023911)>>>(tmp = 1803426906, tmp))^(x%(2725270344.2568116)))-(1010563167.8934317)));
+  assertEquals(1088619532, x &= (-2232199650));
+  assertEquals(1073807364, x &= (-888024506.5008001));
+  assertEquals(1153062254980628500, x *= x);
+  assertEquals(1153062255703627000, x -= (tmp = -722998613.897227, tmp));
+  assertEquals(-1141418584, x |= (3017232552.4814596));
+  assertEquals(-373464140, x ^= (-2914372068));
+  assertEquals(994050048, x <<= x);
+  assertEquals(0, x ^= x);
+  assertEquals(0, x &= (tmp = -3166402389, tmp));
+  assertEquals(0, x &= ((-1760842506.337213)|(tmp = 2538748127.795164, tmp)));
+  assertEquals(-0, x /= (-2635127769.808626));
+  assertEquals(0, x &= ((((tmp = 1414701581, tmp)^(((2425608769)/((x<<x)^(x-x)))^((tmp = -2641946468.737288, tmp)|(tmp = -313564549.1754241, tmp))))*(tmp = -2126027460, tmp))|(-2255015479)));
+  assertEquals(225482894, x ^= (225482894.8767246));
+  assertEquals(0, x ^= x);
+  assertEquals(306216231, x += (tmp = 306216231, tmp));
+  assertEquals(306216231, x -= ((-465875275.19848967)&((-806775661.4260025)/((((-184966089.49763203)>>>((x>>x)+((tmp = -1951107532, tmp)|x)))%x)*((2704859526.4047284)%((x*x)>>x))))));
+  assertEquals(30754, x &= (1706162402.033193));
+  assertEquals(30454.010307602264, x -= (((590456519)>>>(tmp = 2713582726.8181214, tmp))/x));
+  assertEquals(8419062, x |= ((2848886788)<<(tmp = 2993383029.402275, tmp)));
+  assertEquals(16, x >>= (tmp = -1651287021, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(-1407643485, x ^= (-1407643486));
+  assertEquals(2, x >>>= (-1126004674));
+  assertEquals(470812081, x ^= ((-2411718964)>>>x));
+  assertEquals(550443688.6407901, x += (tmp = 79631607.6407901, tmp));
+  assertEquals(3669092443.64079, x -= (-3118648755));
+  assertEquals(-625874853, x <<= (((tmp = -1640437346, tmp)/(((x*x)>>>x)<<x))/x));
+  assertEquals(-1431439050363516700, x *= (2287101077));
+  assertEquals(-1921660672, x |= ((((((((-1912249689.9978154)&(-1676922742.5343294))*(2625527768))<<((820676465)^(((x+(tmp = -852743692, tmp))&((x-((((1361714551)/(311531668))>>>(tmp = -1330495518.8175917, tmp))<<(((tmp = 1369938417.8760853, tmp)*(-1217947853.8942266))<<(-2048029668))))-(-513455284)))>>>(tmp = 1980267333.6201067, tmp))))<<(((1503464217.2901971)>>(tmp = 2258265389, tmp))>>>(1868451148)))&(x-(x^(tmp = -1565209787, tmp))))*x)<<(tmp = -2426550685, tmp)));
+  assertEquals(-1921660672, x %= (((tmp = 523950472.3315773, tmp)+(((2971865706)^x)-x))&(-1773969177)));
+  assertEquals(420176973.1169958, x += (2341837645.116996));
+  assertEquals(420176973, x >>>= (((tmp = -2485489141, tmp)<<((tmp = -2520928568.360244, tmp)+x))&(543950045.0932506)));
+  assertEquals(50, x ^= (x|((tmp = 2001660699.5898843, tmp)>>>(tmp = 1209151128, tmp))));
+  assertEquals(138212770720.96973, x *= (2764255414.4193945));
+  assertEquals(-28683, x |= (((-535647551)|x)>>((((2065261509)>>(-354214733))*x)+(-3218217378.2592907))));
+  assertEquals(1627048838, x ^= (tmp = -1627044749, tmp));
+  assertEquals(-839408795, x ^= (2903337187.480303));
+  assertEquals(-1000652427, x += (tmp = -161243632, tmp));
+  assertEquals(740237908.4196916, x += ((tmp = 1587000348, tmp)+(tmp = 153889987.41969144, tmp)));
+  assertEquals(Infinity, x /= (((((-615607376.1012697)&(57343184.023578644))+((-1967741575)|(-3082318496)))<<(((tmp = -958212971.99792, tmp)>>(tmp = 2962656321.3519197, tmp))-(x|(x*(969365195)))))<<(tmp = -1739470562.344624, tmp)));
+  assertEquals(-Infinity, x /= ((tmp = -1736849852, tmp)%x));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x %= (tmp = -226505646, tmp));
+  assertEquals(1982856549, x -= (((x+(-1982856549))%(-2274946222))>>(x%(((tmp = -1289577208.9097936, tmp)>>x)^(778147661)))));
+  assertEquals(1648018703, x ^= ((3085618856)+((tmp = 1546283467, tmp)&(((x|((-2376306530)*(((((((tmp = -2807616416, tmp)%(((((tmp = 347097983.1491085, tmp)<<x)|(((((1135380667)/(x>>>(tmp = 1679395106, tmp)))^((1277761947)<<((tmp = -1614841203.5244312, tmp)>>x)))%((tmp = 1552249234.2065845, tmp)>>>x))>>>(tmp = -1677859287, tmp)))>>>(2605907565))/(tmp = 2291657422.221277, tmp)))%(((tmp = 425501732.6666014, tmp)>>>(1327403879.455553))+x))>>((tmp = -3075752653.2474413, tmp)&(x-(tmp = -71834630, tmp))))|((((2532199449.6500597)*(-842197612.4577162))%x)>>x))*(((1220047194.5100307)<<((tmp = 1642962251, tmp)<<((-662340)>>>((tmp = -1672316631.3251066, tmp)<<((tmp = 1762690952.542441, tmp)-(x/(1904755683.3277364)))))))>>x))|(((((tmp = 1625817700.7052522, tmp)%(tmp = -2990984460, tmp))|(2395645662))-((2619930607.550086)>>x))^(tmp = 130618712, tmp)))))&((-3142462204.4628367)/(1078126534.8819227)))%(((tmp = -256343715.2267704, tmp)+x)^(tmp = 2009243755, tmp))))));
+  assertEquals(1937698223, x |= (((tmp = 866354374.7435778, tmp)+(tmp = 2751925259.3264275, tmp))%(-2252220455)));
+  assertEquals(0, x -= x);
+  assertEquals(-823946290.6515498, x -= (tmp = 823946290.6515498, tmp));
+  assertEquals(706970324, x ^= (-457174758));
+  assertEquals(32916, x &= (25740724));
+  assertEquals(0, x >>>= ((-1658933418.6445677)|(tmp = -846929510.4794133, tmp)));
+  assertEquals(0, x ^= ((-834208600)/((-1256752740)&(tmp = 1973248337.8973258, tmp))));
+  assertEquals(-1639195806, x += (-1639195806));
+  assertEquals(-1559416478, x ^= ((tmp = 1349893449.0193534, tmp)*(tmp = 2044785568.1713037, tmp)));
+  assertEquals(0, x &= ((x>>(tmp = 1720833612, tmp))/((x+(-1305879952.5854573))^x)));
+  assertEquals(-0, x *= (tmp = -1713182743, tmp));
+  assertEquals(0, x >>= x);
+  assertEquals(NaN, x /= (((x%((x>>>(((-1515761763.5499895)^(-3076528507.626539))<<(tmp = 1293944457.8983147, tmp)))<<(tmp = 276867491.8483894, tmp)))>>(tmp = -2831726496.6887417, tmp))%((((tmp = 1780632637.3666987, tmp)^x)%((208921173.18897665)>>(tmp = 633138136, tmp)))+x)));
+  assertEquals(0, x >>= (tmp = -2755513767.0561147, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(840992300.0324914, x -= ((-840992300.0324914)+x));
+  assertEquals(840992300, x &= x);
+  assertEquals(-1094140277, x ^= (2364029095));
+  assertEquals(-Infinity, x /= ((((((1257084956)<<(2009241695))>>(x+x))*x)>>>x)>>>(205318919.85870552)));
+  assertEquals(-Infinity, x -= (((x>>>(tmp = 3037168809.20163, tmp))&x)*(x&(((806151109)*x)-(tmp = -1741679480.58333, tmp)))));
+  assertEquals(400659949, x ^= (tmp = 400659949, tmp));
+  assertEquals(5, x >>= (tmp = 1175519290, tmp));
+  assertEquals(5, x |= x);
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>= ((1317772443)&(x<<x)));
+  assertEquals(-1123981819, x ^= (tmp = 3170985477, tmp));
+  assertEquals(1123864651, x ^= ((x%(((x&x)&(-2606227299.7590737))<<((tmp = -2018123078.1859496, tmp)*x)))|(x+(((((1935939774.8139446)/((-1303958190)/(2802816697.32639)))<<((2880056582)*x))+x)+x))));
+  assertEquals(1543368927, x |= (-2795691884));
+  assertEquals(NaN, x /= (x%((tmp = -1129915114, tmp)<<x)));
+  assertEquals(NaN, x += (tmp = -3045743135, tmp));
+  assertEquals(NaN, x -= (tmp = -2849555731.8207827, tmp));
+  assertEquals(NaN, x /= (((((2127485827)>>>((((tmp = 363239924, tmp)>>x)|((((tmp = -1419142286.0523334, tmp)-(x<<x))^(tmp = -1990365089.8283136, tmp))*((tmp = 2780242444.0739098, tmp)>>>(((-2336511023.342298)&x)/(tmp = 2296926221.402897, tmp)))))>>((tmp = 1378982475.6839466, tmp)>>(tmp = -816522530, tmp))))&(x^(tmp = -1668642255.0586753, tmp)))%(((tmp = 921249300.1500335, tmp)^x)*(tmp = -2228816905, tmp)))>>x));
+  assertEquals(-1460685191, x |= (tmp = 2834282105, tmp));
+  assertEquals(-1463439264, x &= (tmp = 2881860064.146755, tmp));
+  assertEquals(20.98100714963762, x /= (((3017150580.7875347)^((250499372.5339837)<<(tmp = -42767556.30788112, tmp)))|(x%(-2829281526))));
+  assertEquals(1, x /= x);
+  assertEquals(2, x += x);
+  assertEquals(8, x <<= x);
+  assertEquals(0, x >>>= ((730174750)>>>x));
+  assertEquals(0, x ^= x);
+  assertEquals(-1459637373, x ^= (2835329923.456409));
+  assertEquals(-1233115861, x ^= (511678120));
+  assertEquals(95682857, x >>>= ((tmp = 1534570885, tmp)|(tmp = -414425499.3786578, tmp)));
+  assertEquals(70254633, x &= (-1502067585));
+  assertEquals(51384749748909710, x *= (tmp = 731407276, tmp));
+  assertEquals(9390482.873469353, x %= (tmp = -592576964.7982686, tmp));
+  assertEquals(4695241, x >>>= (tmp = -1879898431.5395758, tmp));
+  assertEquals(-3129811912538149000, x += (((-727481809)^((3106908604)%x))*((((tmp = -1218123690, tmp)^(x>>((-942923806)^x)))/(x+x))>>>(-1508881888.969373))));
+  assertEquals(1596870236, x ^= (-1135673764.9721224));
+  assertEquals(0, x ^= x);
+  assertEquals(2133782410, x |= (((-2202469371)>>((tmp = 1327588406.183342, tmp)/(tmp = 253581265.7246865, tmp)))-((tmp = 2226575446.838795, tmp)^x)));
+  assertEquals(-81895217.83608055, x -= (tmp = 2215677627.8360806, tmp));
+  assertEquals(812089344, x <<= ((tmp = 882824005, tmp)/(((x>>((((((((tmp = 1211145185, tmp)/((-137817273)-(((tmp = 2165480503.1144185, tmp)-(-1840859887.1288517))*((155886014.8393339)>>((-1984526598)<<(tmp = 1331249058.3246582, tmp))))))>>(x*x))%(2830324652))%(933701061))|(1346496215))^(tmp = -988800810, tmp))+x))>>>x)<<(-2372088384))));
+  assertEquals(812089344, x <<= x);
+  assertEquals(8472, x %= ((((x|(((x%(tmp = 2772099481.664402, tmp))+(2894690616))-x))&(x&(((-715790638.6454093)>>(tmp = -1447931029, tmp))-(tmp = 1761027889, tmp))))^x)%(((tmp = 830969811, tmp)|x)|((-1102267929)-(3193018687)))));
+  assertEquals(-0.0000028559857417864914, x /= (-2966401364));
+  assertEquals(0, x >>= x);
+  assertEquals(-701800392, x += (tmp = -701800392, tmp));
+  assertEquals(2034756873, x -= (tmp = -2736557265, tmp));
+  assertEquals(-0.9475075048394501, x /= (((((82879340.27231383)+((tmp = -2876678920.653639, tmp)*(-2801097850)))<<x)>>>((x<<(((((x|x)&(tmp = -1572694766, tmp))>>(x+(x/((x-(((tmp = 1435301275, tmp)|(tmp = 983577854.212041, tmp))>>(tmp = 632633852.1644179, tmp)))+x))))>>>x)|(-850932021)))>>x))<<(-821983991)));
+  assertEquals(0, x >>= (x>>(2424003553.0883207)));
+  assertEquals(2599386349, x -= (-2599386349));
+  assertEquals(-68157441, x |= (((tmp = -1170343454.9327996, tmp)+((((tmp = 448468098, tmp)|(x>>(x>>(((x>>(((x/(x&(x<<x)))<<(2436876051.2588806))^(3010167261)))%((tmp = 2577616315.7538686, tmp)>>>(-2953152591.015912)))%((tmp = -1304628613, tmp)/(x&((x|((-2000952119)%((691146914)/((tmp = 1480966978.7766845, tmp)<<((tmp = 2644449477.392441, tmp)|(-2143869305.871568))))))+(tmp = -315254308, tmp))))))))&(-2060205555))|((-604140518.8186448)^(x*x))))%(x*((tmp = 1383244000.2807684, tmp)/(3195793656)))));
+  assertEquals(-68157441, x |= x);
+  assertEquals(-1, x >>= x);
+  assertEquals(-2147483648, x <<= x);
+  assertEquals(-1.5257198286933313, x /= (tmp = 1407521622, tmp));
+  assertEquals(1149084989.47428, x += (((tmp = 1149084991.9004865, tmp)&x)^((((((2797053000)/(x^x))*(-2829253694))>>>((tmp = -610924351, tmp)>>x))>>>(tmp = -675681012, tmp))<<(2812852729))));
+  assertEquals(0, x %= x);
+  assertEquals(0, x <<= ((tmp = -584069073, tmp)*(-2953140326)));
+  assertEquals(0, x <<= (tmp = -481515023.6404002, tmp));
+  assertEquals(-1441535370, x ^= (2853431926));
+  assertEquals(2853431926, x >>>= (((((((tmp = 2215663525.9620194, tmp)%((-1102832735.9274108)/x))>>x)&(3220898702.76322))&(((2077584946)*((x>>x)<<((tmp = 1845701049, tmp)-x)))/(tmp = 1947184202.5737212, tmp)))|(((tmp = 2976351488, tmp)^(-42517339))%((2648230244.410125)^(1520051731.31089))))/(1761635964)));
+  assertEquals(43539, x >>>= (tmp = 1361671184.7432632, tmp));
+  assertEquals(21769, x >>= ((tmp = -804932298.9572575, tmp)>>((((tmp = 1749006993.253409, tmp)+(276536978))^x)|(2698166994))));
+  assertEquals(1103025563, x |= (tmp = 1103007891, tmp));
+  assertEquals(1327594607, x += (tmp = 224569044, tmp));
+  assertEquals(1327594607, x |= x);
+  assertEquals(-478674944, x <<= (((672378508)&x)^(((-2070209708.6470091)|x)|(x>>>x))));
+  assertEquals(-478674943, x ^= ((-1832457698.6345716)>>>((tmp = -3077714019, tmp)/(1809383028))));
+  assertEquals(229129701056053250, x *= x);
+  assertEquals(1, x /= x);
+  assertEquals(2, x <<= (-1522529727));
+  assertEquals(2, x &= x);
+  assertEquals(-2016989182, x |= ((((tmp = -1267845511, tmp)*(1225350332))+((tmp = -1397690831.5717893, tmp)>>>(tmp = -2575382994, tmp)))+x));
+  assertEquals(-241, x >>= (tmp = 931869591, tmp));
+  assertEquals(-1048087547, x &= (tmp = -1048087403.1163051, tmp));
+  assertEquals(-4004486369.844599, x += (tmp = -2956398822.844599, tmp));
+  assertEquals(-4004486368.844599, x -= (((2701878498)>>x)|(x|(-1079354967))));
+  assertEquals(1, x >>= (tmp = -1583689092, tmp));
+  assertEquals(1, x *= (x>>(x%x)));
+  assertEquals(0, x %= x);
+  assertEquals(-0, x *= (-120818969));
+  assertEquals(0, x >>= ((tmp = 1794099660, tmp)/(((x&(((-321906091)^(tmp = -3009885933.8449526, tmp))&((tmp = -140917780, tmp)|(2037803173.4075825))))&x)&(tmp = -745357154, tmp))));
+  assertEquals(0, x <<= (563984257.3493614));
+  assertEquals(NaN, x %= ((((x>>(tmp = -2190891392.320677, tmp))-x)<<(462714956))<<((tmp = -84413570, tmp)|((x|(-2787022855))-((tmp = 2028532622, tmp)|(tmp = 1103757073.9178817, tmp))))));
+  assertEquals(NaN, x *= ((2137674085.3142445)|((tmp = -1054749859.2353804, tmp)%x)));
+  assertEquals(NaN, x /= (x>>>(((((tmp = 597103360.9069608, tmp)>>>(-2850217714.1866236))-((tmp = 1125150527, tmp)*x))%(tmp = -982662312, tmp))|((x/(((968656808.6069037)*(((128484784.15362918)>>x)^x))&((((x/((((tmp = 748775979, tmp)*((x-(((tmp = 709571811.9883962, tmp)%(-2083567026))%(x/(tmp = -680467505, tmp))))/((tmp = -167543858, tmp)/(tmp = -3113588783, tmp))))/x)<<(-2605415230)))>>>(tmp = 3133054172, tmp))%(tmp = -1904650393, tmp))*((x|(-1193709562))*(tmp = -1731312795.718104, tmp)))))/((tmp = -672386301, tmp)/(tmp = 808898833.4163612, tmp))))));
+  assertEquals(-9, x |= (((((tmp = 150377964.57195818, tmp)/(tmp = 2161910879.0514045, tmp))-(-2381625849))>>(-2715928517))/(((452113643)^(-2502232011))/((-3076471740)^(((tmp = 1664851172, tmp)*(((-1460011714)>>>x)<<((-2870606437)%x)))*((tmp = -2836565755.609597, tmp)-((x/(tmp = -871461415, tmp))-(2278867564))))))));
+  assertEquals(-1, x >>= x);
+  assertEquals(-1, x |= ((-1319927272)>>>(-2866709980)));
+  assertEquals(-1, x >>= ((2345179803.155703)&(-978025218.2243443)));
+  assertEquals(1, x /= x);
+  assertEquals(-260730973, x |= (tmp = -260730973, tmp));
+  assertEquals(1174405120, x <<= (2681054073));
+  assertEquals(1174405120, x &= x);
+  assertEquals(1073741824, x &= (tmp = 2017166572.7622075, tmp));
+  assertEquals(1073741824, x |= x);
+  assertEquals(168806102, x %= ((((tmp = -2939969193.950067, tmp)|((-2325174027.614815)/(-2329212715)))*(x/(((((-2927776738)/(x|x))+(x%(tmp = -3007347037.698492, tmp)))<<(-1898633380))>>(tmp = 204338085.45241892, tmp))))^x));
+  assertEquals(168806102, x %= ((-832849739.5197744)&(tmp = -141908598, tmp)));
+  assertEquals(-401033205.05225074, x -= (tmp = 569839307.0522507, tmp));
+  assertEquals(-401033205, x &= x);
+  assertEquals(-401130402, x ^= ((x*(tmp = 311418759.22436893, tmp))>>x));
+  assertEquals(793533469, x ^= (-950312893.5201888));
+  assertEquals(756, x >>>= (-1096189516));
+  assertEquals(711, x += ((tmp = -753105189, tmp)>>(599823192.5381484)));
+  assertEquals(0, x >>>= ((tmp = -2859668634.4641137, tmp)+(-1160392986.1521513)));
+  assertEquals(2427599726.176195, x -= (-2427599726.176195));
+  assertEquals(1942312465.2523103, x -= (485287260.92388475));
+  assertEquals(0, x >>>= ((tmp = -1740656456, tmp)/(tmp = 1339746799.9335847, tmp)));
+  assertEquals(0, x <<= ((-7017077.38786912)*((-699490904.4551768)^x)));
+  assertEquals(0, x <<= (tmp = 715662384, tmp));
+  assertEquals(0, x *= (x>>>(2149735450.0758677)));
+  assertEquals(NaN, x /= x);
+  assertEquals(0, x >>= ((397078885)*((851639692.8982519)-x)));
+  assertEquals(0, x &= (-2526654445));
+  assertEquals(0, x %= (-1204924598));
+  assertEquals(251639720, x ^= (x|(tmp = 251639720, tmp)));
+  assertEquals(695433573, x ^= (663539405));
+  assertEquals(-1038050104, x -= (1733483677));
+  assertEquals(0, x ^= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x &= (392107269));
+  assertEquals(0, x %= (-3084908458.241551));
+  assertEquals(0, x ^= x);
+  assertEquals(-2121660509, x ^= (tmp = -2121660509.7861986, tmp));
+  assertEquals(2285041855588855800, x *= (x|(3209046634)));
+  assertEquals(54915072, x >>>= (x%(((((x%((((tmp = -1429433339.5078833, tmp)|(tmp = 2906845137, tmp))^(3207260333))&(-848438650)))-(-2721099735))&(141851917.19978714))+x)/x)));
+  assertEquals(54915072, x &= x);
+  assertEquals(54915072, x %= (x+(1855489160)));
+  assertEquals(70078753, x ^= ((((((-1648661736)+(x%((-1421237596)+(tmp = 2053180992.3857927, tmp))))+(tmp = 38606889, tmp))<<((-241334284)%((x>>(215316122))*(tmp = 396488307, tmp))))+((tmp = -2900704565, tmp)^x))^(((1103481003.1111188)^x)-(tmp = 1304113534, tmp))));
+  assertEquals(1149501440, x <<= ((x>>(tmp = 3203172843, tmp))*(tmp = -192535531, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>= ((tmp = 2751499787, tmp)&((tmp = 2217654798, tmp)*(tmp = -2798728014, tmp))));
+  assertEquals(NaN, x /= ((((-2019592425)>>>((((-1571930240.741224)>>>((-183952981)/((((1990518443.672842)>>(((((2051371284)%(685322833.6793983))>>>(2662885938))<<(-1212029669.6675105))|((-2790877875)<<(1546643473))))<<x)-(tmp = 804296674.4579233, tmp))))-(tmp = -417759051.68770766, tmp))/((-621859758)>>>x)))&x)<<(tmp = -48558935.55320549, tmp)));
+  assertEquals(0, x <<= (x&x));
+  assertEquals(0, x *= (x%(tmp = 301196068, tmp)));
+  assertEquals(398290944, x |= (((tmp = 1904146839, tmp)+(1521017178))*(-3174245888.562067)));
+  assertEquals(1256401076, x ^= (1566464180));
+  assertEquals(149620758, x %= ((tmp = 532626355, tmp)^(tmp = -382971203, tmp)));
+  assertEquals(149620791, x |= (x>>x));
+  assertEquals(-0.07034576194938641, x /= ((tmp = -1977313182.7573922, tmp)-x));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x &= x);
+  assertEquals(0, x /= ((2182424851.139966)%(((-2768516150)+x)>>>x)));
+  assertEquals(0, x %= (-504299638.53962016));
+  assertEquals(-0, x *= (-2915134629.6909094));
+  assertEquals(0, x <<= ((tmp = 952692723.402582, tmp)%(2146335996.785011)));
+  assertEquals(230457472, x |= ((tmp = -574776101.8681948, tmp)*(683185125)));
+  assertEquals(933795934, x ^= (tmp = 974395614, tmp));
+  assertEquals(933801974, x ^= (x>>>((-148683729)*(((tmp = 2912596991.415531, tmp)^(-2883672328))/x))));
+  assertEquals(222, x >>= (-3060224682));
+  assertEquals(27, x >>>= (1429156099.1338701));
+  assertEquals(754519106, x ^= (tmp = 754519129.7281355, tmp));
+  assertEquals(188629776, x >>>= ((x>>>((1247267193)<<(tmp = -936228622, tmp)))%((tmp = 978604324.8236886, tmp)*((tmp = -3018953108, tmp)^(((tmp = 259650195, tmp)>>>(tmp = 2762928902.7901163, tmp))*(x>>((tmp = 787444263.5542864, tmp)/(x>>>(((-2039193776)<<(tmp = -1408159169, tmp))-(1238893783))))))))));
+  assertEquals(188629775.33987066, x += ((tmp = 1040520414, tmp)/((-1576237184)|((tmp = -970083705, tmp)&(((tmp = -312062761.12228274, tmp)|(1171754278.2968853))<<(-2069846597.7723892))))));
+  assertEquals(1473670, x >>>= ((tmp = 202409672, tmp)^x));
+  assertEquals(2171703268900, x *= (x>>(((tmp = 840468550, tmp)&(-3208057101.2136793))/x)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x ^= (x&((tmp = 2569871408.2405066, tmp)|((tmp = -3149374622, tmp)<<(x-(x|((tmp = -821239139.1626894, tmp)>>>x)))))));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x %= (tmp = 1926106354, tmp));
+  assertEquals(0, x >>= ((x/(-2848416))/(tmp = 2484293767, tmp)));
+  assertEquals(0, x <<= ((tmp = -2484137114, tmp)>>>(tmp = -887083772.8318355, tmp)));
+  assertEquals(0, x >>= (tmp = -2651389432, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(1041871201, x += ((tmp = 1041871201.9272791, tmp)|(x<<(-1136959830))));
+  assertEquals(651390879501530900, x *= ((tmp = 1250424964.0346212, tmp)>>x));
+  assertEquals(1965815296.245636, x %= ((2650603245.655831)+((-1610821947.8640454)>>>(((878987151.6917406)*((((784630543)%(((1448720244)>>(((tmp = 3036767847, tmp)+((tmp = 1012548422, tmp)<<(1957000200)))-x))/(x>>x)))<<((tmp = 914710268, tmp)*(((x^(1559603121))<<(tmp = 3181816736, tmp))|((-1964115655)+x))))-(-1055603890)))&(946797797.0616649)))));
+  assertEquals(1965815296.245636, x %= (tmp = -2601038357.593118, tmp));
+  assertEquals(-769384440.872302, x += (-2735199737.117938));
+  assertEquals(-769384440.872302, x %= (2193123162));
+  assertEquals(1, x /= x);
+  assertEquals(1, x -= (((x>>>(-1968465925))*((tmp = 563037904, tmp)>>((tmp = 3009534415.769578, tmp)>>((-2567240601.7038674)<<(tmp = -1258402723.4150183, tmp)))))%(3112239470.276867)));
+  assertEquals(1, x |= x);
+  assertEquals(1505461527, x ^= (tmp = 1505461526.5858076, tmp));
+  assertEquals(406553877, x &= (tmp = 2558242293, tmp));
+  assertEquals(406553877, x |= x);
+  assertEquals(-574902339, x |= ((-709809495)%(tmp = -2880884811.410611, tmp)));
+  assertEquals(-20281777.349363208, x %= (22184822.46602547));
+  assertEquals(1, x /= x);
+  assertEquals(-4360732, x ^= ((x|(tmp = 3178620274, tmp))>>(((2686286888)&(((-1107223053.8716578)/(((-2955575332.3675404)+(-2770518721))|(-2705016953.640522)))-x))^((1473641110.4633303)*((((-1466496401)<<x)+x)%(1805868749.082736))))));
+  assertEquals(-1158545408, x <<= ((((x/((-2710098221.691819)-(-2421462965.788145)))/(((((x>>>(tmp = 1994541591.1032422, tmp))+(tmp = -1276676679.9747126, tmp))&((tmp = 1764029634.2493339, tmp)+((x|(tmp = -3050446156, tmp))-((tmp = -9441859, tmp)/(((-2072420232)&x)*(-1003199889))))))+(tmp = -2443230628, tmp))*x))*((x&((((x|(747566933))*(((2039741506)>>>((tmp = -2456000554, tmp)>>>(-1566360933.7788877)))^((tmp = 960600745, tmp)/x)))&(x^(((-2649310348.777452)^((2224282875)-(tmp = -2129141087.3182096, tmp)))<<((x<<x)+((-1307892509.3874407)-(x|(tmp = -2831643528.9720087, tmp)))))))/(((tmp = -35502946, tmp)<<((tmp = 1091279222, tmp)>>(((-2686069468.8930416)-x)+(tmp = 367442353.2904701, tmp))))%(1218262628))))/x))^(-919079153.7857773)));
+  assertEquals(747, x >>>= (1229157974));
+  assertEquals(747, x |= x);
+  assertEquals(NaN, x %= (((3086718766.4715977)*((7912648.497568846)*((-2713828337.1659327)*(-176492425.4011252))))<<(tmp = -1074475173, tmp)));
+  assertEquals(0, x >>>= ((((444923201)<<x)>>>(-883391420.2142565))*((((617245412)<<x)>>>x)*(-913086143.2793813))));
+  assertEquals(1941802406, x ^= (tmp = -2353164890, tmp));
+  assertEquals(14, x >>>= (-1600311077.4571416));
+  assertEquals(-18229482703.7246, x += (((x+(-993157139.7880647))%x)*(1862419512.1781366)));
+  assertEquals(-14.531388114858734, x /= ((tmp = -1649072797.951641, tmp)<<x));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>= ((x/x)^x));
+  assertEquals(2, x ^= ((-1597416259)/(-738770020)));
+  assertEquals(0, x >>= (tmp = -387850072.74833393, tmp));
+  assertEquals(0, x >>>= ((2491085477.186817)>>(x*(((tmp = -1592498533, tmp)+(tmp = 2086841852, tmp))&(-3174019330.8288536)))));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x >>>= (tmp = -3045348659.45243, tmp));
+  assertEquals(-1208573479, x |= ((3086393817)-x));
+  assertEquals(1460649854142163500, x *= x);
+  assertEquals(1588199424, x <<= (-1902076952));
+  assertEquals(1586102272, x &= (tmp = 2139876091.9142454, tmp));
+  assertEquals(-460908552.5528109, x -= (tmp = 2047010824.552811, tmp));
+  assertEquals(-460908552.5528109, x %= (tmp = 507904117.09368753, tmp));
+  assertEquals(-460908552.5528109, x %= (2749577642.527038));
+  assertEquals(234012, x >>>= (-340465746.91275));
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x %= (tmp = -2601875531, tmp));
+  assertEquals(0, x %= (x|(tmp = 650979981.1158671, tmp)));
+  assertEquals(0, x %= (tmp = -2286020987, tmp));
+  assertEquals(0, x |= x);
+  assertEquals(0, x &= (x|((tmp = 2568101411, tmp)-(-1438002403))));
+  assertEquals(0, x >>>= (1399248574));
+  assertEquals(0, x %= (-1906670287.2043698));
+  assertEquals(0, x >>= (1019286379.6962404));
+  assertEquals(0, x |= (x/(tmp = -82583591.62643051, tmp)));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x *= (x^(1874776436)));
+  assertEquals(NaN, x -= ((-1238826797)-(-2971588236.7228813)));
+  assertEquals(0, x <<= (2064632559));
+  assertEquals(-0.5967273958864694, x += (((tmp = 1502995019, tmp)>>x)/(-2518729707)));
+  assertEquals(0, x >>>= x);
+  assertEquals(-0, x /= (-1923030890));
+  assertEquals(NaN, x %= x);
+  assertEquals(0, x >>= (tmp = 1081732779.9449487, tmp));
+  assertEquals(-820183066, x |= ((tmp = -3169007292.4721155, tmp)|(-1912588318)));
+  assertEquals(0, x -= x);
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x /= (tmp = 287181840, tmp));
+  assertEquals(0, x &= (x/((tmp = -1139766051, tmp)<<(x&(tmp = 2779004578, tmp)))));
+  assertEquals(0, x >>= (((tmp = -1816938028, tmp)+(-224851993.3139863))*(-2933829524)));
+  assertEquals(0, x |= ((((tmp = 305077929.1808746, tmp)&((x-(((((tmp = 2122810346.7475111, tmp)<<(717271979))*(tmp = 256854043.72633624, tmp))%((x+(tmp = -318657223.9992106, tmp))*((1993144830)<<(2594890698.603228))))^((((tmp = 257370667, tmp)>>>((((x^(3160746820))>>>(2049640466.8116226))>>>(2543930504.7117066))^(x-x)))^(x%(964838975)))^x)))%(x*x)))>>>x)*(tmp = -46861540, tmp)));
+  assertEquals(747575633, x ^= ((-2406502427)-(-3154078060.3794584)));
+  assertEquals(0, x *= (x%x));
+  assertEquals(0, x <<= (1313773705.3087234));
+  assertEquals(0, x >>>= ((x+x)>>>(3068164056)));
+  assertEquals(-0, x *= (tmp = -1771797797, tmp));
+  assertEquals(1784146970, x ^= (tmp = 1784146970, tmp));
+  assertEquals(1784146970, x >>>= (tmp = -2219972320.7195597, tmp));
+  assertEquals(1744830464, x <<= ((((-2769476584)-(((1798431604)>>(tmp = 1337687914.799577, tmp))>>>((-2802941943.15014)>>x)))>>>(tmp = 646033678, tmp))-x));
+  assertEquals(3044433348102455300, x *= x);
+  assertEquals(0, x >>= ((tmp = 1592076570.1900845, tmp)-((645774223.6317859)>>x)));
+  assertEquals(0, x >>= (x>>>(-3045822290.1536255)));
+  assertEquals(-0, x *= (tmp = -2450298800.986624, tmp));
+  assertEquals(0, x >>= (tmp = 1379605393, tmp));
+  assertEquals(0, x &= (((x-((((tmp = 837939461.6683749, tmp)+((((-813261853.3247359)|(x&(((-2565113940)*(tmp = -2725085381.240134, tmp))|x)))%(-1457259320))-(x+((tmp = -273947066, tmp)%((1164825698.879649)>>(1653138880.3434052))))))>>>(2823967606.411492))>>>((((((((1189235604.9646997)/(tmp = -2875620103.4002438, tmp))-(tmp = -801261493, tmp))<<(((1832556579.5095325)<<x)|((tmp = -2740330665, tmp)>>(tmp = -2352814025, tmp))))-(tmp = -1445043552.99499, tmp))&(x<<(((((445325471)*(1293047043.1808558))>>>(((1901837408.5910044)-(tmp = -2349093446.5313253, tmp))>>>(tmp = 1000847053.1861948, tmp)))*(x>>>(1771853406.6567078)))>>x)))>>>x)>>>(x^((tmp = 2813422715, tmp)-(x+(-342599947)))))))&(x>>>x))*x));
+  assertEquals(NaN, x %= ((tmp = -3027713526, tmp)-((((x%(((((x/((2711155710)^(((((x>>>x)%((1098599291.155015)^(((((tmp = 1855724377.8987885, tmp)/(x|x))*((-1963179786)*((x-((-1634717702)%x))<<x)))>>(2008859507))>>((tmp = 2635024299.7983694, tmp)^(tmp = -602049246, tmp)))))*(x>>x))&(tmp = -1925103609, tmp))*((tmp = 2106913531.2828505, tmp)%((tmp = -200970069, tmp)*(-2809001910.951446))))))%x)*((1990098169)>>((x<<(2303347904.2601404))%x)))|(2767962065.9846206))+(201589933.301661)))>>(((tmp = 1921071149.5140274, tmp)>>(1054558799.1731887))|x))*(x/((((-2833879637.345674)>>>(tmp = 2849099601, tmp))%x)+(x%(x%(((tmp = 1983018049, tmp)^(tmp = -2659637454, tmp))>>((-1335497229.6945198)-(x+(((((tmp = 1136612609.848967, tmp)%(2471741030.01762))<<(x|(((tmp = 1644081190.1972675, tmp)&(-1422527338))^(2379264356.265957))))/(tmp = 2979299484.1884174, tmp))/x)))))))))*((tmp = 1858298882, tmp)^((tmp = -547417134.9651439, tmp)*x)))));
+  assertEquals(-7664, x |= ((2286000258.825538)>>(1716389170)));
+  assertEquals(-1, x >>= x);
+  assertEquals(-1231640486.3023372, x += ((tmp = 1231640485.3023372, tmp)*x));
+  assertEquals(-2463280972.6046743, x += x);
+  assertEquals(1746, x >>>= x);
+  assertEquals(1746, x >>>= (((tmp = -562546488.0669937, tmp)*((-2475357745.8508205)&((x%(821425388.8633704))%((((-2315481592.687686)&(((tmp = 3130530521.7453523, tmp)+x)-x))^(-973033390.1773088))/x))))<<x));
+  assertEquals(1746, x %= (-1544973951.076033));
+  assertEquals(27936, x <<= (-525441532.33816123));
+  assertEquals(27936, x %= (x*((tmp = 344991423.5336287, tmp)+(-2267207281))));
+  assertEquals(27, x >>>= (tmp = 1249792906, tmp));
+  assertEquals(0, x >>>= (tmp = -1068989615, tmp));
+  assertEquals(0, x >>>= (tmp = 347969658.92579734, tmp));
+  assertEquals(-2656611892, x -= (2656611892));
+  assertEquals(1944539596, x |= (((tmp = 3000889963, tmp)-x)<<((tmp = 2917390580.5323124, tmp)^(-996041439))));
+  assertEquals(1944539596, x |= x);
+  assertEquals(-739740167.0752468, x -= ((1712009965.0752468)+(x>>((tmp = -740611560.99014, tmp)>>>((tmp = -1033267419.6253037, tmp)&(862184116.3583733))))));
+  assertEquals(-1479480334.1504936, x += x);
+  assertEquals(-4294967296.150494, x -= (x>>>((1219235492.3661718)&(3138970355.0665245))));
+  assertEquals(0, x >>= (x*x));
+  assertEquals(-0, x *= ((-2202530054.6558375)-(-676578695)));
+  assertEquals(-0, x %= (1336025846));
+  assertEquals(0, x &= x);
+  assertEquals(0, x /= (1759366510));
+  assertEquals(630007622, x |= (630007622));
+  assertEquals(-0.22460286863455903, x /= (tmp = -2804984753, tmp));
+  assertEquals(1102410276.775397, x -= (-1102410277));
+  assertEquals(1102410276.775397, x %= ((((-2569525203)&x)*(x|(-1932675298)))/((-2376634450)>>>(x>>>(tmp = 936937604.9491489, tmp)))));
+  assertEquals(33642, x >>= (3028252527));
+  assertEquals(2181106522.688034, x -= (-2181072880.688034));
+  assertEquals(-2113861630, x &= (2523921542));
+  assertEquals(-2147483646, x &= (-1996601566.9370148));
+  assertEquals(-2147483648, x &= (tmp = -665669175.1968856, tmp));
+  assertEquals(-2858673260.1367273, x -= (tmp = 711189612.1367272, tmp));
+  assertEquals(350657, x >>= (tmp = -170243892.25474262, tmp));
+  assertEquals(-0.0001405571562140975, x /= (-2494764474.7868776));
+  assertEquals(0, x ^= x);
+  assertEquals(NaN, x /= ((x&(-2041236879))*((tmp = -2182530229, tmp)^((1274197078)*x))));
+  assertEquals(0, x |= (x&(x-(1794950303))));
+  assertEquals(1222105379, x |= (tmp = 1222105379, tmp));
+  assertEquals(729884484, x ^= (tmp = 1666645607.6907792, tmp));
+  assertEquals(729884484, x %= (tmp = -2896922082, tmp));
+  assertEquals(8768, x &= ((tmp = 358940932, tmp)>>>(3159687631.3308897)));
+  assertEquals(1892384495, x |= (-2402591569));
+  assertEquals(1892470533, x += ((((x^(-2266612043))>>>(tmp = -531009952, tmp))<<(x>>>((-1365315963.5698428)>>>((x+((-3168207800.184341)-(tmp = 1776222157.609917, tmp)))+(-1588857469.3596382)))))>>>x));
+  assertEquals(143587205, x += (tmp = -1748883328, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>= (tmp = 2334880462.3195543, tmp));
+  assertEquals(0, x &= ((tmp = 1819359625.4396145, tmp)|(tmp = -1323513565, tmp)));
+  assertEquals(-1102259874, x ^= (3192707422));
+  assertEquals(2567457772588852700, x *= (-2329267202));
+  assertEquals(-16783687, x |= ((-2212476227.060922)^(378973700.78452563)));
+  assertEquals(4278183609, x >>>= ((((((((tmp = 1766363150.197206, tmp)*(-2774552871))%x)>>>((3071429820)&((((((tmp = 351068445.27642524, tmp)<<(tmp = 2646575765, tmp))^(806452682))<<((x>>>(-2217968415.505327))<<(1564726716)))|x)-(tmp = -3110814468.9023848, tmp))))+x)^x)>>>(tmp = -617705282.0788529, tmp))>>>x));
+  assertEquals(4314933530, x -= ((1032195469.789219)|(tmp = -448053861.9531791, tmp)));
+  assertEquals(9709850, x %= (((tmp = -3056286252.5853324, tmp)*x)&x));
+  assertEquals(9709850, x %= (tmp = -2596800940, tmp));
+  assertEquals(2655489828.9461126, x -= (tmp = -2645779978.9461126, tmp));
+  assertEquals(369266212, x &= (((335712316.24874604)|(tmp = 33648215, tmp))-((x/(2639848695))<<((-499681175)<<(-2490554556)))));
+  assertEquals(-2147483648, x <<= (-834465507));
+  assertEquals(1073741824, x >>>= (((tmp = 3018385473.1824775, tmp)>>(x*(-2574502558.216812)))|(((tmp = -1742844828, tmp)*(1698724455))&x)));
+  assertEquals(-270818218, x += (-1344560042));
+  assertEquals(360710144, x <<= x);
+  assertEquals(0, x <<= (tmp = 612718075, tmp));
+  assertEquals(0, x <<= x);
+  assertEquals(-0, x /= (tmp = -1922423684, tmp));
+  assertEquals(-0, x *= ((((tmp = 741806213.3264687, tmp)%(-711184803.2022421))+((tmp = -3209040938, tmp)&(525355849.044886)))&(x<<(tmp = -698610297, tmp))));
+  assertEquals(0, x <<= (-482471790));
+  assertEquals(0, x &= ((-921538707)/(tmp = -482498765.988616, tmp)));
+  assertEquals(0, x ^= (x^x));
+  assertEquals(-351721702, x ^= (-351721702.8850286));
+  assertEquals(726242219625599900, x -= ((2064820612)*x));
+  assertEquals(1452484439251199700, x += x);
+  assertEquals(2.52318299412847e-15, x %= ((((x<<((2508143285)+x))>>(-2493225905.011774))%(1867009511.0792103))/((((x<<(2542171236))>>((x|x)&(tmp = -384528563, tmp)))+((-1168755343)*(1731980691.6745195)))+(tmp = -1608066022.71164, tmp))));
+  assertEquals(79905008, x += ((((-2702081714.590131)&(x+(tmp = -1254725471.2121565, tmp)))*(3088309981))%(((tmp = 1476844981.1453142, tmp)|((((tmp = -1243556934.7291331, tmp)%x)^(-1302096154))+((660489180)/(tmp = -681535480.8642154, tmp))))^(tmp = -8410710, tmp))));
+  assertEquals(1215822204, x ^= ((-3008054900)>>>(tmp = -1990206464.460693, tmp)));
+  assertEquals(-394790532, x |= ((((-1334779133.2038574)+(tmp = -1407958866.832946, tmp))<<(1699208315))-(((x^(x%x))<<(3216443))>>(x+((((2576716374.3081336)|((tmp = 2316167191.348064, tmp)&((51086351.20208645)&((x|(tmp = -357261999, tmp))^(x/x)))))*(-45901631.10155654))*(((-439588079)>>>((-2358959768.7634916)|(1613636894.9373643)))+(((-908627176)<<x)%(x%((-1669567978)>>>((x>>(1289400876))+(tmp = 2726174270, tmp)))))))))));
+  assertEquals(-0.17717467607696327, x /= (2228255982.974148));
+  assertEquals(-1905616474, x ^= (tmp = 2389350822.851587, tmp));
+  assertEquals(-0, x %= x);
+  assertEquals(2818124981.508915, x -= (-2818124981.508915));
+  assertEquals(-1476842315, x |= x);
+  assertEquals(73408564, x &= (-3147390604.3453345));
+  assertEquals(70, x >>>= x);
+  assertEquals(1, x >>= x);
+  assertEquals(3086527319.899181, x *= (3086527319.899181));
+  assertEquals(-145, x >>= x);
+  assertEquals(-145, x %= (tmp = -2500421077.3982406, tmp));
+  assertEquals(-1, x >>= (tmp = -2970678326.712191, tmp));
+  assertEquals(-1, x %= ((tmp = -535932632.4668834, tmp)+(((-1226598339.347982)<<((tmp = 616949449, tmp)/(tmp = 2779464046, tmp)))/(214578501.67984307))));
+  assertEquals(1, x *= x);
+  assertEquals(1, x >>= ((tmp = 11080208, tmp)<<(460763913)));
+  assertEquals(-1.8406600706723492e-19, x /= ((tmp = -2334126306.1720915, tmp)*(tmp = 2327566272.5901165, tmp)));
+  assertEquals(856681434186007200, x -= ((tmp = -2286974992.8133907, tmp)*(374591518)));
+  assertEquals(3126084224, x >>>= x);
+  assertEquals(-1160460669, x |= (tmp = 181716099, tmp));
+  assertEquals(873988096, x <<= (tmp = 406702419, tmp));
+  assertEquals(0, x <<= ((tmp = 802107965.4672925, tmp)-((tmp = 1644174603, tmp)>>((tmp = 604679952, tmp)+(tmp = -515450096.51425123, tmp)))));
+  assertEquals(NaN, x %= ((x>>(tmp = 2245570378, tmp))*(tmp = 1547616585, tmp)));
+  assertEquals(NaN, x /= ((tmp = -776657947.0382309, tmp)&(tmp = 163929332.28270507, tmp)));
+  assertEquals(NaN, x *= (tmp = 243725679.78916526, tmp));
+  assertEquals(NaN, x /= (x>>x));
+  assertEquals(0, x <<= ((tmp = -1293291295.5735884, tmp)%(((((63309078)>>>x)&(x&(-2835108260.025297)))+x)>>>(-1317213424))));
+  assertEquals(0, x *= ((((tmp = -1140319441.0068483, tmp)*(tmp = 2102496185, tmp))&(-2326380427))<<(tmp = -2765904696, tmp)));
+  assertEquals(0, x /= (tmp = 2709618593, tmp));
+  assertEquals(0, x >>= (-1753085095.7670164));
+  assertEquals(1766381484, x |= (-2528585812));
+  assertEquals(1766381484, x %= (2735943476.6363373));
+  assertEquals(1766381484, x %= (x*(tmp = 2701354268, tmp)));
+  assertEquals(-2147483648, x <<= (-323840707.4949653));
+  assertEquals(4611686018427388000, x *= (x<<x));
+  assertEquals(0, x <<= (3066735113));
+  assertEquals(0, x ^= ((((x*x)^(tmp = -2182795086.39927, tmp))<<(x^(tmp = 1661144992.4371827, tmp)))<<((((-2885512572.176741)*(tmp = 609919485, tmp))|(tmp = 929399391.0790694, tmp))>>>((((((((((399048996)>>((-107976581.61751771)>>>x))|(((-1502100015)<<(tmp = -1108852531.9494338, tmp))&(x/(tmp = -3198795871.7239237, tmp))))+((-2627653357)>>x))>>>x)*(1066736757.2718519))%(tmp = 1326732482.201604, tmp))/(tmp = 2513496019.814191, tmp))>>>((1694891519)>>>(-2860217254.378931)))<<(tmp = 31345503, tmp)))));
+  assertEquals(0, x ^= (x/((-2556481161)>>>(x/(x%(x&(1302923615.7148068)))))));
+  assertEquals(NaN, x /= x);
+  assertEquals(NaN, x += (tmp = 846522031, tmp));
+  assertEquals(0, x >>= (x+(-1420249556.419045)));
+  assertEquals(0, x ^= (((x%(-1807673170))&x)-x));
+  assertEquals(-3484.311990686845, x -= ((((((-510347602.0068991)>>>x)<<((tmp = 1647999950, tmp)&(((305407727)>>((1781066601.791009)&x))<<((tmp = -998795238, tmp)%(((x/x)+x)<<(((2586995491.434947)<<x)-((((tmp = 545715607.9395425, tmp)*x)>>>x)>>>(((((2332534960.4595165)^(-3159493972.3695474))<<(tmp = 867030294, tmp))|(2950723135.753855))^(((3150916666)<<x)>>((tmp = 414988690, tmp)|((tmp = -1879594606, tmp)/(tmp = 1485647336.933429, tmp))))))))))))>>(tmp = -2676293177, tmp))%(617312699.1995015))/((((tmp = -1742121185, tmp)^((((x&x)<<(tmp = 698266916, tmp))/(-1860886248))+((-213304430)%((((((-2508973021.1333447)+(tmp = 2678876318.4903, tmp))&(tmp = -43584540, tmp))-x)^(-2251323850.4611115))-x))))>>>(tmp = 2555971284, tmp))%((((tmp = 16925106, tmp)^x)&x)|((x/((x|(tmp = -2787677257.125139, tmp))<<(-853699567)))+(tmp = -1721553520, tmp))))));
+  assertEquals(-447873933.26863855, x += (-447870448.9566479));
+  assertEquals(200591060101520900, x *= x);
+  assertEquals(200591062202483420, x -= (-2100962536));
+  assertEquals(-5.261023346568228e+24, x *= ((tmp = -419641692.6377077, tmp)>>(tmp = -224703100, tmp)));
+  assertEquals(1269498660, x |= (195756836));
+  assertEquals(1269498660, x |= x);
+  assertEquals(1269498660, x |= x);
+  assertEquals(-37.75978948486164, x /= (((tmp = -595793780, tmp)+((tmp = 2384365752, tmp)>>>(1597707155)))|((968887032)^(tmp = 2417905313.4337964, tmp))));
+  assertEquals(-37.75978948486164, x %= (tmp = -1846958365.291661, tmp));
+  assertEquals(1102319266.6421175, x += (1102319304.401907));
+  assertEquals(-1664202255175155200, x -= ((x^(tmp = 407408729, tmp))*x));
+  assertEquals(-752874653, x ^= (tmp = 314673507, tmp));
+  assertEquals(-72474761, x |= (tmp = -2538726025.8884344, tmp));
+  assertEquals(-72474761, x |= x);
+  assertEquals(-122849418, x += ((tmp = -2332080457, tmp)|(((((30496388.145492196)*(((-1654329438.451212)|(-2205923896))&(x>>(tmp = -1179784444.957002, tmp))))&(tmp = 319312118, tmp))*(651650825))|(((-2305190283)|x)>>>(-428229803)))));
+  assertEquals(994, x >>>= x);
+  assertEquals(614292, x *= (((((2565736877)/((tmp = 649009094, tmp)>>>(((x>>>(2208471260))>>(x>>>x))%x)))&(tmp = 357846438, tmp))<<(tmp = -2175355851, tmp))%x));
+  assertEquals(1792008118, x |= (tmp = 1791924774.5121183, tmp));
+  assertEquals(1246238208, x &= (tmp = 1264064009.9569638, tmp));
+  assertEquals(-88877082, x ^= (2969289190.285704));
+  assertEquals(0.044923746573582474, x /= ((tmp = -3057438043, tmp)^(-1009304907)));
+  assertEquals(0, x <<= ((-828383918)-((((x>>(734512101))*(tmp = -3108890379, tmp))-(x|((tmp = 3081370585.3127823, tmp)^((-271087194)-(x/(tmp = -2777995324.4073873, tmp))))))%x)));
+  assertEquals(1604111507.3365753, x -= (-1604111507.3365753));
+  assertEquals(-1721314970, x ^= (tmp = -956686859, tmp));
+  assertEquals(-102247425, x |= (tmp = -2535095555, tmp));
+  assertEquals(-102247425, x %= (-955423877));
+  assertEquals(1053144489850425, x *= (((tmp = 1583243590.9550207, tmp)&(1356978114.8592746))|(tmp = -10299961.622774363, tmp)));
+  assertEquals(-0.0043728190668037336, x /= ((-1196259252.435701)*(((-689529982)|(tmp = -1698518652.4373918, tmp))<<x)));
+  assertEquals(-2, x ^= (((x+(tmp = 2961627388, tmp))>>(tmp = 231666110.84104693, tmp))|x));
+  assertEquals(-1, x >>= (tmp = -83214419.92958307, tmp));
+  assertEquals(-1, x %= (-1303878209.6288595));
+  assertEquals(2944850457.5213213, x -= (tmp = -2944850458.5213213, tmp));
+  assertEquals(-1.6607884436053055, x /= (-1773164107));
+  assertEquals(-0.6607884436053055, x %= ((x>>(1240245489.8629928))%(tmp = -3044136221, tmp)));
+  assertEquals(-0, x *= ((x*x)>>>((1069542313.7656753)+x)));
+  assertEquals(0, x >>>= (tmp = -202931587.00212693, tmp));
+  assertEquals(-0, x *= (-375274420));
+  assertEquals(0, x |= ((x/(((tmp = -876417141, tmp)*(x>>>x))&(-2406962078)))<<x));
+  assertEquals(0, x &= ((tmp = -650283599.0780096, tmp)*(tmp = 513255913.34108484, tmp)));
+  assertEquals(3027255453.458466, x += (3027255453.458466));
+  assertEquals(-12568623413253943000, x *= (((x-(198689694.92141533))|x)-x));
+  assertEquals(-12568623410285185000, x -= (tmp = -2968758030.3694654, tmp));
+  assertEquals(-2008903680, x &= (3111621747.7679076));
+  assertEquals(-110045263.26583672, x += (tmp = 1898858416.7341633, tmp));
+  assertEquals(15964, x >>>= (1141042034));
+  assertEquals(31928, x += x);
+  assertEquals(0, x ^= x);
+  assertEquals(-1159866377, x |= (-1159866377));
+  assertEquals(0, x ^= x);
+  assertEquals(3072699529.4306993, x -= (tmp = -3072699529.4306993, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(-1471195029, x |= (2823772267.429641));
+  assertEquals(-4152937108, x += (-2681742079));
+  assertEquals(142030188, x |= x);
+  assertEquals(270, x >>= (tmp = 1013826483, tmp));
+  assertEquals(0, x >>>= (529670686));
+  assertEquals(-2912300367, x -= (2912300367));
+  assertEquals(2213791134963007500, x *= (x<<((((-3214746140)>>(tmp = -588929463, tmp))+((tmp = -3084290306, tmp)>>x))>>x)));
+  assertEquals(2213791133466809900, x -= (tmp = 1496197641, tmp));
+  assertEquals(69834416, x >>>= (x|(((2755815509.6323137)^(x%(((x*((((tmp = 375453453, tmp)<<(x*x))>>(tmp = -973199642, tmp))*x))>>((tmp = -356288629, tmp)>>(tmp = 2879464644, tmp)))<<((((1353647167.9291127)>>>(x/x))<<((2919449101)/(2954998123.5529594)))^x))))&((-2317273650)>>>(tmp = 34560010.71060455, tmp)))));
+  assertEquals(69834416, x >>>= (x^(-2117657680.8646245)));
+  assertEquals(2217318064, x -= ((tmp = 2035883891, tmp)<<(tmp = -1884739265, tmp)));
+  assertEquals(-1272875686, x ^= (tmp = 805889002.7165648, tmp));
+  assertEquals(-1272875686, x >>= (x&(((1750455903)*x)>>((722098015)%((tmp = 1605335626, tmp)>>(tmp = -565369634, tmp))))));
+  assertEquals(-1274351316, x -= (x>>>((tmp = 2382002632, tmp)-((tmp = -2355012843, tmp)+(1465018311.6735773)))));
+  assertEquals(-2982908522.4418216, x -= ((tmp = 1635549038.4418216, tmp)+(((1952167017.720186)&((tmp = -2284822073.1002254, tmp)>>(-1403893917)))%(tmp = 655347757, tmp))));
+  assertEquals(312, x >>>= x);
+  assertEquals(1248, x <<= (2376583906));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x *= ((((tmp = 1914053541.881434, tmp)>>>(tmp = 1583032186, tmp))>>>(-2511688231))%(tmp = -2647173031, tmp)));
+  assertEquals(0, x >>>= (tmp = -2320612994.2421227, tmp));
+  assertEquals(0, x %= (((x+(tmp = -720216298.5403998, tmp))<<(414712685))>>(tmp = 480416588, tmp)));
+  assertEquals(0, x >>= ((((3039442014.271272)<<x)%(-2402430612.9724464))&((-2141451461.3664773)%((x>>(1361764256))/((tmp = -1723952801.9320493, tmp)%(477351810.2485285))))));
+  assertEquals(-0, x /= (tmp = -1627035877, tmp));
+  assertEquals(0, x >>>= (tmp = 1745193212, tmp));
+  assertEquals(0, x >>>= (2309131575));
+  assertEquals(NaN, x %= (((x*(tmp = -1730907131.6124666, tmp))%((((1481750041)|(x>>((((x>>>(tmp = 3128156522.5936565, tmp))/(tmp = -1277222645.9880452, tmp))^(tmp = -2327254789, tmp))+x)))>>>(-1161176960))>>>(tmp = 3135906272.5466847, tmp)))*(((((-2230902834.464362)^(1822893689.8183987))+(((tmp = 1597326356, tmp)/(x&((tmp = -3044163063.587389, tmp)>>(tmp = 2844997555, tmp))))%(x^x)))>>((x|x)/x))^(2634614167.2529745))));
+  assertEquals(0, x &= (3081901595));
+  assertEquals(0, x &= (-2453019214.8914948));
+  assertEquals(0, x &= x);
+  assertEquals(0, x >>>= (-596810618.3666217));
+  assertEquals(0, x >>= (((908276623)|x)/x));
+  assertEquals(0, x ^= x);
+  assertEquals(958890056, x |= (tmp = 958890056.474458, tmp));
+  assertEquals(1325436928, x <<= (tmp = -2474326583, tmp));
+  assertEquals(711588532333838300, x *= ((-148161646.68183947)<<(tmp = -1149179108.8049204, tmp)));
+  assertEquals(0, x ^= (((2862565506)%x)/(tmp = -2865813112, tmp)));
+  assertEquals(-2064806628, x += (((tmp = -2677361175.7317276, tmp)/((817159440)>>>(tmp = 1895467706, tmp)))^(x|(tmp = -2309094859, tmp))));
+  assertEquals(-69806982479424, x *= ((x&(tmp = 2857559765.1909904, tmp))&(-3166908966.754988)));
+  assertEquals(-430255744, x %= ((((((-2968574724.119535)<<x)<<((tmp = 1603913671, tmp)%((-1495838556.661653)^(tmp = 1778219751, tmp))))*(-400364265))<<((((1607866371.235576)-(1961740136))|(1259754297))&(tmp = -1018024797.1352971, tmp)))^x));
+  assertEquals(6.828637393208647e-7, x /= (x*(tmp = 1464421, tmp)));
+  assertEquals(0, x &= x);
+  assertEquals(-0, x *= (((tmp = -2510016276, tmp)-(2088209546))<<((tmp = -1609442851.3789036, tmp)+(tmp = 1919930212, tmp))));
+  assertEquals(-0, x %= (tmp = 1965117998, tmp));
+  assertEquals(-290294792.53186846, x += ((tmp = -2361555894.5318685, tmp)%(2071261102)));
+  assertEquals(-70873, x >>= (tmp = 2206814124, tmp));
+  assertEquals(-141746, x += x);
+  assertEquals(-141733.9831459089, x -= (((tmp = -806523527, tmp)>>>(tmp = 1897214891, tmp))/x));
+  assertEquals(-141733.9831459089, x %= ((tmp = 1996295696, tmp)<<(tmp = 3124244672, tmp)));
+  assertEquals(141733.9831459089, x /= (x>>(2688555704.561076)));
+  assertEquals(3196954517.3075542, x -= (tmp = -3196812783.3244085, tmp));
+  assertEquals(-19929155, x |= (((x|x)+x)^((tmp = 391754876, tmp)-(((((((tmp = -3051902902.5100636, tmp)*(x/(1546924993)))|(tmp = 1494375949, tmp))/((((-795378522)/(tmp = 509984856, tmp))>>>(tmp = -106173186, tmp))+x))|x)|(1916921307))>>>x))));
+  assertEquals(1279271449, x &= ((tmp = 1289446971, tmp)&(tmp = 1836102619, tmp)));
+  assertEquals(17876992, x <<= (-207633461));
+  assertEquals(0, x >>= (tmp = -903885218.9406946, tmp));
+  assertEquals(0, x >>>= x);
+  assertEquals(-2999, x -= (((754533336.2183633)%(tmp = 557970276.0537136, tmp))>>(tmp = -1171045520, tmp)));
+  assertEquals(-0.000003020470363504361, x /= (tmp = 992891715.2229724, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(0.45768595820301217, x %= ((tmp = 673779031, tmp)/(tmp = -1242414872.3263657, tmp)));
+  assertEquals(-980843052.1872087, x += (tmp = -980843052.6448946, tmp));
+  assertEquals(-Infinity, x /= ((((tmp = 317747175.8024508, tmp)&(x&(((tmp = 1632953053, tmp)>>x)/x)))%x)/(3145184986)));
+  assertEquals(0, x &= (x<<x));
+  assertEquals(0, x ^= (x-((2969023660.5619783)/x)));
+  assertEquals(0, x *= x);
+  assertEquals(NaN, x %= (x/(((x-x)/((tmp = -1622970458.3812745, tmp)-(1626134522)))&((((((tmp = 1384729039.4149384, tmp)^(x%(tmp = -2736365959, tmp)))+((-1465172172)%x))>>(tmp = -1839184810.2603343, tmp))^(((tmp = 1756918419, tmp)>>>(x+(x%(tmp = -2011122996.9794662, tmp))))<<(-3026600748.902623)))*((tmp = -2040286580, tmp)>>(-2899217430.655154))))));
+  assertEquals(0, x >>>= (tmp = 2100066003.3046467, tmp));
+  assertEquals(1362012169, x ^= (tmp = 1362012169, tmp));
+  assertEquals(1476312683, x |= ((457898409)>>>(-3079768830.723079)));
+  assertEquals(1441711, x >>>= (905040778.7770994));
+  assertEquals(2078530607521, x *= x);
+  assertEquals(-208193103, x |= ((tmp = -241750000, tmp)^x));
+  assertEquals(745036378, x ^= (((tmp = -1737151062.4726632, tmp)<<x)|(tmp = -1900321813, tmp)));
+  assertEquals(1744830464, x <<= x);
+  assertEquals(212992, x >>>= ((1210741037)-(x-(x>>>((x^(-1273817997.0036907))+((2401915056.5471)%(x<<(tmp = 1696738364.277438, tmp))))))));
+  assertEquals(0.0001604311565639742, x /= (1327622418));
+  assertEquals(0, x <<= (tmp = 166631979.34529006, tmp));
+  assertEquals(0, x *= ((((tmp = 657814984, tmp)/(((-831055031)>>>(1531978379.1768064))|((tmp = 2470027754.302619, tmp)^(-223467597))))/(tmp = 1678697269.468965, tmp))&(tmp = -1756260071.4360774, tmp)));
+  assertEquals(-2049375053, x ^= (tmp = -2049375053, tmp));
+  assertEquals(-1879109889, x |= (tmp = -1963586818.0436726, tmp));
+  assertEquals(718239919, x ^= (tmp = -1523550640.1925273, tmp));
+  assertEquals(-1361085185, x |= (-1939964707));
+  assertEquals(2, x >>>= (1864136030.7395325));
+  assertEquals(0.794648722849246, x %= ((-668830999)*(((-2227700170.7193384)%(x^(x>>>x)))/(tmp = 399149892, tmp))));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x *= x);
+  assertEquals(0, x &= ((tmp = -2389008496.5948563, tmp)|((((tmp = -2635919193.905919, tmp)*((-64464127)<<(2136112830.1317358)))>>((184057979)*(-1204959085.8362718)))>>>(-442946870.3341484))));
+  assertEquals(-243793920, x -= ((tmp = 3002998032, tmp)<<((537875759)<<x)));
+  assertEquals(0, x -= x);
+  assertEquals(0, x *= ((((66852616.82442963)/((((x^x)&(2975318321.223734))+(((tmp = -1388210811.1249495, tmp)^((((-680567297.7620237)%(x-(tmp = -672906716.4672911, tmp)))-x)*(tmp = -1452125821.0132627, tmp)))*(((2770387154.5427895)%x)%x)))-x))<<((-1481832432.924325)>>(tmp = 3109693867, tmp)))>>>(x/(((((((tmp = 928294418, tmp)^(((-1018314535)/(tmp = -3167523001, tmp))%((((((tmp = -1639338126, tmp)-(tmp = -2613558829, tmp))&x)/x)%(tmp = 513624872, tmp))/((-520660667)&x))))*(2620452414))^((tmp = 2337189239.5949326, tmp)*(3200887846.7954993)))>>>((tmp = 1173330667, tmp)^x))<<x)>>(((tmp = -2475534594.982338, tmp)*x)|x)))));
+  assertEquals(0, x /= (2520915286));
+  assertEquals(0, x &= x);
+  assertEquals(0, x >>= (-1908119327));
+  assertEquals(0, x >>>= (tmp = 549007635, tmp));
+  assertEquals(0, x >>= (-994747873.8117285));
+  assertEquals(0, x <<= ((((x>>>((-3084793026.846681)%((1107295502)&(tmp = -296613957.8133817, tmp))))&((19637717.166736007)/(x+x)))+x)/(-2479724242)));
+  assertEquals(-695401420, x += (-695401420));
+  assertEquals(-695401394, x += (x>>>(tmp = 2340097307.6556053, tmp)));
+  assertEquals(-555745552, x -= (x|(-483851950.68644)));
+  assertEquals(-17825792, x <<= x);
+  assertEquals(-17825792, x >>= x);
+  assertEquals(-17, x %= ((tmp = 1799361095, tmp)|((x>>(((-1201252592)<<((((543273288)+(-2859945716.606924))*x)<<((-3030193601)<<(3081129914.9217644))))|((1471431587.981769)>>(-246180750))))|(((tmp = -2689251055.1605787, tmp)>>x)&(((2131333169)^x)-((tmp = -951555489, tmp)/x))))));
+  assertEquals(-8912896, x <<= (1146444211));
+  assertEquals(2854567584, x += (tmp = 2863480480, tmp));
+  assertEquals(426232502.24151134, x %= (1214167540.8792443));
+  assertEquals(1806802048, x ^= (-2368317898));
+  assertEquals(432537600, x <<= (tmp = 2831272652.589364, tmp));
+  assertEquals(432537600, x %= (((1713810619.3880467)-x)&((-2853023009.553296)&(tmp = -3158798098.3355417, tmp))));
+  assertEquals(-509804066, x += (tmp = -942341666, tmp));
+  assertEquals(-509804066, x %= (-732349220));
+  assertEquals(259900185710132350, x *= x);
+  assertEquals(711598501.7021885, x %= ((tmp = 2020395586.2280731, tmp)-(tmp = 3031459563.1386633, tmp)));
+  assertEquals(711598503.0618857, x += ((tmp = 967558548.4141241, tmp)/x));
+  assertEquals(711598503, x &= x);
+  assertEquals(711598503, x ^= (((((1609355669.1963444)+((((tmp = -2660082403.258437, tmp)+(tmp = -235367868, tmp))&(x/x))*((-2595932186.69466)|((tmp = -3039202860, tmp)<<x))))>>>(-951354869))-((tmp = -691482949.6335375, tmp)/(tmp = -1735502400, tmp)))/(tmp = 798440377, tmp)));
+  assertEquals(558262613882868500, x *= (784519095.4299527));
+  assertEquals(558262611968479000, x -= ((((tmp = 1039039153.4026555, tmp)/(-3138845051.6240187))*(tmp = 633557994, tmp))&(1981507217)));
+  assertEquals(1170427648, x |= ((x>>((((-1086327124)%((tmp = -1818798806.368613, tmp)^(tmp = 2183576654.9959817, tmp)))>>x)&((((((tmp = 1315985464.0330539, tmp)&(2774283689.333836))%x)*((2722693772.8994813)&(tmp = -2720671984.945404, tmp)))^(tmp = -76808019, tmp))<<((tmp = 685037799.2336662, tmp)^((tmp = 1057250849, tmp)&(tmp = 1469205111.2989025, tmp))))))+(x*(((tmp = 448288818.47173154, tmp)-(-2527606231))-((8387088.402292728)>>x)))));
+  assertEquals(558, x >>>= (tmp = 2732701109, tmp));
+  assertEquals(558, x &= x);
+  assertEquals(-0.00015855057024653912, x /= ((x+(((tmp = -1963815633, tmp)-(x>>x))-((x|x)>>x)))/x));
+  assertEquals(1.3458861596445712e-13, x /= (-1178038492.4116466));
+  assertEquals(0, x <<= (-104550232));
+  assertEquals(0, x >>>= (x>>(tmp = -255275244.12613606, tmp)));
+  assertEquals(0, x >>= x);
+  assertEquals(375, x |= ((1576819294.6991196)>>>(-2570246122)));
+  assertEquals(96000, x <<= ((2252913843.0150948)>>>(-49239716)));
+  assertEquals(6144000, x <<= ((((tmp = -2478967279, tmp)&((x%((tmp = -1705332610.8018858, tmp)+(x+(tmp = 590766349, tmp))))<<(tmp = 1759375933, tmp)))+(-2024465658.849834))&(1564539207.3650014)));
+  assertEquals(-1149239296, x <<= (1862803657.7241006));
+  assertEquals(-9, x >>= (((tmp = 463306384.05696774, tmp)^x)|((x>>((((-2098070856.799663)<<((-2054870274.9012866)<<(((-2582579691)/(829257170.0266814))<<(((((tmp = -1753535573.7074275, tmp)<<((x>>(-197886116))%((2487188445)%(tmp = 2465391564.873364, tmp))))&(((tmp = -500069832, tmp)&(tmp = 3016637032, tmp))&((tmp = 2525942628, tmp)|((((-920996215)|x)^((((tmp = -687548533.419106, tmp)&(1423222636.058937))<<((tmp = -1096532228, tmp)>>((((tmp = -3124481449.2740726, tmp)^(tmp = 2724328271.808975, tmp))>>x)*x)))+(-1661789589.5808442)))+(((x*(tmp = -1224371664.9549093, tmp))^((tmp = 3202970043, tmp)^x))/(tmp = 131494054.58501709, tmp))))))|(((tmp = -1654136720, tmp)<<x)>>((1652979932.362416)-(tmp = -863732721, tmp))))^(-113307998)))))^(-90820449.91417909))*((tmp = 641519890, tmp)-((((x<<(tmp = 2349936514.071881, tmp))*(2324420443.587892))^x)%(x<<((tmp = -1838473742, tmp)/(((-3154172718.4274178)-x)+x)))))))|(x>>>((tmp = 2096024376.4308293, tmp)<<x)))));
+  assertEquals(81, x *= x);
+  assertEquals(81, x &= x);
+  assertEquals(81, x %= (tmp = 2223962994, tmp));
+  assertEquals(81, x ^= ((x/(((-1606183420.099584)|(-1242175583))&(((x|((tmp = 828718431.3311573, tmp)/(x>>x)))+(((-2207542725.4531174)^(x*x))*(tmp = 551575809.955105, tmp)))/x)))&((x>>x)&x)));
+  assertEquals(81, x %= (tmp = 279598358.6976975, tmp));
+  assertEquals(101.72338484518858, x -= (((tmp = 2452584495.44003, tmp)%((-1181192721)+(((x>>(((x&x)^x)+((x>>>((x+(-2472793823.57181))/(((2854104951)>>(-1208718359.6554642))>>>(1089411895.694705))))/(x|(-2821482890.1780205)))))^(-1786654551))/(-29404242.70557475))))/(((-4352531)<<((-1227287545)<<x))%(-2558589438))));
+  assertEquals(101.72338484518858, x %= (-943645643));
+  assertEquals(0, x -= x);
+  assertEquals(0, x >>>= (-2440404084));
+  assertEquals(0, x >>= (tmp = 1029680958.405923, tmp));
+  assertEquals(0, x >>>= (1213820208.7204895));
+  assertEquals(-0, x /= (tmp = -103093683, tmp));
+  assertEquals(0, x >>>= (-2098144813));
+  assertEquals(-0, x /= (((-3087283334)+(((tmp = -3129028112.6859293, tmp)%(tmp = 2413829931.1605015, tmp))-(2578195237.8071446)))|x));
+  assertEquals(-15, x |= ((((-178926550.92823577)>>>(-965071271))^((tmp = -484633724.7237625, tmp)-(tmp = 473098919.1486404, tmp)))>>((-2264998310.203265)%(tmp = -499034672, tmp))));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x >>= (((-3207915976.698118)<<(tmp = 2347058630, tmp))|(tmp = -2396250098.559627, tmp)));
+  assertEquals(NaN, x %= x);
+  assertEquals(NaN, x *= (621843222));
+  assertEquals(0, x >>= (((-2409032228.7238913)*x)-(tmp = -887793239, tmp)));
+  assertEquals(NaN, x /= x);
+  assertEquals(1193017666, x ^= (tmp = 1193017666, tmp));
+  assertEquals(3.5844761899682753, x /= (tmp = 332829011.206393, tmp));
+  assertEquals(-888572929, x |= (((tmp = 1032409228, tmp)+(tmp = -1920982163.7853453, tmp))+x));
+  assertEquals(-1817051951333455600, x *= (((-1506265102)^(tmp = -775881816, tmp))-(tmp = -32116372.59181881, tmp)));
+  assertEquals(-1638479616, x |= x);
+  assertEquals(-114489, x %= (((tmp = -247137297.37866855, tmp)>>>((((((-322805409)-x)^x)>>((((((((x>>>(tmp = -900610424.7148039, tmp))/(-1155208489.6240904))|((-2874045803)|(tmp = 3050499811, tmp)))+(x/((tmp = -613902712, tmp)^((-982142626.2892077)*((((tmp = -3201753245.6026397, tmp)|((1739238762.0423079)^x))/(243217629.47237313))^((tmp = -11944405.987132788, tmp)/(tmp = 2054031985.633406, tmp)))))))*(tmp = 2696108952.450961, tmp))*x)>>>(tmp = 3058430643.0660386, tmp))>>(x<<x)))>>(-984468302.7450335))%((tmp = 1302320585.246251, tmp)>>>x)))%(tmp = -2436842285.8208156, tmp)));
+  assertEquals(2047, x >>>= (2380161237));
+  assertEquals(0, x >>= x);
+  assertEquals(0, x &= (tmp = 980821012.975836, tmp));
+  assertEquals(-1090535537, x -= ((-3064511503.1214876)&((tmp = -2598316939.163751, tmp)<<((tmp = -969452391.8925576, tmp)*x))));
+  assertEquals(-2181071074, x += x);
+  assertEquals(1, x >>>= ((2902525386.449062)>>x));
+  assertEquals(1, x += (x&(tmp = -2643758684.6636515, tmp)));
+  assertEquals(1, x %= ((tmp = -2646526891.7004848, tmp)/x));
+  assertEquals(448735695.7888887, x -= (tmp = -448735694.7888887, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(1, x >>= ((-480385726)<<(2641021142)));
+  assertEquals(1, x %= (375099107.9200462));
+  assertEquals(1, x >>= (((x&((tmp = -2402469116.9903326, tmp)%(tmp = -2862459555.860298, tmp)))*(tmp = -2834162871.0586414, tmp))%(((x>>>(tmp = 721589907.5073895, tmp))*(x^x))%(((tmp = 2844611489.231776, tmp)^((983556913)&(906035409.6693488)))^(x>>>(1239322375))))));
+  assertEquals(268435456, x <<= (tmp = 178807644.80966163, tmp));
+  assertEquals(44, x %= ((tmp = 2527026779.081539, tmp)>>>(2736129559)));
+  assertEquals(88, x += x);
+  assertEquals(0, x >>>= x);
+  assertEquals(0, x -= x);
+  assertEquals(-1523121602, x |= (2771845694));
+  assertEquals(-2, x >>= x);
+  assertEquals(-4, x += x);
+  assertEquals(-256, x <<= (((2522793132.8616533)>>(tmp = 77232772.94058788, tmp))+(3118669244.49152)));
+  assertEquals(4294967040, x >>>= x);
+  assertEquals(-256, x &= x);
+  assertEquals(1278370155.835435, x -= (-1278370411.835435));
+  assertEquals(-3.488228054921667, x /= (tmp = -366481243.6881058, tmp));
+  assertEquals(1.162742684973889, x /= ((x|(((((2404819175.562809)*(tmp = -2524589506, tmp))&(tmp = -675727145, tmp))>>>(x*x))&((-413250006)<<(tmp = 2408322715, tmp))))|((2940367603)>>>x)));
+  assertEquals(0, x >>>= ((2513665793)-(tmp = 1249857454.3367786, tmp)));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x ^= x);
+  assertEquals(1989998348.6336238, x -= (-1989998348.6336238));
+  assertEquals(903237918.986834, x %= (1086760429.6467898));
+  assertEquals(-4.4185765232981975, x /= (-204418304));
+  assertEquals(1471621914, x ^= (tmp = -1471621914.1771696, tmp));
+  assertEquals(1471621914, x |= ((((((x<<(tmp = -2676407394.536844, tmp))%(((343324258)+(x/(x>>(((-221193011)>>>x)|x))))>>(((-2737713893)^((tmp = -49214797.00735545, tmp)+((-2818106123.172874)/(tmp = -2361786565.3028684, tmp))))<<(1859353297.6355076))))*(tmp = -751970685, tmp))|((tmp = 2502717391.425871, tmp)/(tmp = -2647169430, tmp)))*((tmp = -1647567294, tmp)&(((tmp = 1819557651, tmp)/x)>>((((-3073469753)/x)-(((tmp = -1973810496.6407511, tmp)&((x-(x+(tmp = -2986851659, tmp)))>>>(tmp = -2226975699, tmp)))|(418770782.142766)))<<x))))*(((((tmp = 125466732, tmp)/((((1453655756.398259)|(((874792086.7064595)-(194880772.91499102))>>>x))%(x<<(tmp = -1445557137, tmp)))<<x))>>>(tmp = -1953751906, tmp))/((tmp = -2140573172.2979035, tmp)*((-108581964)^x)))|(-481484013.0393069))));
+  assertEquals(1454179065, x += ((tmp = 947147038.2829313, tmp)|(tmp = -154822975.3629098, tmp)));
+  assertEquals(1, x /= x);
+  assertEquals(1, x %= ((((((tmp = -2262250297.991866, tmp)-(tmp = 481953960, tmp))/(1629215187.6020458))|(2515244216))>>>((tmp = -3040594752.2184515, tmp)-(tmp = -1116041279, tmp)))^(((-182133502)-(1065160192.6609197))+(((((-1850040207)^(tmp = -1570328610, tmp))^(tmp = 20542725.09256518, tmp))*x)|(2386866629)))));
+  assertEquals(1, x &= (2889186303));
+  assertEquals(0, x >>= (((-1323093107.050538)>>(x%x))-(((((((-1736522840)+(tmp = -2623890690.8318863, tmp))*(959395040.5565329))*(233734920))<<((x+(x%((tmp = -2370717284.4370327, tmp)%(tmp = 2109311949, tmp))))-(tmp = -1005532894, tmp)))|(861703605))>>>((2399820772)/x))));
+  assertEquals(0, x >>= x);
+  assertEquals(57233408, x |= ((tmp = 2655923764.4179816, tmp)*(-1353634624.3025436)));
+  assertEquals(997939728, x |= (980552208.9005274));
+  assertEquals(1859642592476610800, x *= (1863481872));
+  assertEquals(-977190656, x <<= x);
+  assertEquals(4.378357529141239e+26, x *= ((((x/(((tmp = 2429520991, tmp)/(x/(tmp = 784592802, tmp)))-(tmp = -2704781982, tmp)))*(tmp = -2161015768.2322354, tmp))&((((-3164868762)>>(tmp = 2390893153.32907, tmp))^x)>>(-2422626718.322538)))*(tmp = 278291869, tmp)));
+  assertEquals(4.378357529141239e+26, x -= (1710777896.992369));
+  assertEquals(0, x &= (((((tmp = -2532956158.400033, tmp)|((2195255831.279001)|(1051047432)))|(-1628591858))|(tmp = -2042607521.947963, tmp))>>((-1471225208)/(((-133621318)>>(1980416325.7358408))*((1741069593.1036062)-(x|(2133911581.991011)))))));
+  assertEquals(-0, x /= (-656083507));
+  assertEquals(NaN, x += ((tmp = -1071410982.2789869, tmp)%x));
+  assertEquals(NaN, x *= (tmp = -1513535145.3146675, tmp));
+  assertEquals(0, x >>= ((2831245247.5267224)>>(x<<((x+(((3068824580.7922907)|(1708295544.275714))*((tmp = -1662930228.1170444, tmp)-(((tmp = 1979994889, tmp)<<(tmp = -1826911988, tmp))&((x/(x<<(1909384611.043981)))+(1958052414.7139997))))))<<(tmp = 2481909816.56558, tmp)))));
+  assertEquals(0, x *= (((tmp = -2979739958.1614842, tmp)&x)+x));
+  assertEquals(-0, x *= ((-332769864.50313234)^x));
+  assertEquals(0, x >>= ((((689018886.1436445)+(tmp = -2819546038.620694, tmp))|(((tmp = -1459669934.9066005, tmp)|x)/x))<<(((tmp = 2640360389, tmp)/((x%((-1947492547.9056122)%((1487212416.2083092)-(-1751984129))))^x))%(tmp = 2666842881, tmp))));
+  assertEquals(-1801321460, x |= (tmp = 2493645836, tmp));
+  assertEquals(-1801321460, x %= (2400405136));
+  assertEquals(-2905399858195810300, x *= (tmp = 1612926911, tmp));
+  assertEquals(-2905399858195810300, x -= (x>>(tmp = 1603910263.9593458, tmp)));
+  assertEquals(-238798848, x &= ((tmp = -2638646212.767516, tmp)/(((tmp = 1755616291.436998, tmp)>>>(tmp = 1083349775, tmp))-(x%(((tmp = 1728859105.53634, tmp)^(1931522619.0403612))/(tmp = 712460587.0025489, tmp))))));
+  assertEquals(-2363873607.2302856, x += (-2125074759.230286));
+  assertEquals(1712665, x &= (((117229515)>>>(((1707090894.1915488)>>>((-1696008695)>>(((-1045367326.7522249)<<(tmp = -209334716, tmp))-x)))|(-1707909786.080653)))%(1260761349.172689)));
+  assertEquals(1073741824, x <<= (tmp = -289437762.34742975, tmp));
+  assertEquals(1073741824, x &= (tmp = 2079141140, tmp));
+  assertEquals(0, x <<= ((x^(-3139646716.1615124))-(((-362323071.74237394)|(tmp = 2989896849, tmp))*(tmp = -218217991, tmp))));
+  assertEquals(0, x &= (tmp = -1476835288.425903, tmp));
+  assertEquals(0, x >>>= (tmp = 61945262.70868635, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(-2735263498.7189775, x -= (2735263498.7189775));
+  assertEquals(-1182289920, x <<= (x+x));
+  assertEquals(-1182289580, x ^= ((2858446263.2258)>>>(2387398039.6273785)));
+  assertEquals(696693056, x &= ((2178665823)*(-51848583)));
+  assertEquals(1652555776, x <<= (((tmp = 2943916975, tmp)-((-1544273901)>>(-1671503106.2896929)))|x));
+  assertEquals(6455296, x >>>= (tmp = 1492638248.675439, tmp));
+  assertEquals(2097152, x &= (((x|x)*(2873891571.7000637))^((2165264807)+(tmp = 451721563, tmp))));
+  assertEquals(2097152, x %= (tmp = 1089484582.1455994, tmp));
+  assertEquals(2097152, x <<= x);
+  assertEquals(2097152, x &= ((tmp = 119096343.4032247, tmp)^((-1947874541)*x)));
+  assertEquals(0, x &= (tmp = 2363070677, tmp));
+  assertEquals(0, x &= ((tmp = -1897325383, tmp)>>>((2368480527)>>>((tmp = 1837528979, tmp)*(-1838904077)))));
+  assertEquals(-1898659416, x ^= (-1898659416.1125412));
+  assertEquals(-725506048, x <<= x);
+  assertEquals(1392943104, x <<= (295287938.9104482));
+  assertEquals(-63620329, x ^= ((tmp = -3175925826.5573816, tmp)-(tmp = 2474613927, tmp)));
+  assertEquals(-1135111726, x -= ((tmp = -1133259081, tmp)^(((tmp = -742228219, tmp)>>((-7801909.587711811)%((tmp = -642758873, tmp)+(tmp = 2893927824.6036444, tmp))))^((tmp = -2145465178.9142997, tmp)+x))));
+  assertEquals(0, x ^= x);
+  assertEquals(660714589, x |= (660714589));
+  assertEquals(660714676, x ^= ((-376720042.8047826)>>>(2196220344)));
+  assertEquals(660714676, x |= ((((((((x<<(-1140465568))-(tmp = -1648489774.1573918, tmp))%(((tmp = -2955505390.573639, tmp)*x)<<((((tmp = -1769375963, tmp)*(tmp = -440619797, tmp))&((tmp = 1904284066, tmp)%(-2420852665.0629807)))+(-324601009.2063596))))>>(tmp = 2317210783.9757776, tmp))^((tmp = 750057067.4541628, tmp)^(tmp = -1391814244.7286487, tmp)))>>((344544658.6054913)%((tmp = -1508630423.218488, tmp)&(tmp = 1918909238.2974637, tmp))))>>((-647746783.685822)&(tmp = 2444858958.3595476, tmp)))&x));
+  assertEquals(-962337195, x ^= (tmp = -507358495.30825853, tmp));
+  assertEquals(-182008925.58535767, x %= (tmp = -195082067.35366058, tmp));
+  assertEquals(502070, x >>>= (tmp = 1459732237.1447744, tmp));
+  assertEquals(-2391009930.7235765, x -= (tmp = 2391512000.7235765, tmp));
+  assertEquals(1568669696, x <<= x);
+  assertEquals(0, x <<= (tmp = -571056688.2717848, tmp));
+  assertEquals(1770376226, x ^= (tmp = 1770376226.0584736, tmp));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x &= ((((x<<x)>>>x)|x)|(((tmp = -2141573723, tmp)^x)|(64299956))));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x &= x);
+  assertEquals(0, x <<= (1106060336.7362857));
+  assertEquals(-0, x /= (x|(tmp = 2760823963, tmp)));
+  assertEquals(0, x <<= ((-2436225757)|(-1800598694.4062433)));
+  assertEquals(0, x >>>= ((-728332508.9870625)<<x));
+  assertEquals(-173377680, x ^= ((tmp = -173377680, tmp)%(tmp = -2843994892, tmp)));
+  assertEquals(-173377680, x |= ((((-819217898)&(tmp = -1321650255, tmp))&(x+((x^x)<<((1700753064)>>((((((-1038799327)>>((782275464)^x))-(tmp = -2113814317.8539028, tmp))>>(2143804838))&x)-((2970418921)/(-3073015285.6587048)))))))&((-1759593079.4077306)%((1699128805)-((tmp = -467193967, tmp)&(((2225788267.3466334)*(((2687946762.5504274)+x)>>>x))<<(-1853556066.880512)))))));
+  assertEquals(-0.5520657226957338, x /= ((tmp = -755493878, tmp)&(tmp = 918108389, tmp)));
+  assertEquals(0.30477656217556287, x *= x);
+  assertEquals(0, x &= ((tmp = -2746007517, tmp)<<(2749629340)));
+  assertEquals(0, x ^= ((x%(tmp = 1683077876, tmp))%(-162706778)));
+  assertEquals(0, x *= (tmp = 10203423, tmp));
+  assertEquals(119043212.1461842, x += (tmp = 119043212.1461842, tmp));
+  assertEquals(587202560, x <<= (tmp = 658697910.7051642, tmp));
+  assertEquals(-138689730, x |= (x-(tmp = 1296317634.5661907, tmp)));
+  assertEquals(-138663011, x -= ((-1751010109.5506423)>>(152829872)));
+  assertEquals(-138663011, x %= (-1266200468));
+  assertEquals(-138663011, x &= (x|((tmp = -571277275.622529, tmp)<<x)));
+  assertEquals(-138663011, x >>= ((971259905.1265712)*(tmp = 2203764981, tmp)));
+  assertEquals(-138663011, x %= (-904715829));
+  assertEquals(-138663011, x |= ((tmp = -2823047885.283391, tmp)>>>(((tmp = 533217000, tmp)|(650754598.7836078))|(-1475565890))));
+  assertEquals(-1610612736, x <<= x);
+  assertEquals(-1610612736, x &= x);
+  assertEquals(163840, x >>>= (-188885010));
+  assertEquals(-1224224814, x |= (tmp = 3070742482, tmp));
+  assertEquals(1498726395213334500, x *= x);
+  assertEquals(1723591210, x |= ((tmp = 615164458, tmp)|x));
+  assertEquals(1721910480, x ^= (x>>>x));
+  assertEquals(4505284605.764313, x -= (tmp = -2783374125.7643127, tmp));
+  assertEquals(-9504912393868483000, x *= (((tmp = 2896651872, tmp)<<(-2896385692.9017262))&(((((tmp = -2081179810.20238, tmp)|(tmp = -2484863999, tmp))>>((tmp = 1560885110.2665749, tmp)/(((tmp = 934324123.4289343, tmp)<<((tmp = -1591614157.0496385, tmp)+x))/(((x%(((tmp = 1672629986.8055913, tmp)%x)>>(tmp = 2116315086.2559657, tmp)))/(((-2687682697.5806303)>>x)/(-2034391222.5029132)))%(x-((((((tmp = 2598594967, tmp)/(((((((2950032233)%x)/x)^(tmp = -2126753451.3732262, tmp))<<(tmp = -3019113473, tmp))+(tmp = -2021220129.2320697, tmp))%((((-587645875.4666483)>>(((((x+x)+x)&(tmp = 533801785, tmp))|x)-((tmp = -2224808495.678903, tmp)/(1501942300))))>>>(-2558947646))>>((2798508249.020792)>>>x))))>>>((1060584557)/((((((((x&x)|(1426725365))>>>(tmp = 1500508838, tmp))>>(-1328705938))*((tmp = -2288009425.598777, tmp)>>>(((2586897285.9759064)%((-1605651559.2122297)>>>(tmp = 1936736684.4887302, tmp)))+((tmp = 2316261040, tmp)^(570340750.353874)))))&(x^((tmp = -2266524143, tmp)-(tmp = 2358520476, tmp))))+(tmp = 1449254900.9222453, tmp))%((-100598196)%((tmp = -2985318242.153491, tmp)>>((620722274.4565848)>>(871118975)))))))<<x)*(tmp = -1287065606.4143271, tmp))>>>(1038059916.2438471)))))))+((x/(-276990308.1264961))&(tmp = 2471016351.2195315, tmp)))|(((((tmp = -1288792769.3210807, tmp)+((tmp = -641817194, tmp)*(x<<(((-1933817364)>>(((tmp = 2084673536, tmp)|x)&x))&(tmp = -2752464480, tmp)))))%((796026752)*x))+(((tmp = -3083359669, tmp)|x)-((715303522)|(tmp = 181297266, tmp))))*(-1691520182.3207517)))));
+  assertEquals(0, x <<= (-2322389800));
+  assertEquals(0, x *= (tmp = 3188682235, tmp));
+  assertEquals(0, x |= (x>>>((tmp = -2729325231.8288336, tmp)^((-393497076.96012783)*(x/(tmp = -2198942459.9466457, tmp))))));
+  assertEquals(0, x ^= x);
+  assertEquals(0, x %= (2835024997.4447937));
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>= (tmp = 1109824126, tmp));
+  assertEquals(0, x <<= (3013043386));
+  assertEquals(206825782.74659085, x -= (-206825782.74659085));
+  assertEquals(-645346761227699500, x *= (-3120243292));
+  assertEquals(6825462, x >>= ((tmp = 1457908135, tmp)<<x));
+  assertEquals(-612366097.9189918, x -= (619191559.9189918));
+  assertEquals(-612306090.9189918, x -= ((2328676543.893506)>>x));
+  assertEquals(0, x ^= (x>>(((x>>>(1856200611.2269292))&(tmp = 2003217473, tmp))%((((((-107135673)+(((3062079356.170611)<<(tmp = -676928983, tmp))>>((tmp = -1487074941.2638814, tmp)|((-1601614031)/(1317006144.5025365)))))+x)*(((1163301641)>>>(448796567))/((x%((tmp = 72293197.34410787, tmp)+(-2304112723)))/((455610361)%(-2799431520)))))>>>(-217305041.09432888))<<(x-(tmp = -2168353649, tmp))))));
+  assertEquals(0, x >>= x);
+  assertEquals(-Infinity, x -= (((-1651597599.8950624)+(1780404320))/x));
+  assertEquals(0, x <<= (tmp = 2246420272.4321294, tmp));
+  assertEquals(0, x *= ((2793605382)-(tmp = -272299011, tmp)));
+  assertEquals(0, x *= x);
+  assertEquals(0, x <<= x);
+  assertEquals(0, x >>= (tmp = 2556413090, tmp));
+  assertEquals(0, x >>= ((tmp = -1784710085, tmp)%x));
+  assertEquals(0, x %= (tmp = -1929880813, tmp));
+  assertEquals(0, x *= (2586983368));
+  assertEquals(0, x &= x);
+  assertEquals(0, x <<= (-2144588807));
+  assertEquals(0, x ^= ((x<<(((((((-596537598)+((x-(((((((tmp = -3179604796, tmp)/((tmp = 1156725365.3543215, tmp)>>>(tmp = -2762144319, tmp)))%(x<<x))&((tmp = 1750241928.1271567, tmp)&(x/((tmp = 1781306819, tmp)|x))))+((((2893068644)/((tmp = -576164593.9720252, tmp)<<((2724671.48995471)&(tmp = -573132475, tmp))))%(tmp = -1355625108, tmp))&(tmp = -302869512.5880568, tmp)))+x)<<x))>>((tmp = -2569172808, tmp)/x)))^x)-(tmp = -1174006275.2213159, tmp))&x)&(((((((-2303274799)>>(tmp = -814839320, tmp))/(tmp = 183887306.09810615, tmp))>>(((tmp = 1054106394.3704875, tmp)|x)>>>x))-(x-(tmp = 1313696830, tmp)))-((tmp = 2373274399.0742035, tmp)|((((tmp = -3163779539.4902935, tmp)*(tmp = -3056125181.726942, tmp))&(((x^(x^(x/((tmp = -576441696.6015451, tmp)<<(tmp = -26223719.920306206, tmp)))))>>(tmp = -2332835940, tmp))|((-146303509.41093707)&(tmp = -2676964025, tmp))))/((((x*(tmp = 1059918020, tmp))|((((2341797349)|(tmp = -744763805.1381104, tmp))<<x)+((2991320875.552578)^(2920702604.701831))))^(-1721756138))^(((tmp = -2794367554, tmp)>>((-2671235923.2097874)<<(x&((((tmp = -621472314.0859051, tmp)-(((x*x)+x)>>>((tmp = 1834038956, tmp)+x)))*x)^(tmp = -2090567586.321468, tmp)))))<<(321395210))))))>>>(tmp = -1207661719, tmp)))+(-2877264053.3805156)))/(x%(tmp = -2226991657.709366, tmp))));
+  assertEquals(0, x *= (tmp = 986904991.061398, tmp));
+  assertEquals(0, x -= (x%(650819306.6671969)));
+  assertEquals(0, x >>>= (905893666.2871252));
+  assertEquals(0, x += (((tmp = 2501942710.4804144, tmp)&x)/((tmp = -851080399.1751502, tmp)-(-1168623992))));
+  assertEquals(-0, x *= (tmp = -2014577821.4554045, tmp));
+  assertEquals(0, x &= (tmp = 1995246018, tmp));
+  assertEquals(0, x %= (1724355237.7031958));
+  assertEquals(-954696411, x += (((-2825222201)+(((1662353496.1795506)>>>(x-x))|(tmp = 225015046, tmp)))^(x&x)));
+  assertEquals(-2158427339993389800, x *= (2260852052.1539803));
+  assertEquals(19559, x >>>= (-862409169.4978967));
+  assertEquals(-0.000012241163878671237, x /= (x^(tmp = 2697144215.160239, tmp)));
+  assertEquals(0, x -= x);
+  assertEquals(1448177644, x |= (tmp = 1448177644.624848, tmp));
+  assertEquals(1448177644, x %= (((-1497553637.4976408)+(402228446))<<x));
+  assertEquals(2304640553, x -= (-856462909));
+  assertEquals(152436736, x &= ((766686903)*(((tmp = 660964683.1744609, tmp)|((((tmp = 297369746, tmp)-(x+((tmp = -2677127146, tmp)/x)))>>(((((((x%(x<<x))-(((((529254728)|((x|(-1407086127.6088922))&(tmp = -1968465008.5000398, tmp)))/(x%x))&((((-2761805265.92574)-x)*(x^(tmp = 110730179, tmp)))%((177220657.06030762)*(((2532585190.671373)/x)+(-1465143151)))))<<((tmp = -3008848338, tmp)<<(-2475597073))))|((-192996756.38619018)|((((1445996780)|(x>>>((((tmp = -2482370545.791443, tmp)*(tmp = -270543594, tmp))^x)*((1346780586)/(tmp = -625613363.885356, tmp)))))-(x<<(x/(-562307527))))&(-125701272))))*((x&x)%(tmp = 752963070, tmp)))>>>(tmp = 17419750.79086232, tmp))*x)^(x^((-157821212.04674292)-(tmp = 503849221.598824, tmp)))))-(tmp = 1479418449, tmp)))>>>((((((-78138548.2193842)<<(((2319032860.806689)-(tmp = -1564963892.5137577, tmp))>>>(-73673322.28957987)))<<((1797573493.3467085)*x))>>(tmp = 759994997, tmp))>>>(-1066441220))&(((((((tmp = 1972048857, tmp)*(((x&((-1347017320.0747669)>>>x))*(-2332716925.705054))%(-376976019.24362826)))>>>((tmp = -466479974, tmp)+x))&(-2282789473.3675604))|(((((((((269205423.7510414)-(tmp = 21919626.105656862, tmp))*((x-(tmp = -378670528, tmp))>>(tmp = -1045706598, tmp)))>>(tmp = -3062647341.234485, tmp))>>>x)|(tmp = -285399599.9386575, tmp))%(tmp = 2731214562, tmp))|((((tmp = 837093165.3438574, tmp)|(tmp = -2956931321, tmp))+((1871874558.3292787)<<((x|((tmp = -3169147427, tmp)%(((x^x)%(1479885041))%((1769991217)%(tmp = -1899472458, tmp)))))*(tmp = -837098563.71806, tmp))))>>(tmp = -1866722748, tmp)))-(2037734340.8345597)))>>((tmp = -1262019180.5332131, tmp)+(x*(1274173993.9800131))))*(tmp = 2336989321.855402, tmp))))));
+  assertEquals(4, x >>= (tmp = -2577728327, tmp));
+  assertEquals(16, x *= (x<<((2622323372.580596)*(tmp = -1947643367, tmp))));
+  assertEquals(33554432, x <<= (tmp = -2938370507, tmp));
+  assertEquals(-2399497018.987414, x -= (tmp = 2433051450.987414, tmp));
+  assertEquals(1, x /= x);
+  assertEquals(2, x <<= x);
+  assertEquals(0, x >>= (x&x));
+  assertEquals(0, x <<= x);
+}
+f();
diff --git a/test/mjsunit/object-define-properties.js b/test/mjsunit/object-define-properties.js
index 6b3725b..128df69 100644
--- a/test/mjsunit/object-define-properties.js
+++ b/test/mjsunit/object-define-properties.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Tests the Object.defineProperties method - ES 15.2.3.7
-// Note that the internal DefineOwnProperty method is tested through 
+// Note that the internal DefineOwnProperty method is tested through
 // object-define-property.js, this file only contains tests specific for
 // Object.defineProperties. Also note that object-create.js contains
 // a range of indirect tests on this method since Object.create uses
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index a8a3213..ee6083a 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -30,28 +30,34 @@
 // Flags: --allow-natives-syntax
 
 // Check that an exception is thrown when null is passed as object.
+var exception = false;
 try {
   Object.defineProperty(null, null, null);
-  assertTrue(false);
 } catch (e) {
+  exception = true;
   assertTrue(/called on non-object/.test(e));
 }
+assertTrue(exception);
 
 // Check that an exception is thrown when undefined is passed as object.
+exception = false;
 try {
   Object.defineProperty(undefined, undefined, undefined);
-  assertTrue(false);
 } catch (e) {
+  exception = true;
   assertTrue(/called on non-object/.test(e));
 }
+assertTrue(exception);
 
 // Check that an exception is thrown when non-object is passed as object.
+exception = false;
 try {
   Object.defineProperty(0, "foo", undefined);
-  assertTrue(false);
 } catch (e) {
+  exception = true;
   assertTrue(/called on non-object/.test(e));
 }
+assertTrue(exception);
 
 // Object.
 var obj1 = {};
@@ -695,12 +701,14 @@
 // Make sure we can redefine with -0.
 Object.defineProperty(obj5, 'minuszero', descMinusZero);
 
+exception = false;
 try {
   Object.defineProperty(obj5, 'minuszero', descPlusZero);
-  assertUnreachable();
 } catch (e) {
+  exception = true;
   assertTrue(/Cannot redefine property/.test(e));
 }
+assertTrue(exception);
 
 
 Object.defineProperty(obj5, 'pluszero', descPlusZero);
@@ -708,12 +716,14 @@
 // Make sure we can redefine with +0.
 Object.defineProperty(obj5, 'pluszero', descPlusZero);
 
+exception = false;
 try {
   Object.defineProperty(obj5, 'pluszero', descMinusZero);
-  assertUnreachable();
 } catch (e) {
+  exception = true;
   assertTrue(/Cannot redefine property/.test(e));
 }
+assertTrue(exception);
 
 
 var obj6 = {};
@@ -761,13 +771,15 @@
 
 // Ensure that we can't change the descriptor of a
 // non configurable property.
+exception = false;
 try {
   var descAccessor = { get: function() { return 0; } };
   Object.defineProperty(obj6, '2', descAccessor);
-  assertUnreachable();
 } catch (e) {
+  exception = true;
   assertTrue(/Cannot redefine property/.test(e));
 }
+assertTrue(exception);
 
 Object.defineProperty(obj6, '2', descElementNonWritable);
 desc = Object.getOwnPropertyDescriptor(obj6, '2');
@@ -858,13 +870,15 @@
 
 // Ensure that we can't change the descriptor of a
 // non configurable property.
+exception = false;
 try {
   var descAccessor = { get: function() { return 0; } };
   Object.defineProperty(arr, '2', descAccessor);
-  assertUnreachable();
 } catch (e) {
+  exception = true;
   assertTrue(/Cannot redefine property/.test(e));
 }
+assertTrue(exception);
 
 Object.defineProperty(arr, '2', descElementNonWritable);
 desc = Object.getOwnPropertyDescriptor(arr, '2');
@@ -1031,3 +1045,12 @@
 testDefineProperty(o, 'p',
   { enumerable : false, configurable : false },
   { get: undefined, set: setter1, enumerable : false, configurable : false });
+
+
+// Regression test: Ensure that growing dictionaries are not ignored.
+o = {};
+for (var i = 0; i < 1000; i++) {
+  // Non-enumerable property forces dictionary mode.
+  Object.defineProperty(o, i, {value: i, enumerable: false});
+}
+assertEquals(999, o[999]);
\ No newline at end of file
diff --git a/test/mjsunit/object-freeze.js b/test/mjsunit/object-freeze.js
index 3eefffd..c3a9278 100644
--- a/test/mjsunit/object-freeze.js
+++ b/test/mjsunit/object-freeze.js
@@ -32,21 +32,25 @@
 // Test that we throw an error if an object is not passed as argument.
 var non_objects = new Array(undefined, null, 1, -1, 0, 42.43);
 for (var key in non_objects) {
+  var exception = false;
   try {
     Object.freeze(non_objects[key]);
-    assertUnreachable();
   } catch(e) {
+    exception = true;
     assertTrue(/Object.freeze called on non-object/.test(e));
   }
+  assertTrue(exception);
 }
 
 for (var key in non_objects) {
+  exception = false;
   try {
     Object.isFrozen(non_objects[key]);
-    assertUnreachable();
   } catch(e) {
+    exception = true;
     assertTrue(/Object.isFrozen called on non-object/.test(e));
   }
+  assertTrue(exception);
 }
 
 // Test normal data properties.
diff --git a/test/mjsunit/object-literal-conversions.js b/test/mjsunit/object-literal-conversions.js
index 8540d93..742f814 100644
--- a/test/mjsunit/object-literal-conversions.js
+++ b/test/mjsunit/object-literal-conversions.js
@@ -43,4 +43,4 @@
 
 assertEquals(7, test5[13]);
 assertEquals(7, test6[17.31]);
-  
\ No newline at end of file
+
diff --git a/test/mjsunit/object-literal-overwrite.js b/test/mjsunit/object-literal-overwrite.js
index 5c58a2d..5a3584d 100644
--- a/test/mjsunit/object-literal-overwrite.js
+++ b/test/mjsunit/object-literal-overwrite.js
@@ -79,7 +79,7 @@
 // Test for the classic code generator.
 
 function fun(x) {
-  var inner = { j: function(x) { return x; }, j: 7 }; 
+  var inner = { j: function(x) { return x; }, j: 7 };
   return inner.j;
 }
 
@@ -115,4 +115,4 @@
 
 var y = fun3();
 assertEquals(7, y);
-assertEquals(3, glob3);
\ No newline at end of file
+assertEquals(3, glob3);
diff --git a/test/mjsunit/object-literal.js b/test/mjsunit/object-literal.js
index 397d670..3d0b33b 100644
--- a/test/mjsunit/object-literal.js
+++ b/test/mjsunit/object-literal.js
@@ -105,7 +105,7 @@
 assertFalse(a.a.c === b.a.c);
 
 
-// Test keywords valid as property names in initializers and dot-access.
+// Test keywords are valid as property names in initializers and dot-access.
 var keywords = [
   "break",
   "case",
@@ -124,7 +124,6 @@
   "if",
   "in",
   "instanceof",
-  "native",
   "new",
   "null",
   "return",
@@ -137,15 +136,18 @@
   "var",
   "void",
   "while",
-  "with",
+  "with"
 ];
 
 function testKeywordProperty(keyword) {
+  var exception = false;
   try {
     // Sanity check that what we get is a keyword.
     eval("var " + keyword + " = 42;");
-    assertUnreachable("Not a keyword: " + keyword);
-  } catch (e) { }
+  } catch (e) {
+    exception = true;
+  }
+  assertTrue(exception);
 
   // Simple property, read and write.
   var x = eval("({" + keyword + ": 42})");
@@ -187,4 +189,4 @@
 
 for (var i = 0; i < keywords.length; i++) {
   testKeywordProperty(keywords[i]);
-}
\ No newline at end of file
+}
diff --git a/test/mjsunit/object-prevent-extensions.js b/test/mjsunit/object-prevent-extensions.js
index dc32342..322a2cb 100644
--- a/test/mjsunit/object-prevent-extensions.js
+++ b/test/mjsunit/object-prevent-extensions.js
@@ -33,7 +33,7 @@
 assertTrue(Object.isExtensible(obj1));
 Object.preventExtensions(obj1);
 
-// Make sure the is_extensible flag is set. 
+// Make sure the is_extensible flag is set.
 assertFalse(Object.isExtensible(obj1));
 obj1.x = 42;
 assertEquals(undefined, obj1.x);
diff --git a/test/mjsunit/object-seal.js b/test/mjsunit/object-seal.js
index 3ce2367..f21baed 100644
--- a/test/mjsunit/object-seal.js
+++ b/test/mjsunit/object-seal.js
@@ -32,21 +32,25 @@
 // Test that we throw an error if an object is not passed as argument.
 var non_objects = new Array(undefined, null, 1, -1, 0, 42.43);
 for (var key in non_objects) {
+  var exception = false;
   try {
     Object.seal(non_objects[key]);
-    assertUnreachable();
   } catch(e) {
+    exception = true;
     assertTrue(/Object.seal called on non-object/.test(e));
   }
+  assertTrue(exception);
 }
 
 for (var key in non_objects) {
+  exception = false;
   try {
     Object.isSealed(non_objects[key]);
-    assertUnreachable();
   } catch(e) {
+    exception = true;
     assertTrue(/Object.isSealed called on non-object/.test(e));
   }
+  assertTrue(exception);
 }
 
 // Test normal data properties.
@@ -91,7 +95,7 @@
 // Since writable is not affected by seal we should still be able to
 // update the values.
 obj.x = "43";
-assertEquals(43, obj.x);
+assertEquals("43", obj.x);
 
 // Test on accessors.
 var obj2 = {};
@@ -142,7 +146,7 @@
 Object.seal(arr);
 assertTrue(Object.isSealed(arr));
 assertFalse(Object.isExtensible(arr));
-// Since the values in the array is still writable this object 
+// Since the values in the array are still writable this object
 // is not frozen.
 assertFalse(Object.isFrozen(arr));
 
@@ -186,5 +190,5 @@
 assertFalse(Object.isSealed(obj4));
 
 // Make sure that Object.seal returns the sealed object.
-var obj4 = {}
-assertTrue(obj4 === Object.seal(obj4))
+var obj4 = {};
+assertTrue(obj4 === Object.seal(obj4));
diff --git a/test/mjsunit/parse-int-float.js b/test/mjsunit/parse-int-float.js
index a4f09df..2e4f648 100644
--- a/test/mjsunit/parse-int-float.js
+++ b/test/mjsunit/parse-int-float.js
@@ -100,4 +100,17 @@
 assertEquals(Infinity, parseFloat(1/0), "parseFloat Infinity");
 assertEquals(-Infinity, parseFloat(-1/0), "parseFloat -Infinity");
 
+var state;
+var throwingRadix = { valueOf: function() { state = "throwingRadix"; throw null; } };
+var throwingString = { toString: function() { state = "throwingString"; throw null; } };
+state = null;
+try { parseInt('123', throwingRadix); } catch (e) {}
+assertEquals(state, "throwingRadix");
 
+state = null;
+try { parseInt(throwingString, 10); } catch (e) {}
+assertEquals(state, "throwingString");
+
+state = null;
+try { parseInt(throwingString, throwingRadix); } catch (e) {}
+assertEquals(state, "throwingString");
diff --git a/test/mjsunit/polymorph-arrays.js b/test/mjsunit/polymorph-arrays.js
new file mode 100644
index 0000000..ff0c433
--- /dev/null
+++ b/test/mjsunit/polymorph-arrays.js
@@ -0,0 +1,177 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+function init_array(a) {
+  for (var i = 0; i < 10; ++i ){
+    a[i] = i;
+  }
+}
+
+function init_sparse_array(a) {
+  for (var i = 0; i < 10; ++i ){
+    a[i] = i;
+  }
+  a[5000000] = 256;
+  assertTrue(%HasDictionaryElements(a));
+}
+
+function testPolymorphicLoads() {
+  function make_polymorphic_load_function() {
+    function load(a, i) {
+      return a[i];
+    }
+
+    var object_array = new Object;
+    var sparse_object_array = new Object;
+    var js_array = new Array(10);
+    var sparse_js_array = new Array(5000001);
+
+    init_array(object_array);
+    init_array(js_array);
+    init_sparse_array(sparse_object_array);
+    init_sparse_array(sparse_js_array);
+
+    assertEquals(1, load(object_array, 1));
+    assertEquals(1, load(js_array, 1));
+    assertEquals(1, load(sparse_object_array, 1));
+    assertEquals(1, load(sparse_js_array, 1));
+
+    return load;
+  }
+
+  var object_array = new Object;
+  var sparse_object_array = new Object;
+  var js_array = new Array(10);
+  var sparse_js_array = new Array(5000001);
+
+  init_array(object_array);
+  init_array(js_array);
+  init_sparse_array(sparse_object_array);
+  init_sparse_array(sparse_js_array);
+
+  load = make_polymorphic_load_function();
+  assertEquals(undefined, load(js_array, new Object()));
+  load = make_polymorphic_load_function();
+  assertEquals(undefined, load(object_array, new Object()));
+  load = make_polymorphic_load_function();
+  assertEquals(undefined, load(sparse_js_array, new Object()));
+  load = make_polymorphic_load_function();
+  assertEquals(undefined, load(sparse_object_array, new Object()));
+
+  // Try with crankshaft.
+  load = make_polymorphic_load_function();
+  %OptimizeFunctionOnNextCall(load);
+  assertEquals(1, load(object_array, 1));
+  assertEquals(1, load(js_array, 1));
+  assertEquals(1, load(sparse_object_array, 1));
+  assertEquals(1, load(sparse_js_array, 1));
+
+  load = make_polymorphic_load_function();
+  %OptimizeFunctionOnNextCall(load);
+  assertEquals(undefined, load(js_array, new Object()));
+  load = make_polymorphic_load_function();
+  %OptimizeFunctionOnNextCall(load);
+  assertEquals(undefined, load(object_array, new Object()));
+  load = make_polymorphic_load_function();
+  %OptimizeFunctionOnNextCall(load);
+  assertEquals(undefined, load(sparse_js_array, new Object()));
+  load = make_polymorphic_load_function();
+  %OptimizeFunctionOnNextCall(load);
+  assertEquals(undefined, load(sparse_object_array, new Object()));
+}
+
+function testPolymorphicStores() {
+  function make_polymorphic_store_function() {
+    function store(a, i, val) {
+      a[i] = val;
+    }
+
+    var object_array = new Object;
+    var sparse_object_array = new Object;
+    var js_array = new Array(10);
+    var sparse_js_array = new Array(5000001);
+
+    init_array(object_array);
+    init_array(js_array);
+    init_sparse_array(sparse_object_array);
+    init_sparse_array(sparse_js_array);
+
+    store(object_array, 1, 256);
+    store(js_array, 1, 256);
+    store(sparse_object_array, 1, 256);
+    store(sparse_js_array, 1, 256);
+
+    return store;
+  }
+
+  var object_array = new Object;
+  var sparse_object_array = new Object;
+  var js_array = new Array(10);
+  var sparse_js_array = new Array(5000001);
+
+  init_array(object_array);
+  init_array(js_array);
+  init_sparse_array(sparse_object_array);
+  init_sparse_array(sparse_js_array);
+
+  store = make_polymorphic_store_function();
+  store(object_array, 2, 257);
+  store = make_polymorphic_store_function();
+  store(js_array, 2, 257);
+  store = make_polymorphic_store_function();
+  store(sparse_object_array, 2, 257);
+  store = make_polymorphic_store_function();
+  store(sparse_js_array, 2, 257);
+
+  assertEquals(257, object_array[2]);
+  assertEquals(257, js_array[2]);
+  assertEquals(257, sparse_js_array[2]);
+  assertEquals(257, sparse_object_array[2]);
+
+  // Now try Crankshaft optimized polymorphic stores
+  store = make_polymorphic_store_function();
+  %OptimizeFunctionOnNextCall(store);
+  store(object_array, 3, 258);
+  store = make_polymorphic_store_function();
+  %OptimizeFunctionOnNextCall(store);
+  store(js_array, 3, 258);
+  store = make_polymorphic_store_function();
+  %OptimizeFunctionOnNextCall(store);
+  store(sparse_object_array, 3, 258);
+  store = make_polymorphic_store_function();
+  %OptimizeFunctionOnNextCall(store);
+  store(sparse_js_array, 3, 258);
+
+  assertEquals(258, object_array[3]);
+  assertEquals(258, js_array[3]);
+  assertEquals(258, sparse_js_array[3]);
+  assertEquals(258, sparse_object_array[3]);
+}
+
+testPolymorphicLoads();
+testPolymorphicStores();
diff --git a/test/mjsunit/property-load-across-eval.js b/test/mjsunit/property-load-across-eval.js
index 5419cc7..98b621e 100644
--- a/test/mjsunit/property-load-across-eval.js
+++ b/test/mjsunit/property-load-across-eval.js
@@ -45,19 +45,21 @@
     assertEquals(2, y);
     assertEquals('global', global_function());
     assertEquals('local', local_function());
+    var exception = false;
     try {
       const_uninitialized();
-      assertUnreachable();
     } catch(e) {
-      // Ignore.
+      exception = true;
     }
+    assertTrue(exception);
     assertEquals('const_global', const_initialized());
+    exception = false;
     try {
       local_const_uninitialized();
-      assertUnreachable();
     } catch(e) {
-      // Ignore.
+      exception = true;
     }
+    assertTrue(exception);
     assertEquals('const_local', local_const_initialized());
     function g() {
       assertEquals(1, x);
@@ -65,19 +67,21 @@
       assertEquals(2, y);
       assertEquals('global', global_function());
       assertEquals('local', local_function());
+      var exception = false;
       try {
         const_uninitialized();
-        assertUnreachable();
       } catch(e) {
-        // Ignore.
+        exception = true;
       }
+      assertTrue(exception);
       assertEquals('const_global', const_initialized());
+      exception = false;
       try {
         local_const_uninitialized();
-        assertUnreachable();
       } catch(e) {
-        // Ignore.
+        exception = true;
       }
+      assertTrue(exception);
       assertEquals('const_local', local_const_initialized());
     }
     g();
diff --git a/test/mjsunit/regexp-call-as-function.js b/test/mjsunit/regexp-call-as-function.js
index 4cbe7f9..add81ac 100644
--- a/test/mjsunit/regexp-call-as-function.js
+++ b/test/mjsunit/regexp-call-as-function.js
@@ -33,4 +33,4 @@
 var subject = "xyzabcde";
 var expected = 'abc,b,c';
 assertEquals(expected, String(regexp.exec(subject)));
-assertEquals(expected, String(regexp(subject)));
+assertThrows(function(){ regexp(subject); });
diff --git a/test/mjsunit/regexp-capture.js b/test/mjsunit/regexp-capture.js
index dc24491..8aae717 100755
--- a/test/mjsunit/regexp-capture.js
+++ b/test/mjsunit/regexp-capture.js
@@ -39,19 +39,20 @@
 assertEquals("z", "y".replace(/(x)?\1y/, "z"));
 assertEquals("", "y".replace(/(x)?y/, "$1"));
 assertEquals("undefined", "y".replace(/(x)?\1y/,
-    function($0, $1){ 
-        return String($1); 
+    function($0, $1){
+        return String($1);
     }));
-assertEquals("undefined", "y".replace(/(x)?y/, 
-    function($0, $1){ 
-        return String($1); 
+assertEquals("undefined", "y".replace(/(x)?y/,
+    function($0, $1){
+        return String($1);
     }));
-assertEquals("undefined", "y".replace(/(x)?y/, 
-    function($0, $1){ 
-        return $1; 
+assertEquals("undefined", "y".replace(/(x)?y/,
+    function($0, $1){
+        return $1;
     }));
 
 // See https://bugzilla.mozilla.org/show_bug.cgi?id=476146
-assertEquals("bbc,b", /^(b+|a){1,2}?bc/.exec("bbc"));
-assertEquals("bbaa,a,,a", /((\3|b)\2(a)){2,}/.exec("bbaababbabaaaaabbaaaabba"));
+assertEquals(["bbc", "b"], /^(b+|a){1,2}?bc/.exec("bbc"));
+assertEquals(["bbaa", "a", "", "a"],
+             /((\3|b)\2(a)){2,}/.exec("bbaababbabaaaaabbaaaabba"));
 
diff --git a/test/mjsunit/regexp-compile.js b/test/mjsunit/regexp-compile.js
index 6f8e751..6a24325 100644
--- a/test/mjsunit/regexp-compile.js
+++ b/test/mjsunit/regexp-compile.js
@@ -27,16 +27,16 @@
 
 // Test that we don't cache the result of a regexp match across a
 // compile event.
-var re = /x/; 
+var re = /x/;
 assertEquals("a.yb", "axyb".replace(re, "."));
 
-re.compile("y")
+re.compile("y");
 assertEquals("ax.b", "axyb".replace(re, "."));
 
 re.compile("(x)");
 
-assertEquals("x,x", re.exec("axyb"));
+assertEquals(["x", "x"], re.exec("axyb"));
 
 re.compile("(y)");
 
-assertEquals("y,y", re.exec("axyb"));
+assertEquals(["y", "y"], re.exec("axyb"));
diff --git a/test/mjsunit/regexp-static.js b/test/mjsunit/regexp-static.js
index 9e73f3d..0f84968 100644
--- a/test/mjsunit/regexp-static.js
+++ b/test/mjsunit/regexp-static.js
@@ -134,7 +134,8 @@
 assertEquals('abcd', 'abcd'.replace(re, f));
 
 // lastParen where the last parenthesis didn't match.
-assertEquals("foo,", /foo(?:a(x))?/.exec("foobx"), "lastParen setup");
+assertEquals(["foo",undefined], /foo(?:a(x))?/.exec("foobx"),
+             "lastParen setup");
 assertEquals("", RegExp.lastParen, "lastParen");
 
 // The same test for $1 to $9.
diff --git a/test/mjsunit/regexp-string-methods.js b/test/mjsunit/regexp-string-methods.js
index ef3bf6e..56604a6 100644
--- a/test/mjsunit/regexp-string-methods.js
+++ b/test/mjsunit/regexp-string-methods.js
@@ -28,18 +28,18 @@
 // Regexp shouldn't use String.prototype.slice()
 var s = new String("foo");
 assertEquals("f", s.slice(0,1));
-String.prototype.slice = function() { return "x"; }
+String.prototype.slice = function() { return "x"; };
 assertEquals("x", s.slice(0,1));
-assertEquals("g", /g/.exec("gg"));
+assertEquals(["g"], /g/.exec("gg"));
 
 // Regexp shouldn't use String.prototype.charAt()
 var f1 = new RegExp("f", "i");
-assertEquals("F", f1.exec("F"));
+assertEquals(["F"], f1.exec("F"));
 assertEquals("f", "foo".charAt(0));
 String.prototype.charAt = function(idx) { return 'g'; };
 assertEquals("g", "foo".charAt(0));
 var f2 = new RegExp("[g]", "i");
-assertEquals("G", f2.exec("G"));
+assertEquals(["G"], f2.exec("G"));
 assertTrue(f2.ignoreCase);
 
 // On the other hand test is defined in a semi-coherent way as a call to exec.
@@ -47,5 +47,5 @@
 // We match other browsers in using the original value of RegExp.prototype.exec.
 // I.e., RegExp.prototype.test shouldn't use the current value of
 // RegExp.prototype.exec.
-RegExp.prototype.exec = function(string) { return 'x'; }
+RegExp.prototype.exec = function(string) { return 'x'; };
 assertFalse(/f/.test('x'));
diff --git a/test/mjsunit/regexp.js b/test/mjsunit/regexp.js
index 24e1b21..3c4f883 100644
--- a/test/mjsunit/regexp.js
+++ b/test/mjsunit/regexp.js
@@ -333,9 +333,9 @@
 
 // Check decimal escapes doesn't overflow.
 // (Note: \214 is interpreted as octal).
-assertEquals(/\2147483648/.exec("\x8c7483648"),
-             ["\x8c7483648"],
-             "Overflow decimal escape");
+assertArrayEquals(["\x8c7483648"],
+                  /\2147483648/.exec("\x8c7483648"),
+                  "Overflow decimal escape");
 
 
 // Check numbers in quantifiers doesn't overflow and doesn't throw on
@@ -435,8 +435,8 @@
 re.lastIndex = 42;
 re.someOtherProperty = 42;
 re.someDeletableProperty = 42;
-re[37] = 37;  
-re[42] = 42;  
+re[37] = 37;
+re[42] = 42;
 
 re.compile("ra+", "i");
 assertEquals("ra+", re.source);
@@ -466,7 +466,7 @@
 assertEquals(37, re[42]);
 
 // Test boundary-checks.
-function assertRegExpTest(re, input, test) { 
+function assertRegExpTest(re, input, test) {
   assertEquals(test, re.test(input), "test:" + re + ":" + input);
 }
 
@@ -525,7 +525,7 @@
   assertEquals(1, res.index);
   assertEquals("axyzb", res.input);
   assertEquals(undefined, res.foobar);
-  
+
   res.foobar = "Arglebargle";
   res[3] = "Glopglyf";
   assertEquals("Arglebargle", res.foobar);
@@ -534,18 +534,18 @@
 // Test that we perform the spec required conversions in the correct order.
 var log;
 var string = "the string";
-var fakeLastIndex = { 
-      valueOf: function() { 
+var fakeLastIndex = {
+      valueOf: function() {
         log.push("li");
         return 0;
-      } 
+      }
     };
-var fakeString = { 
+var fakeString = {
       toString: function() {
         log.push("ts");
         return string;
-      }, 
-      length: 0 
+      },
+      length: 0
     };
 
 var re = /str/;
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-100409.js
similarity index 79%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-100409.js
index aa93b25..c29250f 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-100409.js
@@ -25,12 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function outer () {
+  var val = 0;
+
+  function foo () {
+    val = 0;
+    val;
+    var z = false;
+    var y = true;
+    if (!z) {
+      while (z = !z) {
+        if (y) val++;
+      }
+    }
+    return val++;
+  }
+
+  return foo;
 }
 
-test();
+
+var foo = outer();
+
+assertEquals(1, foo());
+assertEquals(1, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo());
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-103259.js
similarity index 87%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-103259.js
index aa93b25..447073c 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-103259.js
@@ -25,12 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+var a = [];
+a[8192] = '';
+assertTrue(%HasDictionaryElements(a));
+var uc16 = '\u0094';
+var test = uc16;
+for (var i = 0; i < 13; i++) test += test;
+assertEquals(test, a.join(uc16));
diff --git a/test/mjsunit/regress/regress-1081309.js b/test/mjsunit/regress/regress-1081309.js
index 009ede1..5a6c524 100644
--- a/test/mjsunit/regress/regress-1081309.js
+++ b/test/mjsunit/regress/regress-1081309.js
@@ -67,7 +67,7 @@
     // The expected backtrace is
     // 1: g
     // 0: [anonymous]
-    
+
     // Get the debug command processor.
     var dcp = exec_state.debugCommandProcessor(false);
 
diff --git a/test/mjsunit/regress/regress-1092.js b/test/mjsunit/regress/regress-1092.js
index 0b29231..00422cb 100644
--- a/test/mjsunit/regress/regress-1092.js
+++ b/test/mjsunit/regress/regress-1092.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that CodeGenerator::EmitKeyedPropertyAssignment for the start 
+// Test that CodeGenerator::EmitKeyedPropertyAssignment for the start
 // of an initialization block doesn't normalize the properties of the
 // JSGlobalProxy.
 this.w = 0;
diff --git a/test/mjsunit/regress/regress-1099.js b/test/mjsunit/regress/regress-1099.js
index 0ed6ede..36f09e4 100644
--- a/test/mjsunit/regress/regress-1099.js
+++ b/test/mjsunit/regress/regress-1099.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 // Test that LApplyArguments lithium instruction restores context after the call.
 
 function X() {
@@ -41,6 +43,9 @@
 
 var y = Y(X());
 
-for (var i = 0; i < 1000000; i++) {
+for (var i = 0; i < 5; i++) {
   assertTrue(y("foo"));
 }
+
+%OptimizeFunctionOnNextCall(y);
+assertTrue(y("foo"));
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-1110.js b/test/mjsunit/regress/regress-1110.js
index 204a87b..43b8d77 100644
--- a/test/mjsunit/regress/regress-1110.js
+++ b/test/mjsunit/regress/regress-1110.js
@@ -29,7 +29,7 @@
 
 try {
   function Crash() { continue;if (Crash) {
-    } } 
+    } }
   Crash();
   assertTrue(false);
 } catch (e) {
diff --git a/test/mjsunit/regress/regress-1118.js b/test/mjsunit/regress/regress-1118.js
index 84f96e4..7e0461d 100644
--- a/test/mjsunit/regress/regress-1118.js
+++ b/test/mjsunit/regress/regress-1118.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 // An exception thrown in a function optimized by on-stack replacement (OSR)
 // should be able to construct a receiver from all optimized stack frames.
 
@@ -39,10 +41,22 @@
 // inlined.
 function g() { try { return o.f(); } finally { }}
 
+// Optimization status (see runtime.cc):
+// 1 - yes, 2 - no, 3 - always, 4 - never.
+
 // This function should be optimized via OSR.
 function h() {
-  while(false) ;
-  for (var j = 0; j < 5000000; j++) g();
+  var optstatus = %GetOptimizationStatus(h);
+  if (optstatus == 4) {
+    // Optimizations are globally disabled; just run once.
+    g();
+  } else {
+    // Run for a bit as long as h is unoptimized.
+    while (%GetOptimizationStatus(h) == 2) {
+      for (var j = 0; j < 100; j++) g();
+    }
+    g();
+  }
 }
 
 h();
diff --git a/test/mjsunit/regress/regress-1119.js b/test/mjsunit/regress/regress-1119.js
index 484893c..16b2e4f 100644
--- a/test/mjsunit/regress/regress-1119.js
+++ b/test/mjsunit/regress/regress-1119.js
@@ -38,7 +38,6 @@
 var exception;
 try {
   eval("try { } catch (e) { var y = false; }");
-  assertUnreachable();
 } catch (e) {
   exception = e;
 }
diff --git a/test/mjsunit/regress/regress-1130.js b/test/mjsunit/regress/regress-1130.js
index 188f3f9..07d5e3d 100644
--- a/test/mjsunit/regress/regress-1130.js
+++ b/test/mjsunit/regress/regress-1130.js
@@ -30,9 +30,11 @@
 
 Object.prototype.__defineGetter__(0, function() { throw 42; } );
 
+var exception = false;
 try {
   eval("(function() { const x; var x })")();
-  assertUnreachable();
 } catch (e) {
+  exception = true;
   assertTrue(e instanceof TypeError);
 }
+assertTrue(exception);
diff --git a/test/mjsunit/regress/regress-1132.js b/test/mjsunit/regress/regress-1132.js
index 4423ecd..3314db8 100644
--- a/test/mjsunit/regress/regress-1132.js
+++ b/test/mjsunit/regress/regress-1132.js
@@ -41,8 +41,10 @@
   }
 }
 
+var exception = false;
 try {
   test();
-  assertUnreachable();
 } catch (e) {
+  exception = true;
 }
+assertTrue(exception);
diff --git a/test/mjsunit/regress/regress-1160.js b/test/mjsunit/regress/regress-1160.js
index 8e6e29b..10d3bc3 100644
--- a/test/mjsunit/regress/regress-1160.js
+++ b/test/mjsunit/regress/regress-1160.js
@@ -32,15 +32,10 @@
 // when keyed store on the array does not work as expected because of
 // the setter on its prototype.
 
-try {
-  var N = 100;
-  var array = Array(N);
-  for (var i = 0; i < N; ++i) {
-    array[i] = i;
-  }
-  Array.prototype.__defineSetter__(32, function() { });
-  // The next line throws. We should make it work even with changed
-  // prototype. See http://code.google.com/p/v8/issues/detail?id=1161
-  array.join(",");
-  assertUnreachable();
-} catch (e) { }
+var N = 10;
+var array = Array(N);
+for (var i = 0; i < N; ++i) {
+  array[i] = i;
+}
+Array.prototype.__defineSetter__(2, function() { });
+assertEquals("0,1,2,3,4,5,6,7,8,9", array.join(","));
diff --git a/test/mjsunit/regress/regress-1170.js b/test/mjsunit/regress/regress-1170.js
index 8a5a9cf..95684c5 100644
--- a/test/mjsunit/regress/regress-1170.js
+++ b/test/mjsunit/regress/regress-1170.js
@@ -30,37 +30,43 @@
 __proto__.__defineSetter__("a", function(v) { setter_value = v; });
 eval("var a = 1");
 assertEquals(1, setter_value);
-assertFalse(hasOwnProperty("a"));
+assertFalse(this.hasOwnProperty("a"));
 
 eval("with({}) { eval('var a = 2') }");
 assertEquals(2, setter_value);
-assertFalse(hasOwnProperty("a"));
+assertFalse(this.hasOwnProperty("a"));
 
 // Function declarations are treated specially to match Safari. We do
 // not call setters for them.
 eval("function a() {}");
-assertTrue(hasOwnProperty("a"));
+assertTrue(this.hasOwnProperty("a"));
 
-__proto__.__defineSetter__("b", function(v) {   assertUnreachable(); });
+__proto__.__defineSetter__("b", function(v) { assertUnreachable(); });
+var exception = false;
 try {
   eval("const b = 23");
-  assertUnreachable();
 } catch(e) {
+  exception = true;
   assertTrue(/TypeError/.test(e));
 }
+assertTrue(exception);
+
+exception = false;
 try {
   eval("with({}) { eval('const b = 23') }");
-  assertUnreachable();
 } catch(e) {
+  exception = true;
   assertTrue(/TypeError/.test(e));
 }
+assertTrue(exception);
 
 __proto__.__defineSetter__("c", function(v) { throw 42; });
+exception = false;
 try {
   eval("var c = 1");
-  assertUnreachable();
 } catch(e) {
+  exception = true;
   assertEquals(42, e);
-  assertFalse(hasOwnProperty("c"));
+  assertFalse(this.hasOwnProperty("c"));
 }
-
+assertTrue(exception);
diff --git a/test/mjsunit/regress/regress-1172-bis.js b/test/mjsunit/regress/regress-1172-bis.js
index e8d5c81..01b637c 100644
--- a/test/mjsunit/regress/regress-1172-bis.js
+++ b/test/mjsunit/regress/regress-1172-bis.js
@@ -29,9 +29,11 @@
 // are properly treated.
 
 Object.prototype.__defineGetter__(0, function() { throw 42; });
+var exception = false;
 try {
   Object[0]();
-  assertUnreachable();
 } catch(e) {
+  exception = true;
   assertEquals(42, e);
 }
+assertTrue(exception);
diff --git a/test/mjsunit/regress/regress-1199637.js b/test/mjsunit/regress/regress-1199637.js
index d9116c1..9c560a9 100644
--- a/test/mjsunit/regress/regress-1199637.js
+++ b/test/mjsunit/regress/regress-1199637.js
@@ -34,43 +34,43 @@
 const READ_ONLY = 1;
 
 // Use DeclareGlobal...
-%SetProperty(this.__proto__, "a", "1234", NONE);
+%SetProperty(this.__proto__, "a", 1234, NONE);
 assertEquals(1234, a);
 eval("var a = 5678;");
 assertEquals(5678, a);
 
-%SetProperty(this.__proto__, "b", "1234", NONE);
+%SetProperty(this.__proto__, "b", 1234, NONE);
 assertEquals(1234, b);
 eval("const b = 5678;");
 assertEquals(5678, b);
 
-%SetProperty(this.__proto__, "c", "1234", READ_ONLY);
+%SetProperty(this.__proto__, "c", 1234, READ_ONLY);
 assertEquals(1234, c);
 eval("var c = 5678;");
 assertEquals(5678, c);
 
-%SetProperty(this.__proto__, "d", "1234", READ_ONLY);
+%SetProperty(this.__proto__, "d", 1234, READ_ONLY);
 assertEquals(1234, d);
 eval("const d = 5678;");
 assertEquals(5678, d);
 
 // Use DeclareContextSlot...
-%SetProperty(this.__proto__, "x", "1234", NONE);
+%SetProperty(this.__proto__, "x", 1234, NONE);
 assertEquals(1234, x);
 eval("with({}) { var x = 5678; }");
 assertEquals(5678, x);
 
-%SetProperty(this.__proto__, "y", "1234", NONE);
+%SetProperty(this.__proto__, "y", 1234, NONE);
 assertEquals(1234, y);
 eval("with({}) { const y = 5678; }");
 assertEquals(5678, y);
 
-%SetProperty(this.__proto__, "z", "1234", READ_ONLY);
+%SetProperty(this.__proto__, "z", 1234, READ_ONLY);
 assertEquals(1234, z);
 eval("with({}) { var z = 5678; }");
 assertEquals(5678, z);
 
-%SetProperty(this.__proto__, "w", "1234", READ_ONLY);
+%SetProperty(this.__proto__, "w", 1234, READ_ONLY);
 assertEquals(1234, w);
 eval("with({}) { const w = 5678; }");
 assertEquals(5678, w);
diff --git a/test/mjsunit/regress/regress-1210.js b/test/mjsunit/regress/regress-1210.js
index 7d4735a..43500e7 100644
--- a/test/mjsunit/regress/regress-1210.js
+++ b/test/mjsunit/regress/regress-1210.js
@@ -44,7 +44,7 @@
 }
 
 // Run enough to optimize assuming global 'a' is a smi.
-for (var i = 0; i < 1000000; ++i) test(0);
+for (var i = 0; i < 10; ++i) test(0);
 %OptimizeFunctionOnNextCall(test);
 test(0);
 
diff --git a/test/mjsunit/regress/regress-1213575.js b/test/mjsunit/regress/regress-1213575.js
index 0c3dcc2..9d82064 100644
--- a/test/mjsunit/regress/regress-1213575.js
+++ b/test/mjsunit/regress/regress-1213575.js
@@ -33,7 +33,7 @@
 
 var caught = false;
 try {
-  eval('const x'); 
+  eval('const x');
 } catch(e) {
   assertTrue(e instanceof TypeError);
   caught = true;
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1215.js
similarity index 81%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1215.js
index aa93b25..93a89f6 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1215.js
@@ -25,12 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Make sure that the "message" property on Error.prototype
+// has the following descriptor:
+// {writable: true, enumerable: false, configurable: true}
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+var desc = Object.getOwnPropertyDescriptor(Error.prototype, 'message');
 
-test();
+assertEquals(true, desc.writable);
+assertEquals(false, desc.enumerable);
+assertEquals(true, desc.configurable);
diff --git a/test/mjsunit/regress/regress-124.js b/test/mjsunit/regress/regress-124.js
index 0b3aae5..1197467 100644
--- a/test/mjsunit/regress/regress-124.js
+++ b/test/mjsunit/regress/regress-124.js
@@ -26,26 +26,26 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 assertEquals("[object global]", this.toString());
-assertEquals("[object global]", toString());
+assertEquals("[object Undefined]", toString());
 
 assertEquals("[object global]", eval("this.toString()"));
-assertEquals("[object global]", eval("toString()"));
+assertEquals("[object Undefined]", eval("toString()"));
 
 assertEquals("[object global]", eval("var f; this.toString()"));
-assertEquals("[object global]", eval("var f; toString()"));
+assertEquals("[object Undefined]", eval("var f; toString()"));
 
 
 function F(f) {
   assertEquals("[object global]", this.toString());
-  assertEquals("[object global]", toString());
+  assertEquals("[object Undefined]", toString());
 
   assertEquals("[object global]", eval("this.toString()"));
-  assertEquals("[object global]", eval("toString()"));
+  assertEquals("[object Undefined]", eval("toString()"));
 
   assertEquals("[object global]", eval("var f; this.toString()"));
-  assertEquals("[object global]", eval("var f; toString()"));
+  assertEquals("[object Undefined]", eval("var f; toString()"));
 
-  assertEquals("[object global]", eval("f()"));
+  assertEquals("[object Undefined]", eval("f()"));
 
   // Receiver should be the arguments object here.
   assertEquals("[object Arguments]", eval("arguments[0]()"));
diff --git a/test/mjsunit/regress/regress-1246.js b/test/mjsunit/regress/regress-1246.js
new file mode 100644
index 0000000..f3dbec6
--- /dev/null
+++ b/test/mjsunit/regress/regress-1246.js
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This regression test checks the behaviour of the parseInt function when
+// the given radix is not a Smi.
+
+// Flags: --allow-natives-syntax
+
+var nonSmi10 = Math.log(Math.exp(10));
+var nonSmi16 = Math.log(Math.exp(16));
+
+assertTrue(!%_IsSmi(nonSmi10) && nonSmi10 == 10);
+assertTrue(!%_IsSmi(nonSmi16) && nonSmi16 == 16);
+
+// Giving these values as the radix argument triggers radix detection.
+var radix_detect = [0, -0, NaN, Infinity, -Infinity, undefined, null,
+                    "0", "-0", "a"];
+
+// These values will result in an integer radix outside of the valid range.
+var radix_invalid = [1, 37, -2, "-2", "37"];
+
+// These values will trigger decimal parsing.
+var radix10 = [10, 10.1, "10", "10.1", nonSmi10];
+
+// These values will trigger hexadecimal parsing.
+var radix16 = [16, 16.1, 0x10, "0X10", nonSmi16];
+
+for (var i = 0; i < radix_detect.length; i++) {
+  var radix = radix_detect[i];
+  assertEquals(NaN, parseInt("", radix));
+  assertEquals(23, parseInt("23", radix));
+  assertEquals(0xaf, parseInt("0xaf", radix));
+  assertEquals(NaN, parseInt("af", radix));
+}
+
+for (var i = 0; i < radix_invalid.length; i++) {
+  var radix = radix_invalid[i];
+  assertEquals(NaN, parseInt("", radix));
+  assertEquals(NaN, parseInt("23", radix));
+  assertEquals(NaN, parseInt("0xaf", radix));
+  assertEquals(NaN, parseInt("af", radix));
+}
+
+for (var i = 0; i < radix10.length; i++) {
+  var radix = radix10[i];
+  assertEquals(NaN, parseInt("", radix));
+  assertEquals(23, parseInt("23", radix));
+  assertEquals(0, parseInt("0xaf", radix));
+  assertEquals(NaN, parseInt("af", radix));
+}
+
+for (var i = 0; i < radix16.length; i++) {
+  var radix = radix16[i];
+  assertEquals(NaN, parseInt("", radix));
+  assertEquals(0x23, parseInt("23", radix));
+  assertEquals(0xaf, parseInt("0xaf", radix));
+  assertEquals(0xaf, parseInt("af", radix));
+}
+
diff --git a/test/mjsunit/regress/regress-1327557.js b/test/mjsunit/regress/regress-1327557.js
index bdf4277..cd8f08f 100644
--- a/test/mjsunit/regress/regress-1327557.js
+++ b/test/mjsunit/regress/regress-1327557.js
@@ -28,9 +28,11 @@
 var x = { valueOf: function() { throw "x"; } };
 var y = { valueOf: function() { throw "y"; } };
 
+var exception = false;
 try {
   x * -y;
-  assertUnreachable("Didn't throw an exception");
 } catch (e) {
+  exception = true;
   assertEquals("y", e);
 }
+assertTrue(exception);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1337.js
similarity index 83%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1337.js
index aa93b25..ebcf84b 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1337.js
@@ -25,12 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test that the throw is not inlined if object literals cannot be
+// inlined.
+function bar() { throw {}; }
+
+function foo() { bar(); }
+
+for (var i = 0; i < 5; ++i) {
+    try { foo() } catch (e) { }
 }
-
-test();
+%OptimizeFunctionOnNextCall(foo)
+try { foo() } catch (e) { }
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1351.js
similarity index 85%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1351.js
index aa93b25..656b19f 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1351.js
@@ -25,12 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test that the arguments value does not escape when it appears as
+// an intermediate value in an expression.
+
+function h() { }
+
+function f() {
+  var a = null;
+  h(a = arguments);
 }
 
-test();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1355.js
similarity index 80%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1355.js
index aa93b25..de9364a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1355.js
@@ -25,12 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that an exception is not thrown when trying to set a value for
+// a property that has only a defined getter, except when in strict mode.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var foo = Object.defineProperty({}, "bar", {
+  get: function () {
+    return 10;
+  }
+});
+
+assertDoesNotThrow("foo.bar = 20");
+
+function shouldThrow() {
+  'use strict';
+  foo.bar = 20;
 }
 
-test();
+assertThrows("shouldThrow()");
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1360.js
similarity index 81%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1360.js
index aa93b25..bebad28 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1360.js
@@ -25,12 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Check the receiver passed to the callback functions given to
+// Array.prototype.sort and String.prototype.replace.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+var global = this;
+function strict() { "use strict"; assertEquals(void 0, this); }
+function non_strict() { assertEquals(global, this); }
 
-test();
+[1,2,3].sort(strict);
+[1,2,3].sort(non_strict);
+
+"axc".replace("x", strict);
+"axc".replace("x", non_strict);
diff --git a/test/mjsunit/regress/regress-1365.js b/test/mjsunit/regress/regress-1365.js
new file mode 100644
index 0000000..59290f9
--- /dev/null
+++ b/test/mjsunit/regress/regress-1365.js
@@ -0,0 +1,82 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=1365
+
+// Check that builtin methods are passed undefined as the receiver
+// when called as functions through variables.
+
+// Flags: --allow-natives-syntax
+
+// Global variable.
+var valueOf = Object.prototype.valueOf;
+var hasOwnProperty = Object.prototype.hasOwnProperty;
+
+function callGlobalValueOf() { valueOf(); }
+function callGlobalHasOwnProperty() { hasOwnProperty(); }
+
+assertEquals(Object.prototype, Object.prototype.valueOf());
+assertThrows(callGlobalValueOf);
+assertThrows(callGlobalHasOwnProperty);
+
+%OptimizeFunctionOnNextCall(Object.prototype.valueOf);
+Object.prototype.valueOf();
+
+assertEquals(Object.prototype, Object.prototype.valueOf());
+assertThrows(callGlobalValueOf);
+assertThrows(callGlobalHasOwnProperty);
+
+function CheckExceptionCallLocal() {
+  var valueOf = Object.prototype.valueOf;
+  var hasOwnProperty = Object.prototype.hasOwnProperty;
+  var exception = false;
+  try { valueOf(); } catch(e) { exception = true; }
+  assertTrue(exception);
+  exception = false;
+  try { hasOwnProperty(); } catch(e) { exception = true; }
+  assertTrue(exception);
+}
+CheckExceptionCallLocal();
+
+function CheckExceptionCallParameter(f) {
+  var exception = false;
+  try { f(); } catch(e) { exception = true; }
+  assertTrue(exception);
+}
+CheckExceptionCallParameter(Object.prototype.valueOf);
+CheckExceptionCallParameter(Object.prototype.hasOwnProperty);
+
+function CheckPotentiallyShadowedByEval() {
+  var exception = false;
+  try {
+    eval("hasOwnProperty('x')");
+  } catch(e) {
+    exception = true;
+  }
+  assertTrue(exception);
+}
+CheckPotentiallyShadowedByEval();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1369.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1369.js
index aa93b25..48a8e7e 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1369.js
@@ -25,12 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --expose-gc
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
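+// Calling gc through Function.prototype.call with a non-object receiver must not throw.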
+assertDoesNotThrow('gc.call(1)');
+assertDoesNotThrow('gc.call("asdf")');
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1383.js
similarity index 84%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1383.js
index aa93b25..4b08f5a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1383.js
@@ -25,12 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that passing the strict_mode flag in extra ic state from
+// StubCache::ComputeKeyedLoadOrStoreExternalArray does not
+// hit an assertion in Code::ComputeFlags.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+x="";
+function foo(){
+  "use strict";
+  var wxemsx=(4);
+  var wxemsx_0=Float32Array(wxemsx);
+  wxemsx_0[0]={};
 }
 
-test();
+foo()
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1387.js
similarity index 82%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1387.js
index aa93b25..d171d38 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1387.js
@@ -25,12 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Tests that we always return the same type error function when trying to
+// access strict mode caller and callee.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function foo() {
+  'use strict';
+  return arguments;
 }
 
-test();
+var get1 = Object.getOwnPropertyDescriptor(foo(), "caller").get;
+var get2 = Object.getOwnPropertyDescriptor(foo(), "callee").get;
+assertEquals(get1, get2);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1389.js
similarity index 83%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1389.js
index aa93b25..9b89bbf 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1389.js
@@ -25,12 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test optimized implementation of postfix ++ on undefined input.
+// See http://code.google.com/p/v8/issues/detail?id=1389
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
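+// The inner x is never assigned, so the postfix x++ evaluates to NaN, which is
+// stored in the global y.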
+for (var i=0; i<4; i++) {
+  (function () {
+    (function () {
+      (function () {
+        var x;
+        y = x++;
+      })();
+    })();
+  })();
 }
 
-test();
+assertEquals(NaN, y);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1401.js
similarity index 83%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1401.js
index aa93b25..33eb067 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1401.js
@@ -25,12 +25,21 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// See: http://code.google.com/p/v8/issues/detail?id=1401
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var bottom = 0;
+var sizes = new Array();
+
+for (i = 0; i < 10; i++) {
+  sizes[i] = 0;
 }
 
-test();
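+// The value of the nested assignment (size) must be returned, with bottom
+// incremented exactly once per call.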
+function foo() {
+  var size = bottom + 1 + 10;
+  var t =  (sizes[++bottom] = size);
+  return t;
+}
+
+for (i = 0; i < 5; i++) {
+  assertEquals(i + 11, foo());
+}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1412.js
similarity index 65%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1412.js
index aa93b25..b043f19 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1412.js
@@ -25,12 +25,35 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that the apply with arguments optimization passes values
+// unchanged to strict-mode functions and builtins.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Flags: --allow-natives-syntax
+
+function strict() { "use strict"; return this; }
+
+function test_strict() {
+  assertEquals(void 0, strict.apply(undefined, arguments));
+  assertEquals(42, strict.apply(42, arguments));
+  assertEquals("asdf", strict.apply("asdf", arguments));
 }
 
-test();
+for (var i = 0; i < 10; i++) test_strict();
+%OptimizeFunctionOnNextCall(test_strict);
+test_strict();
+
+function test_builtin(receiver) {
+  Object.prototype.valueOf.apply(receiver, arguments);
+}
+
+for (var i = 0; i < 10; i++) test_builtin(this);
+%OptimizeFunctionOnNextCall(test_builtin);
+test_builtin(this);
+
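+// With the receiver forwarded unchanged, an undefined receiver makes the builtin throw.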
+var exception = false;
+try {
+  test_builtin(undefined);
+} catch(e) {
+  exception = true;
+}
+assertTrue(exception);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1419.js
similarity index 76%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1419.js
index aa93b25..98a8b76 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1419.js
@@ -25,12 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that using bind several times does not change the length of existing
+// bound functions.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function foo() {
 }
 
-test();
+var f1 = function (x) {}.bind(foo);
+var f2 = function () {};
+
+assertEquals(1, f1.length);
+
+// The object we bind to can be any object.
+f2.bind(foo);
+
+assertEquals(1, f1.length);
+
+var desc = Object.getOwnPropertyDescriptor(f1, 'length');
+assertEquals(false, desc.writable);
+assertEquals(false, desc.enumerable);
+assertEquals(false, desc.configurable);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1423.js
similarity index 67%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1423.js
index aa93b25..b0d0ca3 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1423.js
@@ -25,12 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test that the Lithium environment iterator does not stop iteration early.
+"use strict";
+
+function f0() {
+  return f1('literal', true);
 }
 
-test();
+function f1(x, y) {
+  return f2(x, y);
+}
+
+// Because it's strict, f2 has an environment containing only the constants
+// undefined, 'literal', and false.  Bug 1423 would cause environment
+// iteration to stop early.
+//
+// Bug manifests as UNREACHABLE code (due to an unallocated register) in
+// debug builds.
+function f2(x, y) {
+  if (y) {
+    if (f3(x, 'other-literal')) {
+      return 0;
+    } else {
+      return 1;
+    }
+  } else {
+    return 2;
+  }
+}
+
+function f3(x, y) {
+  return x === y;
+}
+
+for (var i = 0; i < 5; ++i) f0();
+%OptimizeFunctionOnNextCall(f0);
+assertEquals(1, f0());
diff --git a/test/mjsunit/regress/regress-1436.js b/test/mjsunit/regress/regress-1436.js
new file mode 100644
index 0000000..390ccee
--- /dev/null
+++ b/test/mjsunit/regress/regress-1436.js
@@ -0,0 +1,79 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that reduce and reduceRight call the callback function with
+// undefined as the receiver (which for non-strict functions is
+// transformed to the global object).
+
+// Check receiver for reduce and reduceRight.
+
+var global = this;
+function non_strict(){ assertEquals(global, this); }
+function strict(){ "use strict"; assertEquals(void 0, this); }
+function strict_null(){ "use strict"; assertEquals(null, this); }
+
+[2, 3].reduce(non_strict);
+[2, 3].reduce(strict);
+[2, 3].reduceRight(non_strict);
+[2, 3].reduceRight(strict);
+
+
+// Check the receiver for callbacks in other array methods.
+[2, 3].every(non_strict);
+[2, 3].every(non_strict, undefined);
+[2, 3].every(non_strict, null);
+[2, 3].every(strict);
+[2, 3].every(strict, undefined);
+[2, 3].every(strict_null, null);
+
+[2, 3].filter(non_strict);
+[2, 3].filter(non_strict, undefined);
+[2, 3].filter(non_strict, null);
+[2, 3].filter(strict);
+[2, 3].filter(strict, undefined);
+[2, 3].filter(strict_null, null);
+
+[2, 3].forEach(non_strict);
+[2, 3].forEach(non_strict, undefined);
+[2, 3].forEach(non_strict, null);
+[2, 3].forEach(strict);
+[2, 3].forEach(strict, undefined);
+[2, 3].forEach(strict_null, null);
+
+[2, 3].map(non_strict);
+[2, 3].map(non_strict, undefined);
+[2, 3].map(non_strict, null);
+[2, 3].map(strict);
+[2, 3].map(strict, undefined);
+[2, 3].map(strict_null, null);
+
+[2, 3].some(non_strict);
+[2, 3].some(non_strict, undefined);
+[2, 3].some(non_strict, null);
+[2, 3].some(strict);
+[2, 3].some(strict, undefined);
+[2, 3].some(strict_null, null);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1447.js
similarity index 82%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1447.js
index aa93b25..2c1ee59 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1447.js
@@ -25,12 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
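+// Freezing or sealing an Array builtin from inside its own callback must
+// complete without throwing, in both sloppy and strict mode.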
+[0].forEach(function(){ Object.freeze(Array.prototype.forEach); });
+[0].every(function(){ Object.seal(Array.prototype.every); });
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function testStrict(){
+  "use strict";
+  [0].forEach(function(){ Object.freeze(Array.prototype.forEach); });
+  [0].every(function(){ Object.seal(Array.prototype.every); });
 }
 
-test();
+testStrict();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1472.js
similarity index 66%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1472.js
index aa93b25..b2a30d2 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1472.js
@@ -25,12 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Avoid excessive expansions of regexp repetitions inside regexp repetitions.
+// Some of these caused stack overflows, others caused out-of-memory failures.
+var r1 = /(?:a(?:b(?:c(?:d(?:e(?:f(?:g(?:h(?:i(?:j(?:k(?:l(?:m(?:n(?:o(?:p(?:q(?:r(?:s(?:t(?:u(?:v(?:w(?:x(?:y(?:z(?:FooBar)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)+)/;
+"xxx".match(r1);
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+var r2 = /(?:a(?:b(?:c(?:d(?:e(?:f(?:g(?:h(?:i(?:j(?:k(?:l(?:FooBar){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}){0,2}/;
+"xxx".match(r2);
 
-test();
+var r3 = /(?:a(?:b(?:c(?:d(?:e(?:f(?:g(?:h(?:i(?:j(?:k(?:l(?:FooBar){2}){2}){2}){2}){2}){2}){2}){2}){2}){2}){2}){2}){2}/;
+"xxx".match(r3);
+
+var r4 = /(?:a(?:b(?:c(?:d(?:e(?:f(?:g(?:h(?:i(?:FooBar){3,6}){3,6}){3,6}){3,6}){3,6}){3,6}){3,6}){3,6}){3,6}){3,6}/;
+"xxx".match(r4);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1476.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1476.js
index aa93b25..1277e7f 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1476.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
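+// (i % 2) | 0 must remain -1 for i == -1, both before and after optimization.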
+function foo (i) { return (i % 2) | 0; }
 
-test();
+assertEquals (-1, foo(-1));
+assertEquals (-1, foo(-1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals (-1, foo(-1));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1513.js
similarity index 78%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1513.js
index aa93b25..06c5edf 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1513.js
@@ -25,12 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Deleting a mapped arguments property and adding it via
+// Object.defineProperty should not crash.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function testcase() {
+  return (function (a, b, c) {
+      delete arguments[0];
+      Object.defineProperty(arguments, "0", {
+              value: 10,
+              writable: false,
+              enumerable: false,
+              configurable: false
+            });
+      assertEquals(10, arguments[0]);
+    }(0, 1, 2));
 }
 
-test();
+testcase();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1521.js
similarity index 78%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1521.js
index aa93b25..3149f05 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1521.js
@@ -24,13 +24,25 @@
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Optimized variable access through a catch context should work.
+function test(x) {
+  try {
+    throw new Error();
+  } catch (e) {
+    var y = {f: 1};
+    var f = function () {
+      var z = y;
+      var g = function () {
+        if (y.f === z.f) return x;
+      };
+      %OptimizeFunctionOnNextCall(g);
+      return g;
+    }
+    assertEquals(3, f()());
+  }
 }
 
-test();
+test(3);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1528.js
similarity index 79%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1528.js
index aa93b25..2eb6be1 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1528.js
@@ -25,12 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// With (or catch) scopes nested inside catch scopes should look at the
+// first outer non-catch scope to decide which closure to use when
+// allocating the new context.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Code below should not assert or crash.
+try {
+  fail;
+} catch (e) {
+  with({}) {  // With scope inside catch scope.
+    // Dynamic declaration forces runtime lookup to observe the context chain.
+    eval('const x = 7');
+  }
 }
-
-test();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1529.js
similarity index 88%
rename from test/mjsunit/override-eval-with-non-function.js
rename to test/mjsunit/regress/regress-1529.js
index aa93b25..212d7b3 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1529.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// See: http://code.google.com/p/v8/issues/detail?id=1529
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
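+// Throwing a ReferenceError for the undefined x while a custom prepareStackTrace
+// is installed must not crash (see the issue linked above).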
+try {
+  Error.prepareStackTrace = function (error, stackTrace) {
+    stackTrace.some();
+  };
+  x;
+} catch (e) {}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1531.js
similarity index 70%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1531.js
index aa93b25..09e61a6 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1531.js
@@ -25,12 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Regression test for computing the element keys of the arguments object.  Should
+// not crash or assert.
+function test(x) {
+  arguments[10] = 0;
+  var arr = [];
+  for (var p in arguments) arr.push(p);
+  return arr;
+}
+assertEquals(["0", "10"], test(0));
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Regression test for lookup after delete of a dictionary-mode arguments
+// backing store.  Should not crash or assert.
+function test1(x, y, z) {
+  // Put into dictionary mode.
+  arguments.__defineGetter__("5", function () { return 0; });
+  // Delete a property from the dictionary.
+  delete arguments[5];
+  // Look up a property in the dictionary.
+  return arguments[2];
 }
 
-test();
+assertEquals(void 0, test1(0));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1546.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1546.js
index aa93b25..7f1fa58 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1546.js
@@ -25,12 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// See: http://code.google.com/p/v8/issues/detail?id=1546
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+// Shouldn't throw. The scanner incorrectly truncated the character to a char
+// before comparing it with "*", so it ended the comment early.
+eval("/*\u822a/ */");
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1548.js
similarity index 67%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1548.js
index aa93b25..074007b 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1548.js
@@ -25,12 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that the caller and arguments objects are not available on native
+// functions.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+function testfn(f) { return [1].map(f)[0]; }
+function foo() { return [].map.caller; }
+assertEquals(null, testfn(foo));
 
-test();
+// Try to delete the caller property (to make sure that we can't get to the
+// caller accessor on the prototype).
+delete Array.prototype.map.caller;
+assertEquals(null, testfn(foo));
+
+// Redo tests with arguments object.
+function testarguments(f) { return [1].map(f)[0]; }
+function bar() { return [].map.arguments; }
+assertEquals(null, testarguments(bar));
+
+// Try to delete the arguments property (to make sure that we can't get to the
+// arguments accessor on the prototype).
+delete Array.prototype.map.arguments;
+assertEquals(null, testarguments(bar));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1560.js
similarity index 64%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1560.js
index aa93b25..a0aa7e6 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1560.js
@@ -25,12 +25,44 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax --expose-gc
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function mkCOWArray() {
+  var a = [''];
+  assertEquals('', a[0]);
+  return a;
 }
 
-test();
+function mkArray() {
+  var a = [];
+  a[0] = '';
+  return a;
+}
+
+function mkNumberDictionary() {
+  var a = new Array();
+  a[0] = '';
+  a[100000] = '';
+  return a;
+}
+
+function write(a, i) { a[i] = "bazinga!"; }
+
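+// Run the write on a fresh array from each factory twice while unoptimized,
+// then once more after forcing optimization.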
+function test(factories, w) {
+  factories.forEach(function(f) { w(f(), 0); });
+  factories.forEach(function(f) { w(f(), 0); });
+  %OptimizeFunctionOnNextCall(w);
+  factories.forEach(function(f) { w(f(), 0); });
+}
+
+// Monomorphic case.
+for (var i = 0; i < 5; i++) write(mkArray(), 0);
+%OptimizeFunctionOnNextCall(write);
+write(mkCOWArray(), 0);
+var failure = mkCOWArray();
+
+// Cleanup, then polymorphic case.
+%DeoptimizeFunction(write);
+gc();
+test([mkArray, mkNumberDictionary], write);
+test([mkArray, mkNumberDictionary, mkCOWArray], write);
diff --git a/test/mjsunit/bugs/bug-900066.js b/test/mjsunit/regress/regress-1563.js
similarity index 81%
rename from test/mjsunit/bugs/bug-900066.js
rename to test/mjsunit/regress/regress-1563.js
index 3b7cc3f..c25b6c7 100644
--- a/test/mjsunit/bugs/bug-900066.js
+++ b/test/mjsunit/regress/regress-1563.js
@@ -25,14 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When a property of the arguments array is deleted, it
-// must be "disconnected" from the corresponding parameter.
-// Re-introducing the property does not connect to the parameter.
+// Flags: --allow-natives-syntax
 
-function f(x) {
-  delete arguments[0];
-  arguments[0] = 100;
-  return x;
+obj = new PixelArray(10);
+
+// Test that undefined gets properly clamped in Crankshafted pixel array
+// assignments.
+function set_pixel(obj, arg) {
+  obj[0] = arg;
 }
 
-assertEquals(10, f(10));
+set_pixel(obj, 1.5);
+set_pixel(obj, NaN);
+%OptimizeFunctionOnNextCall(set_pixel);
+set_pixel(obj, undefined);
+set_pixel(obj, undefined);
+
+assertEquals(0, obj[0]);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1582.js
similarity index 77%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1582.js
index aa93b25..346d68a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1582.js
@@ -25,12 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function f(restIsArray, rest) {
+  var arr;
+  if (typeof rest === "object" && (rest instanceof Array)) {
+    arr = rest;
+  } else {
+    arr = arguments;
+  }
+  var i = arr.length;
+  while (--i >= 0) arr[i];
+  var arrIsArguments = (arr[1] !== rest);
+  assertEquals(restIsArray, arrIsArguments);
 }
 
-test();
+f(false, 'b', 'c');
+f(false, 'b', 'c');
+f(false, 'b', 'c');
+%OptimizeFunctionOnNextCall(f);
+f(true, ['b', 'c']);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1583.js
similarity index 66%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1583.js
index aa93b25..c4a344c 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1583.js
@@ -25,12 +25,33 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Regression test for a bug in recompilation of anonymous functions inside
+// catch.  We would incorrectly hoist them outside the catch in some cases.
+function f() {
+  try {
+    throw 0;
+  } catch (e) {
+    try {
+      var x = { a: 'hest' };
+      x.m = function (e) { return x.a; };
+    } catch (e) {
+    }
+  }
+  return x;
 }
 
-test();
+var o = f();
+assertEquals('hest', o.m());
+assertEquals('hest', o.m());
+assertEquals('hest', o.m());
+%OptimizeFunctionOnNextCall(o.m);
+assertEquals('hest', o.m());
+
+// Fixing the bug above introduced (revealed?) an inconsistency in named
+// getters and setters.  The property name was also treated as a function
+// name.
+var global = 'horse';
+var p = { get global() { return global; }};
+assertEquals('horse', p.global);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1586.js
similarity index 63%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1586.js
index aa93b25..b15e2f2 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1586.js
@@ -25,12 +25,40 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --expose-debug-as debug
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+// Test debug evaluation for functions without local context, but with
+// nested catch contexts.
 
-test();
+function f() {
+  var i = 1;          // Line 1.
+  {                   // Line 2.
+    try {             // Line 3.
+      throw 'stuff';  // Line 4.
+    } catch (e) {     // Line 5.
+      x = 2;          // Line 6.
+    }
+  }
+};
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+// Set breakpoint on line 6.
+var bp = Debug.setBreakPoint(f, 6);
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    result = exec_state.frame().evaluate("i").value();
+  }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+result = -1;
+f();
+assertEquals(1, result);
+
+// Clear breakpoint.
+Debug.clearBreakPoint(bp);
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/regress/regress-1620.js b/test/mjsunit/regress/regress-1620.js
new file mode 100644
index 0000000..6d72974
--- /dev/null
+++ b/test/mjsunit/regress/regress-1620.js
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Don't allow malformed unicode escape sequences in identifiers.
+// In strings and regexps we currently allow malformed unicode escape
+// sequences without throwing a SyntaxError. Instead "\u22gk" would
+// treat the "\u" as an identity escape, and evaluate to "u22gk".
+// Due to code sharing, we did the same in identifiers. This should
+// no longer be the case.
+// See: http://code.google.com/p/v8/issues/detail?id=1620
+
+assertThrows("var \\u\\u\\u = 42;");
+assertThrows("var \\u41 = 42;");
+assertThrows("var \\u123 = 42;");
+eval("var \\u1234 = 42;");
+assertEquals(42, eval("\u1234"));
+assertThrows("var uuu = 42; var x = \\u\\u\\u");
+
+// Regressions introduced and fixed again while fixing the above.
+
+// Handle 0xFFFD correctly (it's a valid value, and shouldn't be used
+// to mark an error).
+assertEquals(0xFFFD, "\uFFFD".charCodeAt(0));
+
+// Handle unicode escapes in regexp flags correctly.
+assertThrows("/x/g\\uim", SyntaxError);
+assertThrows("/x/g\\u2im", SyntaxError);
+assertThrows("/x/g\\u22im", SyntaxError);
+assertThrows("/x/g\\u222im", SyntaxError);
+assertThrows("/x/g\\\\u2222im", SyntaxError);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1625.js
similarity index 83%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1625.js
index aa93b25..a2ef8df 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1625.js
@@ -25,12 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that overwriting Array.prototype.push does not make
+// Object.defineProperties misbehave.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+Array.prototype.push = 1;
+var desc = {foo: {value: 10}, bar: {get: function() {return 42; }}};
+var obj = {};
+var x = Object.defineProperties(obj, desc);
+assertEquals(x.foo, 10);
+assertEquals(x.bar, 42);
diff --git a/test/mjsunit/regress/regress-1639.js b/test/mjsunit/regress/regress-1639.js
new file mode 100644
index 0000000..ed68c97
--- /dev/null
+++ b/test/mjsunit/regress/regress-1639.js
@@ -0,0 +1,85 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+var breaks = 0;
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var line = event_data.sourceLineText();
+      print('break: ' + line);
+
+      assertEquals(-1, line.indexOf('NOBREAK'),
+                   "should not break on unexpected lines")
+      assertEquals('BREAK ' + breaks, line.substr(-7));
+      breaks++;
+      sendCommand(exec_state, {
+        seq: 0,
+        type: "request",
+        command: "continue",
+        arguments: { stepaction: "next" }
+      });
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function a(f) {
+  if (f) {  // NOBREAK: should not break here!
+    try {
+      f();
+    } catch(e) {
+    }
+  }
+}  // BREAK 2
+
+function b() {
+  c();  // BREAK 0
+}  // BREAK 1
+
+function c() {
+  a();
+}
+
+// Set a break point and call to invoke the debug event listener.
+Debug.setBreakPoint(b, 0, 0);
+a(b);
+// BREAK 3
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1647.js
similarity index 83%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1647.js
index aa93b25..a6afcc0 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1647.js
@@ -25,12 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+// Test for correct deoptimization in named function expressions.
 
-test();
+var t = { foo: function() {} };
+
+var f = (function bar() {
+ t.foo();
+ assertEquals("function", typeof bar);
+});
+
+for (var i = 0; i < 10; i++) f();
+%OptimizeFunctionOnNextCall(f);
+t.number = 2;
+f();
+
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-1650.js
similarity index 73%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-1650.js
index aa93b25..fb6a178 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-1650.js
@@ -25,12 +25,36 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+function g(f) { return f.call.apply(f.bind, arguments); }
 
-test();
+var x = new Object;
+
+function t() { }
+
+g(t, x);
+g(t, x);
+g(t, x);
+%OptimizeFunctionOnNextCall(g);
+
+function Fake() {}
+
+var fakeCallInvoked = false;
+
+Fake.prototype.call = function () {
+  assertSame(Fake.prototype.bind, this);
+  assertEquals(2, arguments.length);
+  assertSame(fake, arguments[0]);
+  assertSame(x, arguments[1]);
+  fakeCallInvoked = true;
+};
+
+Fake.prototype.bind = function () {
+};
+
+var fake = new Fake;
+
+g(fake, x);
+
+assertTrue(fakeCallInvoked);
diff --git a/test/mjsunit/regress/regress-176.js b/test/mjsunit/regress/regress-176.js
index b204812..ef0c4f1 100644
--- a/test/mjsunit/regress/regress-176.js
+++ b/test/mjsunit/regress/regress-176.js
@@ -27,24 +27,24 @@
 
 // See http://code.google.com/p/v8/issues/detail?id=176
 
-assertEquals("f,",
-             "foo".match(/(?:(?=(f)o))?f/).toString(),
-             "zero length match in (?:) with capture in lookahead");
-assertEquals("f,",
-             "foo".match(/(?=(f)o)?f/).toString(),
-             "zero length match in (?=) with capture in lookahead");
-assertEquals("fo,f",
-             "foo".match(/(?:(?=(f)o)f)?o/),
-             "non-zero length match with capture in lookahead");
-assertEquals("fo,f",
-             "foo".match(/(?:(?=(f)o)f?)?o/),
-             "non-zero length match with greedy ? in (?:)");
-assertEquals("fo,f",
-             "foo".match(/(?:(?=(f)o)f??)?o/),
-             "non-zero length match with non-greedy ? in (?:), o forces backtrack");
-assertEquals("fo,f",
-             "foo".match(/(?:(?=(f)o)f??)?./),
-             "non-zero length match with non-greedy ? in (?:), zero length match causes backtrack");
-assertEquals("f,",
-             "foo".match(/(?:(?=(f)o)fx)?./),
-             "x causes backtrack inside (?:)");
+assertArrayEquals(["f", undefined],
+                  "foo".match(/(?:(?=(f)o))?f/),
+                  "zero length match in (?:) with capture in lookahead");
+assertArrayEquals(["f", undefined],
+                  "foo".match(/(?=(f)o)?f/),
+                  "zero length match in (?=) with capture in lookahead");
+assertArrayEquals(["fo", "f"],
+                  "foo".match(/(?:(?=(f)o)f)?o/),
+                  "non-zero length match with capture in lookahead");
+assertArrayEquals(["fo", "f"],
+                  "foo".match(/(?:(?=(f)o)f?)?o/),
+                  "non-zero length match with greedy ? in (?:)");
+assertArrayEquals(["fo", "f"],
+                  "foo".match(/(?:(?=(f)o)f??)?o/),
+                  "non-zero length match with non-greedy ? in (?:), o forces backtrack");
+assertArrayEquals(["fo", "f"],
+                  "foo".match(/(?:(?=(f)o)f??)?./),
+                  "non-zero length match with non-greedy ? in (?:), zero length match causes backtrack");
+assertArrayEquals(["f", undefined],
+                  "foo".match(/(?:(?=(f)o)fx)?./),
+                  "x causes backtrack inside (?:)");
diff --git a/test/mjsunit/regress/regress-187.js b/test/mjsunit/regress/regress-187.js
index 44d8d7a..2f8b0a1 100644
--- a/test/mjsunit/regress/regress-187.js
+++ b/test/mjsunit/regress/regress-187.js
@@ -27,4 +27,4 @@
 
 // See http://code.google.com/p/v8/issues/detail?id=187
 
-assertEquals("f,", "foo".match(/(?:(?=(f)o)fx|)./));
+assertEquals(["f", undefined], "foo".match(/(?:(?=(f)o)fx|)./));
diff --git a/test/mjsunit/regress/regress-1919169.js b/test/mjsunit/regress/regress-1919169.js
index 774f265..a732312 100644
--- a/test/mjsunit/regress/regress-1919169.js
+++ b/test/mjsunit/regress/regress-1919169.js
@@ -30,7 +30,7 @@
  var s2 = "s2";
  for (var i = 0; i < 2; i++) {
    // Crashes in round i==1 with IllegalAccess in %StringAdd(x,y)
-   var res = 1 + s2;  
+   var res = 1 + s2;
    s2 = 2;
  }
 }
diff --git a/test/mjsunit/regress/regress-20070207.js b/test/mjsunit/regress/regress-20070207.js
index e90b2ec..b7f7a5c 100644
--- a/test/mjsunit/regress/regress-20070207.js
+++ b/test/mjsunit/regress/regress-20070207.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // The following regression test illustrates a problem in using the
-// value of setting a property in the arguments object. 
+// value of setting a property in the arguments object.
 
 function f(s) {
   arguments.length;
diff --git a/test/mjsunit/regress/regress-219.js b/test/mjsunit/regress/regress-219.js
index 4bfabdc..b751f0f 100644
--- a/test/mjsunit/regress/regress-219.js
+++ b/test/mjsunit/regress/regress-219.js
@@ -30,6 +30,10 @@
 // We should now allow duplicates of flags.
 // (See http://code.google.com/p/v8/issues/detail?id=219)
 
+// This has been reversed by issue 1628, since other browsers have also
+// tightened their syntax.
+// (See http://code.google.com/p/v8/issues/detail?id=1628)
+
 // Base tests: we recognize the basic flags
 
 function assertFlags(re, global, multiline, ignoreCase) {
@@ -53,124 +57,92 @@
 
 // Double i's
 
-re = /a/ii;
-assertFlags(re, false, false, true)
+assertThrows("/a/ii");
 
-re = /a/gii;
-assertFlags(re, true, false, true)
+assertThrows("/a/gii");
 
-re = /a/igi;
-assertFlags(re, true, false, true)
+assertThrows("/a/igi");
 
-re = /a/iig;
-assertFlags(re, true, false, true)
+assertThrows("/a/iig");
 
-re = /a/gimi;
-assertFlags(re, true, true, true)
+assertThrows("/a/gimi");
 
-re = /a/giim;
-assertFlags(re, true, true, true)
+assertThrows("/a/giim");
 
-re = /a/igim;
-assertFlags(re, true, true, true)
+assertThrows("/a/igim");
 
+assertThrows(function(){ return RegExp("a", "ii"); })
 
-re = RegExp("a", "ii");
-assertFlags(re, false, false, true)
+assertThrows(function(){ return RegExp("a", "gii"); })
 
-re = RegExp("a", "gii");
-assertFlags(re, true, false, true)
+assertThrows(function(){ return RegExp("a", "igi"); })
 
-re = RegExp("a", "igi");
-assertFlags(re, true, false, true)
+assertThrows(function(){ return RegExp("a", "iig"); })
 
-re = RegExp("a", "iig");
-assertFlags(re, true, false, true)
+assertThrows(function(){ return RegExp("a", "gimi"); })
 
-re = RegExp("a", "gimi");
-assertFlags(re, true, true, true)
+assertThrows(function(){ return RegExp("a", "giim"); })
 
-re = RegExp("a", "giim");
-assertFlags(re, true, true, true)
-
-re = RegExp("a", "igim");
-assertFlags(re, true, true, true)
+assertThrows(function(){ return RegExp("a", "igim"); })
 
 // Tripple i's
 
-re = /a/iii;
-assertFlags(re, false, false, true)
+assertThrows("/a/iii");
 
-re = /a/giii;
-assertFlags(re, true, false, true)
+assertThrows("/a/giii");
 
-re = /a/igii;
-assertFlags(re, true, false, true)
+assertThrows("/a/igii");
 
-re = /a/iigi;
-assertFlags(re, true, false, true)
+assertThrows("/a/iigi");
 
-re = /a/iiig;
-assertFlags(re, true, false, true)
+assertThrows("/a/iiig");
 
-re = /a/miiig;
-assertFlags(re, true, true, true)
+assertThrows("/a/miiig");
 
+assertThrows(function(){ return RegExp("a", "iii"); })
 
-re = RegExp("a", "iii");
-assertFlags(re, false, false, true)
+assertThrows(function(){ return RegExp("a", "giii"); })
 
-re = RegExp("a", "giii");
-assertFlags(re, true, false, true)
+assertThrows(function(){ return RegExp("a", "igii"); })
 
-re = RegExp("a", "igii");
-assertFlags(re, true, false, true)
+assertThrows(function(){ return RegExp("a", "iigi"); })
 
-re = RegExp("a", "iigi");
-assertFlags(re, true, false, true)
+assertThrows(function(){ return RegExp("a", "iiig"); })
 
-re = RegExp("a", "iiig");
-assertFlags(re, true, false, true)
+assertThrows(function(){ return RegExp("a", "miiig"); })
 
-re = RegExp("a", "miiig");
-assertFlags(re, true, true, true)
+// Illegal flags - valid flags late in string.
 
-// Illegal flags - flags late in string.
+assertThrows("/a/arglebargleglopglyf");
 
-re = /a/arglebargleglopglyf;
-assertFlags(re, true, false, false)
+assertThrows("/a/arglebargleglopglif");
 
-re = /a/arglebargleglopglif;
-assertFlags(re, true, false, true)
+assertThrows("/a/arglebargleglopglym");
 
-re = /a/arglebargleglopglym;
-assertFlags(re, true, true, false)
-
-re = /a/arglebargleglopglim;
-assertFlags(re, true, true, true)
+assertThrows("/a/arglebargleglopglim");
 
 // Case of flags still matters.
 
-re = /a/gmi;
+var re = /a/gmi;
 assertFlags(re, true, true, true)
 
-re = /a/Gmi;
-assertFlags(re, false, true, true)
+assertThrows("/a/Gmi");
 
-re = /a/gMi;
-assertFlags(re, true, false, true)
+assertThrows("/a/gMi");
 
-re = /a/gmI;
-assertFlags(re, true, true, false)
+assertThrows("/a/gmI");
 
-re = /a/GMi;
-assertFlags(re, false, false, true)
+assertThrows("/a/GMi");
 
-re = /a/GmI;
-assertFlags(re, false, true, false)
+assertThrows("/a/GmI");
 
-re = /a/gMI;
-assertFlags(re, true, false, false)
+assertThrows("/a/gMI");
 
-re = /a/GMI;
-assertFlags(re, false, false, false)
+assertThrows("/a/GMI");
+
+// Unicode escape sequences are not interpreted.
+
+assertThrows("/a/\\u0067");
+assertThrows("/a/\\u0069");
+assertThrows("/a/\\u006d");
+assertThrows("/a/\\u006D");
diff --git a/test/mjsunit/regress/regress-244.js b/test/mjsunit/regress/regress-244.js
index dc5336f..7503b15 100644
--- a/test/mjsunit/regress/regress-244.js
+++ b/test/mjsunit/regress/regress-244.js
@@ -54,13 +54,14 @@
   }
   for (var i = 0; i < kIllegalEncoded.length; i++) {
     var value = kIllegalEncoded[i];
-    var threw = false;
+    var exception = false;
     try {
       decodeURI(value);
-      assertUnreachable(value);
     } catch (e) {
+      exception = true;
       assertInstanceof(e, URIError);
     }
+    assertTrue(exception);
   }
 }
 
diff --git a/test/mjsunit/regress/regress-269.js b/test/mjsunit/regress/regress-269.js
index 49b24c0..ce165e0 100644
--- a/test/mjsunit/regress/regress-269.js
+++ b/test/mjsunit/regress/regress-269.js
@@ -40,10 +40,10 @@
 
 function g() {
 }
- 
+
 function f() {
   debugger;
   g.apply(null, ['']);
 }
 
-f()
\ No newline at end of file
+f()
diff --git a/test/mjsunit/regress/regress-399.js b/test/mjsunit/regress/regress-399.js
index 2ee998b..6c8eab5 100644
--- a/test/mjsunit/regress/regress-399.js
+++ b/test/mjsunit/regress/regress-399.js
@@ -28,5 +28,5 @@
 // See http://code.google.com/p/v8/issues/detail?id=399
 
 var date = new Date(1.009804e12);
-var year = String(date).match(/.*(200\d)/)[1];
+var year = Number(String(date).match(/.*(200\d)/)[1]);
 assertEquals(year, date.getFullYear());
diff --git a/test/mjsunit/regress/regress-485.js b/test/mjsunit/regress/regress-485.js
index 62c6fb9..f26e0eb 100755
--- a/test/mjsunit/regress/regress-485.js
+++ b/test/mjsunit/regress/regress-485.js
@@ -34,24 +34,6 @@
 var global2 = (function(){return this;})();
 assertEquals(global, global2, "direct call to local function returns global");
 
-var builtin = Object.prototype.valueOf;  // Builtin function that returns this.
-
-assertEquals(global, builtin(), "Direct call to builtin");
-
-assertEquals(global, builtin.call(), "call() to builtin");
-assertEquals(global, builtin.call(null), "call(null) to builtin");
-assertEquals(global, builtin.call(undefined), "call(undefined) to builtin");
-
-assertEquals(global, builtin.apply(), "apply() to builtin");
-assertEquals(global, builtin.apply(null), "apply(null) to builtin");
-assertEquals(global, builtin.apply(undefined), "apply(undefined) to builtin");
-
-assertEquals(global, builtin.call.call(builtin), "call.call() to builtin");
-assertEquals(global, builtin.call.apply(builtin), "call.apply() to builtin");
-assertEquals(global, builtin.apply.call(builtin), "apply.call() to builtin");
-assertEquals(global, builtin.apply.apply(builtin), "apply.apply() to builtin");
-
-
 // Builtin that depends on value of this to compute result.
 var builtin2 = Object.prototype.toString;
 
diff --git a/test/mjsunit/regress/regress-603.js b/test/mjsunit/regress/regress-603.js
index 7d4c322..f9344ee 100644
--- a/test/mjsunit/regress/regress-603.js
+++ b/test/mjsunit/regress/regress-603.js
@@ -29,21 +29,36 @@
 // not mess up the stack.
 // http://code.google.com/p/v8/issues/detail?id=603
 
-function test0() {
-  var re = /b../;
+var re = /b../;
+assertThrows(function() {
   return re('abcdefghijklm') + 'z';
-}
-assertEquals('bcdz', test0());
+});
 
 var re1 = /c../;
 re1.call = Function.prototype.call;
-var test1 = re1.call(null, 'abcdefghijklm') + 'z';
-assertEquals('cdez', test1);
+assertThrows(function() {
+  re1.call(null, 'abcdefghijklm') + 'z';
+});
 
 var re2 = /d../;
-var test2 = Function.prototype.call.call(re2, null, 'abcdefghijklm') + 'z';
-assertEquals('defz', test2);
+assertThrows(function() {
+  Function.prototype.call.call(re2, null, 'abcdefghijklm') + 'z';
+});
 
 var re3 = /e../;
-var test3 = Function.prototype.call.apply(re3, [null, 'abcdefghijklm']) + 'z';
-assertEquals('efgz', test3);
+assertThrows(function() {
+  Function.prototype.call.apply(
+      re3, [null, 'abcdefghijklm']) + 'z';
+});
+
+var re4 = /f../;
+assertThrows(function() {
+  Function.prototype.apply.call(
+      re4, null, ['abcdefghijklm']) + 'z';
+});
+
+var re5 = /g../;
+assertThrows(function() {
+  Function.prototype.apply.apply(
+      re4, [null, ['abcdefghijklm']]) + 'z';
+});
diff --git a/test/mjsunit/regress/regress-619.js b/test/mjsunit/regress/regress-619.js
index 24bdbc1..4d3e66b 100644
--- a/test/mjsunit/regress/regress-619.js
+++ b/test/mjsunit/regress/regress-619.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Tests that Object.defineProperty works correctly on array indices. 
+// Tests that Object.defineProperty works correctly on array indices.
 // Please see http://code.google.com/p/v8/issues/detail?id=619 for details.
 
 var obj = {};
diff --git a/test/mjsunit/regress/regress-678525.js b/test/mjsunit/regress/regress-678525.js
index 5ff9c3d..11eaf74 100644
--- a/test/mjsunit/regress/regress-678525.js
+++ b/test/mjsunit/regress/regress-678525.js
@@ -36,16 +36,16 @@
 assertEquals(56, '\8'.charCodeAt(0));
 
 assertEquals('\010', '\10');
-assertEquals('\011', '\11');    
+assertEquals('\011', '\11');
 assertEquals('\012', '\12');
 assertEquals('\013', '\13');
 assertEquals('\014', '\14');
 assertEquals('\015', '\15');
 assertEquals('\016', '\16');
 assertEquals('\017', '\17');
-    
+
 assertEquals('\020', '\20');
-assertEquals('\021', '\21');    
+assertEquals('\021', '\21');
 assertEquals('\022', '\22');
 assertEquals('\023', '\23');
 assertEquals('\024', '\24');
@@ -56,4 +56,4 @@
 assertEquals(73,  '\111'.charCodeAt(0));
 assertEquals(105, '\151'.charCodeAt(0));
 
-    
+
diff --git a/test/mjsunit/regress/regress-696.js b/test/mjsunit/regress/regress-696.js
index 21977e1..e443c42 100644
--- a/test/mjsunit/regress/regress-696.js
+++ b/test/mjsunit/regress/regress-696.js
@@ -28,7 +28,7 @@
 // See: http://code.google.com/p/v8/issues/detail?id=696
 // Because of the change in dateparser in revision 4557 to support time
 // only strings in Date.parse we also misleadingly supported strings with non
-// leading numbers. 
+// leading numbers.
 
 assertTrue(isNaN(Date.parse('x')));
 assertTrue(isNaN(Date.parse('1x')));
diff --git a/test/mjsunit/regress/regress-70066.js b/test/mjsunit/regress/regress-70066.js
index b8386a7..01c2f4f 100644
--- a/test/mjsunit/regress/regress-70066.js
+++ b/test/mjsunit/regress/regress-70066.js
@@ -57,21 +57,18 @@
 assertEquals(0, x, "test2");  // Global x is undisturbed.
 
 
-// Delete on an argument.  This hits the same code paths as test5 because
-// 'with' forces all parameters to be indirected through the arguments
-// object.
+// Delete on a parameter.
 function test3(value) {
   var status;
   with ({}) { status = delete value; }
   return value + ":" + status;
 }
 
-assertEquals("undefined:true", test3(3), "test3");
+assertEquals("3:false", test3(3), "test3");
 assertEquals(0, x, "test3");  // Global x is undisturbed.
 
 
-// Delete on an argument from an outer context.  This hits the same code
-// path as test2.
+// Delete on a parameter found in an outer context.
 function test4(value) {
   function f() {
     with ({}) { return delete value; }
@@ -84,15 +81,14 @@
 assertEquals(0, x, "test4");  // Global x is undisturbed.
 
 
-// Delete on an argument found in the arguments object.  Such properties are
-// normally DONT_DELETE in JavaScript but deletion is allowed by V8.
+// Delete on a parameter, arguments object should be unaffected.
 function test5(value) {
   var status;
   with ({}) { status = delete value; }
   return arguments[0] + ":" + status;
 }
 
-assertEquals("undefined:true", test5(5), "test5");
+assertEquals("5:false", test5(5), "test5");
 assertEquals(0, x, "test5");  // Global x is undisturbed.
 
 function test6(value) {
@@ -103,7 +99,7 @@
   return arguments[0] + ":" + status;
 }
 
-assertEquals("undefined:true", test6(6), "test6");
+assertEquals("6:false", test6(6), "test6");
 assertEquals(0, x, "test6");  // Global x is undisturbed.
 
 
diff --git a/test/mjsunit/regress/regress-720.js b/test/mjsunit/regress/regress-720.js
index 97e1284..267b32d 100644
--- a/test/mjsunit/regress/regress-720.js
+++ b/test/mjsunit/regress/regress-720.js
@@ -27,7 +27,7 @@
 
 // This regression test is used to ensure that Object.defineProperty
 // keeps the existing value of the writable flag if none is given
-// in the provided descriptor. 
+// in the provided descriptor.
 // See: http://code.google.com/p/v8/issues/detail?id=720
 
 var o = {x: 10};
diff --git a/test/mjsunit/regress/regress-747.js b/test/mjsunit/regress/regress-747.js
index 6fcc000..648c366 100644
--- a/test/mjsunit/regress/regress-747.js
+++ b/test/mjsunit/regress/regress-747.js
@@ -40,7 +40,7 @@
   callEval();
 } catch (e) {
   assertUnreachable();
-} 
+}
 
 gc();
 gc();
@@ -53,4 +53,4 @@
   callEval();
 } catch (e) {
   assertUnreachable();
-} 
+}
diff --git a/test/mjsunit/regress/regress-752.js b/test/mjsunit/regress/regress-752.js
index 1142a1f..d38870e 100644
--- a/test/mjsunit/regress/regress-752.js
+++ b/test/mjsunit/regress/regress-752.js
@@ -33,4 +33,4 @@
   return value === 42 ? new Boolean(false) : value;
 }
 
-assertEquals(JSON.stringify([42], replacer), "[false]");
+assertEquals("[false]", JSON.stringify([42], replacer));
diff --git a/test/mjsunit/regress/regress-760-1.js b/test/mjsunit/regress/regress-760-1.js
index 2e0cee5..081c993 100644
--- a/test/mjsunit/regress/regress-760-1.js
+++ b/test/mjsunit/regress/regress-760-1.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Check that when valueOf for a String object is overwritten it is called and
-// the result used when that object is added with a string.  
+// the result used when that object is added with a string.
 
 // See: http://code.google.com/p/v8/issues/detail?id=760
 
diff --git a/test/mjsunit/regress/regress-760-2.js b/test/mjsunit/regress/regress-760-2.js
index 1b1cbfe..549ed4e 100644
--- a/test/mjsunit/regress/regress-760-2.js
+++ b/test/mjsunit/regress/regress-760-2.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Check that when valueOf for a String object is overwritten it is called and
-// the result used when that object is added with a string.  
+// the result used when that object is added with a string.
 
 // See: http://code.google.com/p/v8/issues/detail?id=760
 
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-794.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-794.js
index aa93b25..409b2ef 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-794.js
@@ -25,12 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that a bound function does not have a prototype.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {}
+assertFalse("prototype" in foo.bind());
diff --git a/test/mjsunit/regress/regress-798.js b/test/mjsunit/regress/regress-798.js
index 423c883..182eb4d 100644
--- a/test/mjsunit/regress/regress-798.js
+++ b/test/mjsunit/regress/regress-798.js
@@ -32,7 +32,7 @@
   try {
     y.x = 40;
   } catch (e) {
-    assertEquals(3, e.stack.split('\n').length); 
+    assertEquals(3, e.stack.split('\n').length);
   }
   return 40;
 });
@@ -41,7 +41,7 @@
   try {
     y.x = 40;
   } catch(e) {
-    assertEquals(3, e.stack.split('\n').length); 
+    assertEquals(3, e.stack.split('\n').length);
   }
 });
 
@@ -50,7 +50,7 @@
   try {
     y.x = 30;
   } catch (e) {
-    assertEquals(3, e.stack.split('\n').length); 
+    assertEquals(3, e.stack.split('\n').length);
   }
   return 30;
 }
@@ -59,7 +59,7 @@
   try {
     y.x = 30;
   } catch(e) {
-    assertEquals(3, e.stack.split('\n').length); 
+    assertEquals(3, e.stack.split('\n').length);
   }
 }
 
@@ -72,7 +72,7 @@
     try {
       y.x = 40;
     } catch (e) {
-      assertEquals(3, e.stack.split('\n').length); 
+      assertEquals(3, e.stack.split('\n').length);
     }
     return 40;
   },
@@ -80,7 +80,7 @@
     try {
       y.x = 40;
     } catch(e) {
-      assertEquals(3, e.stack.split('\n').length); 
+      assertEquals(3, e.stack.split('\n').length);
     }
   }
 }
@@ -88,7 +88,7 @@
 Object.defineProperty(x, 'c', descriptor)
 
 // Check that the stack for an exception in a getter and setter produce the
-// expected stack height.   
+// expected stack height.
 x.a;
 x.b;
 x.c;
@@ -106,4 +106,3 @@
 xx.a = 1;
 xx.b = 1;
 xx.c = 1;
-
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-82769.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-82769.js
index aa93b25..6a95e9a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-82769.js
@@ -25,12 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --noinline-new
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+x = -1;
+y = -0;
+for (var i = 0; i < 5; i++) {
+  assertEquals(0xFFFFFFFF, (x >>> y));
 }
-
-test();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-84234.js
similarity index 72%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-84234.js
index aa93b25..6377fb3 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-84234.js
@@ -25,12 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --expose-gc --noopt
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var gTestcases = new Array();
+
+function TestCase(n, d, e, a) {
+  gTestcases[gTc++] = this;
+  for ( gTc=0; gTc < gTestcases.length; gTc++ );
 }
 
-test();
+for ( var i = 0x0530; i <= 0x058F; i++ ) {
+  new TestCase("15.5.4.11-6",
+               eval("var s = new String(String.fromCharCode(i)); s.toLowerCase().charCodeAt(0)"));
+}
+var gTc= 0;
+
+
+for (var j = 0; j < 10; j++) {
+  test();
+  function test() {
+    for ( 0; gTc < gTestcases.length; gTc++ ) {
+      var MYOBJECT = new MyObject();
+    }
+    gc();
+  }
+  function MyObject( n ) {
+    this.__proto__ = Number.prototype;
+  }
+}
diff --git a/test/mjsunit/regress/regress-87.js b/test/mjsunit/regress/regress-87.js
index 131cb58..10446fd 100644
--- a/test/mjsunit/regress/regress-87.js
+++ b/test/mjsunit/regress/regress-87.js
@@ -25,34 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-function testFlags(flagstring, global, ignoreCase, multiline) {
-  var text = "/x/"+flagstring;
-  var re = eval(text);
-  assertEquals(global, re.global, text + ".global");
-  assertEquals(ignoreCase, re.ignoreCase, text + ".ignoreCase");
-  assertEquals(multiline, re.multiline, text + ".multiline");
-}
+// In Issue 87, we allowed unicode escape sequences in RegExp flags.
+// However, according to ES5 they should not be interpreted, but passed
+// verbatim to the RegExp constructor.
+// (On top of that, the original test was buggy and never tested anything.)
+// The behavior was changed in r8969 to stop interpreting escapes, but this
+// test did not cover that change and only started failing once invalid
+// flag characters were made an error as well.
 
-testFlags("", false, false, false);
+assertThrows("/x/\\u0067");
+assertThrows("/x/\\u0069");
+assertThrows("/x/\\u006d");
 
-testFlags("\u0067", true, false, false);
+assertThrows("/x/\\u0067i");
+assertThrows("/x/\\u0069m");
+assertThrows("/x/\\u006dg");
 
-testFlags("\u0069", false, true, false)
+assertThrows("/x/m\\u0067");
+assertThrows("/x/g\\u0069");
+assertThrows("/x/i\\u006d");
 
-testFlags("\u006d", false, false, true);
+assertThrows("/x/m\\u0067i");
+assertThrows("/x/g\\u0069m");
+assertThrows("/x/i\\u006dg");
 
-testFlags("\u0068", false, false, false);
-
-testFlags("\u0020", false, false, false);
-
-
-testFlags("\u0067g", true, false, false);
-
-testFlags("g\u0067", true, false, false);
-
-testFlags("abc\u0067efg", true, false, false);
-
-testFlags("i\u0067", true, true, false);
-
-testFlags("\u0067i", true, true, false);
-
+assertThrows("/x/\\u0068");
+assertThrows("/x/\\u0020");
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-88591.js
similarity index 72%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-88591.js
index aa93b25..e42570a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-88591.js
@@ -25,12 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Regression test for a crash.  A data property in the global object's
+// prototype shadowed by a setter in the global object's prototype's
+// prototype would crash or assert when seen by Runtime_DeclareContextSlot.
+var called = false;
+Object.prototype.__defineSetter__('x', function(x) { called = true; });
+Object.prototype.__defineGetter__('x', function () { return 0; });
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+this.__proto__ = { x: 1 };
 
-test();
+try { fail; } catch (e) { eval('const x = 2'); }
+
+var o = Object.getOwnPropertyDescriptor(this, 'x');
+assertFalse(called);
+assertEquals(2, o.value);
+assertEquals(false, o.writable);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-88858.js
similarity index 66%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-88858.js
index aa93b25..ba33f87 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-88858.js
@@ -25,12 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --expose-gc
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+// Verify that JSObject::PreventExtensions works for arguments objects.
 
-test();
+try {
+    function make_watcher(name) { }
+    var o, p;
+    function f(flag) {
+        if (flag) {
+            o = arguments;
+        } else {
+            p = arguments;
+            o.watch(0, (arguments-1901)('o'));
+            p.watch(0, make_watcher('p'));
+            p.unwatch(0);
+            o.unwatch(0);
+            p[0] = 4;
+            assertEq(flag, 4);
+        }
+    }
+    f(true);
+    f(false);
+    reportCompare(true, true);
+} catch(exc1) { }
+
+try {
+    function __noSuchMethod__() {
+       if (anonymous == "1")
+           return NaN;
+       return __construct__;
+    }
+    f.p = function() { };
+    Object.freeze(p);
+    new new freeze().p;
+    reportCompare(0, 0, "ok");
+} catch(exc2) { }
+
+gc();
diff --git a/test/mjsunit/regress/regress-892742.js b/test/mjsunit/regress/regress-892742.js
index a60395e..78a57b2 100644
--- a/test/mjsunit/regress/regress-892742.js
+++ b/test/mjsunit/regress/regress-892742.js
@@ -26,25 +26,23 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 function f() {
-  return/* useless*/1;
+  return/* Counts as non-line-terminating whitespace */1;
 };
 
-
-// According to ECMA-262, this comment should actually be parsed as a
-// line terminator making g() return undefined, but this is not the
-// way it's handled by Spidermonkey or KJS.
+// According to ECMA-262, this comment should be parsed as a
+// line terminator making g() return undefined.
 function g() {
-  return/* useless
-         */2;
+  return/* Counts as line-terminator whitespace.
+          */2;
 };
 
 function h() {
-  return// meaningful
+  return// Comment doesn't include line-terminator at end.
       3;
 };
 
 
 assertEquals(1, f());
-assertEquals(2, g());
-assertTrue(typeof h() == 'undefined', 'h');
+assertEquals(undefined, g());
+assertEquals(undefined, h());
 
diff --git a/test/mjsunit/bugs/bug-900066.js b/test/mjsunit/regress/regress-91008.js
similarity index 79%
copy from test/mjsunit/bugs/bug-900066.js
copy to test/mjsunit/regress/regress-91008.js
index 3b7cc3f..d7ea2df 100644
--- a/test/mjsunit/bugs/bug-900066.js
+++ b/test/mjsunit/regress/regress-91008.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,14 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When a property of the arguments array is deleted, it
-// must be "disconnected" from the corresponding parameter.
-// Re-introducing the property does not connect to the parameter.
-
-function f(x) {
-  delete arguments[0];
-  arguments[0] = 100;
-  return x;
+function testsort(n) {
+  var numbers=new Array(n);
+  for (var i=0;i<n;i++) numbers[i]=i;
+  delete numbers[50];
+  delete numbers[150];
+  delete numbers[25000];
+  delete numbers[n-1];
+  delete numbers[n-2];
+  delete numbers[30];
+  delete numbers[2];
+  delete numbers[1];
+  delete numbers[0];
+  numbers.sort();
 }
 
-assertEquals(10, f(10));
+testsort(100000)
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-91010.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-91010.js
index aa93b25..a077999 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-91010.js
@@ -25,12 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+try {
+  try {
+    var N = 100*1000;
+    var array = Array(N);
+    for (var i = 0; i != N; ++i)
+      array[i] = i;
+  } catch(ex) {}
+  array.unshift('Kibo');
+} catch(ex) {}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-91013.js
similarity index 73%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-91013.js
index aa93b25..c61e2b1 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-91013.js
@@ -25,12 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that the KeyedStore stub for the unboxed double array backing store
+// correctly returns the stored value as the result.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Flags: --allow-natives-syntax --unbox-double-arrays
+
+// Create array with unboxed double array backing store.
+var i = 100000;
+var a = new Array(i);
+for (var j = 0; j < i; j++) {
+  a[j] = 0.5;
 }
 
-test();
+assertTrue(%HasFastDoubleElements(a));
+
+// Store some smis into it.
+for (var j = 0; j < 10; j++) {
+  assertEquals(j, a[j] = j);
+}
+
+// Store some heap numbers into it.
+for (var j = 0; j < 10; j++) {
+  var v = j + 0.5;
+  assertEquals(v, a[j] = v);
+}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-91120.js
similarity index 73%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-91120.js
index aa93b25..117acac 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-91120.js
@@ -25,12 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// We intend that the function declaration for g inside catch is hoisted to
+// function f's scope.  Invoke it before try/catch, in the try block, in the
+// catch block, after try/catch, and outside f, and verify that it has
+// access to the proper binding of x.
+var x = 'global';
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function f() {
+  var x = 'function';
+  assertEquals('function', g());
+  try {
+    assertEquals('function', g());
+    throw 'catch';
+  } catch (x) {
+    function g() { return x; }
+    assertEquals('function', g());
+  }
+  assertEquals('function', g());
+  return g;
 }
 
-test();
+assertEquals('function', f()());
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-91787.js
similarity index 86%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-91787.js
index aa93b25..96310d0 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-91787.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Unterminated non-ASCII string literals in JSON code were not
+// detected correctly.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+// Shouldn't crash (due to stack overflow).
+// Should throw due to invalid syntax.
+assertThrows(function() {
+  JSON.parse('"\x80unterminated');
+});
diff --git a/test/mjsunit/regress/regress-918.js b/test/mjsunit/regress/regress-918.js
index 4b6ddba..871e9d9 100644
--- a/test/mjsunit/regress/regress-918.js
+++ b/test/mjsunit/regress/regress-918.js
@@ -28,6 +28,6 @@
 // Parser should not accept parentheses around labels.
 // See http://code.google.com/p/v8/issues/detail?id=918
 
-// The label was parsed as an expression and then tested for being a 
+// The label was parsed as an expression and then tested for being a
 // single identifier. This threw away the parentheses.
 assertThrows("(label):42;");
diff --git a/test/mjsunit/regress/regress-925537.js b/test/mjsunit/regress/regress-925537.js
index 11582ea..d50c568 100644
--- a/test/mjsunit/regress/regress-925537.js
+++ b/test/mjsunit/regress/regress-925537.js
@@ -28,8 +28,8 @@
 function assertClose(expected, actual) {
   var delta = 0.00001;
   if (Math.abs(expected - actual) > delta) {
-    print('Failure: Expected <' + actual + '> to be close to <' + 
-          expected + '>');    
+    print('Failure: Expected <' + actual + '> to be close to <' +
+          expected + '>');
   }
 }
 
diff --git a/test/mjsunit/regress/regress-937896.js b/test/mjsunit/regress/regress-937896.js
index e8e5ef2..e7831da 100644
--- a/test/mjsunit/regress/regress-937896.js
+++ b/test/mjsunit/regress/regress-937896.js
@@ -41,7 +41,7 @@
       }
     }
   } catch (e) {
-    // Empty. 
+    // Empty.
   }
   return 42;
 }
diff --git a/test/mjsunit/bugs/bug-900066.js b/test/mjsunit/regress/regress-94425.js
similarity index 77%
copy from test/mjsunit/bugs/bug-900066.js
copy to test/mjsunit/regress/regress-94425.js
index 3b7cc3f..4a48f4a 100644
--- a/test/mjsunit/bugs/bug-900066.js
+++ b/test/mjsunit/regress/regress-94425.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,14 +25,22 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When a property of the arguments array is deleted, it
-// must be "disconnected" from the corresponding parameter.
-// Re-introducing the property does not connect to the parameter.
+// Flags: --expose-gc
 
-function f(x) {
-  delete arguments[0];
-  arguments[0] = 100;
-  return x;
-}
+var N = 2040 - 2 + 10;
+var arr = new Array(N);
 
-assertEquals(10, f(10));
+gc();
+gc();
+gc();
+
+// arr is in the large object space now.
+// Write new space object into it.
+arr[arr.length - 2] = new Object;
+
+// Shift array multiple times to ensure that young
+// object crosses region boundary.
+for (var i = 0; i < 9; i++) arr.shift();
+
+// Do a GC to verify region dirty marks.
+gc();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-95113.js
similarity index 79%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-95113.js
index aa93b25..f01b270 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-95113.js
@@ -25,12 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function get_double_array() {
+  var a = new Array(100000);
+  var i = 0;
+  while (!%HasFastDoubleElements(a)) {
+    a[i] = i;
+    i++;
+  }
+  assertTrue(%HasFastDoubleElements(a));
+  a.length = 1;
+  a[0] = 1.5;
+  a.length = 2;
+  a[1] = 2.5;
+  assertEquals(a[0], 1.5);
+  assertEquals(a[1], 2.5);
+  assertTrue(%HasFastDoubleElements(a));
+  return a;
 }
 
-test();
+var a = get_double_array();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-95485.js
similarity index 84%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-95485.js
index aa93b25..2510072 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-95485.js
@@ -25,12 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function Test() {
+  var left  = 'XXX';
+  var right = 'YYY';
+  for (var i = 0; i < 3; i++) {
+    var cons = left + right;
+    var substring = cons.substring(2, 4);
+    try {
+      with ({Test: i})
+          continue;
+    } finally { }
+  }
+  return substring;
 }
 
-test();
+assertEquals('XY', Test());
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-955.js
similarity index 74%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-955.js
index aa93b25..9a9a0b0 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-955.js
@@ -25,12 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// See: http://code.google.com/p/v8/issues/detail?id=955
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+// Correctly parse signed numbers.
+assertEquals(-0, parseInt("-0"));
+assertEquals(0, parseInt("+0"));
 
-test();
+// Don't allow whitespace after signs in parseInt.
+assertEquals(NaN, parseInt("- 0"));
+assertEquals(NaN, parseInt("+ 0"));
+assertEquals(NaN, parseInt("-\t0"));
+assertEquals(NaN, parseInt("+\t0"));
+
+// Do allow whitespace at start.
+assertEquals(-0, parseInt(" -0"));
+assertEquals(0, parseInt(" +0"));
+assertEquals(-0, parseInt("\t-0"));
+assertEquals(0, parseInt("\t+0"));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-95920.js
similarity index 67%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-95920.js
index aa93b25..20e73fb 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-95920.js
@@ -25,12 +25,34 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Tests that objects with external arrays cannot be sealed or have their
+// properties redefined.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+(function() {
+  assertThrows(function() {
+    [0].every(function(){ Object.seal((new Int8Array(42))); });
+    assertUnreachable();
+    }, TypeError)
+})();
 
-test();
+(function() {
+  assertThrows(function() {
+    [0].every(function(){ Object.freeze((new Int8Array(42))); });
+    assertUnreachable();
+    }, TypeError)
+})();
+
+(function() {
+  assertThrows(function() {
+    [0].every(function(){ Object.preventExtensions((new Int8Array(42))); });
+    assertUnreachable();
+    }, TypeError)
+})();
+
+(function() {
+  assertThrows(function() {
+      Object.defineProperty(new Int8Array(42), "1",
+                            { writable: false, value: "1" });
+      assertUnreachable();
+    })
+})();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-96523.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-96523.js
index aa93b25..e611ce3 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-96523.js
@@ -25,12 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+with ({x:'outer'}) {
+  (function() {
+    var x = 'inner';
+    try {
+      throw 'Exception';
+    } catch (e) {
+      assertEquals('inner', x);
+    }
+  })()
 }
-
-test();
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-97116.js
similarity index 75%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-97116.js
index aa93b25..b858ca5 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-97116.js
@@ -25,12 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --expose-gc --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Check that we are not flushing code for inlined functions that
+// have a pending lazy deoptimization on the stack.
+
+function deopt() {
+  try { } catch (e) { }  // Avoid inlining.
+  %DeoptimizeFunction(outer);
+  for (var i = 0; i < 10; i++) gc();  // Force code flushing.
 }
 
-test();
+function outer(should_deopt) {
+  inner(should_deopt);
+}
+
+function inner(should_deopt) {
+  if (should_deopt) deopt();
+}
+
+outer(false);
+outer(false);
+%OptimizeFunctionOnNextCall(outer);
+outer(true);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-arguments-gc.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-arguments-gc.js
index aa93b25..baa4e16 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-arguments-gc.js
@@ -25,12 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --expose-gc --nocleanup_code_caches_at_gc
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+function f(x) {
+  gc();
+  arguments[0] = {};
 }
 
-test();
+f(1);
+f(1);
+f(1);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-bind-receiver.js
similarity index 70%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-bind-receiver.js
index aa93b25..fc83a4e 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-bind-receiver.js
@@ -25,12 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+function strict() { 'use strict'; return this; }
+function lenient() { return this; }
+var obj = {};
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
+assertEquals(true, strict.bind(true)());
+assertEquals(42, strict.bind(42)());
+assertEquals("", strict.bind("")());
+assertEquals(null, strict.bind(null)());
+assertEquals(undefined, strict.bind(undefined)());
+assertEquals(obj, strict.bind(obj)());
 
-test();
+assertEquals(true, lenient.bind(true)() instanceof Boolean);
+assertEquals(true, lenient.bind(42)() instanceof Number);
+assertEquals(true, lenient.bind("")() instanceof String);
+assertEquals(this, lenient.bind(null)());
+assertEquals(this, lenient.bind(undefined)());
+assertEquals(obj, lenient.bind(obj)());
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-crbug-84186.js
similarity index 78%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-crbug-84186.js
index aa93b25..865bf9e 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-crbug-84186.js
@@ -25,12 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Test that the JSON parser produces the expected string when the string is
+// so long that it cannot fit in new space and it contains special
+// characters.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var json = '{"key":"';
+var key = '';
+var expected = '';
+for(var i = 0; i < 60000; i++) {
+  key = key + "TESTING" + i + "\\n";
+  expected = expected + "TESTING" + i + "\n";
 }
-
-test();
+json = json + key  + '"}';
+var out = JSON.parse(json);
+assertEquals(expected, out.key);
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-crbug-87478.js
similarity index 84%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-crbug-87478.js
index aa93b25..115b3fd 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-crbug-87478.js
@@ -25,12 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Check that arguments access ICs correctly handle non-JSObject
+// receivers.
+function f(array) { return array[0]; }
+function args(a) { return arguments; }
+// Put the keyed load in f into the keyed-load-arguments state.
+for (var i = 0; i < 10; i++) {
+  f(args(1));
 }
-
-test();
+f('123');
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-fundecl.js
similarity index 81%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-fundecl.js
index aa93b25..fddb589 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-fundecl.js
@@ -25,12 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --allow-natives-syntax
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Test hoisting of function declarations in the optimizing
+// compiler in case of deoptimization.
+
+function h(a, b) {
+  var r = a + b;
+  function X() { return 42; }
+  return r + X();
 }
 
-test();
+for (var i = 0; i < 5; i++) h(1,2);
+
+%OptimizeFunctionOnNextCall(h);
+
+assertEquals(45, h(1,2));
+assertEquals("foo742", h("foo", 7));
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/regress/regress-regexp-codeflush.js
similarity index 67%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/regress/regress-regexp-codeflush.js
index aa93b25..5fa42bf 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/regress/regress-regexp-codeflush.js
@@ -25,12 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Flags: --gc_global
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+// Regression test for a regexp that has multiple matches and which
+// internally calls RegExpImpl::IrregexpExecOnce more than once without
+// ensuring that the regexp is compiled.
+// This can crash if the code was exchanged with the sweep generation
+// (for code flushing support) during GC while matching.
+
+var re = new RegExp('(s)', "g");
+
+function foo() {
+  return "42";
 }
 
-test();
+// Run enough times to trigger a number of GCs (all mark-sweep because of
+// the --gc_global flag).
+for ( var i = 0; i < 10; i++) {
+  // Make a long string with plenty of matches for re.
+  var x = "s foo s bar s foo s bar s";
+  x = x + x;
+  x = x + x;
+  x = x + x;
+  x = x + x;
+  x = x + x;
+  x = x + x;
+  x = x + x;
+  x.replace(re, foo);
+}
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/mjsunit/scope-calls-eval.js
similarity index 74%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/mjsunit/scope-calls-eval.js
index aa93b25..4a941aa 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/mjsunit/scope-calls-eval.js
@@ -25,12 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Tests if the information about eval calls in a function is
+// propagated correctly through catch and with blocks.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+
+function f1() {
+  var x = 5;
+  function g() {
+    try {
+      throw '';
+    } catch (e) {
+      eval('var x = 3;');
+    }
+    try {
+      throw '';
+    } catch (e) {
+      return x;
+    }
+  }
+  return g();
 }
 
-test();
+
+function f2() {
+  var x = 5;
+  function g() {
+    with ({e:42}) {
+      eval('var x = 3;');
+    }
+    with ({e:42}) {
+      return x;
+    }
+  }
+  return g();
+}
+
+
+assertEquals(3, f1());
+assertEquals(3, f2());
diff --git a/test/mjsunit/setter-on-constructor-prototype.js b/test/mjsunit/setter-on-constructor-prototype.js
index d5718f9..a74f7da 100644
--- a/test/mjsunit/setter-on-constructor-prototype.js
+++ b/test/mjsunit/setter-on-constructor-prototype.js
@@ -35,14 +35,14 @@
   if (ensure_fast_case) {
     %ToFastProperties(C1.prototype);
   }
-  
+
   for (var i = 0; i < 10; i++) {
     var c1 = new C1();
     assertEquals("undefined", typeof c1.x);
     assertEquals(23, c1.y);
   }
-  
-  
+
+
   function C2() {
     this.x = 23;
   };
@@ -51,14 +51,14 @@
   if (ensure_fast_case) {
     %ToFastProperties(C2.prototype.__proto__)
   }
-  
+
   for (var i = 0; i < 10; i++) {
     var c2 = new C2();
     assertEquals("undefined", typeof c2.x);
     assertEquals(23, c2.y);
   }
-  
-  
+
+
   function C3() {
     this.x = 23;
   };
@@ -67,14 +67,14 @@
   if (ensure_fast_case) {
     %ToFastProperties(C3.prototype);
   }
-  
+
   for (var i = 0; i < 10; i++) {
     var c3 = new C3();
     assertEquals("undefined", typeof c3.x);
     assertEquals(23, c3.y);
   }
-  
-  
+
+
   function C4() {
     this.x = 23;
   };
@@ -84,14 +84,14 @@
   if (ensure_fast_case) {
     %ToFastProperties(C4.prototype.__proto__);
   }
-  
+
   for (var i = 0; i < 10; i++) {
     var c4 = new C4();
     assertEquals("undefined", typeof c4.x);
     assertEquals(23, c4.y);
   }
-  
-  
+
+
   function D() {
     this.x = 23;
   };
@@ -99,7 +99,7 @@
   if (ensure_fast_case) {
     %ToFastProperties(D.prototype);
   }
-  
+
   for (var i = 0; i < 10; i++) {
     var d = new D();
     assertEquals(23, d.x);
diff --git a/test/mjsunit/strict-mode-eval.js b/test/mjsunit/strict-mode-eval.js
index 018ed9e..391daaa 100644
--- a/test/mjsunit/strict-mode-eval.js
+++ b/test/mjsunit/strict-mode-eval.js
@@ -42,36 +42,44 @@
 eval_alias(code4);
 
 function strict1() {
+  var exception = false;
   try {
     eval(code1);
-    assertUnreachable("did not throw exception");
   } catch (e) {
+    exception = true;
     assertInstanceof(e, SyntaxError);
   }
+  assertTrue(exception);
 
   function strict2() {
+    var exception = false;
     try {
       eval(code2);
-      assertUnreachable("did not throw exception");
     } catch (e) {
+      exception = true;
       assertInstanceof(e, SyntaxError);
     }
+    assertTrue(exception);
 
     function strict3() {
+      var exception = false;
       try {
         eval(code3);
-        assertUnreachable("did not throw exception");
       } catch (e) {
+        exception = true;
         assertInstanceof(e, SyntaxError);
       }
+      assertTrue(exception);
 
       function strict4() {
+        var exception = false;
         try {
           eval(code4);
-          assertUnreachable("did not throw exception");
         } catch (e) {
+          exception = true;
           assertInstanceof(e, SyntaxError);
         }
+        assertTrue(exception);
       }
       strict4();
     }
diff --git a/test/mjsunit/strict-mode-implicit-receiver.js b/test/mjsunit/strict-mode-implicit-receiver.js
new file mode 100644
index 0000000..338f6d1
--- /dev/null
+++ b/test/mjsunit/strict-mode-implicit-receiver.js
@@ -0,0 +1,192 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var y = 3;
+
+function get_y() { return this.y; }
+function strict_get_y() { "use strict"; return this.y; }
+
+// Test calls to strict mode function as methods.
+for (var i = 0; i < 10; i++) assertEquals(3, strict_get_y.call(this));
+var o = { y: 42 };
+for (var i = 0; i < 10; i++) assertEquals(42, strict_get_y.call(o));
+
+// Test calls to strict mode function with implicit receiver.
+function g() {
+  var exception = false;
+  try { strict_get_y(); } catch(e) { exception = true; }
+  assertTrue(exception);
+}
+for (var i = 0; i < 3; i++) g();
+
+// Test calls to local strict mode function with implicit receiver.
+function local_function_test() {
+  function get_y() { return this.y; }
+  function strict_get_y() { "use strict"; return this.y; }
+  assertEquals(3, get_y());
+  assertEquals(3, get_y(23));
+  var exception = false;
+  try {
+    strict_get_y();
+  } catch(e) {
+    exception = true;
+  }
+  assertTrue(exception);
+}
+
+for (var i = 0; i < 10; i++) {
+  local_function_test();
+}
+
+// Test calling a strict-mode function bound to a catch variable,
+// with an implicit receiver.
+var exception = false;
+try {
+  throw strict_get_y;
+} catch(f) {
+  try {
+    f();
+  } catch(e) {
+    exception = true;
+  }
+  assertTrue(exception);
+}
+
+
+// Test calls to strict-mode function with the object from a with
+// statement as the receiver.
+with(this) {
+  assertEquals(3, strict_get_y());
+  assertEquals(3, strict_get_y(32));
+}
+
+var o = { y: 27 };
+o.f = strict_get_y;
+with(o) {
+  assertEquals(27, f());
+  assertEquals(27, f(23));
+}
+
+
+// Check calls to eval within a function with 'undefined' as receiver.
+function implicit_receiver_eval() {
+  "use strict";
+  return eval("this");
+}
+
+assertEquals(void 0, implicit_receiver_eval());
+assertEquals(void 0, implicit_receiver_eval(32));
+
+
+// Strict mode function to get inlined.
+function strict_return_receiver() {
+  "use strict";
+  return this;
+}
+
+// Inline with implicit receiver.
+function g() {
+  return strict_return_receiver();
+}
+
+for (var i = 0; i < 5; i++) {
+  assertEquals(void 0, g());
+  assertEquals(void 0, g(42));
+}
+%OptimizeFunctionOnNextCall(g);
+assertEquals(void 0, g(42));
+assertEquals(void 0, g());
+
+// Inline with explicit receiver.
+function g2() {
+  var o = {};
+  o.f = strict_return_receiver;
+  return o.f();
+}
+
+for (var i = 0; i < 5; i++) {
+  assertTrue(typeof g2() == "object");
+  assertTrue(typeof g2(42) == "object");
+}
+%OptimizeFunctionOnNextCall(g2);
+assertTrue(typeof g2() == "object");
+assertTrue(typeof g2(42) == "object");
+
+// Test calls of aliased eval.
+function outer_eval_receiver() {
+  var eval = function() { return this; }
+  function inner_strict() {
+    "use strict";
+    assertEquals('object', typeof eval());
+  }
+  inner_strict();
+}
+outer_eval_receiver();
+
+function outer_eval_conversion3(eval, expected) {
+  function inner_strict() {
+    "use strict";
+    var x = eval("this");
+    assertEquals(expected, typeof x);
+  }
+  inner_strict();
+}
+
+function strict_return_this() { "use strict"; return this; }
+function return_this() { return this; }
+function strict_eval(s) { "use strict"; return eval(s); }
+function non_strict_eval(s) { return eval(s); }
+
+outer_eval_conversion3(strict_return_this, 'undefined');
+outer_eval_conversion3(return_this, 'object');
+outer_eval_conversion3(strict_eval, 'undefined');
+outer_eval_conversion3(non_strict_eval, 'object');
+
+// TODO(ager): I'm not sure this is in accordance with the spec. At
+// the moment, any call to eval where eval is not bound in the global
+// context is treated as an indirect call to eval which means that the
+// global context is used and the global object is passed as the
+// receiver.
+outer_eval_conversion3(eval, 'object');
+
+function test_constant_function() {
+  var o = { f: function() { "use strict"; return this; } };
+  this.__proto__ = o;
+  for (var i = 0; i < 10; i++) assertEquals(void 0, f());
+}
+test_constant_function();
+
+function test_field() {
+  var o = { };
+  o.f = function() {};
+  o.f = function() { "use strict"; return this; };
+  this.__proto__ = o;
+  for (var i = 0; i < 10; i++) assertEquals(void 0, f());
+}
+test_field();
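The new strict-mode-implicit-receiver.js test exercises the ES5 rule that a strict function receives its 'this' value uncoerced; a minimal sketch of the rule, using only standard ECMAScript semantics:

function sloppyReceiver() { return this; }
function strictReceiver() { "use strict"; return this; }

sloppyReceiver();          // the global object (sloppy 'this' is coerced)
strictReceiver();          // undefined (strict 'this' is left as-is)
strictReceiver.call(42);   // 42 (no boxing into a Number object either)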
diff --git a/test/mjsunit/strict-mode-opt.js b/test/mjsunit/strict-mode-opt.js
index e2eae33..5ca5c27 100644
--- a/test/mjsunit/strict-mode-opt.js
+++ b/test/mjsunit/strict-mode-opt.js
@@ -25,8 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 var global = 0;
-var MAX = 1000000;
+var MAX = 5;
 
 // Attempt to inline strict in non-strict.
 
@@ -43,6 +45,7 @@
 (function testInlineStrictInNonStrict() {
   for (var i = 0; i <= MAX; i ++) {
     try {
+      if (i == MAX - 1) %OptimizeFunctionOnNextCall(nonstrictCallStrict);
       nonstrictCallStrict(i);
     } catch (e) {
       assertInstanceof(e, ReferenceError);
@@ -68,6 +71,7 @@
 (function testInlineNonStrictInStrict() {
   for (var i = 0; i <= MAX; i ++) {
     try {
+      if (i == MAX - 1) %OptimizeFunctionOnNextCall(strictCallNonStrict);
       strictCallNonStrict(i);
     } catch (e) {
       fail("no exception", "exception");
@@ -87,6 +91,7 @@
 (function testOptimizeStrictAssignToUndefined() {
   for (var i = 0; i <= MAX; i ++) {
     try {
+      if (i == MAX - 1) %OptimizeFunctionOnNextCall(strictAssignToUndefined);
       strictAssignToUndefined(i);
     } catch (e) {
       assertInstanceof(e, ReferenceError);
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
index beca582..30234ba 100644
--- a/test/mjsunit/strict-mode.js
+++ b/test/mjsunit/strict-mode.js
@@ -67,6 +67,9 @@
   with ({}) {};
 })();
 
+// Incorrectly placed 'use strict' directive.
+assertThrows("function foo (x) 'use strict'; {}", SyntaxError);
+
 // 'use strict' in non-directive position.
 (function UseStrictNonDirective() {
   void(0);
@@ -319,14 +322,8 @@
            +arguments, -arguments, ~arguments, !arguments];
 })();
 
-// 7.6.1.2 Future Reserved Words
-var future_reserved_words = [
-  "class",
-  "enum",
-  "export",
-  "extends",
-  "import",
-  "super",
+// 7.6.1.2 Future Reserved Words in strict mode
+var future_strict_reserved_words = [
   "implements",
   "interface",
   "let",
@@ -337,14 +334,17 @@
   "static",
   "yield" ];
 
-function testFutureReservedWord(word) {
+function testFutureStrictReservedWord(word) {
   // Simple use of each reserved word
   CheckStrictMode("var " + word + " = 1;", SyntaxError);
+  CheckStrictMode("typeof (" + word + ");", SyntaxError);
 
   // object literal properties
   eval("var x = { " + word + " : 42 };");
   eval("var x = { get " + word + " () {} };");
   eval("var x = { set " + word + " (value) {} };");
+  eval("var x = { get " + word + " () { 'use strict'; } };");
+  eval("var x = { set " + word + " (value) { 'use strict'; } };");
 
   // object literal with string literal property names
   eval("var x = { '" + word + "' : 42 };");
@@ -364,7 +364,6 @@
 
   // Function names and arguments when the body is strict
   assertThrows("function " + word + " () { 'use strict'; }", SyntaxError);
-  assertThrows("function foo (" + word + ")  'use strict'; {}", SyntaxError);
   assertThrows("function foo (" + word + ", " + word + ") { 'use strict'; }",
                SyntaxError);
   assertThrows("function foo (a, " + word + ") { 'use strict'; }", SyntaxError);
@@ -374,17 +373,14 @@
   assertThrows("var foo = function (" + word + ") { 'use strict'; }",
                SyntaxError);
 
-  // get/set when the body is strict
-  eval("var x = { get " + word + " () { 'use strict'; } };");
-  eval("var x = { set " + word + " (value) { 'use strict'; } };");
-  assertThrows("var x = { get foo(" + word + ") { 'use strict'; } };",
-               SyntaxError);
+  // setter parameter when the body is strict
+  CheckStrictMode("var x = { set foo(" + word + ") {} };", SyntaxError);
   assertThrows("var x = { set foo(" + word + ") { 'use strict'; } };",
                SyntaxError);
 }
 
-for (var i = 0; i < future_reserved_words.length; i++) {
-  testFutureReservedWord(future_reserved_words[i]);
+for (var i = 0; i < future_strict_reserved_words.length; i++) {
+  testFutureStrictReservedWord(future_strict_reserved_words[i]);
 }
 
 function testAssignToUndefined(test, should_throw) {
@@ -842,12 +838,14 @@
   }
 
   for (var i = 0; i < 10; i ++) {
+    var exception = false;
     try {
       strict(o, name);
-      assertUnreachable();
     } catch(e) {
+      exception = true;
       assertInstanceof(e, TypeError);
     }
+    assertTrue(exception);
   }
 })();
 
@@ -1179,3 +1177,10 @@
     assertEquals(test(i), true);
   }
 })();
+
+
+(function TestStrictModeEval() {
+  "use strict";
+  eval("var eval_local = 10;");
+  assertThrows(function() { return eval_local; }, ReferenceError);
+})();
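The added TestStrictModeEval case reflects that a direct eval in strict mode gets its own variable environment, so declarations cannot leak into the caller; a short sketch under the same standard semantics (the function name here is just illustrative):

function strictEvalScope() {
  "use strict";
  eval("var leaked = 1;");   // declaration stays inside the eval's own scope
  return typeof leaked;      // 'undefined'
}
strictEvalScope();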
diff --git a/test/mjsunit/string-compare-alignment.js b/test/mjsunit/string-compare-alignment.js
index a291417..0ae8eb7 100644
--- a/test/mjsunit/string-compare-alignment.js
+++ b/test/mjsunit/string-compare-alignment.js
@@ -29,8 +29,9 @@
 // This situation can arise with sliced strings.  This tests for an ARM bug
 // that was fixed in r554.
 
-var base = "Now is the time for all good men to come to the aid of the party. " + 
-           "Now is the time for all good men to come to the aid of the party."
+var base =
+    "Now is the time for all good men to come to the aid of the party. " +
+    "Now is the time for all good men to come to the aid of the party."
 var s1 = base.substring(0, 64);
 var s2 = base.substring(66, 130);
 
diff --git a/test/mjsunit/string-fromcharcode.js b/test/mjsunit/string-fromcharcode.js
index 7a2db5f..1986dda 100644
--- a/test/mjsunit/string-fromcharcode.js
+++ b/test/mjsunit/string-fromcharcode.js
@@ -65,8 +65,10 @@
   assertEquals("  ", fcc(0x20 + 0.5, 0x20));
 
   var receiver = (num < 5) ? String : (num < 9) ? "dummy" : 42;
-  fcc2 = (num < 5) ? fcc : (num < 9) ? constFun("dummy") : constFun(42);
-  var expected = (num < 5) ? " " : (num < 9) ? "dummy" : 42;
+  fcc2 = (num < 5) ? fcc
+                   : (num < 9) ? constFun(Object("dummy"))
+                               : constFun(Object(42));
+  var expected = (num < 5) ? " " : (num < 9) ? Object("dummy") : Object(42);
   assertEquals(expected, receiver.fromCharCode(0x20));
   assertEquals(expected, receiver.fromCharCode(0x20 - 0x10000));
   assertEquals(expected, receiver.fromCharCode(0x20 + 0.5));
diff --git a/test/mjsunit/string-index.js b/test/mjsunit/string-index.js
index 1d6476e..315708c 100644
--- a/test/mjsunit/string-index.js
+++ b/test/mjsunit/string-index.js
@@ -61,7 +61,7 @@
 assertEquals("undefined", typeof(foo[-2]), "negative index");
 
 var S = new String("foo");
-assertEquals("foo", S);
+assertEquals(Object("foo"), S);
 assertEquals("f", S[0], "string object");
 assertEquals("f", S["0"], "string object");
 S[0] = 'bente';
@@ -131,7 +131,7 @@
 assertEquals(false, "3" in S);
 
 var N = new Number(43);
-assertEquals(43, N);
+assertEquals(Object(43), N);
 N[-2] = "Alpha";
 assertEquals("Alpha", N[-2]);
 N[0] = "Zappa";
diff --git a/test/mjsunit/string-indexof-1.js b/test/mjsunit/string-indexof-1.js
index c5ae4b8..db3623f 100644
--- a/test/mjsunit/string-indexof-1.js
+++ b/test/mjsunit/string-indexof-1.js
@@ -63,7 +63,7 @@
 assertEquals(2, twoByteString.indexOf("\u03a3"), "First Sigma");
 assertEquals(3, twoByteString.indexOf("\u03a3",3), "Second Sigma");
 assertEquals(4, twoByteString.indexOf("\u0395"), "Epsilon");
-assertEquals(-1, twoByteString.indexOf("\u0392"), "Not beta");  
+assertEquals(-1, twoByteString.indexOf("\u0392"), "Not beta");
 
 // Test multi-char pattern
 assertEquals(0, twoByteString.indexOf("\u039a\u0391"), "lambda Alpha");
@@ -71,7 +71,7 @@
 assertEquals(2, twoByteString.indexOf("\u03a3\u03a3"), "Sigma Sigma");
 assertEquals(3, twoByteString.indexOf("\u03a3\u0395"), "Sigma Epsilon");
 
-assertEquals(-1, twoByteString.indexOf("\u0391\u03a3\u0395"), 
+assertEquals(-1, twoByteString.indexOf("\u0391\u03a3\u0395"),
     "Not Alpha Sigma Epsilon");
 
 //single char pattern
diff --git a/test/mjsunit/string-indexof-2.js b/test/mjsunit/string-indexof-2.js
index a7c3f60..48db84d 100644
--- a/test/mjsunit/string-indexof-2.js
+++ b/test/mjsunit/string-indexof-2.js
@@ -57,10 +57,10 @@
     var index = -1;
     do {
       index = lipsum.indexOf(substring, index + 1);
-      assertTrue(index != -1, 
+      assertTrue(index != -1,
                  "Lipsum substring " + i + ".." + (i + len-1) + " not found");
-      assertEquals(lipsum.substring(index, index + len), substring, 
-          "Wrong lipsum substring found: " + i + ".." + (i + len - 1) + "/" + 
+      assertEquals(lipsum.substring(index, index + len), substring,
+          "Wrong lipsum substring found: " + i + ".." + (i + len - 1) + "/" +
               index + ".." + (index + len - 1));
     } while (index >= 0 && index < i);
     assertEquals(i, index, "Lipsum match at " + i + ".." + (i + len - 1));
diff --git a/test/mjsunit/string-replace.js b/test/mjsunit/string-replace.js
index 9e4f559..6b022df 100644
--- a/test/mjsunit/string-replace.js
+++ b/test/mjsunit/string-replace.js
@@ -207,3 +207,8 @@
 
 replaceTest("[ab-aabb-ab-b][az-aazz-az-z]",
             "abaz", /a(.)/g, replacer);
+
+var str = 'She sells seashells by the seashore.';
+var re = /sh/g;
+assertEquals('She sells sea$schells by the sea$schore.',
+             str.replace(re,"$$" + 'sch'))
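The new assertion relies on "$$" in a replacement string being the escape for a single literal dollar sign; a tiny sketch of that rule, using only standard String.prototype.replace semantics:

"price: 5".replace(/5/, "$$5");   // "price: $5"  ("$$" escapes a literal "$")
"a-b".replace(/-/, "$$");         // "a$b"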
diff --git a/test/mjsunit/string-slices-regexp.js b/test/mjsunit/string-slices-regexp.js
new file mode 100644
index 0000000..a8cadae
--- /dev/null
+++ b/test/mjsunit/string-slices-regexp.js
@@ -0,0 +1,81 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --string-slices
+
+//assertEquals('345"12345 6"1234567"123',
+//             '12345""12345 6""1234567""1234'.slice(2,-1).replace(/""/g, '"'));
+
+var foo = "lsdfj sldkfj sdklfj læsdfjl sdkfjlsdk fjsdl fjsdljskdj flsj flsdkj flskd regexp: /foobar/\nldkfj sdlkfj sdkl";
+for(var i = 0; i < 1000; i++) {
+  assertTrue(/^([a-z]+): (.*)/.test(foo.substring(foo.indexOf("regexp:"))));
+  assertEquals("regexp", RegExp.$1, "RegExp.$1");
+}
+
+var re = /^(((N({)?)|(R)|(U)|(V)|(B)|(H)|(n((n)|(r)|(v)|(h))?)|(r(r)?)|(v)|(b((n)|(b))?)|(h))|((Y)|(A)|(E)|(o(u)?)|(p(u)?)|(q(u)?)|(s)|(t)|(u)|(w)|(x(u)?)|(y)|(z)|(a((T)|(A)|(L))?)|(c)|(e)|(f(u)?)|(g(u)?)|(i)|(j)|(l)|(m(u)?)))+/;
+var r = new RegExp(re)
+var str = "_Avtnennan gunzvmu pubExnY nEvln vaTxh rmuhguhaTxnY_".slice(1,-1);
+str = str + str;
+assertTrue(r.test(str));
+assertTrue(r.test(str));
+var re = /x/;
+assertEquals("a.yb", "_axyb_".slice(1,-1).replace(re, "."));
+re.compile("y");
+assertEquals("ax.b", "_axyb_".slice(1,-1).replace(re, "."));
+re.compile("(x)");
+assertEquals(["x", "x"], re.exec("_axyb_".slice(1,-1)));
+re.compile("(y)");
+assertEquals(["y", "y"], re.exec("_axyb_".slice(1,-1)));
+
+for(var i = 0; i < 100; i++) {
+  var a = "aaaaaaaaaaaaaaaaaaaaaaaabbaacabbabaaaaabbaaaabbac".slice(24,-1);
+  var b = "bbaacabbabaaaaabbaaaabba" + a;
+  // The first time, the cons string will be flattened and handled by the
+  // runtime system.
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(b));
+  // The second time, the cons string is already flattened and will be
+  // handled by generated code.
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(b));
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(a));
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(a));
+}
+
+var c = "ABCDEFGHIJKLMN".slice(2,-2);
+var d = "ABCDEF\u1234GHIJKLMN".slice(2,-2);
+var e = "ABCDEFGHIJKLMN".slice(0,-2);
+assertTrue(/^C.*L$/.test(c));
+assertTrue(/^C.*L$/.test(c));
+assertTrue(/^C.*L$/.test(d));
+assertTrue(/^C.*L$/.test(d));
+assertTrue(/^A\w{10}L$/.test(e));
+assertTrue(/^A\w{10}L$/.test(e));
+
+var e = "qui-opIasd-fghjklzx-cvbn-mqwer-tyuio-pasdf-ghIjkl-zx".slice(6,-6);
+var e_split = e.split("-");
+assertEquals(e_split[0], "Iasd");
+assertEquals(e_split[1], "fghjklzx");
+assertEquals(e_split[6], "ghI");
diff --git a/test/mjsunit/string-slices.js b/test/mjsunit/string-slices.js
new file mode 100755
index 0000000..8cc1f81
--- /dev/null
+++ b/test/mjsunit/string-slices.js
@@ -0,0 +1,199 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --string-slices --expose-externalize-string
+
+var s = 'abcdefghijklmn';
+assertEquals(s, s.substr());
+assertEquals(s, s.substr(0));
+assertEquals(s, s.substr('0'));
+assertEquals(s, s.substr(void 0));
+assertEquals(s, s.substr(null));
+assertEquals(s, s.substr(false));
+assertEquals(s, s.substr(0.9));
+assertEquals(s, s.substr({ valueOf: function() { return 0; } }));
+assertEquals(s, s.substr({ toString: function() { return '0'; } }));
+
+var s1 = s.substring(1);
+assertEquals(s1, s.substr(1));
+assertEquals(s1, s.substr('1'));
+assertEquals(s1, s.substr(true));
+assertEquals(s1, s.substr(1.1));
+assertEquals(s1, s.substr({ valueOf: function() { return 1; } }));
+assertEquals(s1, s.substr({ toString: function() { return '1'; } }));
+
+
+assertEquals(s.substring(s.length - 1), s.substr(-1));
+assertEquals(s.substring(s.length - 1), s.substr(-1.2));
+assertEquals(s.substring(s.length - 1), s.substr(-1.7));
+assertEquals(s.substring(s.length - 2), s.substr(-2));
+assertEquals(s.substring(s.length - 2), s.substr(-2.3));
+assertEquals(s.substring(s.length - 2, s.length - 1), s.substr(-2, 1));
+assertEquals(s, s.substr(-100));
+assertEquals('abc', s.substr(-100, 3));
+assertEquals(s1, s.substr(-s.length + 1));
+
+// assertEquals('', s.substr(0, void 0)); // smjs and rhino
+assertEquals('abcdefghijklmn', s.substr(0, void 0));  // kjs and v8
+assertEquals('', s.substr(0, null));
+assertEquals(s, s.substr(0, String(s.length)));
+assertEquals('a', s.substr(0, true));
+
+
+// Test substrings of different lengths and alignments.
+// First ASCII.
+var x = "ASCII";
+for (var i = 0; i < 25; i++) {
+  x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x);  // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+  for (var j = 0; j < 25; j++) {
+    var z = x.substring(i, i+j);
+    var w = Math.random() * 42;  // Allocate something new in new-space.
+    assertEquals(j, z.length);
+    for (var k = 0; k < j; k++) {
+      assertEquals(x.charAt(i+k), z.charAt(k));
+    }
+  }
+}
+// Then two-byte strings.
+x = "UC16\u2028";  // Non-ascii char forces two-byte string.
+for (var i = 0; i < 25; i++) {
+  x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x);  // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+  for (var j = 0; j < 25; j++) {
+    var z = x.substring(i, i + j);
+    var w = Math.random() * 42;  // Allocate something new in new-space.
+    assertEquals(j, z.length);
+    for (var k = 0; k < j; k++) {
+      assertEquals(x.charAt(i+k), z.charAt(k));
+    }
+  }
+}
+
+// Keep creating strings to force allocation failure on substring creation.
+var x = "0123456789ABCDEF";
+x += x;  // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x;  // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 1000; i++) {
+  var z = x.substring(i % xl);
+  assertEquals(xl - (i % xl), z.length);
+  cache.push(z);
+}
+
+
+// Same with two-byte strings
+var x = "\u2028123456789ABCDEF";
+x += x;  // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x;  // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 1000; i++) {
+  var z = x.substring(i % xl);
+  assertEquals(xl - (i % xl), z.length);
+  cache.push(z);
+}
+
+// Substring of substring.
+var cache = [];
+var last = x;
+var offset = 0;
+for (var i = 0; i < 64; i++) {
+  var z = last.substring(i);
+  last = z;
+  cache.push(z);
+  offset += i;
+}
+for (var i = 63; i >= 0; i--) {
+  var z = cache.pop();
+  assertTrue(/\u2028123456789ABCDEF/.test(z));
+  assertEquals(xl - offset, z.length);
+  assertEquals(x.charAt(i*(i+1)/2), z.charAt(0));
+  offset -= i;
+}
+
+// Test charAt for different strings.
+function f(s1, s2, s3, i) {
+  assertEquals(String.fromCharCode(97+i%11), s1.charAt(i%11));
+  assertEquals(String.fromCharCode(97+i%11), s2.charAt(i%11));
+  assertEquals(String.fromCharCode(98+i%11), s3.charAt(i%11));
+  assertEquals(String.fromCharCode(101), s3.charAt(3));
+}
+
+flat = "abcdefghijkl12345";
+cons = flat + flat.toUpperCase();
+slice = "abcdefghijklmn12345".slice(1, -1);
+for ( var i = 0; i < 1000; i++) {
+  f(flat, cons, slice, i);
+}
+flat = "abcdefghijkl1\u20232345";
+cons = flat + flat.toUpperCase();
+slice = "abcdefghijklmn1\u20232345".slice(1, -1);
+for ( var i = 0; i < 1000; i++) {
+  f(flat, cons, slice, i);
+}
+
+// Concatenate substrings.
+var ascii = 'abcdefghijklmnop';
+var utf = '\u03B1\u03B2\u03B3\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9\u03BA\u03BB';
+assertEquals("klmno", ascii.substring(10,15) + ascii.substring(16));
+assertEquals("\u03B4\u03B7", utf.substring(3,4) + utf.substring(6,7));
+assertEquals("klp", ascii.substring(10,12) + ascii.substring(15,16));
+assertEquals("\u03B1\u03B4\u03B5", utf.substring(0,1) + utf.substring(5,3));
+assertEquals("", ascii.substring(16) + utf.substring(16));
+assertEquals("bcdef\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9",
+    ascii.substring(1,6) + utf.substring(3,9));
+assertEquals("\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9abcdefghijklmnop",
+    utf.substring(3,9) + ascii);
+assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
+    utf.substring(5,1) + utf.substring(3,7));
+
+/*
+// Externalizing strings.
+var a = "123456789qwertyuiopasdfghjklzxcvbnm";
+var b = a.slice(1,-1);
+assertEquals(a.slice(1,-1), b);
+externalizeString(a);
+assertEquals(a.slice(1,-1), b);
+*/
diff --git a/test/mjsunit/string-split.js b/test/mjsunit/string-split.js
index 6fcf557..d8412f0 100644
--- a/test/mjsunit/string-split.js
+++ b/test/mjsunit/string-split.js
@@ -68,13 +68,13 @@
 
 
 /* "ab".split(/((?=.))/)
- * 
+ *
  * KJS:   ,a,,b
  * SM:    a,,b,
  * IE:    a,b
  * Opera: a,,b
  * V8:    a,,b
- * 
+ *
  * Opera seems to have this right.  The others make no sense.
  */
 assertArrayEquals(["a", "", "b"], "ab".split(/((?=.))/));
@@ -116,3 +116,15 @@
 assertEquals(["a", "b", "c"], "abc".split("", numberObj(3)));
 assertEquals(["a", "b", "c"], "abc".split("", 4));
 assertEquals(["a", "b", "c"], "abc".split("", numberObj(4)));
+
+
+var all_ascii_codes = [];
+for (var i = 0; i < 128; i++) all_ascii_codes[i] = i;
+var all_ascii_string = String.fromCharCode.apply(String, all_ascii_codes);
+
+var split_chars = all_ascii_string.split("");
+assertEquals(128, split_chars.length);
+for (var i = 0; i < 128; i++) {
+  assertEquals(1, split_chars[i].length);
+  assertEquals(i, split_chars[i].charCodeAt(0));
+}
diff --git a/test/mjsunit/substr.js b/test/mjsunit/substr.js
index f69a9c0..cab8b1b 100755
--- a/test/mjsunit/substr.js
+++ b/test/mjsunit/substr.js
@@ -55,7 +55,7 @@
 assertEquals('abc', s.substr(-100, 3));
 assertEquals(s1, s.substr(-s.length + 1));
 
-// assertEquals('', s.substr(0, void 0)); // smjs and rhino 
+// assertEquals('', s.substr(0, void 0)); // smjs and rhino
 assertEquals('abcdefghijklmn', s.substr(0, void 0));  // kjs and v8
 assertEquals('', s.substr(0, null));
 assertEquals(s, s.substr(0, String(s.length)));
@@ -135,3 +135,20 @@
   assertEquals(xl - (i % xl), z.length);
   cache.push(z);
 }
+
+// Substring of substring.
+var cache = [];
+var last = x;
+var offset = 0;
+for (var i = 0; i < 64; i++) {
+  var z = last.substring(i);
+  last = z;
+  cache.push(z);
+  offset += i;
+}
+for (var i = 63; i >= 0; i--) {
+  var z = cache.pop();
+  assertTrue(/\u2028123456789ABCDEF/.test(z));
+  assertEquals(xl - offset, z.length);
+  offset -= i;
+}
diff --git a/test/mjsunit/testcfg.py b/test/mjsunit/testcfg.py
index 3dd6581..87ed4fa 100644
--- a/test/mjsunit/testcfg.py
+++ b/test/mjsunit/testcfg.py
@@ -107,7 +107,7 @@
     return self_script
 
   def AfterRun(self, result):
-    if self.self_script and (not result.HasPreciousOutput()):
+    if self.self_script and (not result or (not result.HasPreciousOutput())):
       test.CheckedUnlink(self.self_script)
 
 class MjsunitTestConfiguration(test.TestConfiguration):
@@ -127,13 +127,15 @@
     third_party = [current_path + ['third_party', t] for t in self.Ls(join(self.root, 'third_party'))]
     tools = [current_path + ['tools', t] for t in self.Ls(join(self.root, 'tools'))]
     compiler = [current_path + ['compiler', t] for t in self.Ls(join(self.root, 'compiler'))]
+    harmony = [current_path + ['harmony', t] for t in self.Ls(join(self.root, 'harmony'))]
     mjsunit.sort()
     regress.sort()
     bugs.sort()
     third_party.sort()
     tools.sort()
     compiler.sort()
-    all_tests = mjsunit + regress + bugs + third_party + tools + compiler
+    harmony.sort()
+    all_tests = mjsunit + regress + bugs + third_party + tools + compiler + harmony
     result = []
     for test in all_tests:
       if self.Contains(path, test):
@@ -143,7 +145,7 @@
     return result
 
   def GetBuildRequirements(self):
-    return ['sample', 'sample=shell']
+    return ['d8']
 
   def GetTestStatus(self, sections, defs):
     status_file = join(self.root, 'mjsunit.status')
diff --git a/test/mjsunit/third_party/object-keys.js b/test/mjsunit/third_party/object-keys.js
index 999ce70..d09265c 100644
--- a/test/mjsunit/third_party/object-keys.js
+++ b/test/mjsunit/third_party/object-keys.js
@@ -54,7 +54,7 @@
 assertEquals('string', typeof(Object.keys([1])[0]));
 
 function argsTest(a, b, c) {
-  assertEquals([0, 1, 2], Object.keys(arguments));
+  assertEquals(['0', '1', '2'], Object.keys(arguments));
 }
 
 argsTest(1, 2, 3);
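The object-keys.js fix reflects that Object.keys always reports property names as strings, even for array-like index properties; a one-line sketch of the same fact:

Object.keys([10, 20, 30]);        // ['0', '1', '2'] -- keys are strings
typeof Object.keys([10, 20])[0];  // 'string'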
diff --git a/test/mjsunit/third_party/regexp-pcre.js b/test/mjsunit/third_party/regexp-pcre.js
index dcb1b32..c049fb4 100644
--- a/test/mjsunit/third_party/regexp-pcre.js
+++ b/test/mjsunit/third_party/regexp-pcre.js
@@ -1643,135 +1643,135 @@
 res[1564] = /[^\xaa]/m;
 res[1565] = /[^\xaa]/m;
 res[1566] = / End of testinput10 /;
-assertEquals("abc", res[1].exec("abc"), 0);
-assertEquals("abc", res[1].exec("defabc"), 1);
-assertEquals("abc", res[1].exec("Aabc"), 2);
-assertEquals(null, res[1].exec("*** Failers", 3));
-assertEquals("abc", res[1].exec("Adefabc"), 4);
-assertEquals("ABC", res[1].exec("ABC"), 5);
-assertEquals("abc", res[2].exec("abc"), 6);
-assertEquals(null, res[2].exec("Aabc", 7));
-assertEquals(null, res[2].exec("*** Failers", 8));
-assertEquals(null, res[2].exec("defabc", 9));
-assertEquals(null, res[2].exec("Adefabc", 10));
-assertEquals("abc", res[7].exec("abc"), 11);
-assertEquals(null, res[7].exec("*** Failers", 12));
-assertEquals(null, res[7].exec("def\nabc", 13));
+assertToStringEquals("abc", res[1].exec("abc"), 0);
+assertToStringEquals("abc", res[1].exec("defabc"), 1);
+assertToStringEquals("abc", res[1].exec("Aabc"), 2);
+assertNull(res[1].exec("*** Failers", 3));
+assertToStringEquals("abc", res[1].exec("Adefabc"), 4);
+assertToStringEquals("ABC", res[1].exec("ABC"), 5);
+assertToStringEquals("abc", res[2].exec("abc"), 6);
+assertNull(res[2].exec("Aabc", 7));
+assertNull(res[2].exec("*** Failers", 8));
+assertNull(res[2].exec("defabc", 9));
+assertNull(res[2].exec("Adefabc", 10));
+assertToStringEquals("abc", res[7].exec("abc"), 11);
+assertNull(res[7].exec("*** Failers", 12));
+assertNull(res[7].exec("def\nabc", 13));
 assertThrows("var re = /x{5,4}/;", 14);
 assertThrows("var re = /[abcd/;", 15);
 assertThrows("var re = /[z-a]/;", 16);
 assertThrows("var re = /^*/;", 17);
 assertThrows("var re = /(abc/;", 18);
 assertThrows("var re = /(?# abc/;", 19);
-assertEquals("cat", res[11].exec("this sentence eventually mentions a cat"), 20);
-assertEquals("elephant", res[11].exec("this sentences rambles on and on for a while and then reaches elephant"), 21);
-assertEquals("cat", res[12].exec("this sentence eventually mentions a cat"), 22);
-assertEquals("elephant", res[12].exec("this sentences rambles on and on for a while and then reaches elephant"), 23);
-assertEquals("CAT", res[13].exec("this sentence eventually mentions a CAT cat"), 24);
-assertEquals("elephant", res[13].exec("this sentences rambles on and on for a while to elephant ElePhant"), 25);
+assertToStringEquals("cat", res[11].exec("this sentence eventually mentions a cat"), 20);
+assertToStringEquals("elephant", res[11].exec("this sentences rambles on and on for a while and then reaches elephant"), 21);
+assertToStringEquals("cat", res[12].exec("this sentence eventually mentions a cat"), 22);
+assertToStringEquals("elephant", res[12].exec("this sentences rambles on and on for a while and then reaches elephant"), 23);
+assertToStringEquals("CAT", res[13].exec("this sentence eventually mentions a CAT cat"), 24);
+assertToStringEquals("elephant", res[13].exec("this sentences rambles on and on for a while to elephant ElePhant"), 25);
 assertThrows("var re = /{4,5}abc/;", 26);
-assertEquals("abcb,a,b,c", res[18].exec("abcb"), 27);
-assertEquals("abcb,a,b,c", res[18].exec("O0abcb"), 28);
-assertEquals("abcb,a,b,c", res[18].exec("O3abcb"), 29);
-assertEquals("abcb,a,b,c", res[18].exec("O6abcb"), 30);
-assertEquals("abcb,a,b,c", res[18].exec("O9abcb"), 31);
-assertEquals("abcb,a,b,c", res[18].exec("O12abcb"), 32);
-assertEquals("abc,a,,", res[19].exec("abc"), 33);
-assertEquals("abc,a,,", res[19].exec("O0abc"), 34);
-assertEquals("abc,a,,", res[19].exec("O3abc"), 35);
-assertEquals("abc,a,,", res[19].exec("O6abc"), 36);
-assertEquals("aba,,a,b", res[19].exec("aba"), 37);
-assertEquals("aba,,a,b", res[19].exec("O0aba"), 38);
-assertEquals("aba,,a,b", res[19].exec("O3aba"), 39);
-assertEquals("aba,,a,b", res[19].exec("O6aba"), 40);
-assertEquals("aba,,a,b", res[19].exec("O9aba"), 41);
-assertEquals("aba,,a,b", res[19].exec("O12aba"), 42);
-assertEquals("abc", res[20].exec("abc"), 43);
-assertEquals(null, res[20].exec("*** Failers", 44));
-assertEquals(null, res[20].exec("abc\n", 45));
-assertEquals(null, res[20].exec("abc\ndef", 46));
-assertEquals("the quick brown fox", res[22].exec("the quick brown fox"), 47);
-assertEquals("the quick brown fox", res[22].exec("this is a line with the quick brown fox"), 48);
-assertEquals("abc", res[23].exec("abcdef"), 49);
-assertEquals("abc", res[23].exec("abcdefB"), 50);
-assertEquals("defabc,abc,abc,", res[24].exec("defabc"), 51);
-assertEquals("Zdefabc,abc,abc,", res[24].exec("Zdefabc"), 52);
-assertEquals("abc", res[25].exec("abc"), 53);
-assertEquals(null, res[25].exec("*** Failers", 54));
-assertEquals("abc", res[26].exec("abcdef"), 55);
-assertEquals("abc", res[26].exec("abcdefB"), 56);
-assertEquals("defabc,abc,abc,", res[27].exec("defabc"), 57);
-assertEquals("Zdefabc,abc,abc,", res[27].exec("Zdefabc"), 58);
-assertEquals("the quick brown fox", res[28].exec("the quick brown fox"), 59);
-assertEquals(null, res[28].exec("*** Failers", 60));
-assertEquals("The Quick Brown Fox", res[28].exec("The Quick Brown Fox"), 61);
-assertEquals("the quick brown fox", res[29].exec("the quick brown fox"), 62);
-assertEquals("The Quick Brown Fox", res[29].exec("The Quick Brown Fox"), 63);
-assertEquals(null, res[30].exec("*** Failers", 64));
-assertEquals(null, res[30].exec("abc\ndef", 65));
-assertEquals("abc", res[31].exec("abc"), 66);
-assertEquals(null, res[31].exec("abc\n", 67));
-assertEquals("abc,abc", res[33].exec("abc"), 68);
+assertToStringEquals("abcb,a,b,c", res[18].exec("abcb"), 27);
+assertToStringEquals("abcb,a,b,c", res[18].exec("O0abcb"), 28);
+assertToStringEquals("abcb,a,b,c", res[18].exec("O3abcb"), 29);
+assertToStringEquals("abcb,a,b,c", res[18].exec("O6abcb"), 30);
+assertToStringEquals("abcb,a,b,c", res[18].exec("O9abcb"), 31);
+assertToStringEquals("abcb,a,b,c", res[18].exec("O12abcb"), 32);
+assertToStringEquals("abc,a,,", res[19].exec("abc"), 33);
+assertToStringEquals("abc,a,,", res[19].exec("O0abc"), 34);
+assertToStringEquals("abc,a,,", res[19].exec("O3abc"), 35);
+assertToStringEquals("abc,a,,", res[19].exec("O6abc"), 36);
+assertToStringEquals("aba,,a,b", res[19].exec("aba"), 37);
+assertToStringEquals("aba,,a,b", res[19].exec("O0aba"), 38);
+assertToStringEquals("aba,,a,b", res[19].exec("O3aba"), 39);
+assertToStringEquals("aba,,a,b", res[19].exec("O6aba"), 40);
+assertToStringEquals("aba,,a,b", res[19].exec("O9aba"), 41);
+assertToStringEquals("aba,,a,b", res[19].exec("O12aba"), 42);
+assertToStringEquals("abc", res[20].exec("abc"), 43);
+assertNull(res[20].exec("*** Failers", 44));
+assertNull(res[20].exec("abc\n", 45));
+assertNull(res[20].exec("abc\ndef", 46));
+assertToStringEquals("the quick brown fox", res[22].exec("the quick brown fox"), 47);
+assertToStringEquals("the quick brown fox", res[22].exec("this is a line with the quick brown fox"), 48);
+assertToStringEquals("abc", res[23].exec("abcdef"), 49);
+assertToStringEquals("abc", res[23].exec("abcdefB"), 50);
+assertToStringEquals("defabc,abc,abc,", res[24].exec("defabc"), 51);
+assertToStringEquals("Zdefabc,abc,abc,", res[24].exec("Zdefabc"), 52);
+assertToStringEquals("abc", res[25].exec("abc"), 53);
+assertNull(res[25].exec("*** Failers", 54));
+assertToStringEquals("abc", res[26].exec("abcdef"), 55);
+assertToStringEquals("abc", res[26].exec("abcdefB"), 56);
+assertToStringEquals("defabc,abc,abc,", res[27].exec("defabc"), 57);
+assertToStringEquals("Zdefabc,abc,abc,", res[27].exec("Zdefabc"), 58);
+assertToStringEquals("the quick brown fox", res[28].exec("the quick brown fox"), 59);
+assertNull(res[28].exec("*** Failers", 60));
+assertToStringEquals("The Quick Brown Fox", res[28].exec("The Quick Brown Fox"), 61);
+assertToStringEquals("the quick brown fox", res[29].exec("the quick brown fox"), 62);
+assertToStringEquals("The Quick Brown Fox", res[29].exec("The Quick Brown Fox"), 63);
+assertNull(res[30].exec("*** Failers", 64));
+assertNull(res[30].exec("abc\ndef", 65));
+assertToStringEquals("abc", res[31].exec("abc"), 66);
+assertNull(res[31].exec("abc\n", 67));
+assertToStringEquals("abc,abc", res[33].exec("abc"), 68);
 assertThrows("var re = /)/;", 69);
-assertEquals("-pr", res[35].exec("co-processors, and for"), 70);
-assertEquals("<def>ghi<klm>", res[36].exec("abc<def>ghi<klm>nop"), 71);
-assertEquals("<def>", res[37].exec("abc<def>ghi<klm>nop"), 72);
-assertEquals("<def>", res[37].exec("abc<def>ghi<klm>nop"), 73);
-assertEquals(null, res[37].exec("abc========def", 74));
-assertEquals(null, res[37].exec("foo", 75));
-assertEquals(null, res[37].exec("catfoo", 76));
-assertEquals(null, res[37].exec("*** Failers", 77));
-assertEquals(null, res[37].exec("the barfoo", 78));
-assertEquals(null, res[37].exec("and cattlefoo", 79));
-assertEquals("a", res[40].exec("a"), 80);
-assertEquals(null, res[40].exec("a\n", 81));
-assertEquals(null, res[40].exec("*** Failers", 82));
-assertEquals("a", res[40].exec("Za"), 83);
-assertEquals(null, res[40].exec("Za\n", 84));
-assertEquals("a", res[41].exec("a"), 85);
-assertEquals("a", res[41].exec("a\n"), 86);
-assertEquals("a", res[41].exec("Za\n"), 87);
-assertEquals(null, res[41].exec("*** Failers", 88));
-assertEquals("a", res[41].exec("Za"), 89);
-assertEquals("b", res[44].exec("foo\nbarbar"), 90);
-assertEquals("a", res[44].exec("***Failers"), 91);
-assertEquals("b", res[44].exec("rhubarb"), 92);
-assertEquals("b", res[44].exec("barbell"), 93);
-assertEquals("a", res[44].exec("abc\nbarton"), 94);
-assertEquals("b", res[44].exec("foo\nbarbar"), 95);
-assertEquals("a", res[44].exec("***Failers"), 96);
-assertEquals("b", res[44].exec("rhubarb"), 97);
-assertEquals("b", res[44].exec("barbell"), 98);
-assertEquals("a", res[44].exec("abc\nbarton"), 99);
-assertEquals("a", res[44].exec("abc"), 100);
-assertEquals("a", res[44].exec("def\nabc"), 101);
-assertEquals("a", res[44].exec("*** Failers"), 102);
-assertEquals("a", res[44].exec("defabc"), 103);
-assertEquals(null, res[45].exec("the bullock-cart", 104));
-assertEquals(null, res[45].exec("a donkey-cart race", 105));
-assertEquals(null, res[45].exec("*** Failers", 106));
-assertEquals(null, res[45].exec("cart", 107));
-assertEquals(null, res[45].exec("horse-and-cart", 108));
-assertEquals(null, res[45].exec("alphabetabcd", 109));
-assertEquals(null, res[45].exec("endingxyz", 110));
-assertEquals(null, res[45].exec("abxyZZ", 111));
-assertEquals(null, res[45].exec("abXyZZ", 112));
-assertEquals(null, res[45].exec("ZZZ", 113));
-assertEquals(null, res[45].exec("zZZ", 114));
-assertEquals(null, res[45].exec("bZZ", 115));
-assertEquals(null, res[45].exec("BZZ", 116));
-assertEquals(null, res[45].exec("*** Failers", 117));
-assertEquals(null, res[45].exec("ZZ", 118));
-assertEquals(null, res[45].exec("abXYZZ", 119));
-assertEquals(null, res[45].exec("zzz", 120));
-assertEquals(null, res[45].exec("bzz", 121));
-assertEquals(null, res[45].exec("bar", 122));
-assertEquals(null, res[45].exec("foobbar", 123));
-assertEquals(null, res[45].exec("*** Failers", 124));
-assertEquals(null, res[45].exec("fooabar", 125));
-assertEquals(null, res[46].exec("*** Failers", 126));
-assertEquals(null, res[46].exec("a", 127));
-assertEquals(null, res[48].exec("aaaaaa", 128));
+assertToStringEquals("-pr", res[35].exec("co-processors, and for"), 70);
+assertToStringEquals("<def>ghi<klm>", res[36].exec("abc<def>ghi<klm>nop"), 71);
+assertToStringEquals("<def>", res[37].exec("abc<def>ghi<klm>nop"), 72);
+assertToStringEquals("<def>", res[37].exec("abc<def>ghi<klm>nop"), 73);
+assertNull(res[37].exec("abc========def", 74));
+assertNull(res[37].exec("foo", 75));
+assertNull(res[37].exec("catfoo", 76));
+assertNull(res[37].exec("*** Failers", 77));
+assertNull(res[37].exec("the barfoo", 78));
+assertNull(res[37].exec("and cattlefoo", 79));
+assertToStringEquals("a", res[40].exec("a"), 80);
+assertNull(res[40].exec("a\n", 81));
+assertNull(res[40].exec("*** Failers", 82));
+assertToStringEquals("a", res[40].exec("Za"), 83);
+assertNull(res[40].exec("Za\n", 84));
+assertToStringEquals("a", res[41].exec("a"), 85);
+assertToStringEquals("a", res[41].exec("a\n"), 86);
+assertToStringEquals("a", res[41].exec("Za\n"), 87);
+assertNull(res[41].exec("*** Failers", 88));
+assertToStringEquals("a", res[41].exec("Za"), 89);
+assertToStringEquals("b", res[44].exec("foo\nbarbar"), 90);
+assertToStringEquals("a", res[44].exec("***Failers"), 91);
+assertToStringEquals("b", res[44].exec("rhubarb"), 92);
+assertToStringEquals("b", res[44].exec("barbell"), 93);
+assertToStringEquals("a", res[44].exec("abc\nbarton"), 94);
+assertToStringEquals("b", res[44].exec("foo\nbarbar"), 95);
+assertToStringEquals("a", res[44].exec("***Failers"), 96);
+assertToStringEquals("b", res[44].exec("rhubarb"), 97);
+assertToStringEquals("b", res[44].exec("barbell"), 98);
+assertToStringEquals("a", res[44].exec("abc\nbarton"), 99);
+assertToStringEquals("a", res[44].exec("abc"), 100);
+assertToStringEquals("a", res[44].exec("def\nabc"), 101);
+assertToStringEquals("a", res[44].exec("*** Failers"), 102);
+assertToStringEquals("a", res[44].exec("defabc"), 103);
+assertNull(res[45].exec("the bullock-cart", 104));
+assertNull(res[45].exec("a donkey-cart race", 105));
+assertNull(res[45].exec("*** Failers", 106));
+assertNull(res[45].exec("cart", 107));
+assertNull(res[45].exec("horse-and-cart", 108));
+assertNull(res[45].exec("alphabetabcd", 109));
+assertNull(res[45].exec("endingxyz", 110));
+assertNull(res[45].exec("abxyZZ", 111));
+assertNull(res[45].exec("abXyZZ", 112));
+assertNull(res[45].exec("ZZZ", 113));
+assertNull(res[45].exec("zZZ", 114));
+assertNull(res[45].exec("bZZ", 115));
+assertNull(res[45].exec("BZZ", 116));
+assertNull(res[45].exec("*** Failers", 117));
+assertNull(res[45].exec("ZZ", 118));
+assertNull(res[45].exec("abXYZZ", 119));
+assertNull(res[45].exec("zzz", 120));
+assertNull(res[45].exec("bzz", 121));
+assertNull(res[45].exec("bar", 122));
+assertNull(res[45].exec("foobbar", 123));
+assertNull(res[45].exec("*** Failers", 124));
+assertNull(res[45].exec("fooabar", 125));
+assertNull(res[46].exec("*** Failers", 126));
+assertNull(res[46].exec("a", 127));
+assertNull(res[48].exec("aaaaaa", 128));
 assertThrows("var re = /a[b-a]/;", 129);
 assertThrows("var re = /a[/;", 130);
 assertThrows("var re = /*a/;", 131);
@@ -1794,4810 +1794,4810 @@
 assertThrows("var re = /a(?{\"{\"}})b/;", 148);
 assertThrows("var re = /[a[:xyz:/;", 149);
 assertThrows("var re = /a{37,17}/;", 150);
-assertEquals("abcd,a,d", res[58].exec("abcd"), 151);
-assertEquals("abcd,a,d", res[58].exec("abcdC2"), 152);
-assertEquals("abcd,a,d", res[58].exec("abcdC5"), 153);
-assertEquals("abcdefghijklmnopqrst,abcdefghijklmnopqrst", res[59].exec("abcdefghijklmnopqrstuvwxyz"), 154);
-assertEquals("abcdefghijklmnopqrst,abcdefghijklmnopqrst", res[59].exec("abcdefghijklmnopqrstuvwxyzC1"), 155);
-assertEquals("abcdefghijklmnopqrst,abcdefghijklmnopqrst", res[59].exec("abcdefghijklmnopqrstuvwxyzG1"), 156);
-assertEquals("abcdefghijklmno,abcdefghijklmno", res[60].exec("abcdefghijklmnopqrstuvwxyz"), 157);
-assertEquals("abcdefghijklmno,abcdefghijklmno", res[60].exec("abcdefghijklmnopqrstuvwxyzC1G1"), 158);
-assertEquals("abcdefghijklmnop,abcdefghijklmnop", res[61].exec("abcdefghijklmnopqrstuvwxyz"), 159);
-assertEquals("abcdefghijklmnop,abcdefghijklmnop", res[61].exec("abcdefghijklmnopqrstuvwxyzC1G1L"), 160);
-assertEquals("adef,a,,f", res[62].exec("adefG1G2G3G4L"), 161);
-assertEquals("bcdef,bc,bc,f", res[62].exec("bcdefG1G2G3G4L"), 162);
-assertEquals("adef,a,,f", res[62].exec("adefghijkC0"), 163);
-assertEquals("abc\x00def", res[63].exec("abc\x00defLC0"), 164);
-assertEquals("iss", res[69].exec("Mississippi"), 165);
-assertEquals("iss", res[70].exec("Mississippi"), 166);
-assertEquals("iss", res[71].exec("Mississippi"), 167);
-assertEquals("iss", res[72].exec("Mississippi"), 168);
-assertEquals("iss", res[73].exec("Mississippi"), 169);
-assertEquals(null, res[73].exec("*** Failers", 170));
-assertEquals("iss", res[73].exec("MississippiA"), 171);
-assertEquals("iss", res[73].exec("Mississippi"), 172);
-assertEquals(null, res[73].exec("Mississippi", 173));
-assertEquals("iss", res[74].exec("ississippi"), 174);
-assertEquals("abciss", res[75].exec("abciss\nxyzisspqr"), 175);
-assertEquals("Mis", res[76].exec("Mississippi"), 176);
-assertEquals("sis", res[76].exec("MississippiA"), 177);
-assertEquals("ri ", res[76].exec("Missouri river"), 178);
-assertEquals("riv", res[76].exec("Missouri riverA"), 179);
-assertEquals("Mis", res[77].exec("Mississippi"), 180);
-assertEquals("ab\n", res[78].exec("ab\nab\ncd"), 181);
-assertEquals("ab\n", res[79].exec("ab\nab\ncd"), 182);
-assertEquals("a", res[115].exec("a"), 183);
-assertEquals("b", res[115].exec("b"), 184);
-assertEquals("ab", res[115].exec("ab"), 185);
-assertEquals("", res[115].exec("\\"), 186);
-assertEquals("", res[115].exec("*** Failers"), 187);
-assertEquals("", res[115].exec("N"), 188);
-assertEquals("", res[116].exec("abcd"), 189);
-assertEquals("", res[116].exec("-abc"), 190);
-assertEquals("", res[116].exec("Nab-c"), 191);
-assertEquals("", res[116].exec("*** Failers"), 192);
-assertEquals("", res[116].exec("Nabc"), 193);
-assertEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzz"), 194);
-assertEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO0"), 195);
-assertEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO1"), 196);
-assertEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO2"), 197);
-assertEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO3"), 198);
-assertEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO4"), 199);
-assertEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO5"), 200);
-assertEquals("(abcd", res[118].exec("(abcd)"), 201);
-assertEquals("(abcd", res[118].exec("(abcd)xyz"), 202);
-assertEquals(null, res[118].exec("xyz(abcd)", 203));
-assertEquals(null, res[118].exec("(ab(xy)cd)pqr", 204));
-assertEquals(null, res[118].exec("(ab(xycd)pqr", 205));
-assertEquals(null, res[118].exec("() abc ()", 206));
-assertEquals(null, res[118].exec("12(abcde(fsh)xyz(foo(bar))lmno)89", 207));
-assertEquals(null, res[118].exec("*** Failers", 208));
-assertEquals("abcd", res[118].exec("abcd"), 209);
-assertEquals("abcd", res[118].exec("abcd)"), 210);
-assertEquals("(abcd", res[118].exec("(abcd"), 211);
-assertEquals(null, res[118].exec("(ab(xy)cd)pqr", 212));
-assertEquals(null, res[118].exec("1(abcd)(x(y)z)pqr", 213));
-assertEquals("(abcd", res[118].exec("(abcd)"), 214);
-assertEquals(null, res[118].exec("(ab(xy)cd)", 215));
-assertEquals(null, res[118].exec("(a(b(c)d)e)", 216));
-assertEquals(null, res[118].exec("((ab))", 217));
-assertEquals(null, res[118].exec("*** Failers", 218));
-assertEquals(null, res[118].exec("()", 219));
-assertEquals(null, res[118].exec("()", 220));
-assertEquals(null, res[118].exec("12(abcde(fsh)xyz(foo(bar))lmno)89", 221));
-assertEquals(null, res[118].exec("(ab(xy)cd)", 222));
-assertEquals(null, res[118].exec("(ab(xy)cd)", 223));
-assertEquals(null, res[118].exec("(ab(xy)cd)", 224));
-assertEquals(null, res[118].exec("(123ab(xy)cd)", 225));
-assertEquals(null, res[118].exec("(ab(xy)cd)", 226));
-assertEquals(null, res[118].exec("(123ab(xy)cd)", 227));
-assertEquals(null, res[118].exec("(ab(xy)cd)", 228));
-assertEquals("(abcd", res[118].exec("(abcd(xyz<p>qrs)123)"), 229);
-assertEquals(null, res[118].exec("(ab(cd)ef)", 230));
-assertEquals(null, res[118].exec("(ab(cd(ef)gh)ij)", 231));
-assertEquals(null, res[146].exec("A", 232));
-assertEquals(null, res[146].exec("a", 233));
-assertEquals(null, res[147].exec("A", 234));
-assertEquals(null, res[147].exec("a", 235));
-assertEquals(null, res[147].exec("ab", 236));
-assertEquals(null, res[147].exec("aB", 237));
-assertEquals(null, res[147].exec("*** Failers", 238));
-assertEquals(null, res[147].exec("Ab", 239));
-assertEquals(null, res[147].exec("AB", 240));
+assertToStringEquals("abcd,a,d", res[58].exec("abcd"), 151);
+assertToStringEquals("abcd,a,d", res[58].exec("abcdC2"), 152);
+assertToStringEquals("abcd,a,d", res[58].exec("abcdC5"), 153);
+assertToStringEquals("abcdefghijklmnopqrst,abcdefghijklmnopqrst", res[59].exec("abcdefghijklmnopqrstuvwxyz"), 154);
+assertToStringEquals("abcdefghijklmnopqrst,abcdefghijklmnopqrst", res[59].exec("abcdefghijklmnopqrstuvwxyzC1"), 155);
+assertToStringEquals("abcdefghijklmnopqrst,abcdefghijklmnopqrst", res[59].exec("abcdefghijklmnopqrstuvwxyzG1"), 156);
+assertToStringEquals("abcdefghijklmno,abcdefghijklmno", res[60].exec("abcdefghijklmnopqrstuvwxyz"), 157);
+assertToStringEquals("abcdefghijklmno,abcdefghijklmno", res[60].exec("abcdefghijklmnopqrstuvwxyzC1G1"), 158);
+assertToStringEquals("abcdefghijklmnop,abcdefghijklmnop", res[61].exec("abcdefghijklmnopqrstuvwxyz"), 159);
+assertToStringEquals("abcdefghijklmnop,abcdefghijklmnop", res[61].exec("abcdefghijklmnopqrstuvwxyzC1G1L"), 160);
+assertToStringEquals("adef,a,,f", res[62].exec("adefG1G2G3G4L"), 161);
+assertToStringEquals("bcdef,bc,bc,f", res[62].exec("bcdefG1G2G3G4L"), 162);
+assertToStringEquals("adef,a,,f", res[62].exec("adefghijkC0"), 163);
+assertToStringEquals("abc\x00def", res[63].exec("abc\x00defLC0"), 164);
+assertToStringEquals("iss", res[69].exec("Mississippi"), 165);
+assertToStringEquals("iss", res[70].exec("Mississippi"), 166);
+assertToStringEquals("iss", res[71].exec("Mississippi"), 167);
+assertToStringEquals("iss", res[72].exec("Mississippi"), 168);
+assertToStringEquals("iss", res[73].exec("Mississippi"), 169);
+assertNull(res[73].exec("*** Failers", 170));
+assertToStringEquals("iss", res[73].exec("MississippiA"), 171);
+assertToStringEquals("iss", res[73].exec("Mississippi"), 172);
+assertNull(res[73].exec("Mississippi", 173));
+assertToStringEquals("iss", res[74].exec("ississippi"), 174);
+assertToStringEquals("abciss", res[75].exec("abciss\nxyzisspqr"), 175);
+assertToStringEquals("Mis", res[76].exec("Mississippi"), 176);
+assertToStringEquals("sis", res[76].exec("MississippiA"), 177);
+assertToStringEquals("ri ", res[76].exec("Missouri river"), 178);
+assertToStringEquals("riv", res[76].exec("Missouri riverA"), 179);
+assertToStringEquals("Mis", res[77].exec("Mississippi"), 180);
+assertToStringEquals("ab\n", res[78].exec("ab\nab\ncd"), 181);
+assertToStringEquals("ab\n", res[79].exec("ab\nab\ncd"), 182);
+assertToStringEquals("a", res[115].exec("a"), 183);
+assertToStringEquals("b", res[115].exec("b"), 184);
+assertToStringEquals("ab", res[115].exec("ab"), 185);
+assertToStringEquals("", res[115].exec("\\"), 186);
+assertToStringEquals("", res[115].exec("*** Failers"), 187);
+assertToStringEquals("", res[115].exec("N"), 188);
+assertToStringEquals("", res[116].exec("abcd"), 189);
+assertToStringEquals("", res[116].exec("-abc"), 190);
+assertToStringEquals("", res[116].exec("Nab-c"), 191);
+assertToStringEquals("", res[116].exec("*** Failers"), 192);
+assertToStringEquals("", res[116].exec("Nabc"), 193);
+assertToStringEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzz"), 194);
+assertToStringEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO0"), 195);
+assertToStringEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO1"), 196);
+assertToStringEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO2"), 197);
+assertToStringEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO3"), 198);
+assertToStringEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO4"), 199);
+assertToStringEquals("aaaabbbbzz,bbbb,z,z", res[117].exec("aaaabbbbzzzzO5"), 200);
+assertToStringEquals("(abcd", res[118].exec("(abcd)"), 201);
+assertToStringEquals("(abcd", res[118].exec("(abcd)xyz"), 202);
+assertNull(res[118].exec("xyz(abcd)", 203));
+assertNull(res[118].exec("(ab(xy)cd)pqr", 204));
+assertNull(res[118].exec("(ab(xycd)pqr", 205));
+assertNull(res[118].exec("() abc ()", 206));
+assertNull(res[118].exec("12(abcde(fsh)xyz(foo(bar))lmno)89", 207));
+assertNull(res[118].exec("*** Failers", 208));
+assertToStringEquals("abcd", res[118].exec("abcd"), 209);
+assertToStringEquals("abcd", res[118].exec("abcd)"), 210);
+assertToStringEquals("(abcd", res[118].exec("(abcd"), 211);
+assertNull(res[118].exec("(ab(xy)cd)pqr", 212));
+assertNull(res[118].exec("1(abcd)(x(y)z)pqr", 213));
+assertToStringEquals("(abcd", res[118].exec("(abcd)"), 214);
+assertNull(res[118].exec("(ab(xy)cd)", 215));
+assertNull(res[118].exec("(a(b(c)d)e)", 216));
+assertNull(res[118].exec("((ab))", 217));
+assertNull(res[118].exec("*** Failers", 218));
+assertNull(res[118].exec("()", 219));
+assertNull(res[118].exec("()", 220));
+assertNull(res[118].exec("12(abcde(fsh)xyz(foo(bar))lmno)89", 221));
+assertNull(res[118].exec("(ab(xy)cd)", 222));
+assertNull(res[118].exec("(ab(xy)cd)", 223));
+assertNull(res[118].exec("(ab(xy)cd)", 224));
+assertNull(res[118].exec("(123ab(xy)cd)", 225));
+assertNull(res[118].exec("(ab(xy)cd)", 226));
+assertNull(res[118].exec("(123ab(xy)cd)", 227));
+assertNull(res[118].exec("(ab(xy)cd)", 228));
+assertToStringEquals("(abcd", res[118].exec("(abcd(xyz<p>qrs)123)"), 229);
+assertNull(res[118].exec("(ab(cd)ef)", 230));
+assertNull(res[118].exec("(ab(cd(ef)gh)ij)", 231));
+assertNull(res[146].exec("A", 232));
+assertNull(res[146].exec("a", 233));
+assertNull(res[147].exec("A", 234));
+assertNull(res[147].exec("a", 235));
+assertNull(res[147].exec("ab", 236));
+assertNull(res[147].exec("aB", 237));
+assertNull(res[147].exec("*** Failers", 238));
+assertNull(res[147].exec("Ab", 239));
+assertNull(res[147].exec("AB", 240));
 assertThrows("var re = /[\\200-\\110]/;", 241);
-assertEquals("1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 ABC ABC,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12 ,13 ,14 ,15 ,16 ,17 ,18 ,19 ,20 ,21 ,22 ,23 ,24 ,25 ,26 ,27 ,28 ,29 ,30 ,31 ,32 ,33 ,34 ,35 ,36 ,37 ,38 ,39 ,40 ,41 ,42 ,43 ,44 ,45 ,46 ,47 ,48 ,49 ,50 ,51 ,52 ,53 ,54 ,55 ,56 ,57 ,58 ,59 ,60 ,61 ,62 ,63 ,64 ,65 ,66 ,67 ,68 ,69 ,70 ,71 ,72 ,73 ,74 ,75 ,76 ,77 ,78 ,79 ,80 ,81 ,82 ,83 ,84 ,85 ,86 ,87 ,88 ,89 ,90 ,91 ,92 ,93 ,94 ,95 ,96 ,97 ,98 ,99 ,100 ,101 ,102 ,103 ,104 ,105 ,106 ,107 ,108 ,109 ,110 ,111 ,112 ,113 ,114 ,115 ,116 ,117 ,118 ,119 ,120 ,121 ,122 ,123 ,124 ,125 ,126 ,127 ,128 ,129 ,130 ,131 ,132 ,133 ,134 ,135 ,136 ,137 ,138 ,139 ,140 ,141 ,142 ,143 ,144 ,145 ,146 ,147 ,148 ,149 ,150 ,151 ,152 ,153 ,154 ,155 ,156 ,157 ,158 ,159 ,160 ,161 ,162 ,163 ,164 ,165 ,166 ,167 ,168 ,169 ,170 ,171 ,172 ,173 ,174 ,175 ,176 ,177 ,178 ,179 ,180 ,181 ,182 ,183 ,184 ,185 ,186 ,187 ,188 ,189 ,190 ,191 ,192 ,193 ,194 ,195 ,196 ,197 ,198 ,199 ,200 ,201 ,202 ,203 ,204 ,205 ,206 ,207 ,208 ,209 ,210 ,211 ,212 ,213 ,214 ,215 ,216 ,217 ,218 ,219 ,220 ,221 ,222 ,223 ,224 ,225 ,226 ,227 ,228 ,229 ,230 ,231 ,232 ,233 ,234 ,235 ,236 ,237 ,238 ,239 ,240 ,241 ,242 ,243 ,244 ,245 ,246 ,247 ,248 ,249 ,250 ,251 ,252 ,253 ,254 ,255 ,256 ,257 ,258 ,259 ,260 ,261 ,262 ,263 ,264 ,265 ,266 ,267 ,268 ,269 ,ABC,ABC", res[149].exec("O900 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 ABC ABC"), 242);
-assertEquals("mainmain,main,", res[151].exec("mainmain"), 243);
-assertEquals("mainOmain,main,", res[151].exec("mainOmain"), 244);
-assertEquals("aba,a,", res[153].exec("aba"), 245);
-assertEquals("aabbaa,aa,", res[154].exec("aabbaa"), 246);
-assertEquals("aabbaa,aa,", res[155].exec("aabbaa"), 247);
-assertEquals("aabbaa,aa,", res[156].exec("aabbaa"), 248);
-assertEquals("aabbaa,", res[157].exec("aabbaa"), 249);
-assertEquals("aabbaa,aa,,", res[158].exec("aabbaa"), 250);
-assertEquals("aabbaa,,", res[159].exec("aabbaa"), 251);
-assertEquals("aabbaa,", res[160].exec("aabbaa"), 252);
-assertEquals("aabbbaa,", res[161].exec("aabbbaa"), 253);
-assertEquals("aabbbaa,", res[162].exec("aabbbaa"), 254);
-assertEquals("aabbaa,", res[163].exec("aabbaa"), 255);
-assertEquals("aabbbaa,", res[164].exec("aabbbaa"), 256);
-assertEquals("aabbbaa,aa,,", res[165].exec("aabbbaa"), 257);
-assertEquals("aabbbbaa,aa,,", res[166].exec("aabbbbaa"), 258);
+assertToStringEquals("1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 ABC ABC,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12 ,13 ,14 ,15 ,16 ,17 ,18 ,19 ,20 ,21 ,22 ,23 ,24 ,25 ,26 ,27 ,28 ,29 ,30 ,31 ,32 ,33 ,34 ,35 ,36 ,37 ,38 ,39 ,40 ,41 ,42 ,43 ,44 ,45 ,46 ,47 ,48 ,49 ,50 ,51 ,52 ,53 ,54 ,55 ,56 ,57 ,58 ,59 ,60 ,61 ,62 ,63 ,64 ,65 ,66 ,67 ,68 ,69 ,70 ,71 ,72 ,73 ,74 ,75 ,76 ,77 ,78 ,79 ,80 ,81 ,82 ,83 ,84 ,85 ,86 ,87 ,88 ,89 ,90 ,91 ,92 ,93 ,94 ,95 ,96 ,97 ,98 ,99 ,100 ,101 ,102 ,103 ,104 ,105 ,106 ,107 ,108 ,109 ,110 ,111 ,112 ,113 ,114 ,115 ,116 ,117 ,118 ,119 ,120 ,121 ,122 ,123 ,124 ,125 ,126 ,127 ,128 ,129 ,130 ,131 ,132 ,133 ,134 ,135 ,136 ,137 ,138 ,139 ,140 ,141 ,142 ,143 ,144 ,145 ,146 ,147 ,148 ,149 ,150 ,151 ,152 ,153 ,154 ,155 ,156 ,157 ,158 ,159 ,160 ,161 ,162 ,163 ,164 ,165 ,166 ,167 ,168 ,169 ,170 ,171 ,172 ,173 ,174 ,175 ,176 ,177 ,178 ,179 ,180 ,181 ,182 ,183 ,184 ,185 ,186 ,187 ,188 ,189 ,190 ,191 ,192 ,193 ,194 ,195 ,196 ,197 ,198 ,199 ,200 ,201 ,202 ,203 ,204 ,205 ,206 ,207 ,208 ,209 ,210 ,211 ,212 ,213 ,214 ,215 ,216 ,217 ,218 ,219 ,220 ,221 ,222 ,223 ,224 ,225 ,226 ,227 ,228 ,229 ,230 ,231 ,232 ,233 ,234 ,235 ,236 ,237 ,238 ,239 ,240 ,241 ,242 ,243 ,244 ,245 ,246 ,247 ,248 ,249 ,250 ,251 ,252 ,253 ,254 ,255 ,256 ,257 ,258 ,259 ,260 ,261 ,262 ,263 ,264 ,265 ,266 ,267 ,268 ,269 ,ABC,ABC", res[149].exec("O900 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 ABC ABC"), 242);
+assertToStringEquals("mainmain,main,", res[151].exec("mainmain"), 243);
+assertToStringEquals("mainOmain,main,", res[151].exec("mainOmain"), 244);
+assertToStringEquals("aba,a,", res[153].exec("aba"), 245);
+assertToStringEquals("aabbaa,aa,", res[154].exec("aabbaa"), 246);
+assertToStringEquals("aabbaa,aa,", res[155].exec("aabbaa"), 247);
+assertToStringEquals("aabbaa,aa,", res[156].exec("aabbaa"), 248);
+assertToStringEquals("aabbaa,", res[157].exec("aabbaa"), 249);
+assertToStringEquals("aabbaa,aa,,", res[158].exec("aabbaa"), 250);
+assertToStringEquals("aabbaa,,", res[159].exec("aabbaa"), 251);
+assertToStringEquals("aabbaa,", res[160].exec("aabbaa"), 252);
+assertToStringEquals("aabbbaa,", res[161].exec("aabbbaa"), 253);
+assertToStringEquals("aabbbaa,", res[162].exec("aabbbaa"), 254);
+assertToStringEquals("aabbaa,", res[163].exec("aabbaa"), 255);
+assertToStringEquals("aabbbaa,", res[164].exec("aabbbaa"), 256);
+assertToStringEquals("aabbbaa,aa,,", res[165].exec("aabbbaa"), 257);
+assertToStringEquals("aabbbbaa,aa,,", res[166].exec("aabbbbaa"), 258);
 assertThrows("var re = //;", 259);
-assertEquals("a", res[169].exec("ab"), 260);
-assertEquals("a", res[169].exec("aB"), 261);
-assertEquals("*", res[169].exec("*** Failers"), 262);
-assertEquals("A", res[169].exec("AB"), 263);
-assertEquals("a", res[169].exec("ab"), 264);
-assertEquals("a", res[169].exec("aB"), 265);
-assertEquals("*", res[169].exec("*** Failers"), 266);
-assertEquals("A", res[169].exec("AB"), 267);
-assertEquals(null, res[172].exec("\\", 268));
-assertEquals(null, res[177].exec("*** Failers", 269));
-assertEquals(null, res[177].exec("xxxxx", 270));
-assertEquals(null, res[177].exec("now is the time for all good men to come to the aid of the party", 271));
-assertEquals(null, res[177].exec("*** Failers", 272));
-assertEquals(null, res[177].exec("this is not a line with only words and spaces!", 273));
-assertEquals(null, res[177].exec("12345a", 274));
-assertEquals(null, res[177].exec("*** Failers", 275));
-assertEquals(null, res[177].exec("12345+", 276));
-assertEquals(null, res[177].exec("aaab", 277));
-assertEquals(null, res[177].exec("aaab", 278));
-assertEquals(null, res[177].exec("aaab", 279));
-assertEquals(null, res[177].exec("((abc(ade)ufh()()x", 280));
-assertEquals(null, res[177].exec("(abc)", 281));
-assertEquals(null, res[177].exec("(abc(def)xyz)", 282));
-assertEquals(null, res[177].exec("*** Failers", 283));
-assertEquals(null, res[177].exec("((()aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 284));
-assertEquals(null, res[177].exec("xaaaab", 285));
-assertEquals(null, res[177].exec("xaaaab", 286));
+assertToStringEquals("a", res[169].exec("ab"), 260);
+assertToStringEquals("a", res[169].exec("aB"), 261);
+assertToStringEquals("*", res[169].exec("*** Failers"), 262);
+assertToStringEquals("A", res[169].exec("AB"), 263);
+assertToStringEquals("a", res[169].exec("ab"), 264);
+assertToStringEquals("a", res[169].exec("aB"), 265);
+assertToStringEquals("*", res[169].exec("*** Failers"), 266);
+assertToStringEquals("A", res[169].exec("AB"), 267);
+assertNull(res[172].exec("\\", 268));
+assertNull(res[177].exec("*** Failers", 269));
+assertNull(res[177].exec("xxxxx", 270));
+assertNull(res[177].exec("now is the time for all good men to come to the aid of the party", 271));
+assertNull(res[177].exec("*** Failers", 272));
+assertNull(res[177].exec("this is not a line with only words and spaces!", 273));
+assertNull(res[177].exec("12345a", 274));
+assertNull(res[177].exec("*** Failers", 275));
+assertNull(res[177].exec("12345+", 276));
+assertNull(res[177].exec("aaab", 277));
+assertNull(res[177].exec("aaab", 278));
+assertNull(res[177].exec("aaab", 279));
+assertNull(res[177].exec("((abc(ade)ufh()()x", 280));
+assertNull(res[177].exec("(abc)", 281));
+assertNull(res[177].exec("(abc(def)xyz)", 282));
+assertNull(res[177].exec("*** Failers", 283));
+assertNull(res[177].exec("((()aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 284));
+assertNull(res[177].exec("xaaaab", 285));
+assertNull(res[177].exec("xaaaab", 286));
 assertThrows("var re = /[/;", 287);
 assertThrows("var re = /[a-/;", 288);
-assertEquals(null, res[189].exec("<>", 289));
-assertEquals(null, res[189].exec("<abcd>", 290));
-assertEquals(null, res[189].exec("<abc <123> hij>", 291));
-assertEquals(null, res[189].exec("<abc <def> hij>", 292));
-assertEquals(null, res[189].exec("<abc<>def>", 293));
-assertEquals(null, res[189].exec("<abc<>", 294));
-assertEquals(null, res[189].exec("*** Failers", 295));
-assertEquals(null, res[189].exec("<abc", 296));
-assertEquals("bc123bc,bc,bc", res[195].exec("abc123bc"), 297);
-assertEquals("abc", res[215].exec("abcdef"), 298);
-assertEquals("abc", res[215].exec("1234abcdef"), 299);
-assertEquals(null, res[215].exec("*** Failers", 300));
-assertEquals("abc", res[215].exec("abcxyz"), 301);
-assertEquals("abc", res[215].exec("abcxyzf"), 302);
-assertEquals("abc", res[215].exec("123abcdef"), 303);
-assertEquals("abc", res[215].exec("1234abcdef"), 304);
-assertEquals(null, res[215].exec("*** Failers", 305));
-assertEquals("abc", res[215].exec("abcdef"), 306);
-assertEquals(null, res[215].exec("*** Failers", 307));
-assertEquals("abc", res[215].exec("\x83x0abcdef"), 308);
-assertEquals("abc", res[215].exec("123abcdef"), 309);
-assertEquals("abc", res[215].exec("123abcdefC+"), 310);
-assertEquals("abc", res[215].exec("123abcdefC-"), 311);
-assertEquals(null, res[215].exec("*** Failers", 312));
-assertEquals("abc", res[215].exec("123abcdefC!1"), 313);
-assertEquals("abc", res[215].exec("abcabcabc"), 314);
-assertEquals("abc", res[215].exec("abcabcC!1!3"), 315);
-assertEquals(null, res[215].exec("*** Failers", 316));
-assertEquals("abc", res[215].exec("abcabcabcC!1!3"), 317);
-assertEquals("C", res[215].exec("123C+"), 318);
-assertEquals("C", res[215].exec("123456C+"), 319);
-assertEquals("C", res[215].exec("123456789C+"), 320);
-assertEquals("abc", res[215].exec("xyzabcC+"), 321);
-assertEquals("abc", res[215].exec("XxyzabcC+"), 322);
-assertEquals("abc", res[215].exec("abcdefC+"), 323);
-assertEquals("abc", res[215].exec("abcxyzC+"), 324);
-assertEquals("c", res[215].exec("abbbbbcccC*1"), 325);
-assertEquals("c", res[215].exec("abbbbbcccC*1"), 326);
-assertEquals(null, res[215].exec("xab", 327));
-assertEquals("c", res[215].exec("xbc"), 328);
-assertEquals(null, res[215].exec("xde", 329));
-assertEquals(null, res[215].exec("xxab", 330));
-assertEquals(null, res[215].exec("xxxab", 331));
-assertEquals(null, res[215].exec("*** Failers", 332));
-assertEquals(null, res[215].exec("xyab", 333));
-assertEquals("abc", res[215].exec("abc"), 334);
-assertEquals("c", res[215].exec("a(b)c"), 335);
-assertEquals("c", res[215].exec("a(b(c))d"), 336);
-assertEquals(null, res[215].exec("*** Failers)", 337));
-assertEquals("c", res[215].exec("a(b(c)d"), 338);
-assertEquals(null, res[215].exec("1221", 339));
-assertEquals("c", res[215].exec("Satan, oscillate my metallic sonatas!"), 340);
-assertEquals("c", res[215].exec("A man, a plan, a canal: Panama!"), 341);
-assertEquals(null, res[215].exec("Able was I ere I saw Elba.", 342));
-assertEquals(null, res[215].exec("*** Failers", 343));
-assertEquals("c", res[215].exec("The quick brown fox"), 344);
-assertEquals(null, res[215].exec("12", 345));
-assertEquals(null, res[215].exec("(((2+2)*-3)-7)", 346));
-assertEquals(null, res[215].exec("-12", 347));
-assertEquals(null, res[215].exec("*** Failers", 348));
-assertEquals(null, res[215].exec("((2+2)*-3)-7)", 349));
-assertEquals(null, res[215].exec("xyz", 350));
-assertEquals(null, res[215].exec("xxyzxyzz", 351));
-assertEquals(null, res[215].exec("*** Failers", 352));
-assertEquals(null, res[215].exec("xxyzz", 353));
-assertEquals(null, res[215].exec("xxyzxyzxyzz", 354));
-assertEquals(null, res[215].exec("<>", 355));
-assertEquals("abc", res[215].exec("<abcd>"), 356);
-assertEquals("abc", res[215].exec("<abc <123> hij>"), 357);
-assertEquals("abc", res[215].exec("<abc <def> hij>"), 358);
-assertEquals("abc", res[215].exec("<abc<>def>"), 359);
-assertEquals("abc", res[215].exec("<abc<>"), 360);
-assertEquals(null, res[215].exec("*** Failers", 361));
-assertEquals("abc", res[215].exec("<abc"), 362);
-assertEquals("abc", res[215].exec("abcdefabc"), 363);
-assertEquals(null, res[215].exec("a=a", 364));
-assertEquals(null, res[215].exec("a=b", 365));
-assertEquals("c", res[215].exec("a=bc"), 366);
-assertEquals(null, res[215].exec("a=a", 367));
-assertEquals(null, res[215].exec("a=b", 368));
-assertEquals("c", res[215].exec("a=bc"), 369);
-assertEquals(null, res[215].exec("abde", 370));
-assertEquals("c", res[215].exec("acde"), 371);
-assertEquals(null, res[215].exec("1221", 372));
-assertEquals("c", res[215].exec("Satan, oscillate my metallic sonatas!"), 373);
-assertEquals("c", res[215].exec("A man, a plan, a canal: Panama!"), 374);
-assertEquals(null, res[215].exec("Able was I ere I saw Elba.", 375));
-assertEquals(null, res[215].exec("*** Failers", 376));
-assertEquals("c", res[215].exec("The quick brown fox"), 377);
-assertEquals(null, res[228].exec("abcdefgh", 378));
-assertEquals(null, res[228].exec("abcdefghC1Gtwo", 379));
-assertEquals(null, res[228].exec("abcdefghConeCtwo", 380));
-assertEquals(null, res[228].exec("abcdefghCthree", 381));
-assertEquals("zz,", res[228].exec("zzaaCZ"), 382);
-assertEquals("zz,", res[228].exec("zzaaCA"), 383);
-assertEquals(null, res[228].exec("[10,20,30,5,5,4,4,2,43,23,4234]", 384));
-assertEquals(null, res[228].exec("*** Failers", 385));
-assertEquals(null, res[228].exec("[]", 386));
-assertEquals(null, res[228].exec("[10,20,30,5,5,4,4,2,43,23,4234]", 387));
-assertEquals(null, res[228].exec("[]", 388));
-assertEquals(" Baby Bjorn Active Carrier - With free SHIPPING!!, Baby Bjorn Active Carrier - With free SHIPPING!!,,", res[229].exec(" Baby Bjorn Active Carrier - With free SHIPPING!!"), 389);
-assertEquals(" Baby Bjorn Active Carrier - With free SHIPPING!!, Baby Bjorn Active Carrier - With free SHIPPING!!,,", res[230].exec(" Baby Bjorn Active Carrier - With free SHIPPING!!"), 390);
-assertEquals(null, res[238].exec("Note: that { does NOT introduce a quantifier", 391));
-assertEquals("aacaacaacaacaac123,aac", res[239].exec("aacaacaacaacaac123"), 392);
-assertEquals(null, res[243].exec("abP", 393));
-assertEquals(null, res[243].exec("abcP", 394));
-assertEquals(null, res[243].exec("abcdP", 395));
-assertEquals("abcde", res[243].exec("abcdeP"), 396);
-assertEquals(null, res[243].exec("the quick brown abcP", 397));
-assertEquals(null, res[243].exec("** FailersP", 398));
-assertEquals(null, res[243].exec("the quick brown abxyz foxP", 399));
-assertEquals(null, res[243].exec("13/05/04P", 400));
-assertEquals(null, res[243].exec("13/5/2004P", 401));
-assertEquals(null, res[243].exec("02/05/09P", 402));
-assertEquals(null, res[243].exec("1P", 403));
-assertEquals(null, res[243].exec("1/2P", 404));
-assertEquals(null, res[243].exec("1/2/0P", 405));
-assertEquals(null, res[243].exec("1/2/04P", 406));
-assertEquals(null, res[243].exec("0P", 407));
-assertEquals(null, res[243].exec("02/P", 408));
-assertEquals(null, res[243].exec("02/0P", 409));
-assertEquals(null, res[243].exec("02/1P", 410));
-assertEquals(null, res[243].exec("** FailersP", 411));
-assertEquals(null, res[243].exec("P", 412));
-assertEquals(null, res[243].exec("123P", 413));
-assertEquals(null, res[243].exec("33/4/04P", 414));
-assertEquals(null, res[243].exec("3/13/04P", 415));
-assertEquals(null, res[243].exec("0/1/2003P", 416));
-assertEquals(null, res[243].exec("0/P", 417));
-assertEquals(null, res[243].exec("02/0/P", 418));
-assertEquals(null, res[243].exec("02/13P", 419));
-assertEquals("123", res[248].exec("123P"), 420);
-assertEquals(null, res[248].exec("aP", 421));
-assertEquals(null, res[248].exec("bP", 422));
-assertEquals(null, res[248].exec("cP", 423));
-assertEquals(null, res[248].exec("c12P", 424));
-assertEquals("c123", res[248].exec("c123P"), 425);
-assertEquals(null, res[249].exec("1P", 426));
-assertEquals(null, res[249].exec("123P", 427));
-assertEquals("123X", res[249].exec("123X"), 428);
-assertEquals(null, res[249].exec("1234P", 429));
-assertEquals("1234X", res[249].exec("1234X"), 430);
-assertEquals(null, res[249].exec("12345P", 431));
-assertEquals("12345X", res[249].exec("12345X"), 432);
-assertEquals(null, res[249].exec("*** Failers", 433));
-assertEquals(null, res[249].exec("1X", 434));
-assertEquals(null, res[249].exec("123456P", 435));
-assertEquals(null, res[249].exec("abc", 436));
-assertEquals(null, res[249].exec("** Failers", 437));
-assertEquals(null, res[249].exec("bca", 438));
-assertEquals(null, res[249].exec("abc", 439));
-assertEquals(null, res[249].exec("** Failers", 440));
-assertEquals(null, res[249].exec("bca", 441));
-assertEquals(null, res[249].exec("abc", 442));
-assertEquals(null, res[249].exec("** Failers", 443));
-assertEquals(null, res[249].exec("def", 444));
-assertEquals(null, res[249].exec("abc", 445));
-assertEquals(null, res[249].exec("** Failers", 446));
-assertEquals(null, res[249].exec("def", 447));
-assertEquals(null, res[249].exec("<!DOCTYPE seite SYSTEM \"http://www.lco.lineas.de/xmlCms.dtd\">\n<seite>\n<dokumenteninformation>\n<seitentitel>Partner der LCO</seitentitel>\n<sprache>de</sprache>\n<seitenbeschreibung>Partner der LINEAS Consulting\nGmbH</seitenbeschreibung>\n<schluesselworte>LINEAS Consulting GmbH Hamburg\nPartnerfirmen</schluesselworte>\n<revisit>30 days</revisit>\n<robots>index,follow</robots>\n<menueinformation>\n<aktiv>ja</aktiv>\n<menueposition>3</menueposition>\n<menuetext>Partner</menuetext>\n</menueinformation>\n<lastedited>\n<autor>LCO</autor>\n<firma>LINEAS Consulting</firma>\n<datum>15.10.2003</datum>\n</lastedited>\n</dokumenteninformation>\n<inhalt>\n\n<absatzueberschrift>Die Partnerfirmen der LINEAS Consulting\nGmbH</absatzueberschrift>\n\n<absatz><link ziel=\"http://www.ca.com/\" zielfenster=\"_blank\">\n<bild name=\"logo_ca.gif\" rahmen=\"no\"/></link> <link\nziel=\"http://www.ey.com/\" zielfenster=\"_blank\"><bild\nname=\"logo_euy.gif\" rahmen=\"no\"/></link>\n</absatz>\n\n<absatz><link ziel=\"http://www.cisco.de/\" zielfenster=\"_blank\">\n<bild name=\"logo_cisco.gif\" rahmen=\"ja\"/></link></absatz>\n\n<absatz><link ziel=\"http://www.atelion.de/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_atelion.gif\" rahmen=\"no\"/></link>\n</absatz>\n\n<absatz><link ziel=\"http://www.line-information.de/\"\nzielfenster=\"_blank\">\n<bild name=\"logo_line_information.gif\" rahmen=\"no\"/></link>\n</absatz>\n\n<absatz><bild name=\"logo_aw.gif\" rahmen=\"no\"/></absatz>\n\n<absatz><link ziel=\"http://www.incognis.de/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_incognis.gif\" rahmen=\"no\"/></link></absatz>\n\n<absatz><link ziel=\"http://www.addcraft.com/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_addcraft.gif\" rahmen=\"no\"/></link></absatz>\n\n<absatz><link ziel=\"http://www.comendo.com/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_comendo.gif\" rahmen=\"no\"/></link></absatz>\n\n</inhalt>\n</seite>", 448));
-assertEquals("line\nbreak", res[251].exec("this is a line\nbreak"), 449);
-assertEquals("line\nbreak", res[251].exec("line one\nthis is a line\nbreak in the second line"), 450);
-assertEquals("line\nbreak", res[252].exec("this is a line\nbreak"), 451);
-assertEquals(null, res[252].exec("** Failers", 452));
-assertEquals("line\nbreak", res[252].exec("line one\nthis is a line\nbreak in the second line"), 453);
-assertEquals("line\nbreak", res[253].exec("this is a line\nbreak"), 454);
-assertEquals(null, res[253].exec("** Failers", 455));
-assertEquals("line\nbreak", res[253].exec("line one\nthis is a line\nbreak in the second line"), 456);
-assertEquals("ab-cd", res[254].exec("ab-cd"), 457);
-assertEquals("ab=cd", res[254].exec("ab=cd"), 458);
-assertEquals(null, res[254].exec("** Failers", 459));
-assertEquals(null, res[254].exec("ab\ncd", 460));
-assertEquals("ab-cd", res[255].exec("ab-cd"), 461);
-assertEquals("ab=cd", res[255].exec("ab=cd"), 462);
-assertEquals(null, res[255].exec("ab\ncd", 463));
-assertEquals(null, res[255].exec("AbCd", 464));
-assertEquals(null, res[255].exec("** Failers", 465));
-assertEquals(null, res[255].exec("abcd", 466));
+assertNull(res[189].exec("<>", 289));
+assertNull(res[189].exec("<abcd>", 290));
+assertNull(res[189].exec("<abc <123> hij>", 291));
+assertNull(res[189].exec("<abc <def> hij>", 292));
+assertNull(res[189].exec("<abc<>def>", 293));
+assertNull(res[189].exec("<abc<>", 294));
+assertNull(res[189].exec("*** Failers", 295));
+assertNull(res[189].exec("<abc", 296));
+assertToStringEquals("bc123bc,bc,bc", res[195].exec("abc123bc"), 297);
+assertToStringEquals("abc", res[215].exec("abcdef"), 298);
+assertToStringEquals("abc", res[215].exec("1234abcdef"), 299);
+assertNull(res[215].exec("*** Failers", 300));
+assertToStringEquals("abc", res[215].exec("abcxyz"), 301);
+assertToStringEquals("abc", res[215].exec("abcxyzf"), 302);
+assertToStringEquals("abc", res[215].exec("123abcdef"), 303);
+assertToStringEquals("abc", res[215].exec("1234abcdef"), 304);
+assertNull(res[215].exec("*** Failers", 305));
+assertToStringEquals("abc", res[215].exec("abcdef"), 306);
+assertNull(res[215].exec("*** Failers", 307));
+assertToStringEquals("abc", res[215].exec("\x83x0abcdef"), 308);
+assertToStringEquals("abc", res[215].exec("123abcdef"), 309);
+assertToStringEquals("abc", res[215].exec("123abcdefC+"), 310);
+assertToStringEquals("abc", res[215].exec("123abcdefC-"), 311);
+assertNull(res[215].exec("*** Failers", 312));
+assertToStringEquals("abc", res[215].exec("123abcdefC!1"), 313);
+assertToStringEquals("abc", res[215].exec("abcabcabc"), 314);
+assertToStringEquals("abc", res[215].exec("abcabcC!1!3"), 315);
+assertNull(res[215].exec("*** Failers", 316));
+assertToStringEquals("abc", res[215].exec("abcabcabcC!1!3"), 317);
+assertToStringEquals("C", res[215].exec("123C+"), 318);
+assertToStringEquals("C", res[215].exec("123456C+"), 319);
+assertToStringEquals("C", res[215].exec("123456789C+"), 320);
+assertToStringEquals("abc", res[215].exec("xyzabcC+"), 321);
+assertToStringEquals("abc", res[215].exec("XxyzabcC+"), 322);
+assertToStringEquals("abc", res[215].exec("abcdefC+"), 323);
+assertToStringEquals("abc", res[215].exec("abcxyzC+"), 324);
+assertToStringEquals("c", res[215].exec("abbbbbcccC*1"), 325);
+assertToStringEquals("c", res[215].exec("abbbbbcccC*1"), 326);
+assertNull(res[215].exec("xab", 327));
+assertToStringEquals("c", res[215].exec("xbc"), 328);
+assertNull(res[215].exec("xde", 329));
+assertNull(res[215].exec("xxab", 330));
+assertNull(res[215].exec("xxxab", 331));
+assertNull(res[215].exec("*** Failers", 332));
+assertNull(res[215].exec("xyab", 333));
+assertToStringEquals("abc", res[215].exec("abc"), 334);
+assertToStringEquals("c", res[215].exec("a(b)c"), 335);
+assertToStringEquals("c", res[215].exec("a(b(c))d"), 336);
+assertNull(res[215].exec("*** Failers)", 337));
+assertToStringEquals("c", res[215].exec("a(b(c)d"), 338);
+assertNull(res[215].exec("1221", 339));
+assertToStringEquals("c", res[215].exec("Satan, oscillate my metallic sonatas!"), 340);
+assertToStringEquals("c", res[215].exec("A man, a plan, a canal: Panama!"), 341);
+assertNull(res[215].exec("Able was I ere I saw Elba.", 342));
+assertNull(res[215].exec("*** Failers", 343));
+assertToStringEquals("c", res[215].exec("The quick brown fox"), 344);
+assertNull(res[215].exec("12", 345));
+assertNull(res[215].exec("(((2+2)*-3)-7)", 346));
+assertNull(res[215].exec("-12", 347));
+assertNull(res[215].exec("*** Failers", 348));
+assertNull(res[215].exec("((2+2)*-3)-7)", 349));
+assertNull(res[215].exec("xyz", 350));
+assertNull(res[215].exec("xxyzxyzz", 351));
+assertNull(res[215].exec("*** Failers", 352));
+assertNull(res[215].exec("xxyzz", 353));
+assertNull(res[215].exec("xxyzxyzxyzz", 354));
+assertNull(res[215].exec("<>", 355));
+assertToStringEquals("abc", res[215].exec("<abcd>"), 356);
+assertToStringEquals("abc", res[215].exec("<abc <123> hij>"), 357);
+assertToStringEquals("abc", res[215].exec("<abc <def> hij>"), 358);
+assertToStringEquals("abc", res[215].exec("<abc<>def>"), 359);
+assertToStringEquals("abc", res[215].exec("<abc<>"), 360);
+assertNull(res[215].exec("*** Failers", 361));
+assertToStringEquals("abc", res[215].exec("<abc"), 362);
+assertToStringEquals("abc", res[215].exec("abcdefabc"), 363);
+assertNull(res[215].exec("a=a", 364));
+assertNull(res[215].exec("a=b", 365));
+assertToStringEquals("c", res[215].exec("a=bc"), 366);
+assertNull(res[215].exec("a=a", 367));
+assertNull(res[215].exec("a=b", 368));
+assertToStringEquals("c", res[215].exec("a=bc"), 369);
+assertNull(res[215].exec("abde", 370));
+assertToStringEquals("c", res[215].exec("acde"), 371);
+assertNull(res[215].exec("1221", 372));
+assertToStringEquals("c", res[215].exec("Satan, oscillate my metallic sonatas!"), 373);
+assertToStringEquals("c", res[215].exec("A man, a plan, a canal: Panama!"), 374);
+assertNull(res[215].exec("Able was I ere I saw Elba.", 375));
+assertNull(res[215].exec("*** Failers", 376));
+assertToStringEquals("c", res[215].exec("The quick brown fox"), 377);
+assertNull(res[228].exec("abcdefgh", 378));
+assertNull(res[228].exec("abcdefghC1Gtwo", 379));
+assertNull(res[228].exec("abcdefghConeCtwo", 380));
+assertNull(res[228].exec("abcdefghCthree", 381));
+assertToStringEquals("zz,", res[228].exec("zzaaCZ"), 382);
+assertToStringEquals("zz,", res[228].exec("zzaaCA"), 383);
+assertNull(res[228].exec("[10,20,30,5,5,4,4,2,43,23,4234]", 384));
+assertNull(res[228].exec("*** Failers", 385));
+assertNull(res[228].exec("[]", 386));
+assertNull(res[228].exec("[10,20,30,5,5,4,4,2,43,23,4234]", 387));
+assertNull(res[228].exec("[]", 388));
+assertToStringEquals(" Baby Bjorn Active Carrier - With free SHIPPING!!, Baby Bjorn Active Carrier - With free SHIPPING!!,,", res[229].exec(" Baby Bjorn Active Carrier - With free SHIPPING!!"), 389);
+assertToStringEquals(" Baby Bjorn Active Carrier - With free SHIPPING!!, Baby Bjorn Active Carrier - With free SHIPPING!!,,", res[230].exec(" Baby Bjorn Active Carrier - With free SHIPPING!!"), 390);
+assertNull(res[238].exec("Note: that { does NOT introduce a quantifier", 391));
+assertToStringEquals("aacaacaacaacaac123,aac", res[239].exec("aacaacaacaacaac123"), 392);
+assertNull(res[243].exec("abP", 393));
+assertNull(res[243].exec("abcP", 394));
+assertNull(res[243].exec("abcdP", 395));
+assertToStringEquals("abcde", res[243].exec("abcdeP"), 396);
+assertNull(res[243].exec("the quick brown abcP", 397));
+assertNull(res[243].exec("** FailersP", 398));
+assertNull(res[243].exec("the quick brown abxyz foxP", 399));
+assertNull(res[243].exec("13/05/04P", 400));
+assertNull(res[243].exec("13/5/2004P", 401));
+assertNull(res[243].exec("02/05/09P", 402));
+assertNull(res[243].exec("1P", 403));
+assertNull(res[243].exec("1/2P", 404));
+assertNull(res[243].exec("1/2/0P", 405));
+assertNull(res[243].exec("1/2/04P", 406));
+assertNull(res[243].exec("0P", 407));
+assertNull(res[243].exec("02/P", 408));
+assertNull(res[243].exec("02/0P", 409));
+assertNull(res[243].exec("02/1P", 410));
+assertNull(res[243].exec("** FailersP", 411));
+assertNull(res[243].exec("P", 412));
+assertNull(res[243].exec("123P", 413));
+assertNull(res[243].exec("33/4/04P", 414));
+assertNull(res[243].exec("3/13/04P", 415));
+assertNull(res[243].exec("0/1/2003P", 416));
+assertNull(res[243].exec("0/P", 417));
+assertNull(res[243].exec("02/0/P", 418));
+assertNull(res[243].exec("02/13P", 419));
+assertToStringEquals("123", res[248].exec("123P"), 420);
+assertNull(res[248].exec("aP", 421));
+assertNull(res[248].exec("bP", 422));
+assertNull(res[248].exec("cP", 423));
+assertNull(res[248].exec("c12P", 424));
+assertToStringEquals("c123", res[248].exec("c123P"), 425);
+assertNull(res[249].exec("1P", 426));
+assertNull(res[249].exec("123P", 427));
+assertToStringEquals("123X", res[249].exec("123X"), 428);
+assertNull(res[249].exec("1234P", 429));
+assertToStringEquals("1234X", res[249].exec("1234X"), 430);
+assertNull(res[249].exec("12345P", 431));
+assertToStringEquals("12345X", res[249].exec("12345X"), 432);
+assertNull(res[249].exec("*** Failers", 433));
+assertNull(res[249].exec("1X", 434));
+assertNull(res[249].exec("123456P", 435));
+assertNull(res[249].exec("abc", 436));
+assertNull(res[249].exec("** Failers", 437));
+assertNull(res[249].exec("bca", 438));
+assertNull(res[249].exec("abc", 439));
+assertNull(res[249].exec("** Failers", 440));
+assertNull(res[249].exec("bca", 441));
+assertNull(res[249].exec("abc", 442));
+assertNull(res[249].exec("** Failers", 443));
+assertNull(res[249].exec("def", 444));
+assertNull(res[249].exec("abc", 445));
+assertNull(res[249].exec("** Failers", 446));
+assertNull(res[249].exec("def", 447));
+assertNull(res[249].exec("<!DOCTYPE seite SYSTEM \"http://www.lco.lineas.de/xmlCms.dtd\">\n<seite>\n<dokumenteninformation>\n<seitentitel>Partner der LCO</seitentitel>\n<sprache>de</sprache>\n<seitenbeschreibung>Partner der LINEAS Consulting\nGmbH</seitenbeschreibung>\n<schluesselworte>LINEAS Consulting GmbH Hamburg\nPartnerfirmen</schluesselworte>\n<revisit>30 days</revisit>\n<robots>index,follow</robots>\n<menueinformation>\n<aktiv>ja</aktiv>\n<menueposition>3</menueposition>\n<menuetext>Partner</menuetext>\n</menueinformation>\n<lastedited>\n<autor>LCO</autor>\n<firma>LINEAS Consulting</firma>\n<datum>15.10.2003</datum>\n</lastedited>\n</dokumenteninformation>\n<inhalt>\n\n<absatzueberschrift>Die Partnerfirmen der LINEAS Consulting\nGmbH</absatzueberschrift>\n\n<absatz><link ziel=\"http://www.ca.com/\" zielfenster=\"_blank\">\n<bild name=\"logo_ca.gif\" rahmen=\"no\"/></link> <link\nziel=\"http://www.ey.com/\" zielfenster=\"_blank\"><bild\nname=\"logo_euy.gif\" rahmen=\"no\"/></link>\n</absatz>\n\n<absatz><link ziel=\"http://www.cisco.de/\" zielfenster=\"_blank\">\n<bild name=\"logo_cisco.gif\" rahmen=\"ja\"/></link></absatz>\n\n<absatz><link ziel=\"http://www.atelion.de/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_atelion.gif\" rahmen=\"no\"/></link>\n</absatz>\n\n<absatz><link ziel=\"http://www.line-information.de/\"\nzielfenster=\"_blank\">\n<bild name=\"logo_line_information.gif\" rahmen=\"no\"/></link>\n</absatz>\n\n<absatz><bild name=\"logo_aw.gif\" rahmen=\"no\"/></absatz>\n\n<absatz><link ziel=\"http://www.incognis.de/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_incognis.gif\" rahmen=\"no\"/></link></absatz>\n\n<absatz><link ziel=\"http://www.addcraft.com/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_addcraft.gif\" rahmen=\"no\"/></link></absatz>\n\n<absatz><link ziel=\"http://www.comendo.com/\"\nzielfenster=\"_blank\"><bild\nname=\"logo_comendo.gif\" rahmen=\"no\"/></link></absatz>\n\n</inhalt>\n</seite>", 448));
+assertToStringEquals("line\nbreak", res[251].exec("this is a line\nbreak"), 449);
+assertToStringEquals("line\nbreak", res[251].exec("line one\nthis is a line\nbreak in the second line"), 450);
+assertToStringEquals("line\nbreak", res[252].exec("this is a line\nbreak"), 451);
+assertNull(res[252].exec("** Failers", 452));
+assertToStringEquals("line\nbreak", res[252].exec("line one\nthis is a line\nbreak in the second line"), 453);
+assertToStringEquals("line\nbreak", res[253].exec("this is a line\nbreak"), 454);
+assertNull(res[253].exec("** Failers", 455));
+assertToStringEquals("line\nbreak", res[253].exec("line one\nthis is a line\nbreak in the second line"), 456);
+assertToStringEquals("ab-cd", res[254].exec("ab-cd"), 457);
+assertToStringEquals("ab=cd", res[254].exec("ab=cd"), 458);
+assertNull(res[254].exec("** Failers", 459));
+assertNull(res[254].exec("ab\ncd", 460));
+assertToStringEquals("ab-cd", res[255].exec("ab-cd"), 461);
+assertToStringEquals("ab=cd", res[255].exec("ab=cd"), 462);
+assertNull(res[255].exec("ab\ncd", 463));
+assertNull(res[255].exec("AbCd", 464));
+assertNull(res[255].exec("** Failers", 465));
+assertNull(res[255].exec("abcd", 466));
 // We are compatible with JSC, and don't throw an exception in this case.
 // assertThrows("var re = /(){2,4294967295}/;", 467);
-assertEquals(null, res[255].exec("abcdefghijklAkB", 468));
-assertEquals(null, res[255].exec("abcdefghijklAkB", 469));
-assertEquals(null, res[255].exec("abcdefghijklAkB", 470));
-assertEquals(null, res[255].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 471));
-assertEquals(null, res[255].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 472));
-assertEquals(null, res[255].exec("(this(and)that", 473));
-assertEquals(null, res[255].exec("(this(and)that)", 474));
-assertEquals(null, res[255].exec("(this(and)that)stuff", 475));
-assertEquals(null, res[255].exec("(this(and)that", 476));
-assertEquals(null, res[255].exec("(this(and)that)", 477));
-assertEquals(null, res[255].exec("(this(and)that", 478));
-assertEquals(null, res[255].exec("(this(and)that)", 479));
-assertEquals(null, res[255].exec("(this(and)that", 480));
-assertEquals(null, res[255].exec("(this(and)that)", 481));
-assertEquals(null, res[255].exec("((this))", 482));
-assertEquals(null, res[255].exec("(this(and)that", 483));
-assertEquals(null, res[255].exec("(this(and)that)", 484));
-assertEquals(null, res[255].exec("(this)", 485));
-assertEquals(null, res[255].exec("((this))", 486));
-assertEquals("abc,b", res[256].exec("abc"), 487);
-assertEquals("abc,b", res[256].exec("abc"), 488);
-assertEquals(null, res[256].exec("a1bCA", 489));
-assertEquals(null, res[256].exec("a2bCA", 490));
-assertEquals(null, res[257].exec("a bc dCACBCC", 491));
-assertEquals(null, res[257].exec("aabc", 492));
-assertEquals(null, res[257].exec("bc", 493));
-assertEquals(null, res[257].exec("** Failers", 494));
-assertEquals(null, res[257].exec("abc", 495));
-assertEquals(null, res[257].exec("bXaX", 496));
-assertEquals(null, res[257].exec("bbXaaX", 497));
-assertEquals(null, res[257].exec("(b)\\Xa\\X", 498));
-assertEquals(null, res[257].exec("bXXaYYaY", 499));
-assertEquals(null, res[257].exec("bXYaXXaX", 500));
-assertEquals(null, res[257].exec("bXXaYYaY", 501));
-assertEquals("\x0b,\x0b", res[259].exec("\x0b,\x0b"), 502);
-assertEquals("\x0c,\x0d", res[259].exec("\x0c,\x0d"), 503);
-assertEquals("abc", res[260].exec("xyz\nabc"), 504);
-assertEquals("abc", res[260].exec("xyz\nabc<lf>"), 505);
-assertEquals("abc", res[260].exec("xyz\x0d\nabc<lf>"), 506);
-assertEquals("abc", res[260].exec("xyz\x0dabc<cr>"), 507);
-assertEquals("abc", res[260].exec("xyz\x0d\nabc<crlf>"), 508);
-assertEquals(null, res[260].exec("** Failers", 509));
-assertEquals("abc", res[260].exec("xyz\nabc<cr>"), 510);
-assertEquals("abc", res[260].exec("xyz\x0d\nabc<cr>"), 511);
-assertEquals("abc", res[260].exec("xyz\nabc<crlf>"), 512);
-assertEquals("abc", res[260].exec("xyz\x0dabc<crlf>"), 513);
-assertEquals("abc", res[260].exec("xyz\x0dabc<lf>"), 514);
-assertEquals("abc", res[261].exec("xyzabc"), 515);
-assertEquals("abc", res[261].exec("xyzabc\n"), 516);
-assertEquals("abc", res[261].exec("xyzabc\npqr"), 517);
-assertEquals("abc", res[261].exec("xyzabc\x0d<cr>"), 518);
-assertEquals("abc", res[261].exec("xyzabc\x0dpqr<cr>"), 519);
-assertEquals("abc", res[261].exec("xyzabc\x0d\n<crlf>"), 520);
-assertEquals("abc", res[261].exec("xyzabc\x0d\npqr<crlf>"), 521);
-assertEquals(null, res[261].exec("** Failers", 522));
-assertEquals("abc", res[261].exec("xyzabc\x0d"), 523);
-assertEquals("abc", res[261].exec("xyzabc\x0dpqr"), 524);
-assertEquals("abc", res[261].exec("xyzabc\x0d\n"), 525);
-assertEquals("abc", res[261].exec("xyzabc\x0d\npqr"), 526);
-assertEquals("abc", res[262].exec("xyz\x0dabcdef"), 527);
-assertEquals("abc", res[262].exec("xyz\nabcdef<lf>"), 528);
-assertEquals(null, res[262].exec("** Failers", 529));
-assertEquals("abc", res[262].exec("xyz\nabcdef"), 530);
-assertEquals("abc", res[263].exec("xyz\nabcdef"), 531);
-assertEquals("abc", res[263].exec("xyz\x0dabcdef<cr>"), 532);
-assertEquals(null, res[263].exec("** Failers", 533));
-assertEquals("abc", res[263].exec("xyz\x0dabcdef"), 534);
-assertEquals("abc", res[264].exec("xyz\x0d\nabcdef"), 535);
-assertEquals("abc", res[264].exec("xyz\x0dabcdef<cr>"), 536);
-assertEquals(null, res[264].exec("** Failers", 537));
-assertEquals("abc", res[264].exec("xyz\x0dabcdef"), 538);
-assertEquals("abc", res[266].exec("xyz\x0dabc<bad>"), 539);
-assertEquals("abc", res[266].exec("abc"), 540);
-assertEquals("abc", res[267].exec("abc\ndef"), 541);
-assertEquals("abc", res[267].exec("abc\x0ddef"), 542);
-assertEquals("abc", res[267].exec("abc\x0d\ndef"), 543);
-assertEquals("<cr>abc", res[267].exec("<cr>abc\ndef"), 544);
-assertEquals("<cr>abc", res[267].exec("<cr>abc\x0ddef"), 545);
-assertEquals("<cr>abc", res[267].exec("<cr>abc\x0d\ndef"), 546);
-assertEquals("<crlf>abc", res[267].exec("<crlf>abc\ndef"), 547);
-assertEquals("<crlf>abc", res[267].exec("<crlf>abc\x0ddef"), 548);
-assertEquals("<crlf>abc", res[267].exec("<crlf>abc\x0d\ndef"), 549);
-assertEquals(null, res[268].exec("abc\ndef", 550));
-assertEquals(null, res[268].exec("abc\x0ddef", 551));
-assertEquals(null, res[268].exec("abc\x0d\ndef", 552));
-assertEquals("XY,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,XY,Y", res[269].exec("XYO400"), 553);
-assertEquals("aaaA5", res[278].exec("aaaA5"), 554);
-assertEquals(null, res[278].exec("** Failers", 555));
-assertEquals(null, res[278].exec("aaaa5", 556));
-assertEquals("aaaA5", res[279].exec("aaaA5"), 557);
-assertEquals("aaaa5", res[279].exec("aaaa5"), 558);
-assertEquals("x", res[350].exec("xyCabcCxyz"), 559);
-assertEquals("x", res[350].exec("xyCabcCxyz"), 560);
-assertEquals("b", res[350].exec("bXaX"), 561);
-assertEquals("b", res[350].exec("bXbX"), 562);
-assertEquals("*", res[350].exec("** Failers"), 563);
-assertEquals("aX", res[350].exec("aXaX"), 564);
-assertEquals("aX", res[350].exec("aXbX"), 565);
-assertEquals("x", res[350].exec("xx"), 566);
-assertEquals("x", res[350].exec("xy"), 567);
-assertEquals("y", res[350].exec("yy"), 568);
-assertEquals("y", res[350].exec("yx"), 569);
-assertEquals("x", res[350].exec("xx"), 570);
-assertEquals("x", res[350].exec("xy"), 571);
-assertEquals("y", res[350].exec("yy"), 572);
-assertEquals("y", res[350].exec("yx"), 573);
-assertEquals("b", res[350].exec("bxay"), 574);
-assertEquals("b", res[350].exec("bxby"), 575);
-assertEquals("*", res[350].exec("** Failers"), 576);
-assertEquals("ax", res[350].exec("axby"), 577);
-assertEquals("X", res[350].exec("XxXxxx"), 578);
-assertEquals("X", res[350].exec("XxXyyx"), 579);
-assertEquals("X", res[350].exec("XxXyxx"), 580);
-assertEquals("*", res[350].exec("** Failers"), 581);
-assertEquals("x", res[350].exec("x"), 582);
-assertEquals("ab", res[350].exec("abcabc"), 583);
-assertEquals("Xaaa,a", res[351].exec("Xaaa"), 584);
-assertEquals("Xaba,a", res[351].exec("Xaba"), 585);
+assertNull(res[255].exec("abcdefghijklAkB", 468));
+assertNull(res[255].exec("abcdefghijklAkB", 469));
+assertNull(res[255].exec("abcdefghijklAkB", 470));
+assertNull(res[255].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 471));
+assertNull(res[255].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 472));
+assertNull(res[255].exec("(this(and)that", 473));
+assertNull(res[255].exec("(this(and)that)", 474));
+assertNull(res[255].exec("(this(and)that)stuff", 475));
+assertNull(res[255].exec("(this(and)that", 476));
+assertNull(res[255].exec("(this(and)that)", 477));
+assertNull(res[255].exec("(this(and)that", 478));
+assertNull(res[255].exec("(this(and)that)", 479));
+assertNull(res[255].exec("(this(and)that", 480));
+assertNull(res[255].exec("(this(and)that)", 481));
+assertNull(res[255].exec("((this))", 482));
+assertNull(res[255].exec("(this(and)that", 483));
+assertNull(res[255].exec("(this(and)that)", 484));
+assertNull(res[255].exec("(this)", 485));
+assertNull(res[255].exec("((this))", 486));
+assertToStringEquals("abc,b", res[256].exec("abc"), 487);
+assertToStringEquals("abc,b", res[256].exec("abc"), 488);
+assertNull(res[256].exec("a1bCA", 489));
+assertNull(res[256].exec("a2bCA", 490));
+assertNull(res[257].exec("a bc dCACBCC", 491));
+assertNull(res[257].exec("aabc", 492));
+assertNull(res[257].exec("bc", 493));
+assertNull(res[257].exec("** Failers", 494));
+assertNull(res[257].exec("abc", 495));
+assertNull(res[257].exec("bXaX", 496));
+assertNull(res[257].exec("bbXaaX", 497));
+assertNull(res[257].exec("(b)\\Xa\\X", 498));
+assertNull(res[257].exec("bXXaYYaY", 499));
+assertNull(res[257].exec("bXYaXXaX", 500));
+assertNull(res[257].exec("bXXaYYaY", 501));
+assertToStringEquals("\x0b,\x0b", res[259].exec("\x0b,\x0b"), 502);
+assertToStringEquals("\x0c,\x0d", res[259].exec("\x0c,\x0d"), 503);
+assertToStringEquals("abc", res[260].exec("xyz\nabc"), 504);
+assertToStringEquals("abc", res[260].exec("xyz\nabc<lf>"), 505);
+assertToStringEquals("abc", res[260].exec("xyz\x0d\nabc<lf>"), 506);
+assertToStringEquals("abc", res[260].exec("xyz\x0dabc<cr>"), 507);
+assertToStringEquals("abc", res[260].exec("xyz\x0d\nabc<crlf>"), 508);
+assertNull(res[260].exec("** Failers", 509));
+assertToStringEquals("abc", res[260].exec("xyz\nabc<cr>"), 510);
+assertToStringEquals("abc", res[260].exec("xyz\x0d\nabc<cr>"), 511);
+assertToStringEquals("abc", res[260].exec("xyz\nabc<crlf>"), 512);
+assertToStringEquals("abc", res[260].exec("xyz\x0dabc<crlf>"), 513);
+assertToStringEquals("abc", res[260].exec("xyz\x0dabc<lf>"), 514);
+assertToStringEquals("abc", res[261].exec("xyzabc"), 515);
+assertToStringEquals("abc", res[261].exec("xyzabc\n"), 516);
+assertToStringEquals("abc", res[261].exec("xyzabc\npqr"), 517);
+assertToStringEquals("abc", res[261].exec("xyzabc\x0d<cr>"), 518);
+assertToStringEquals("abc", res[261].exec("xyzabc\x0dpqr<cr>"), 519);
+assertToStringEquals("abc", res[261].exec("xyzabc\x0d\n<crlf>"), 520);
+assertToStringEquals("abc", res[261].exec("xyzabc\x0d\npqr<crlf>"), 521);
+assertNull(res[261].exec("** Failers", 522));
+assertToStringEquals("abc", res[261].exec("xyzabc\x0d"), 523);
+assertToStringEquals("abc", res[261].exec("xyzabc\x0dpqr"), 524);
+assertToStringEquals("abc", res[261].exec("xyzabc\x0d\n"), 525);
+assertToStringEquals("abc", res[261].exec("xyzabc\x0d\npqr"), 526);
+assertToStringEquals("abc", res[262].exec("xyz\x0dabcdef"), 527);
+assertToStringEquals("abc", res[262].exec("xyz\nabcdef<lf>"), 528);
+assertNull(res[262].exec("** Failers", 529));
+assertToStringEquals("abc", res[262].exec("xyz\nabcdef"), 530);
+assertToStringEquals("abc", res[263].exec("xyz\nabcdef"), 531);
+assertToStringEquals("abc", res[263].exec("xyz\x0dabcdef<cr>"), 532);
+assertNull(res[263].exec("** Failers", 533));
+assertToStringEquals("abc", res[263].exec("xyz\x0dabcdef"), 534);
+assertToStringEquals("abc", res[264].exec("xyz\x0d\nabcdef"), 535);
+assertToStringEquals("abc", res[264].exec("xyz\x0dabcdef<cr>"), 536);
+assertNull(res[264].exec("** Failers", 537));
+assertToStringEquals("abc", res[264].exec("xyz\x0dabcdef"), 538);
+assertToStringEquals("abc", res[266].exec("xyz\x0dabc<bad>"), 539);
+assertToStringEquals("abc", res[266].exec("abc"), 540);
+assertToStringEquals("abc", res[267].exec("abc\ndef"), 541);
+assertToStringEquals("abc", res[267].exec("abc\x0ddef"), 542);
+assertToStringEquals("abc", res[267].exec("abc\x0d\ndef"), 543);
+assertToStringEquals("<cr>abc", res[267].exec("<cr>abc\ndef"), 544);
+assertToStringEquals("<cr>abc", res[267].exec("<cr>abc\x0ddef"), 545);
+assertToStringEquals("<cr>abc", res[267].exec("<cr>abc\x0d\ndef"), 546);
+assertToStringEquals("<crlf>abc", res[267].exec("<crlf>abc\ndef"), 547);
+assertToStringEquals("<crlf>abc", res[267].exec("<crlf>abc\x0ddef"), 548);
+assertToStringEquals("<crlf>abc", res[267].exec("<crlf>abc\x0d\ndef"), 549);
+assertNull(res[268].exec("abc\ndef", 550));
+assertNull(res[268].exec("abc\x0ddef", 551));
+assertNull(res[268].exec("abc\x0d\ndef", 552));
+assertToStringEquals("XY,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,XY,Y", res[269].exec("XYO400"), 553);
+assertToStringEquals("aaaA5", res[278].exec("aaaA5"), 554);
+assertNull(res[278].exec("** Failers", 555));
+assertNull(res[278].exec("aaaa5", 556));
+assertToStringEquals("aaaA5", res[279].exec("aaaA5"), 557);
+assertToStringEquals("aaaa5", res[279].exec("aaaa5"), 558);
+assertToStringEquals("x", res[350].exec("xyCabcCxyz"), 559);
+assertToStringEquals("x", res[350].exec("xyCabcCxyz"), 560);
+assertToStringEquals("b", res[350].exec("bXaX"), 561);
+assertToStringEquals("b", res[350].exec("bXbX"), 562);
+assertToStringEquals("*", res[350].exec("** Failers"), 563);
+assertToStringEquals("aX", res[350].exec("aXaX"), 564);
+assertToStringEquals("aX", res[350].exec("aXbX"), 565);
+assertToStringEquals("x", res[350].exec("xx"), 566);
+assertToStringEquals("x", res[350].exec("xy"), 567);
+assertToStringEquals("y", res[350].exec("yy"), 568);
+assertToStringEquals("y", res[350].exec("yx"), 569);
+assertToStringEquals("x", res[350].exec("xx"), 570);
+assertToStringEquals("x", res[350].exec("xy"), 571);
+assertToStringEquals("y", res[350].exec("yy"), 572);
+assertToStringEquals("y", res[350].exec("yx"), 573);
+assertToStringEquals("b", res[350].exec("bxay"), 574);
+assertToStringEquals("b", res[350].exec("bxby"), 575);
+assertToStringEquals("*", res[350].exec("** Failers"), 576);
+assertToStringEquals("ax", res[350].exec("axby"), 577);
+assertToStringEquals("X", res[350].exec("XxXxxx"), 578);
+assertToStringEquals("X", res[350].exec("XxXyyx"), 579);
+assertToStringEquals("X", res[350].exec("XxXyxx"), 580);
+assertToStringEquals("*", res[350].exec("** Failers"), 581);
+assertToStringEquals("x", res[350].exec("x"), 582);
+assertToStringEquals("ab", res[350].exec("abcabc"), 583);
+assertToStringEquals("Xaaa,a", res[351].exec("Xaaa"), 584);
+assertToStringEquals("Xaba,a", res[351].exec("Xaba"), 585);
 assertThrows("var re = /^[a-\\Q\\E]/;", 586);
-assertEquals(null, res[353].exec("(xy)x", 587));
-assertEquals(null, res[353].exec("1221", 588));
-assertEquals(null, res[353].exec("Satan, oscillate my metallic sonatas!", 589));
-assertEquals(null, res[353].exec("A man, a plan, a canal: Panama!", 590));
-assertEquals(null, res[353].exec("Able was I ere I saw Elba.", 591));
-assertEquals(null, res[353].exec("*** Failers", 592));
-assertEquals(null, res[353].exec("The quick brown fox", 593));
-assertEquals("abcd:,abcd", res[354].exec("abcd:"), 594);
-assertEquals("abcd:,abcd", res[354].exec("abcd:"), 595);
-assertEquals("a:,a", res[354].exec("a:aaxyz"), 596);
-assertEquals("ab:,ab", res[354].exec("ab:ababxyz"), 597);
-assertEquals(null, res[354].exec("** Failers", 598));
-assertEquals("a:,a", res[354].exec("a:axyz"), 599);
-assertEquals("ab:,ab", res[354].exec("ab:abxyz"), 600);
-assertEquals(null, res[354].exec("abd", 601));
-assertEquals(null, res[354].exec("ce", 602));
-assertEquals(null, res[354].exec("abcabc1Xabc2XabcXabcabc", 603));
-assertEquals(null, res[354].exec("abcabc1Xabc2XabcXabcabc", 604));
-assertEquals(null, res[354].exec("abcabc1Xabc2XabcXabcabc", 605));
-assertEquals(null, res[354].exec("abcd", 606));
-assertEquals(null, res[354].exec("metcalfe 33", 607));
-assertEquals(null, res[356].exec("a\x0db", 608));
-assertEquals(null, res[356].exec("a\nb<cr>", 609));
-assertEquals("a\x85b", res[356].exec("a\x85b<anycrlf> "), 610);
-assertEquals(null, res[356].exec("** Failers", 611));
-assertEquals(null, res[356].exec("a\nb", 612));
-assertEquals(null, res[356].exec("a\nb<any>", 613));
-assertEquals(null, res[356].exec("a\x0db<cr>", 614));
-assertEquals(null, res[356].exec("a\x0db<any>", 615));
-assertEquals("a\x85b", res[356].exec("a\x85b<any> "), 616);
-assertEquals(null, res[356].exec("a\x0db<anycrlf>", 617));
-assertEquals("abc1", res[357].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 \x85abc7 JUNK"), 618);
-assertEquals("abc1", res[358].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6\x85 abc7 abc9"), 619);
-assertEquals(null, res[361].exec("a\nb", 620));
-assertEquals(null, res[361].exec("a\x0db", 621));
-assertEquals(null, res[361].exec("a\x0d\nb", 622));
-assertEquals(null, res[361].exec("a\x0bb", 623));
-assertEquals(null, res[361].exec("a\x0cb", 624));
-assertEquals(null, res[361].exec("a\x85b", 625));
-assertEquals(null, res[361].exec("** Failers", 626));
-assertEquals(null, res[361].exec("a\n\x0db", 627));
-assertEquals("ab", res[362].exec("ab"), 628);
-assertEquals(null, res[362].exec("a\nb", 629));
-assertEquals(null, res[362].exec("a\x0db", 630));
-assertEquals(null, res[362].exec("a\x0d\nb", 631));
-assertEquals(null, res[362].exec("a\x0bb", 632));
-assertEquals(null, res[362].exec("a\x0cb", 633));
-assertEquals(null, res[362].exec("a\x85b", 634));
-assertEquals(null, res[362].exec("a\n\x0db", 635));
-assertEquals(null, res[362].exec("a\n\x0d\x85\x0cb", 636));
-assertEquals(null, res[363].exec("a\nb", 637));
-assertEquals(null, res[363].exec("a\x0db", 638));
-assertEquals(null, res[363].exec("a\x0d\nb", 639));
-assertEquals(null, res[363].exec("a\x0bb", 640));
-assertEquals(null, res[363].exec("a\x0cb", 641));
-assertEquals(null, res[363].exec("a\x85b", 642));
-assertEquals(null, res[363].exec("a\n\x0db", 643));
-assertEquals(null, res[363].exec("a\n\x0d\x85\x0cb", 644));
-assertEquals(null, res[363].exec("** Failers", 645));
-assertEquals(null, res[363].exec("ab", 646));
-assertEquals(null, res[364].exec("a\nb", 647));
-assertEquals(null, res[364].exec("a\n\x0db", 648));
-assertEquals(null, res[364].exec("a\n\x0d\x85b", 649));
-assertEquals(null, res[364].exec("a\x0d\n\x0d\nb", 650));
-assertEquals(null, res[364].exec("a\x0d\n\x0d\n\x0d\nb", 651));
-assertEquals(null, res[364].exec("a\n\x0d\n\x0db", 652));
-assertEquals(null, res[364].exec("a\n\n\x0d\nb", 653));
-assertEquals(null, res[364].exec("** Failers", 654));
-assertEquals(null, res[364].exec("a\n\n\n\x0db", 655));
-assertEquals(null, res[364].exec("a\x0d", 656));
-assertEquals("aRb", res[365].exec("aRb"), 657);
-assertEquals(null, res[365].exec("** Failers", 658));
-assertEquals(null, res[365].exec("a\nb", 659));
-assertEquals(null, res[365].exec("abcPXP123", 660));
-assertEquals(null, res[365].exec("abcPXP123", 661));
-assertEquals(null, res[365].exec("1.2.3.4", 662));
-assertEquals(null, res[365].exec("131.111.10.206", 663));
-assertEquals(null, res[365].exec("10.0.0.0", 664));
-assertEquals(null, res[365].exec("** Failers", 665));
-assertEquals(null, res[365].exec("10.6", 666));
-assertEquals(null, res[365].exec("455.3.4.5", 667));
-assertEquals(null, res[365].exec("1.2.3.4", 668));
-assertEquals(null, res[365].exec("131.111.10.206", 669));
-assertEquals(null, res[365].exec("10.0.0.0", 670));
-assertEquals(null, res[365].exec("** Failers", 671));
-assertEquals(null, res[365].exec("10.6", 672));
-assertEquals(null, res[365].exec("455.3.4.5", 673));
-assertEquals(null, res[365].exec("123axbaxbaxbx456", 674));
-assertEquals(null, res[365].exec("123axbaxbaxb456", 675));
-assertEquals(null, res[365].exec("123axbaxbaxbx456", 676));
-assertEquals(null, res[365].exec("123axbaxbaxbx456", 677));
-assertEquals(null, res[365].exec("123axbaxbaxbx456", 678));
-assertEquals(null, res[366].exec("ababababbbabZXXXX", 679));
-assertEquals(null, res[372].exec("a\x0db", 680));
-assertEquals(null, res[372].exec("*** Failers", 681));
-assertEquals(null, res[372].exec("a\nb", 682));
-assertEquals("afoo", res[373].exec("afoo"), 683);
-assertEquals(null, res[373].exec("** Failers", 684));
-assertEquals(null, res[373].exec("\x0d\nfoo", 685));
-assertEquals(null, res[373].exec("\nfoo", 686));
-assertEquals("afoo", res[374].exec("afoo"), 687);
-assertEquals(null, res[374].exec("\nfoo", 688));
-assertEquals(null, res[374].exec("** Failers", 689));
-assertEquals(null, res[374].exec("\x0d\nfoo", 690));
-assertEquals("afoo", res[375].exec("afoo"), 691);
-assertEquals(null, res[375].exec("** Failers", 692));
-assertEquals(null, res[375].exec("\nfoo", 693));
-assertEquals(null, res[375].exec("\x0d\nfoo", 694));
-assertEquals("afoo", res[376].exec("afoo"), 695);
-assertEquals(null, res[376].exec("\x0d\nfoo", 696));
-assertEquals(null, res[376].exec("\nfoo", 697));
-assertEquals("", res[377].exec("abc\x0d\x0dxyz"), 698);
-assertEquals("", res[377].exec("abc\n\x0dxyz  "), 699);
-assertEquals(null, res[377].exec("** Failers ", 700));
-assertEquals("", res[377].exec("abc\x0d\nxyz"), 701);
-assertEquals("", res[377].exec("abc\x0d\n\x0d\n"), 702);
-assertEquals("", res[377].exec("abc\x0d\n\x0d\n"), 703);
-assertEquals("", res[377].exec("abc\x0d\n\x0d\n"), 704);
-assertEquals("abc1", res[378].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6\x85 abc9"), 705);
-assertEquals("X", res[379].exec("XABC"), 706);
-assertEquals(null, res[379].exec("** Failers ", 707));
-assertEquals("X", res[379].exec("XABCB"), 708);
+assertNull(res[353].exec("(xy)x", 587));
+assertNull(res[353].exec("1221", 588));
+assertNull(res[353].exec("Satan, oscillate my metallic sonatas!", 589));
+assertNull(res[353].exec("A man, a plan, a canal: Panama!", 590));
+assertNull(res[353].exec("Able was I ere I saw Elba.", 591));
+assertNull(res[353].exec("*** Failers", 592));
+assertNull(res[353].exec("The quick brown fox", 593));
+assertToStringEquals("abcd:,abcd", res[354].exec("abcd:"), 594);
+assertToStringEquals("abcd:,abcd", res[354].exec("abcd:"), 595);
+assertToStringEquals("a:,a", res[354].exec("a:aaxyz"), 596);
+assertToStringEquals("ab:,ab", res[354].exec("ab:ababxyz"), 597);
+assertNull(res[354].exec("** Failers", 598));
+assertToStringEquals("a:,a", res[354].exec("a:axyz"), 599);
+assertToStringEquals("ab:,ab", res[354].exec("ab:abxyz"), 600);
+assertNull(res[354].exec("abd", 601));
+assertNull(res[354].exec("ce", 602));
+assertNull(res[354].exec("abcabc1Xabc2XabcXabcabc", 603));
+assertNull(res[354].exec("abcabc1Xabc2XabcXabcabc", 604));
+assertNull(res[354].exec("abcabc1Xabc2XabcXabcabc", 605));
+assertNull(res[354].exec("abcd", 606));
+assertNull(res[354].exec("metcalfe 33", 607));
+assertNull(res[356].exec("a\x0db", 608));
+assertNull(res[356].exec("a\nb<cr>", 609));
+assertToStringEquals("a\x85b", res[356].exec("a\x85b<anycrlf> "), 610);
+assertNull(res[356].exec("** Failers", 611));
+assertNull(res[356].exec("a\nb", 612));
+assertNull(res[356].exec("a\nb<any>", 613));
+assertNull(res[356].exec("a\x0db<cr>", 614));
+assertNull(res[356].exec("a\x0db<any>", 615));
+assertToStringEquals("a\x85b", res[356].exec("a\x85b<any> "), 616);
+assertNull(res[356].exec("a\x0db<anycrlf>", 617));
+assertToStringEquals("abc1", res[357].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 \x85abc7 JUNK"), 618);
+assertToStringEquals("abc1", res[358].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6\x85 abc7 abc9"), 619);
+assertNull(res[361].exec("a\nb", 620));
+assertNull(res[361].exec("a\x0db", 621));
+assertNull(res[361].exec("a\x0d\nb", 622));
+assertNull(res[361].exec("a\x0bb", 623));
+assertNull(res[361].exec("a\x0cb", 624));
+assertNull(res[361].exec("a\x85b", 625));
+assertNull(res[361].exec("** Failers", 626));
+assertNull(res[361].exec("a\n\x0db", 627));
+assertToStringEquals("ab", res[362].exec("ab"), 628);
+assertNull(res[362].exec("a\nb", 629));
+assertNull(res[362].exec("a\x0db", 630));
+assertNull(res[362].exec("a\x0d\nb", 631));
+assertNull(res[362].exec("a\x0bb", 632));
+assertNull(res[362].exec("a\x0cb", 633));
+assertNull(res[362].exec("a\x85b", 634));
+assertNull(res[362].exec("a\n\x0db", 635));
+assertNull(res[362].exec("a\n\x0d\x85\x0cb", 636));
+assertNull(res[363].exec("a\nb", 637));
+assertNull(res[363].exec("a\x0db", 638));
+assertNull(res[363].exec("a\x0d\nb", 639));
+assertNull(res[363].exec("a\x0bb", 640));
+assertNull(res[363].exec("a\x0cb", 641));
+assertNull(res[363].exec("a\x85b", 642));
+assertNull(res[363].exec("a\n\x0db", 643));
+assertNull(res[363].exec("a\n\x0d\x85\x0cb", 644));
+assertNull(res[363].exec("** Failers", 645));
+assertNull(res[363].exec("ab", 646));
+assertNull(res[364].exec("a\nb", 647));
+assertNull(res[364].exec("a\n\x0db", 648));
+assertNull(res[364].exec("a\n\x0d\x85b", 649));
+assertNull(res[364].exec("a\x0d\n\x0d\nb", 650));
+assertNull(res[364].exec("a\x0d\n\x0d\n\x0d\nb", 651));
+assertNull(res[364].exec("a\n\x0d\n\x0db", 652));
+assertNull(res[364].exec("a\n\n\x0d\nb", 653));
+assertNull(res[364].exec("** Failers", 654));
+assertNull(res[364].exec("a\n\n\n\x0db", 655));
+assertNull(res[364].exec("a\x0d", 656));
+assertToStringEquals("aRb", res[365].exec("aRb"), 657);
+assertNull(res[365].exec("** Failers", 658));
+assertNull(res[365].exec("a\nb", 659));
+assertNull(res[365].exec("abcPXP123", 660));
+assertNull(res[365].exec("abcPXP123", 661));
+assertNull(res[365].exec("1.2.3.4", 662));
+assertNull(res[365].exec("131.111.10.206", 663));
+assertNull(res[365].exec("10.0.0.0", 664));
+assertNull(res[365].exec("** Failers", 665));
+assertNull(res[365].exec("10.6", 666));
+assertNull(res[365].exec("455.3.4.5", 667));
+assertNull(res[365].exec("1.2.3.4", 668));
+assertNull(res[365].exec("131.111.10.206", 669));
+assertNull(res[365].exec("10.0.0.0", 670));
+assertNull(res[365].exec("** Failers", 671));
+assertNull(res[365].exec("10.6", 672));
+assertNull(res[365].exec("455.3.4.5", 673));
+assertNull(res[365].exec("123axbaxbaxbx456", 674));
+assertNull(res[365].exec("123axbaxbaxb456", 675));
+assertNull(res[365].exec("123axbaxbaxbx456", 676));
+assertNull(res[365].exec("123axbaxbaxbx456", 677));
+assertNull(res[365].exec("123axbaxbaxbx456", 678));
+assertNull(res[366].exec("ababababbbabZXXXX", 679));
+assertNull(res[372].exec("a\x0db", 680));
+assertNull(res[372].exec("*** Failers", 681));
+assertNull(res[372].exec("a\nb", 682));
+assertToStringEquals("afoo", res[373].exec("afoo"), 683);
+assertNull(res[373].exec("** Failers", 684));
+assertNull(res[373].exec("\x0d\nfoo", 685));
+assertNull(res[373].exec("\nfoo", 686));
+assertToStringEquals("afoo", res[374].exec("afoo"), 687);
+assertNull(res[374].exec("\nfoo", 688));
+assertNull(res[374].exec("** Failers", 689));
+assertNull(res[374].exec("\x0d\nfoo", 690));
+assertToStringEquals("afoo", res[375].exec("afoo"), 691);
+assertNull(res[375].exec("** Failers", 692));
+assertNull(res[375].exec("\nfoo", 693));
+assertNull(res[375].exec("\x0d\nfoo", 694));
+assertToStringEquals("afoo", res[376].exec("afoo"), 695);
+assertNull(res[376].exec("\x0d\nfoo", 696));
+assertNull(res[376].exec("\nfoo", 697));
+assertToStringEquals("", res[377].exec("abc\x0d\x0dxyz"), 698);
+assertToStringEquals("", res[377].exec("abc\n\x0dxyz  "), 699);
+assertNull(res[377].exec("** Failers ", 700));
+assertToStringEquals("", res[377].exec("abc\x0d\nxyz"), 701);
+assertToStringEquals("", res[377].exec("abc\x0d\n\x0d\n"), 702);
+assertToStringEquals("", res[377].exec("abc\x0d\n\x0d\n"), 703);
+assertToStringEquals("", res[377].exec("abc\x0d\n\x0d\n"), 704);
+assertToStringEquals("abc1", res[378].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6\x85 abc9"), 705);
+assertToStringEquals("X", res[379].exec("XABC"), 706);
+assertNull(res[379].exec("** Failers ", 707));
+assertToStringEquals("X", res[379].exec("XABCB"), 708);
 assertThrows("var re = /(ab|c)(?-1)/;", 709);
-assertEquals(null, res[379].exec("abc", 710));
-assertEquals(null, res[379].exec("xyabcabc", 711));
-assertEquals(null, res[379].exec("** Failers", 712));
-assertEquals(null, res[379].exec("xyabc  ", 713));
+assertNull(res[379].exec("abc", 710));
+assertNull(res[379].exec("xyabcabc", 711));
+assertNull(res[379].exec("** Failers", 712));
+assertNull(res[379].exec("xyabc  ", 713));
 assertThrows("var re = /x(?-0)y/;", 714);
 assertThrows("var re = /x(?-1)y/;", 715);
-assertEquals(null, res[379].exec("abcX", 716));
-assertEquals(null, res[379].exec("Y", 717));
-assertEquals(null, res[379].exec("** Failers", 718));
-assertEquals(null, res[379].exec("abcY   ", 719));
-assertEquals(null, res[379].exec("YabcXabc", 720));
-assertEquals(null, res[379].exec("YabcXabcXabc", 721));
-assertEquals(null, res[379].exec("** Failers", 722));
-assertEquals("X", res[379].exec("XabcXabc  "), 723);
-assertEquals(null, res[379].exec("Y!", 724));
-assertEquals(null, res[380].exec("foobar", 725));
-assertEquals(null, res[381].exec("foobar", 726));
-assertEquals("foobaz,foo,baz", res[381].exec("foobaz "), 727);
-assertEquals(null, res[382].exec("foobarbaz", 728));
-assertEquals(null, res[382].exec("tom-tom", 729));
-assertEquals(null, res[382].exec("bon-bon ", 730));
-assertEquals(null, res[382].exec("** Failers", 731));
-assertEquals(null, res[382].exec("tom-bon  ", 732));
-assertEquals(null, res[382].exec("tom-tom", 733));
-assertEquals(null, res[382].exec("bon-bon ", 734));
+assertNull(res[379].exec("abcX", 716));
+assertNull(res[379].exec("Y", 717));
+assertNull(res[379].exec("** Failers", 718));
+assertNull(res[379].exec("abcY   ", 719));
+assertNull(res[379].exec("YabcXabc", 720));
+assertNull(res[379].exec("YabcXabcXabc", 721));
+assertNull(res[379].exec("** Failers", 722));
+assertToStringEquals("X", res[379].exec("XabcXabc  "), 723);
+assertNull(res[379].exec("Y!", 724));
+assertNull(res[380].exec("foobar", 725));
+assertNull(res[381].exec("foobar", 726));
+assertToStringEquals("foobaz,foo,baz", res[381].exec("foobaz "), 727);
+assertNull(res[382].exec("foobarbaz", 728));
+assertNull(res[382].exec("tom-tom", 729));
+assertNull(res[382].exec("bon-bon ", 730));
+assertNull(res[382].exec("** Failers", 731));
+assertNull(res[382].exec("tom-bon  ", 732));
+assertNull(res[382].exec("tom-tom", 733));
+assertNull(res[382].exec("bon-bon ", 734));
 assertThrows("var re = /(?|(abc)|(xyz))/;", 735);
 assertThrows("var re = /(x)(?|(abc)|(xyz))(x)/;", 736);
-assertEquals(null, res[383].exec("xabcx", 737));
-assertEquals(null, res[383].exec("xxyzx ", 738));
+assertNull(res[383].exec("xabcx", 737));
+assertNull(res[383].exec("xxyzx ", 738));
 assertThrows("var re = /(x)(?|(abc)(pqr)|(xyz))(x)/;", 739);
-assertEquals(null, res[383].exec("xabcpqrx", 740));
-assertEquals(null, res[383].exec("xxyzx ", 741));
+assertNull(res[383].exec("xabcpqrx", 740));
+assertNull(res[383].exec("xxyzx ", 741));
 assertThrows("var re = /(?|(abc)|(xyz))\\1/;", 742);
-assertEquals(null, res[383].exec("abcabc", 743));
-assertEquals(null, res[383].exec("xyzxyz ", 744));
-assertEquals(null, res[383].exec("** Failers", 745));
-assertEquals(null, res[383].exec("abcxyz", 746));
-assertEquals(null, res[383].exec("xyzabc   ", 747));
-assertEquals(null, res[383].exec("abcabc", 748));
-assertEquals(null, res[383].exec("xyzabc ", 749));
-assertEquals(null, res[383].exec("** Failers ", 750));
-assertEquals(null, res[383].exec("xyzxyz ", 751));
-assertEquals(null, res[384].exec("X X\n", 752));
-assertEquals(null, res[384].exec("X\x09X\x0b", 753));
-assertEquals(null, res[384].exec("** Failers", 754));
-assertEquals(null, res[384].exec("\xa0 X\n   ", 755));
-assertEquals(null, res[385].exec("\x09 \xa0X\n\x0b\x0c\x0d\n", 756));
-assertEquals(null, res[385].exec("\x09 \xa0\n\x0b\x0c\x0d\n", 757));
-assertEquals(null, res[385].exec("\x09 \xa0\n\x0b\x0c", 758));
-assertEquals(null, res[385].exec("** Failers ", 759));
-assertEquals(null, res[385].exec("\x09 \xa0\n\x0b", 760));
-assertEquals(null, res[385].exec(" ", 761));
-assertEquals(null, res[386].exec("XY  ABCDE", 762));
-assertEquals(null, res[386].exec("XY  PQR ST ", 763));
-assertEquals(null, res[387].exec("XY  AB    PQRS", 764));
-assertEquals(null, res[388].exec(">XNNNYZ", 765));
-assertEquals(null, res[388].exec(">  X NYQZ", 766));
-assertEquals(null, res[388].exec("** Failers", 767));
-assertEquals(null, res[388].exec(">XYZ   ", 768));
-assertEquals(null, res[388].exec(">  X NY Z", 769));
-assertEquals(null, res[389].exec(">XY\nZ\nA\x0bNN\x0c", 770));
-assertEquals(null, res[389].exec(">\n\x0dX\nY\n\x0bZZZ\nAAA\x0bNNN\x0c", 771));
-assertEquals(null, res[390].exec(">\x09<", 772));
-assertEquals(null, res[391].exec(">\x09 \xa0<", 773));
-assertEquals(null, res[396].exec("** Failers", 774));
-assertEquals(null, res[396].exec("XXXX", 775));
-assertEquals(null, res[397].exec("XXXX Y ", 776));
-assertEquals(null, res[419].exec("aaaaaa", 777));
-assertEquals(null, res[419].exec("aaabccc", 778));
-assertEquals(null, res[419].exec("aaabccc", 779));
-assertEquals(null, res[419].exec("aaabccc", 780));
-assertEquals(null, res[419].exec("aaabcccaaabccc", 781));
-assertEquals(null, res[419].exec("aaaxxxxxx", 782));
-assertEquals(null, res[419].exec("aaa++++++ ", 783));
-assertEquals(null, res[419].exec("bbbxxxxx", 784));
-assertEquals(null, res[419].exec("bbb+++++ ", 785));
-assertEquals(null, res[419].exec("cccxxxx", 786));
-assertEquals(null, res[419].exec("ccc++++ ", 787));
-assertEquals(null, res[419].exec("dddddddd   ", 788));
-assertEquals(null, res[419].exec("aaaxxxxxx", 789));
-assertEquals(null, res[419].exec("aaa++++++ ", 790));
-assertEquals(null, res[419].exec("bbbxxxxx", 791));
-assertEquals(null, res[419].exec("bbb+++++ ", 792));
-assertEquals(null, res[419].exec("cccxxxx", 793));
-assertEquals(null, res[419].exec("ccc++++ ", 794));
-assertEquals(null, res[419].exec("dddddddd   ", 795));
-assertEquals(null, res[419].exec("aaabccc", 796));
-assertEquals(null, res[419].exec("ABX", 797));
-assertEquals(null, res[419].exec("AADE", 798));
-assertEquals(null, res[419].exec("ACDE", 799));
-assertEquals(null, res[419].exec("** Failers", 800));
-assertEquals(null, res[419].exec("AD ", 801));
-assertEquals(null, res[419].exec("    ", 802));
-assertEquals(null, res[419].exec("aaaaaa", 803));
-assertEquals(null, res[419].exec("aaabccc", 804));
-assertEquals(null, res[419].exec("aaabccc", 805));
-assertEquals(null, res[419].exec("aaabccc", 806));
-assertEquals(null, res[419].exec("aaabcccaaabccc", 807));
-assertEquals(null, res[419].exec("aaabccc", 808));
-assertEquals(null, res[422].exec("\x0d\nA", 809));
-assertEquals("\nA", res[423].exec("\x0d\nA "), 810);
-assertEquals("\nA", res[424].exec("\x0d\nA "), 811);
-assertEquals("\nA,\n", res[425].exec("\x0d\nA "), 812);
-assertEquals(null, res[425].exec("a\nb", 813));
-assertEquals(null, res[425].exec("** Failers", 814));
-assertEquals(null, res[425].exec("a\x0db  ", 815));
-assertEquals(null, res[425].exec("a\nb", 816));
-assertEquals(null, res[425].exec("** Failers", 817));
-assertEquals(null, res[425].exec("a\x0db  ", 818));
-assertEquals(null, res[425].exec("a\x0db", 819));
-assertEquals(null, res[425].exec("** Failers", 820));
-assertEquals(null, res[425].exec("a\nb  ", 821));
-assertEquals(null, res[425].exec("a\x0db", 822));
-assertEquals(null, res[425].exec("a\nb  ", 823));
-assertEquals(null, res[425].exec("** Failers", 824));
-assertEquals(null, res[425].exec("a\x0d\nb  ", 825));
-assertEquals(null, res[425].exec("** Failers", 826));
-assertEquals(null, res[425].exec("a\x0db", 827));
-assertEquals(null, res[425].exec("a\nb  ", 828));
-assertEquals(null, res[425].exec("a\x0d\nb  ", 829));
-assertEquals(null, res[425].exec("** Failers", 830));
-assertEquals(null, res[425].exec("a\x0db", 831));
-assertEquals(null, res[425].exec("a\nb  ", 832));
-assertEquals(null, res[425].exec("a\x0d\nb  ", 833));
-assertEquals(null, res[425].exec("a\x85b ", 834));
-assertEquals(null, res[426].exec("a\x0db", 835));
-assertEquals(null, res[426].exec("a\nb", 836));
-assertEquals(null, res[426].exec("a\x0d\nb", 837));
-assertEquals(null, res[426].exec("** Failers", 838));
-assertEquals(null, res[426].exec("a\x85b", 839));
-assertEquals(null, res[426].exec("a\x0bb     ", 840));
-assertEquals(null, res[427].exec("a\x0db", 841));
-assertEquals(null, res[427].exec("a\nb", 842));
-assertEquals(null, res[427].exec("a\x0d\nb", 843));
-assertEquals(null, res[427].exec("a\x85b", 844));
-assertEquals(null, res[427].exec("a\x0bb     ", 845));
-assertEquals(null, res[427].exec("** Failers ", 846));
-assertEquals(null, res[427].exec("a\x85b<bsr_anycrlf>", 847));
-assertEquals(null, res[427].exec("a\x0bb<bsr_anycrlf>", 848));
-assertEquals(null, res[428].exec("a\x0db", 849));
-assertEquals(null, res[428].exec("a\nb", 850));
-assertEquals(null, res[428].exec("a\x0d\nb", 851));
-assertEquals(null, res[428].exec("** Failers", 852));
-assertEquals(null, res[428].exec("a\x85b", 853));
-assertEquals(null, res[428].exec("a\x0bb     ", 854));
-assertEquals(null, res[429].exec("a\x0db", 855));
-assertEquals(null, res[429].exec("a\nb", 856));
-assertEquals(null, res[429].exec("a\x0d\nb", 857));
-assertEquals(null, res[429].exec("a\x85b", 858));
-assertEquals(null, res[429].exec("a\x0bb     ", 859));
-assertEquals(null, res[429].exec("** Failers ", 860));
-assertEquals(null, res[429].exec("a\x85b<bsr_anycrlf>", 861));
-assertEquals(null, res[429].exec("a\x0bb<bsr_anycrlf>", 862));
-assertEquals(null, res[430].exec("a\x0d\n\nb", 863));
-assertEquals(null, res[430].exec("a\n\x0d\x0db", 864));
-assertEquals(null, res[430].exec("a\x0d\n\x0d\n\x0d\n\x0d\nb", 865));
-assertEquals(null, res[430].exec("** Failers", 866));
-assertEquals(null, res[430].exec("a\x8585b", 867));
-assertEquals(null, res[430].exec("a\x0b\x00bb     ", 868));
-assertEquals(null, res[431].exec("a\x0d\x0db", 869));
-assertEquals(null, res[431].exec("a\n\n\nb", 870));
-assertEquals(null, res[431].exec("a\x0d\n\n\x0d\x0db", 871));
-assertEquals(null, res[431].exec("a\x8585b", 872));
-assertEquals(null, res[431].exec("a\x0b\x00bb     ", 873));
-assertEquals(null, res[431].exec("** Failers ", 874));
-assertEquals(null, res[431].exec("a\x0d\x0d\x0d\x0d\x0db ", 875));
-assertEquals(null, res[431].exec("a\x8585b<bsr_anycrlf>", 876));
-assertEquals(null, res[431].exec("a\x0b\x00bb<bsr_anycrlf>", 877));
-assertEquals(null, res[431].exec("a\nb", 878));
-assertEquals(null, res[431].exec("a\x0db ", 879));
-assertEquals(null, res[431].exec("a\x85b", 880));
-assertEquals(null, res[431].exec("a\nb", 881));
-assertEquals(null, res[431].exec("a\x0db ", 882));
-assertEquals(null, res[431].exec("a\x85b", 883));
+assertNull(res[383].exec("abcabc", 743));
+assertNull(res[383].exec("xyzxyz ", 744));
+assertNull(res[383].exec("** Failers", 745));
+assertNull(res[383].exec("abcxyz", 746));
+assertNull(res[383].exec("xyzabc   ", 747));
+assertNull(res[383].exec("abcabc", 748));
+assertNull(res[383].exec("xyzabc ", 749));
+assertNull(res[383].exec("** Failers ", 750));
+assertNull(res[383].exec("xyzxyz ", 751));
+assertNull(res[384].exec("X X\n", 752));
+assertNull(res[384].exec("X\x09X\x0b", 753));
+assertNull(res[384].exec("** Failers", 754));
+assertNull(res[384].exec("\xa0 X\n   ", 755));
+assertNull(res[385].exec("\x09 \xa0X\n\x0b\x0c\x0d\n", 756));
+assertNull(res[385].exec("\x09 \xa0\n\x0b\x0c\x0d\n", 757));
+assertNull(res[385].exec("\x09 \xa0\n\x0b\x0c", 758));
+assertNull(res[385].exec("** Failers ", 759));
+assertNull(res[385].exec("\x09 \xa0\n\x0b", 760));
+assertNull(res[385].exec(" ", 761));
+assertNull(res[386].exec("XY  ABCDE", 762));
+assertNull(res[386].exec("XY  PQR ST ", 763));
+assertNull(res[387].exec("XY  AB    PQRS", 764));
+assertNull(res[388].exec(">XNNNYZ", 765));
+assertNull(res[388].exec(">  X NYQZ", 766));
+assertNull(res[388].exec("** Failers", 767));
+assertNull(res[388].exec(">XYZ   ", 768));
+assertNull(res[388].exec(">  X NY Z", 769));
+assertNull(res[389].exec(">XY\nZ\nA\x0bNN\x0c", 770));
+assertNull(res[389].exec(">\n\x0dX\nY\n\x0bZZZ\nAAA\x0bNNN\x0c", 771));
+assertNull(res[390].exec(">\x09<", 772));
+assertNull(res[391].exec(">\x09 \xa0<", 773));
+assertNull(res[396].exec("** Failers", 774));
+assertNull(res[396].exec("XXXX", 775));
+assertNull(res[397].exec("XXXX Y ", 776));
+assertNull(res[419].exec("aaaaaa", 777));
+assertNull(res[419].exec("aaabccc", 778));
+assertNull(res[419].exec("aaabccc", 779));
+assertNull(res[419].exec("aaabccc", 780));
+assertNull(res[419].exec("aaabcccaaabccc", 781));
+assertNull(res[419].exec("aaaxxxxxx", 782));
+assertNull(res[419].exec("aaa++++++ ", 783));
+assertNull(res[419].exec("bbbxxxxx", 784));
+assertNull(res[419].exec("bbb+++++ ", 785));
+assertNull(res[419].exec("cccxxxx", 786));
+assertNull(res[419].exec("ccc++++ ", 787));
+assertNull(res[419].exec("dddddddd   ", 788));
+assertNull(res[419].exec("aaaxxxxxx", 789));
+assertNull(res[419].exec("aaa++++++ ", 790));
+assertNull(res[419].exec("bbbxxxxx", 791));
+assertNull(res[419].exec("bbb+++++ ", 792));
+assertNull(res[419].exec("cccxxxx", 793));
+assertNull(res[419].exec("ccc++++ ", 794));
+assertNull(res[419].exec("dddddddd   ", 795));
+assertNull(res[419].exec("aaabccc", 796));
+assertNull(res[419].exec("ABX", 797));
+assertNull(res[419].exec("AADE", 798));
+assertNull(res[419].exec("ACDE", 799));
+assertNull(res[419].exec("** Failers", 800));
+assertNull(res[419].exec("AD ", 801));
+assertNull(res[419].exec("    ", 802));
+assertNull(res[419].exec("aaaaaa", 803));
+assertNull(res[419].exec("aaabccc", 804));
+assertNull(res[419].exec("aaabccc", 805));
+assertNull(res[419].exec("aaabccc", 806));
+assertNull(res[419].exec("aaabcccaaabccc", 807));
+assertNull(res[419].exec("aaabccc", 808));
+assertNull(res[422].exec("\x0d\nA", 809));
+assertToStringEquals("\nA", res[423].exec("\x0d\nA "), 810);
+assertToStringEquals("\nA", res[424].exec("\x0d\nA "), 811);
+assertToStringEquals("\nA,\n", res[425].exec("\x0d\nA "), 812);
+assertNull(res[425].exec("a\nb", 813));
+assertNull(res[425].exec("** Failers", 814));
+assertNull(res[425].exec("a\x0db  ", 815));
+assertNull(res[425].exec("a\nb", 816));
+assertNull(res[425].exec("** Failers", 817));
+assertNull(res[425].exec("a\x0db  ", 818));
+assertNull(res[425].exec("a\x0db", 819));
+assertNull(res[425].exec("** Failers", 820));
+assertNull(res[425].exec("a\nb  ", 821));
+assertNull(res[425].exec("a\x0db", 822));
+assertNull(res[425].exec("a\nb  ", 823));
+assertNull(res[425].exec("** Failers", 824));
+assertNull(res[425].exec("a\x0d\nb  ", 825));
+assertNull(res[425].exec("** Failers", 826));
+assertNull(res[425].exec("a\x0db", 827));
+assertNull(res[425].exec("a\nb  ", 828));
+assertNull(res[425].exec("a\x0d\nb  ", 829));
+assertNull(res[425].exec("** Failers", 830));
+assertNull(res[425].exec("a\x0db", 831));
+assertNull(res[425].exec("a\nb  ", 832));
+assertNull(res[425].exec("a\x0d\nb  ", 833));
+assertNull(res[425].exec("a\x85b ", 834));
+assertNull(res[426].exec("a\x0db", 835));
+assertNull(res[426].exec("a\nb", 836));
+assertNull(res[426].exec("a\x0d\nb", 837));
+assertNull(res[426].exec("** Failers", 838));
+assertNull(res[426].exec("a\x85b", 839));
+assertNull(res[426].exec("a\x0bb     ", 840));
+assertNull(res[427].exec("a\x0db", 841));
+assertNull(res[427].exec("a\nb", 842));
+assertNull(res[427].exec("a\x0d\nb", 843));
+assertNull(res[427].exec("a\x85b", 844));
+assertNull(res[427].exec("a\x0bb     ", 845));
+assertNull(res[427].exec("** Failers ", 846));
+assertNull(res[427].exec("a\x85b<bsr_anycrlf>", 847));
+assertNull(res[427].exec("a\x0bb<bsr_anycrlf>", 848));
+assertNull(res[428].exec("a\x0db", 849));
+assertNull(res[428].exec("a\nb", 850));
+assertNull(res[428].exec("a\x0d\nb", 851));
+assertNull(res[428].exec("** Failers", 852));
+assertNull(res[428].exec("a\x85b", 853));
+assertNull(res[428].exec("a\x0bb     ", 854));
+assertNull(res[429].exec("a\x0db", 855));
+assertNull(res[429].exec("a\nb", 856));
+assertNull(res[429].exec("a\x0d\nb", 857));
+assertNull(res[429].exec("a\x85b", 858));
+assertNull(res[429].exec("a\x0bb     ", 859));
+assertNull(res[429].exec("** Failers ", 860));
+assertNull(res[429].exec("a\x85b<bsr_anycrlf>", 861));
+assertNull(res[429].exec("a\x0bb<bsr_anycrlf>", 862));
+assertNull(res[430].exec("a\x0d\n\nb", 863));
+assertNull(res[430].exec("a\n\x0d\x0db", 864));
+assertNull(res[430].exec("a\x0d\n\x0d\n\x0d\n\x0d\nb", 865));
+assertNull(res[430].exec("** Failers", 866));
+assertNull(res[430].exec("a\x8585b", 867));
+assertNull(res[430].exec("a\x0b\x00bb     ", 868));
+assertNull(res[431].exec("a\x0d\x0db", 869));
+assertNull(res[431].exec("a\n\n\nb", 870));
+assertNull(res[431].exec("a\x0d\n\n\x0d\x0db", 871));
+assertNull(res[431].exec("a\x8585b", 872));
+assertNull(res[431].exec("a\x0b\x00bb     ", 873));
+assertNull(res[431].exec("** Failers ", 874));
+assertNull(res[431].exec("a\x0d\x0d\x0d\x0d\x0db ", 875));
+assertNull(res[431].exec("a\x8585b<bsr_anycrlf>", 876));
+assertNull(res[431].exec("a\x0b\x00bb<bsr_anycrlf>", 877));
+assertNull(res[431].exec("a\nb", 878));
+assertNull(res[431].exec("a\x0db ", 879));
+assertNull(res[431].exec("a\x85b", 880));
+assertNull(res[431].exec("a\nb", 881));
+assertNull(res[431].exec("a\x0db ", 882));
+assertNull(res[431].exec("a\x85b", 883));
 assertThrows("var re = /(?-+a)/;", 884);
-assertEquals(null, res[443].exec("aaaa", 885));
-assertEquals(null, res[443].exec("bacxxx", 886));
-assertEquals(null, res[443].exec("bbaccxxx ", 887));
-assertEquals(null, res[443].exec("bbbacccxx", 888));
-assertEquals(null, res[443].exec("aaaa", 889));
-assertEquals(null, res[443].exec("bacxxx", 890));
-assertEquals(null, res[443].exec("bbaccxxx ", 891));
-assertEquals(null, res[443].exec("bbbacccxx", 892));
-assertEquals("a,a", res[444].exec("aaaa"), 893);
-assertEquals(null, res[444].exec("bacxxx", 894));
-assertEquals(null, res[444].exec("bbaccxxx ", 895));
-assertEquals(null, res[444].exec("bbbacccxx", 896));
-assertEquals("a,a", res[445].exec("aaaa"), 897);
-assertEquals(null, res[445].exec("bacxxx", 898));
-assertEquals(null, res[445].exec("bbaccxxx ", 899));
-assertEquals(null, res[445].exec("bbbacccxx", 900));
-assertEquals("a,a", res[446].exec("aaaa"), 901);
-assertEquals(null, res[446].exec("bacxxx", 902));
-assertEquals(null, res[446].exec("bbaccxxx ", 903));
-assertEquals(null, res[446].exec("bbbacccxx", 904));
-assertEquals("a,a,a", res[447].exec("aaaa"), 905);
-assertEquals(null, res[447].exec("bacxxx", 906));
-assertEquals(null, res[447].exec("bbaccxxx ", 907));
-assertEquals(null, res[447].exec("bbbacccxx", 908));
-assertEquals(null, res[449].exec("bacxxx", 909));
-assertEquals(null, res[449].exec("XaaX", 910));
-assertEquals(null, res[449].exec("XAAX ", 911));
-assertEquals(null, res[449].exec("XaaX", 912));
-assertEquals(null, res[449].exec("** Failers ", 913));
-assertEquals(null, res[449].exec("XAAX ", 914));
-assertEquals(null, res[449].exec("XaaX", 915));
-assertEquals(null, res[449].exec("XAAX ", 916));
-assertEquals(null, res[449].exec("xzxx", 917));
-assertEquals(null, res[449].exec("yzyy ", 918));
-assertEquals(null, res[449].exec("** Failers", 919));
-assertEquals(null, res[449].exec("xxz  ", 920));
-assertEquals("a,,,a", res[450].exec("cat"), 921);
-assertEquals("a,,,a", res[451].exec("cat"), 922);
-assertEquals("TA]", res[452].exec("The ACTA] comes "), 923);
-assertEquals("TA]", res[453].exec("The ACTA] comes "), 924);
-assertEquals(null, res[453].exec("abcbabc", 925));
-assertEquals(null, res[453].exec("abcbabc", 926));
-assertEquals(null, res[453].exec("abcbabc", 927));
-assertEquals(null, res[453].exec("** Failers ", 928));
-assertEquals(null, res[453].exec("abcXabc", 929));
-assertEquals(null, res[453].exec("abcXabc", 930));
-assertEquals(null, res[453].exec("** Failers ", 931));
-assertEquals(null, res[453].exec("abcbabc", 932));
-assertEquals(null, res[453].exec("xyzbabcxyz", 933));
-assertEquals(null, res[456].exec("** Failers", 934));
-assertEquals(null, res[456].exec("ab", 935));
-assertEquals(null, res[457].exec("** Failers", 936));
-assertEquals(null, res[457].exec("ab ", 937));
-assertEquals(null, res[457].exec("** Failers", 938));
-assertEquals(null, res[457].exec("ab ", 939));
-assertEquals("aXb", res[458].exec("aXb"), 940);
-assertEquals("a\nb", res[458].exec("a\nb "), 941);
-assertEquals(null, res[458].exec("** Failers", 942));
-assertEquals(null, res[458].exec("ab  ", 943));
-assertEquals("aXb", res[459].exec("aXb"), 944);
-assertEquals("a\nX\nXb", res[459].exec("a\nX\nXb "), 945);
-assertEquals(null, res[459].exec("** Failers", 946));
-assertEquals(null, res[459].exec("ab  ", 947));
-assertEquals("acb", res[463].exec("acb"), 948);
-assertEquals("ab", res[463].exec("ab"), 949);
-assertEquals(null, res[463].exec("ax{100}b ", 950));
-assertEquals(null, res[463].exec("*** Failers", 951));
-assertEquals(null, res[463].exec("a\nb  ", 952));
-assertEquals(null, res[464].exec("ax{4000}xyb ", 953));
-assertEquals(null, res[464].exec("ax{4000}yb ", 954));
-assertEquals(null, res[464].exec("ax{4000}x{100}yb ", 955));
-assertEquals(null, res[464].exec("*** Failers", 956));
-assertEquals(null, res[464].exec("ax{4000}b ", 957));
-assertEquals(null, res[464].exec("ac\ncb ", 958));
-assertEquals("a\xc0,,\xc0", res[465].exec("a\xc0\x88b"), 959);
-assertEquals("ax,,x", res[466].exec("ax{100}b"), 960);
-assertEquals("a\xc0\x88b,\xc0\x88,b", res[467].exec("a\xc0\x88b"), 961);
-assertEquals("ax{100}b,x{100},b", res[468].exec("ax{100}b"), 962);
-assertEquals("a\xc0\x92,\xc0,\x92", res[469].exec("a\xc0\x92bcd"), 963);
-assertEquals("ax{,x,{", res[470].exec("ax{240}bcd"), 964);
-assertEquals("a\xc0\x92,\xc0,\x92", res[471].exec("a\xc0\x92bcd"), 965);
-assertEquals("ax{,x,{", res[472].exec("ax{240}bcd"), 966);
-assertEquals("a\xc0,,\xc0", res[473].exec("a\xc0\x92bcd"), 967);
-assertEquals("ax,,x", res[474].exec("ax{240}bcd"), 968);
-assertEquals(null, res[475].exec("ax{1234}xyb ", 969));
-assertEquals(null, res[475].exec("ax{1234}x{4321}yb ", 970));
-assertEquals(null, res[475].exec("ax{1234}x{4321}x{3412}b ", 971));
-assertEquals(null, res[475].exec("*** Failers", 972));
-assertEquals(null, res[475].exec("ax{1234}b ", 973));
-assertEquals(null, res[475].exec("ac\ncb ", 974));
-assertEquals("ax{1234}xyb,x{1234}xy", res[476].exec("ax{1234}xyb "), 975);
-assertEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[476].exec("ax{1234}x{4321}yb "), 976);
-assertEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[476].exec("ax{1234}x{4321}x{3412}b "), 977);
-assertEquals("axxxxbcdefghijb,xxxxbcdefghij", res[476].exec("axxxxbcdefghijb "), 978);
-assertEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[476].exec("ax{1234}x{4321}x{3412}x{3421}b "), 979);
-assertEquals(null, res[476].exec("*** Failers", 980));
-assertEquals("ax{1234}b,x{1234}", res[476].exec("ax{1234}b "), 981);
-assertEquals("ax{1234}xyb,x{1234}xy", res[477].exec("ax{1234}xyb "), 982);
-assertEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[477].exec("ax{1234}x{4321}yb "), 983);
-assertEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[477].exec("ax{1234}x{4321}x{3412}b "), 984);
-assertEquals("axxxxb,xxxx", res[477].exec("axxxxbcdefghijb "), 985);
-assertEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[477].exec("ax{1234}x{4321}x{3412}x{3421}b "), 986);
-assertEquals(null, res[477].exec("*** Failers", 987));
-assertEquals("ax{1234}b,x{1234}", res[477].exec("ax{1234}b "), 988);
-assertEquals(null, res[478].exec("ax{1234}xyb ", 989));
-assertEquals(null, res[478].exec("ax{1234}x{4321}yb ", 990));
-assertEquals(null, res[478].exec("ax{1234}x{4321}x{3412}b ", 991));
-assertEquals("axxxxb,xxxx", res[478].exec("axxxxbcdefghijb "), 992);
-assertEquals(null, res[478].exec("ax{1234}x{4321}x{3412}x{3421}b ", 993));
-assertEquals("axbxxb,xbxx", res[478].exec("axbxxbcdefghijb "), 994);
-assertEquals("axxxxxb,xxxxx", res[478].exec("axxxxxbcdefghijb "), 995);
-assertEquals(null, res[478].exec("*** Failers", 996));
-assertEquals(null, res[478].exec("ax{1234}b ", 997));
-assertEquals(null, res[478].exec("axxxxxxbcdefghijb ", 998));
-assertEquals(null, res[479].exec("ax{1234}xyb ", 999));
-assertEquals(null, res[479].exec("ax{1234}x{4321}yb ", 1000));
-assertEquals(null, res[479].exec("ax{1234}x{4321}x{3412}b ", 1001));
-assertEquals("axxxxb,xxxx", res[479].exec("axxxxbcdefghijb "), 1002);
-assertEquals(null, res[479].exec("ax{1234}x{4321}x{3412}x{3421}b ", 1003));
-assertEquals("axbxxb,xbxx", res[479].exec("axbxxbcdefghijb "), 1004);
-assertEquals("axxxxxb,xxxxx", res[479].exec("axxxxxbcdefghijb "), 1005);
-assertEquals(null, res[479].exec("*** Failers", 1006));
-assertEquals(null, res[479].exec("ax{1234}b ", 1007));
-assertEquals(null, res[479].exec("axxxxxxbcdefghijb ", 1008));
-assertEquals(null, res[479].exec("*** Failers", 1009));
-assertEquals(null, res[479].exec("x{100}", 1010));
-assertEquals(null, res[479].exec("aXbcd", 1011));
-assertEquals(null, res[479].exec("ax{100}bcd", 1012));
-assertEquals(null, res[479].exec("ax{100000}bcd", 1013));
-assertEquals(null, res[479].exec("x{100}x{100}x{100}b", 1014));
-assertEquals(null, res[479].exec("*** Failers ", 1015));
-assertEquals(null, res[479].exec("x{100}x{100}b", 1016));
-assertEquals(null, res[479].exec("x{ab} ", 1017));
-assertEquals(null, res[479].exec("\xc2\xab", 1018));
-assertEquals(null, res[479].exec("*** Failers ", 1019));
-assertEquals(null, res[479].exec("\x00{ab}", 1020));
-assertEquals(null, res[479].exec("WXYZ", 1021));
-assertEquals(null, res[479].exec("x{256}XYZ ", 1022));
-assertEquals(null, res[479].exec("*** Failers", 1023));
-assertEquals(null, res[479].exec("XYZ ", 1024));
-assertEquals(null, res[480].exec("Xx{1234}", 1025));
-assertEquals(null, res[481].exec("Xx{1234}YZ", 1026));
-assertEquals("X", res[482].exec("XYZabcdce"), 1027);
-assertEquals("X", res[483].exec("XYZabcde"), 1028);
-assertEquals(null, res[484].exec("Xabcdefg   ", 1029));
-assertEquals(null, res[484].exec("Xx{1234} ", 1030));
-assertEquals(null, res[484].exec("Xx{1234}YZ", 1031));
-assertEquals(null, res[484].exec("Xx{1234}x{512}  ", 1032));
-assertEquals(null, res[484].exec("Xx{1234}x{512}YZ", 1033));
-assertEquals(null, res[485].exec("Xabcdefg   ", 1034));
-assertEquals(null, res[485].exec("Xx{1234} ", 1035));
-assertEquals(null, res[485].exec("Xx{1234}YZ", 1036));
-assertEquals(null, res[485].exec("Xx{1234}x{512}  ", 1037));
-assertEquals("bcd", res[486].exec("bcd"), 1038);
-assertEquals("00}", res[486].exec("x{100}aYx{256}Z "), 1039);
-assertEquals("x{", res[487].exec("x{100}bc"), 1040);
-assertEquals("x{100}bcA", res[488].exec("x{100}bcAa"), 1041);
-assertEquals("x{", res[489].exec("x{100}bca"), 1042);
-assertEquals("bcd", res[490].exec("bcd"), 1043);
-assertEquals("00}", res[490].exec("x{100}aYx{256}Z "), 1044);
-assertEquals("x{", res[491].exec("x{100}bc"), 1045);
-assertEquals("x{100}bc", res[492].exec("x{100}bcAa"), 1046);
-assertEquals("x{", res[493].exec("x{100}bca"), 1047);
-assertEquals(null, res[493].exec("abcd", 1048));
-assertEquals(null, res[493].exec("abcd", 1049));
-assertEquals("x{", res[493].exec("x{100}x{100} "), 1050);
-assertEquals("x{", res[493].exec("x{100}x{100} "), 1051);
-assertEquals("x{", res[493].exec("x{100}x{100}x{100}x{100} "), 1052);
-assertEquals(null, res[493].exec("abce", 1053));
-assertEquals("x{", res[493].exec("x{100}x{100}x{100}x{100} "), 1054);
-assertEquals(null, res[493].exec("abcdx{100}x{100}x{100}x{100} ", 1055));
-assertEquals(null, res[493].exec("abcdx{100}x{100}x{100}x{100} ", 1056));
-assertEquals(null, res[493].exec("abcdx{100}x{100}x{100}x{100} ", 1057));
-assertEquals(null, res[493].exec("abcdx{100}x{100}x{100}XX", 1058));
-assertEquals(null, res[493].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 1059));
-assertEquals(null, res[493].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 1060));
-assertEquals("Xy", res[493].exec("Xyyyax{100}x{100}bXzzz"), 1061);
-assertEquals("X", res[496].exec("1X2"), 1062);
-assertEquals("x", res[496].exec("1x{100}2 "), 1063);
-assertEquals(">X", res[497].exec("> >X Y"), 1064);
-assertEquals(">x", res[497].exec("> >x{100} Y"), 1065);
-assertEquals("1", res[498].exec("x{100}3"), 1066);
-assertEquals(" ", res[499].exec("x{100} X"), 1067);
-assertEquals("abcd", res[500].exec("12abcd34"), 1068);
-assertEquals("*** Failers", res[500].exec("*** Failers"), 1069);
-assertEquals("  ", res[500].exec("1234  "), 1070);
-assertEquals("abc", res[501].exec("12abcd34"), 1071);
-assertEquals("ab", res[501].exec("12ab34"), 1072);
-assertEquals("***", res[501].exec("*** Failers  "), 1073);
-assertEquals(null, res[501].exec("1234", 1074));
-assertEquals("  ", res[501].exec("12a34  "), 1075);
-assertEquals("ab", res[502].exec("12abcd34"), 1076);
-assertEquals("ab", res[502].exec("12ab34"), 1077);
-assertEquals("**", res[502].exec("*** Failers  "), 1078);
-assertEquals(null, res[502].exec("1234", 1079));
-assertEquals("  ", res[502].exec("12a34  "), 1080);
-assertEquals("12", res[503].exec("12abcd34"), 1081);
-assertEquals(null, res[503].exec("*** Failers", 1082));
-assertEquals("12", res[504].exec("12abcd34"), 1083);
-assertEquals("123", res[504].exec("1234abcd"), 1084);
-assertEquals(null, res[504].exec("*** Failers  ", 1085));
-assertEquals(null, res[504].exec("1.4 ", 1086));
-assertEquals("12", res[505].exec("12abcd34"), 1087);
-assertEquals("12", res[505].exec("1234abcd"), 1088);
-assertEquals(null, res[505].exec("*** Failers  ", 1089));
-assertEquals(null, res[505].exec("1.4 ", 1090));
-assertEquals("12abcd34", res[506].exec("12abcd34"), 1091);
-assertEquals("***", res[506].exec("*** Failers"), 1092);
-assertEquals(null, res[506].exec("     ", 1093));
-assertEquals("12a", res[507].exec("12abcd34"), 1094);
-assertEquals("123", res[507].exec("1234abcd"), 1095);
-assertEquals("***", res[507].exec("*** Failers"), 1096);
-assertEquals(null, res[507].exec("       ", 1097));
-assertEquals("12", res[508].exec("12abcd34"), 1098);
-assertEquals("12", res[508].exec("1234abcd"), 1099);
-assertEquals("**", res[508].exec("*** Failers"), 1100);
-assertEquals(null, res[508].exec("       ", 1101));
-assertEquals(">      <", res[509].exec("12>      <34"), 1102);
-assertEquals(null, res[509].exec("*** Failers", 1103));
-assertEquals(">  <", res[510].exec("ab>  <cd"), 1104);
-assertEquals(">   <", res[510].exec("ab>   <ce"), 1105);
-assertEquals(null, res[510].exec("*** Failers", 1106));
-assertEquals(null, res[510].exec("ab>    <cd ", 1107));
-assertEquals(">  <", res[511].exec("ab>  <cd"), 1108);
-assertEquals(">   <", res[511].exec("ab>   <ce"), 1109);
-assertEquals(null, res[511].exec("*** Failers", 1110));
-assertEquals(null, res[511].exec("ab>    <cd ", 1111));
-assertEquals("12", res[512].exec("12      34"), 1112);
-assertEquals("Failers", res[512].exec("*** Failers"), 1113);
-assertEquals(null, res[512].exec("+++=*! ", 1114));
-assertEquals("ab", res[513].exec("ab  cd"), 1115);
-assertEquals("abc", res[513].exec("abcd ce"), 1116);
-assertEquals("Fai", res[513].exec("*** Failers"), 1117);
-assertEquals(null, res[513].exec("a.b.c", 1118));
-assertEquals("ab", res[514].exec("ab  cd"), 1119);
-assertEquals("ab", res[514].exec("abcd ce"), 1120);
-assertEquals("Fa", res[514].exec("*** Failers"), 1121);
-assertEquals(null, res[514].exec("a.b.c", 1122));
-assertEquals("====", res[515].exec("12====34"), 1123);
-assertEquals("*** ", res[515].exec("*** Failers"), 1124);
-assertEquals(" ", res[515].exec("abcd "), 1125);
-assertEquals("===", res[516].exec("ab====cd"), 1126);
-assertEquals("==", res[516].exec("ab==cd"), 1127);
-assertEquals("***", res[516].exec("*** Failers"), 1128);
-assertEquals(null, res[516].exec("a.b.c", 1129));
-assertEquals("==", res[517].exec("ab====cd"), 1130);
-assertEquals("==", res[517].exec("ab==cd"), 1131);
-assertEquals("**", res[517].exec("*** Failers"), 1132);
-assertEquals(null, res[517].exec("a.b.c", 1133));
-assertEquals(null, res[517].exec("x{100}", 1134));
-assertEquals(null, res[517].exec("Zx{100}", 1135));
-assertEquals(null, res[517].exec("x{100}Z", 1136));
-assertEquals("**", res[517].exec("*** Failers "), 1137);
-assertEquals(null, res[517].exec("Zx{100}", 1138));
-assertEquals(null, res[517].exec("x{100}", 1139));
-assertEquals(null, res[517].exec("x{100}Z", 1140));
-assertEquals("**", res[517].exec("*** Failers "), 1141);
-assertEquals(null, res[517].exec("abcx{200}X", 1142));
-assertEquals(null, res[517].exec("abcx{100}X ", 1143));
-assertEquals("**", res[517].exec("*** Failers"), 1144);
-assertEquals("  ", res[517].exec("X  "), 1145);
-assertEquals(null, res[517].exec("abcx{200}X", 1146));
-assertEquals(null, res[517].exec("abcx{100}X ", 1147));
-assertEquals(null, res[517].exec("abQX ", 1148));
-assertEquals("**", res[517].exec("*** Failers"), 1149);
-assertEquals("  ", res[517].exec("X  "), 1150);
-assertEquals(null, res[517].exec("abcx{100}x{200}x{100}X", 1151));
-assertEquals("**", res[517].exec("*** Failers"), 1152);
-assertEquals(null, res[517].exec("abcx{200}X", 1153));
-assertEquals("  ", res[517].exec("X  "), 1154);
-assertEquals(null, res[517].exec("AX", 1155));
-assertEquals(null, res[517].exec("x{150}X", 1156));
-assertEquals(null, res[517].exec("x{500}X ", 1157));
-assertEquals("**", res[517].exec("*** Failers"), 1158);
-assertEquals(null, res[517].exec("x{100}X", 1159));
-assertEquals("  ", res[517].exec("x{200}X   "), 1160);
-assertEquals(null, res[517].exec("AX", 1161));
-assertEquals(null, res[517].exec("x{150}X", 1162));
-assertEquals(null, res[517].exec("x{500}X ", 1163));
-assertEquals("**", res[517].exec("*** Failers"), 1164);
-assertEquals(null, res[517].exec("x{100}X", 1165));
-assertEquals("  ", res[517].exec("x{200}X   "), 1166);
-assertEquals(null, res[517].exec("QX ", 1167));
-assertEquals(null, res[517].exec("AX", 1168));
-assertEquals(null, res[517].exec("x{500}X ", 1169));
-assertEquals("**", res[517].exec("*** Failers"), 1170);
-assertEquals(null, res[517].exec("x{100}X", 1171));
-assertEquals(null, res[517].exec("x{150}X", 1172));
-assertEquals("  ", res[517].exec("x{200}X   "), 1173);
-assertEquals(null, res[518].exec("aXb", 1174));
-assertEquals(null, res[518].exec("a\nb", 1175));
-assertEquals(null, res[519].exec("aXb", 1176));
-assertEquals(null, res[519].exec("a\nb", 1177));
-assertEquals(null, res[519].exec("*** Failers ", 1178));
-assertEquals(null, res[519].exec("ax{100}b ", 1179));
-assertEquals(null, res[519].exec("z", 1180));
-assertEquals(null, res[519].exec("Z ", 1181));
-assertEquals(null, res[519].exec("x{100}", 1182));
-assertEquals(null, res[519].exec("*** Failers", 1183));
-assertEquals(null, res[519].exec("x{102}", 1184));
-assertEquals(null, res[519].exec("y    ", 1185));
-assertEquals("\xff", res[520].exec(">\xff<"), 1186);
-assertEquals(null, res[521].exec(">x{ff}<", 1187));
-assertEquals("X", res[522].exec("XYZ"), 1188);
-assertEquals("X", res[523].exec("XYZ"), 1189);
-assertEquals("x", res[523].exec("x{123} "), 1190);
-assertEquals(",", res[528].exec("catac"), 1191);
-assertEquals(",", res[528].exec("ax{256}a "), 1192);
-assertEquals(",", res[528].exec("x{85}"), 1193);
-assertEquals(",", res[528].exec("\u1234 "), 1194);
-assertEquals(",", res[528].exec("\u1234 "), 1195);
-assertEquals(",", res[528].exec("abcdefg"), 1196);
-assertEquals(",", res[528].exec("ab"), 1197);
-assertEquals(",", res[528].exec("a "), 1198);
-assertEquals("Ax", res[529].exec("Ax{a3}BC"), 1199);
-assertEquals("Ax", res[530].exec("Ax{a3}BC"), 1200);
-assertEquals("}=", res[531].exec("+x{a3}== "), 1201);
-assertEquals("}=", res[532].exec("+x{a3}== "), 1202);
-assertEquals("x", res[533].exec("x{442}x{435}x{441}x{442}"), 1203);
-assertEquals("x", res[534].exec("x{442}x{435}x{441}x{442}"), 1204);
-assertEquals("x", res[535].exec("x{442}x{435}x{441}x{442}"), 1205);
-assertEquals("x", res[536].exec("x{442}x{435}x{441}x{442}"), 1206);
-assertEquals("{", res[537].exec("x{2442}x{2435}x{2441}x{2442}"), 1207);
-assertEquals("{", res[538].exec("x{2442}x{2435}x{2441}x{2442}"), 1208);
-assertEquals("abc\n\x0dx{442}x{435}x{441}x{442}xyz ", res[539].exec("abc\n\x0dx{442}x{435}x{441}x{442}xyz "), 1209);
-assertEquals("x{442}x{435}x{441}x{442}", res[539].exec("x{442}x{435}x{441}x{442}"), 1210);
-assertEquals("c d", res[540].exec("abc defx{442}x{443}xyz\npqr"), 1211);
-assertEquals("c d", res[541].exec("abc defx{442}x{443}xyz\npqr"), 1212);
-assertEquals(null, res[542].exec("+x{2442}", 1213));
-assertEquals(null, res[543].exec("+x{2442}", 1214));
-assertEquals(null, res[544].exec("Ax{442}", 1215));
-assertEquals(null, res[545].exec("Ax{442}", 1216));
-assertEquals(null, res[546].exec("Ax{442}", 1217));
-assertEquals(null, res[547].exec("Ax{442}", 1218));
-assertEquals(null, res[548].exec("\x19x{e01ff}", 1219));
-assertEquals(null, res[549].exec("Ax{422}", 1220));
-assertEquals(null, res[550].exec("x{19}x{e01ff}", 1221));
-assertEquals(null, res[551].exec("Ax{442}", 1222));
-assertEquals(null, res[552].exec("Ax{442}", 1223));
-assertEquals(null, res[553].exec("ax{442}", 1224));
-assertEquals(null, res[554].exec("+x{2442}", 1225));
-assertEquals(null, res[555].exec("Mx{442}", 1226));
-assertEquals("abc", res[556].exec("abc"), 1227);
-assertEquals("abc", res[557].exec("abc"), 1228);
-assertEquals("abc", res[558].exec("abc"), 1229);
-assertEquals("abc", res[559].exec("abc"), 1230);
-assertEquals(null, res[560].exec("x{100}ax{1234}bcd", 1231));
-assertEquals(null, res[562].exec("x{0041}x{2262}x{0391}x{002e}", 1232));
-assertEquals(null, res[562].exec("x{D55c}x{ad6d}x{C5B4} ", 1233));
-assertEquals(null, res[562].exec("x{65e5}x{672c}x{8a9e}", 1234));
-assertEquals("{861}X", res[563].exec("x{212ab}x{212ab}x{212ab}x{861}X"), 1235);
-assertEquals("x{2", res[564].exec("x{212ab}x{212ab}x{212ab}x{861}"), 1236);
-assertEquals("x{c", res[564].exec("x{c0}b"), 1237);
-assertEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1238);
-assertEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1239);
-assertEquals("ax{", res[564].exec("ax{c0}ax{c0}aaa/ "), 1240);
-assertEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1241);
-assertEquals("ax{", res[564].exec("ax{c0}ax{c0}aaa/ "), 1242);
-assertEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1243);
-assertEquals("ax{", res[564].exec("ax{c0}ax{c0}aaa/ "), 1244);
-assertEquals("Sho", res[564].exec("Should produce an error diagnostic"), 1245);
-assertEquals(null, res[565].exec("Xx{1234}", 1246));
-assertEquals(null, res[565].exec("X\nabc ", 1247));
-assertEquals("b", res[566].exec("bar"), 1248);
-assertEquals(null, res[566].exec("*** Failers", 1249));
-assertEquals(null, res[566].exec("c", 1250));
-assertEquals(null, res[566].exec("x{ff}", 1251));
-assertEquals(null, res[566].exec("x{100}  ", 1252));
-assertEquals("c", res[567].exec("c"), 1253);
-assertEquals("x", res[567].exec("x{ff}"), 1254);
-assertEquals("x", res[567].exec("x{100}  "), 1255);
-assertEquals("*", res[567].exec("*** Failers "), 1256);
-assertEquals(null, res[567].exec("aaa", 1257));
-assertEquals("x", res[568].exec("x{f1}"), 1258);
-assertEquals("x", res[568].exec("x{bf}"), 1259);
-assertEquals("x", res[568].exec("x{100}"), 1260);
-assertEquals("x", res[568].exec("x{1000}   "), 1261);
-assertEquals("*", res[568].exec("*** Failers"), 1262);
-assertEquals("x", res[568].exec("x{c0} "), 1263);
-assertEquals("x", res[568].exec("x{f0} "), 1264);
-assertEquals("1", res[568].exec("1234"), 1265);
-assertEquals("\"", res[568].exec("\"1234\" "), 1266);
-assertEquals("x", res[568].exec("x{100}1234"), 1267);
-assertEquals("\"", res[568].exec("\"x{100}1234\"  "), 1268);
-assertEquals("x", res[568].exec("x{100}x{100}12ab "), 1269);
-assertEquals("x", res[568].exec("x{100}x{100}\"12\" "), 1270);
-assertEquals("*", res[568].exec("*** Failers "), 1271);
-assertEquals("x", res[568].exec("x{100}x{100}abcd"), 1272);
-assertEquals("A", res[568].exec("A"), 1273);
-assertEquals("x", res[568].exec("x{100}"), 1274);
-assertEquals("Z", res[568].exec("Zx{100}"), 1275);
-assertEquals("x", res[568].exec("x{100}Z"), 1276);
-assertEquals("*", res[568].exec("*** Failers "), 1277);
-assertEquals("Z", res[568].exec("Zx{100}"), 1278);
-assertEquals("x", res[568].exec("x{100}"), 1279);
-assertEquals("x", res[568].exec("x{100}Z"), 1280);
-assertEquals("*", res[568].exec("*** Failers "), 1281);
-assertEquals("x", res[568].exec("x{100}"), 1282);
-assertEquals("x", res[568].exec("x{104}"), 1283);
-assertEquals("*", res[568].exec("*** Failers"), 1284);
-assertEquals("x", res[568].exec("x{105}"), 1285);
-assertEquals("x", res[568].exec("x{ff}    "), 1286);
-assertEquals("x", res[568].exec("x{100}"), 1287);
-assertEquals("\u0100", res[568].exec("\u0100 "), 1288);
-assertEquals("\xff", res[569].exec(">\xff<"), 1289);
-assertEquals(null, res[570].exec(">x{ff}<", 1290));
-assertEquals("\xd6", res[572].exec("\xd6 # Matches without Study"), 1291);
-assertEquals("x", res[572].exec("x{d6}"), 1292);
-assertEquals("\xd6", res[572].exec("\xd6 <-- Same with Study"), 1293);
-assertEquals("x", res[572].exec("x{d6}"), 1294);
-assertEquals("\xd6", res[572].exec("\xd6 # Matches without Study"), 1295);
-assertEquals("x", res[572].exec("x{d6} "), 1296);
-assertEquals("\xd6", res[572].exec("\xd6 <-- Same with Study"), 1297);
-assertEquals("x", res[572].exec("x{d6} "), 1298);
-assertEquals("\ufffd", res[572].exec("\ufffd]"), 1299);
-assertEquals("\ufffd", res[572].exec("\ufffd"), 1300);
-assertEquals("\ufffd", res[572].exec("\ufffd\ufffd\ufffd"), 1301);
-assertEquals("\ufffd", res[572].exec("\ufffd\ufffd\ufffd?"), 1302);
-assertEquals(null, res[573].exec("\xc0\x80", 1303));
-assertEquals(null, res[573].exec("\xc1\x8f ", 1304));
-assertEquals(null, res[573].exec("\xe0\x9f\x80", 1305));
-assertEquals(null, res[573].exec("\xf0\x8f\x80\x80 ", 1306));
-assertEquals(null, res[573].exec("\xf8\x87\x80\x80\x80  ", 1307));
-assertEquals(null, res[573].exec("\xfc\x83\x80\x80\x80\x80", 1308));
-assertEquals(null, res[573].exec("\xfe\x80\x80\x80\x80\x80  ", 1309));
-assertEquals(null, res[573].exec("\xff\x80\x80\x80\x80\x80  ", 1310));
-assertEquals(null, res[573].exec("\xc3\x8f", 1311));
-assertEquals(null, res[573].exec("\xe0\xaf\x80", 1312));
-assertEquals(null, res[573].exec("\xe1\x80\x80", 1313));
-assertEquals(null, res[573].exec("\xf0\x9f\x80\x80 ", 1314));
-assertEquals(null, res[573].exec("\xf1\x8f\x80\x80 ", 1315));
-assertEquals(null, res[573].exec("\xf8\x88\x80\x80\x80  ", 1316));
-assertEquals(null, res[573].exec("\xf9\x87\x80\x80\x80  ", 1317));
-assertEquals(null, res[573].exec("\xfc\x84\x80\x80\x80\x80", 1318));
-assertEquals(null, res[573].exec("\xfd\x83\x80\x80\x80\x80", 1319));
-assertEquals(null, res[573].exec("?\xf8\x88\x80\x80\x80  ", 1320));
-assertEquals(null, res[573].exec("?\xf9\x87\x80\x80\x80  ", 1321));
-assertEquals(null, res[573].exec("?\xfc\x84\x80\x80\x80\x80", 1322));
-assertEquals(null, res[573].exec("?\xfd\x83\x80\x80\x80\x80", 1323));
-assertEquals(".", res[574].exec("A.B"), 1324);
-assertEquals("{", res[574].exec("Ax{100}B "), 1325);
-assertEquals("x", res[575].exec("x{100}X   "), 1326);
-assertEquals("a", res[575].exec("ax{1234}b"), 1327);
-assertEquals(null, res[577].exec("AxxB     ", 1328));
-assertEquals("abc1", res[578].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 x{0085}abc7 x{2028}abc8 x{2029}abc9 JUNK"), 1329);
-assertEquals("abc1", res[579].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6x{0085} abc7x{2028} abc8x{2029} abc9"), 1330);
-assertEquals(null, res[580].exec("a\nb", 1331));
-assertEquals(null, res[580].exec("a\x0db", 1332));
-assertEquals(null, res[580].exec("a\x0d\nb", 1333));
-assertEquals(null, res[580].exec("a\x0bb", 1334));
-assertEquals(null, res[580].exec("a\x0cb", 1335));
-assertEquals(null, res[580].exec("ax{85}b   ", 1336));
-assertEquals(null, res[580].exec("ax{2028}b ", 1337));
-assertEquals(null, res[580].exec("ax{2029}b ", 1338));
-assertEquals(null, res[580].exec("** Failers", 1339));
-assertEquals(null, res[580].exec("a\n\x0db    ", 1340));
-assertEquals("ab", res[581].exec("ab"), 1341);
-assertEquals(null, res[581].exec("a\nb", 1342));
-assertEquals(null, res[581].exec("a\x0db", 1343));
-assertEquals(null, res[581].exec("a\x0d\nb", 1344));
-assertEquals(null, res[581].exec("a\x0bb", 1345));
-assertEquals(null, res[581].exec("a\x0cx{2028}x{2029}b", 1346));
-assertEquals(null, res[581].exec("ax{85}b   ", 1347));
-assertEquals(null, res[581].exec("a\n\x0db    ", 1348));
-assertEquals(null, res[581].exec("a\n\x0dx{85}\x0cb ", 1349));
-assertEquals(null, res[582].exec("a\nb", 1350));
-assertEquals(null, res[582].exec("a\x0db", 1351));
-assertEquals(null, res[582].exec("a\x0d\nb", 1352));
-assertEquals(null, res[582].exec("a\x0bb", 1353));
-assertEquals(null, res[582].exec("a\x0cx{2028}x{2029}b", 1354));
-assertEquals(null, res[582].exec("ax{85}b   ", 1355));
-assertEquals(null, res[582].exec("a\n\x0db    ", 1356));
-assertEquals(null, res[582].exec("a\n\x0dx{85}\x0cb ", 1357));
-assertEquals(null, res[582].exec("** Failers", 1358));
-assertEquals(null, res[582].exec("ab  ", 1359));
-assertEquals(null, res[583].exec("a\nb", 1360));
-assertEquals(null, res[583].exec("a\n\x0db", 1361));
-assertEquals(null, res[583].exec("a\n\x0dx{85}b", 1362));
-assertEquals(null, res[583].exec("a\x0d\n\x0d\nb ", 1363));
-assertEquals(null, res[583].exec("a\x0d\n\x0d\n\x0d\nb ", 1364));
-assertEquals(null, res[583].exec("a\n\x0d\n\x0db", 1365));
-assertEquals(null, res[583].exec("a\n\n\x0d\nb ", 1366));
-assertEquals(null, res[583].exec("** Failers", 1367));
-assertEquals(null, res[583].exec("a\n\n\n\x0db", 1368));
-assertEquals(null, res[583].exec("a\x0d", 1369));
-assertEquals(null, res[584].exec("X X\n", 1370));
-assertEquals(null, res[584].exec("X\x09X\x0b", 1371));
-assertEquals(null, res[584].exec("** Failers", 1372));
-assertEquals(null, res[584].exec("x{a0} X\n   ", 1373));
-assertEquals(null, res[585].exec("\x09 x{a0}X\n\x0b\x0c\x0d\n", 1374));
-assertEquals(null, res[585].exec("\x09 x{a0}\n\x0b\x0c\x0d\n", 1375));
-assertEquals(null, res[585].exec("\x09 x{a0}\n\x0b\x0c", 1376));
-assertEquals(null, res[585].exec("** Failers ", 1377));
-assertEquals(null, res[585].exec("\x09 x{a0}\n\x0b", 1378));
-assertEquals(null, res[585].exec(" ", 1379));
-assertEquals(null, res[586].exec("x{3001}x{3000}x{2030}x{2028}", 1380));
-assertEquals(null, res[586].exec("Xx{180e}Xx{85}", 1381));
-assertEquals(null, res[586].exec("** Failers", 1382));
-assertEquals(null, res[586].exec("x{2009} X\n   ", 1383));
-assertEquals(null, res[587].exec("x{1680}x{180e}x{2007}Xx{2028}x{2029}\x0c\x0d\n", 1384));
-assertEquals(null, res[587].exec("\x09x{205f}x{a0}\nx{2029}\x0cx{2028}\n", 1385));
-assertEquals(null, res[587].exec("\x09 x{202f}\n\x0b\x0c", 1386));
-assertEquals(null, res[587].exec("** Failers ", 1387));
-assertEquals(null, res[587].exec("\x09x{200a}x{a0}x{2028}\x0b", 1388));
-assertEquals(null, res[587].exec(" ", 1389));
-assertEquals(null, res[588].exec(">x{1680}", 1390));
-assertEquals(null, res[589].exec(">x{1680}x{180e}x{2000}x{2003}x{200a}x{202f}x{205f}x{3000}<", 1391));
-assertEquals("x{1ec5} ", res[593].exec("x{1ec5} "), 1392);
-assertEquals(null, res[594].exec("x{0}x{d7ff}x{e000}x{10ffff}", 1393));
-assertEquals(null, res[594].exec("x{d800}", 1394));
-assertEquals(null, res[594].exec("x{d800}?", 1395));
-assertEquals(null, res[594].exec("x{da00}", 1396));
-assertEquals(null, res[594].exec("x{da00}?", 1397));
-assertEquals(null, res[594].exec("x{dfff}", 1398));
-assertEquals(null, res[594].exec("x{dfff}?", 1399));
-assertEquals(null, res[594].exec("x{110000}    ", 1400));
-assertEquals(null, res[594].exec("x{110000}?    ", 1401));
-assertEquals(null, res[594].exec("x{2000000} ", 1402));
-assertEquals(null, res[594].exec("x{2000000}? ", 1403));
-assertEquals(null, res[594].exec("x{7fffffff} ", 1404));
-assertEquals(null, res[594].exec("x{7fffffff}? ", 1405));
-assertEquals(null, res[595].exec("a\x0db", 1406));
-assertEquals(null, res[595].exec("a\nb", 1407));
-assertEquals(null, res[595].exec("a\x0d\nb", 1408));
-assertEquals(null, res[595].exec("** Failers", 1409));
-assertEquals(null, res[595].exec("ax{85}b", 1410));
-assertEquals(null, res[595].exec("a\x0bb     ", 1411));
-assertEquals(null, res[596].exec("a\x0db", 1412));
-assertEquals(null, res[596].exec("a\nb", 1413));
-assertEquals(null, res[596].exec("a\x0d\nb", 1414));
-assertEquals(null, res[596].exec("ax{85}b", 1415));
-assertEquals(null, res[596].exec("a\x0bb     ", 1416));
-assertEquals(null, res[596].exec("** Failers ", 1417));
-assertEquals(null, res[596].exec("ax{85}b<bsr_anycrlf>", 1418));
-assertEquals(null, res[596].exec("a\x0bb<bsr_anycrlf>", 1419));
-assertEquals(null, res[597].exec("a\x0db", 1420));
-assertEquals(null, res[597].exec("a\nb", 1421));
-assertEquals(null, res[597].exec("a\x0d\nb", 1422));
-assertEquals(null, res[597].exec("** Failers", 1423));
-assertEquals(null, res[597].exec("ax{85}b", 1424));
-assertEquals(null, res[597].exec("a\x0bb     ", 1425));
-assertEquals(null, res[598].exec("a\x0db", 1426));
-assertEquals(null, res[598].exec("a\nb", 1427));
-assertEquals(null, res[598].exec("a\x0d\nb", 1428));
-assertEquals(null, res[598].exec("ax{85}b", 1429));
-assertEquals(null, res[598].exec("a\x0bb     ", 1430));
-assertEquals(null, res[598].exec("** Failers ", 1431));
-assertEquals(null, res[598].exec("ax{85}b<bsr_anycrlf>", 1432));
-assertEquals(null, res[598].exec("a\x0bb<bsr_anycrlf>", 1433));
-assertEquals("QQQx{2029}ABCaXYZ=!bPQR", res[599].exec("QQQx{2029}ABCaXYZ=!bPQR"), 1434);
-assertEquals(null, res[599].exec("** Failers", 1435));
-assertEquals(null, res[599].exec("ax{2029}b", 1436));
-assertEquals(null, res[599].exec("a\xe2\x80\xa9b ", 1437));
-assertEquals(null, res[600].exec("ax{1234}b", 1438));
-assertEquals("a\nb", res[600].exec("a\nb "), 1439);
-assertEquals(null, res[600].exec("** Failers", 1440));
-assertEquals(null, res[600].exec("ab  ", 1441));
-assertEquals("aXb", res[601].exec("aXb"), 1442);
-assertEquals("a\nX\nXx{1234}b", res[601].exec("a\nX\nXx{1234}b "), 1443);
-assertEquals(null, res[601].exec("** Failers", 1444));
-assertEquals(null, res[601].exec("ab  ", 1445));
-assertEquals(null, res[601].exec("x{de}x{de}", 1446));
-assertEquals(null, res[601].exec("x{123} ", 1447));
-assertEquals("X", res[602].exec("Ax{1ec5}ABCXYZ"), 1448);
-assertEquals(null, res[604].exec("x{c0}x{30f}x{660}x{66c}x{f01}x{1680}<", 1449));
-assertEquals(null, res[604].exec("\npx{300}9!$ < ", 1450));
-assertEquals(null, res[604].exec("** Failers ", 1451));
-assertEquals(null, res[604].exec("apx{300}9!$ < ", 1452));
-assertEquals(null, res[605].exec("X", 1453));
-assertEquals(null, res[605].exec("** Failers ", 1454));
-assertEquals(null, res[605].exec("", 1455));
-assertEquals(null, res[606].exec("9", 1456));
-assertEquals(null, res[606].exec("** Failers ", 1457));
-assertEquals(null, res[606].exec("x{c0}", 1458));
-assertEquals(null, res[607].exec("X", 1459));
-assertEquals(null, res[607].exec("** Failers ", 1460));
-assertEquals(null, res[607].exec("x{30f}", 1461));
-assertEquals(null, res[608].exec("X", 1462));
-assertEquals(null, res[608].exec("** Failers ", 1463));
-assertEquals(null, res[608].exec("x{660}", 1464));
-assertEquals(null, res[609].exec("X", 1465));
-assertEquals(null, res[609].exec("** Failers ", 1466));
-assertEquals(null, res[609].exec("x{66c}", 1467));
-assertEquals(null, res[610].exec("X", 1468));
-assertEquals(null, res[610].exec("** Failers ", 1469));
-assertEquals(null, res[610].exec("x{f01}", 1470));
-assertEquals(null, res[611].exec("X", 1471));
-assertEquals(null, res[611].exec("** Failers ", 1472));
-assertEquals(null, res[611].exec("x{1680}", 1473));
-assertEquals(null, res[612].exec("x{017}", 1474));
-assertEquals(null, res[612].exec("x{09f} ", 1475));
-assertEquals(null, res[612].exec("** Failers", 1476));
-assertEquals(null, res[612].exec("x{0600} ", 1477));
-assertEquals(null, res[613].exec("x{601}", 1478));
-assertEquals(null, res[613].exec("** Failers", 1479));
-assertEquals(null, res[613].exec("x{09f} ", 1480));
-assertEquals(null, res[614].exec("x{e0000}", 1481));
-assertEquals(null, res[614].exec("** Failers", 1482));
-assertEquals(null, res[614].exec("x{09f} ", 1483));
-assertEquals(null, res[615].exec("x{f8ff}", 1484));
-assertEquals(null, res[615].exec("** Failers", 1485));
-assertEquals(null, res[615].exec("x{09f} ", 1486));
-assertEquals(null, res[616].exec("?x{dfff}", 1487));
-assertEquals(null, res[616].exec("** Failers", 1488));
-assertEquals(null, res[616].exec("x{09f} ", 1489));
-assertEquals(null, res[617].exec("a", 1490));
-assertEquals(null, res[617].exec("** Failers ", 1491));
-assertEquals(null, res[617].exec("Z", 1492));
-assertEquals(null, res[617].exec("x{e000}  ", 1493));
-assertEquals(null, res[618].exec("x{2b0}", 1494));
-assertEquals(null, res[618].exec("** Failers", 1495));
-assertEquals(null, res[618].exec("a ", 1496));
-assertEquals(null, res[619].exec("x{1bb}", 1497));
-assertEquals(null, res[619].exec("x{3400}", 1498));
-assertEquals(null, res[619].exec("x{3401}", 1499));
-assertEquals(null, res[619].exec("x{4d00}", 1500));
-assertEquals(null, res[619].exec("x{4db4}", 1501));
-assertEquals(null, res[619].exec("x{4db5}     ", 1502));
-assertEquals(null, res[619].exec("** Failers", 1503));
-assertEquals(null, res[619].exec("a ", 1504));
-assertEquals(null, res[619].exec("x{2b0}", 1505));
-assertEquals(null, res[619].exec("x{4db6} ", 1506));
-assertEquals(null, res[620].exec("x{1c5}", 1507));
-assertEquals(null, res[620].exec("** Failers", 1508));
-assertEquals(null, res[620].exec("a ", 1509));
-assertEquals(null, res[620].exec("x{2b0}", 1510));
-assertEquals(null, res[621].exec("A", 1511));
-assertEquals(null, res[621].exec("** Failers", 1512));
-assertEquals(null, res[621].exec("x{2b0}", 1513));
-assertEquals(null, res[622].exec("x{903}", 1514));
-assertEquals(null, res[622].exec("** Failers", 1515));
-assertEquals(null, res[622].exec("X", 1516));
-assertEquals(null, res[622].exec("x{300}", 1517));
-assertEquals(null, res[622].exec("   ", 1518));
-assertEquals(null, res[623].exec("x{488}", 1519));
-assertEquals(null, res[623].exec("** Failers", 1520));
-assertEquals(null, res[623].exec("X", 1521));
-assertEquals(null, res[623].exec("x{903}", 1522));
-assertEquals(null, res[623].exec("x{300}", 1523));
-assertEquals(null, res[624].exec("x{300}", 1524));
-assertEquals(null, res[624].exec("** Failers", 1525));
-assertEquals(null, res[624].exec("X", 1526));
-assertEquals(null, res[624].exec("x{903}", 1527));
-assertEquals(null, res[624].exec("0123456789x{660}x{661}x{662}x{663}x{664}x{665}x{666}x{667}x{668}x{669}x{66a}", 1528));
-assertEquals(null, res[624].exec("x{6f0}x{6f1}x{6f2}x{6f3}x{6f4}x{6f5}x{6f6}x{6f7}x{6f8}x{6f9}x{6fa}", 1529));
-assertEquals(null, res[624].exec("x{966}x{967}x{968}x{969}x{96a}x{96b}x{96c}x{96d}x{96e}x{96f}x{970}", 1530));
-assertEquals(null, res[624].exec("** Failers", 1531));
-assertEquals(null, res[624].exec("X", 1532));
-assertEquals(null, res[625].exec("x{16ee}", 1533));
-assertEquals(null, res[625].exec("** Failers", 1534));
-assertEquals(null, res[625].exec("X", 1535));
-assertEquals(null, res[625].exec("x{966}", 1536));
-assertEquals(null, res[626].exec("x{b2}", 1537));
-assertEquals(null, res[626].exec("x{b3}", 1538));
-assertEquals(null, res[626].exec("** Failers", 1539));
-assertEquals(null, res[626].exec("X", 1540));
-assertEquals(null, res[626].exec("x{16ee}", 1541));
-assertEquals(null, res[627].exec("_", 1542));
-assertEquals(null, res[627].exec("x{203f}", 1543));
-assertEquals(null, res[627].exec("** Failers", 1544));
-assertEquals(null, res[627].exec("X", 1545));
-assertEquals(null, res[627].exec("-", 1546));
-assertEquals(null, res[627].exec("x{58a}", 1547));
-assertEquals(null, res[628].exec("-", 1548));
-assertEquals(null, res[628].exec("x{58a}", 1549));
-assertEquals(null, res[628].exec("** Failers", 1550));
-assertEquals(null, res[628].exec("X", 1551));
-assertEquals(null, res[628].exec("x{203f}", 1552));
-assertEquals(null, res[629].exec(")", 1553));
-assertEquals(null, res[629].exec("]", 1554));
-assertEquals(null, res[629].exec("}", 1555));
-assertEquals(null, res[629].exec("x{f3b}", 1556));
-assertEquals(null, res[629].exec("** Failers", 1557));
-assertEquals(null, res[629].exec("X", 1558));
-assertEquals(null, res[629].exec("x{203f}", 1559));
-assertEquals(null, res[629].exec("(", 1560));
-assertEquals(null, res[629].exec("[", 1561));
-assertEquals(null, res[629].exec("{", 1562));
-assertEquals(null, res[629].exec("x{f3c}", 1563));
-assertEquals(null, res[630].exec("x{bb}", 1564));
-assertEquals(null, res[630].exec("x{2019}", 1565));
-assertEquals(null, res[630].exec("** Failers", 1566));
-assertEquals(null, res[630].exec("X", 1567));
-assertEquals(null, res[630].exec("x{203f}", 1568));
-assertEquals(null, res[631].exec("x{ab}", 1569));
-assertEquals(null, res[631].exec("x{2018}", 1570));
-assertEquals(null, res[631].exec("** Failers", 1571));
-assertEquals(null, res[631].exec("X", 1572));
-assertEquals(null, res[631].exec("x{203f}", 1573));
-assertEquals(null, res[632].exec("!", 1574));
-assertEquals(null, res[632].exec("x{37e}", 1575));
-assertEquals(null, res[632].exec("** Failers", 1576));
-assertEquals(null, res[632].exec("X", 1577));
-assertEquals(null, res[632].exec("x{203f}", 1578));
-assertEquals(null, res[633].exec("(", 1579));
-assertEquals(null, res[633].exec("[", 1580));
-assertEquals(null, res[633].exec("{", 1581));
-assertEquals(null, res[633].exec("x{f3c}", 1582));
-assertEquals(null, res[633].exec("** Failers", 1583));
-assertEquals(null, res[633].exec("X", 1584));
-assertEquals(null, res[633].exec(")", 1585));
-assertEquals(null, res[633].exec("]", 1586));
-assertEquals(null, res[633].exec("}", 1587));
-assertEquals(null, res[633].exec("x{f3b}", 1588));
-assertEquals(null, res[633].exec("$x{a2}x{a3}x{a4}x{a5}x{a6}", 1589));
-assertEquals(null, res[633].exec("x{9f2}", 1590));
-assertEquals(null, res[633].exec("** Failers", 1591));
-assertEquals(null, res[633].exec("X", 1592));
-assertEquals(null, res[633].exec("x{2c2}", 1593));
-assertEquals(null, res[634].exec("x{2c2}", 1594));
-assertEquals(null, res[634].exec("** Failers", 1595));
-assertEquals(null, res[634].exec("X", 1596));
-assertEquals(null, res[634].exec("x{9f2}", 1597));
-assertEquals(null, res[634].exec("+<|~x{ac}x{2044}", 1598));
-assertEquals(null, res[634].exec("** Failers", 1599));
-assertEquals(null, res[634].exec("X", 1600));
-assertEquals(null, res[634].exec("x{9f2}", 1601));
-assertEquals(null, res[635].exec("x{a6}", 1602));
-assertEquals(null, res[635].exec("x{482} ", 1603));
-assertEquals(null, res[635].exec("** Failers", 1604));
-assertEquals(null, res[635].exec("X", 1605));
-assertEquals(null, res[635].exec("x{9f2}", 1606));
-assertEquals(null, res[636].exec("x{2028}", 1607));
-assertEquals(null, res[636].exec("** Failers", 1608));
-assertEquals(null, res[636].exec("X", 1609));
-assertEquals(null, res[636].exec("x{2029}", 1610));
-assertEquals(null, res[637].exec("x{2029}", 1611));
-assertEquals(null, res[637].exec("** Failers", 1612));
-assertEquals(null, res[637].exec("X", 1613));
-assertEquals(null, res[637].exec("x{2028}", 1614));
-assertEquals(null, res[638].exec("\\ \\", 1615));
-assertEquals(null, res[638].exec("x{a0}", 1616));
-assertEquals(null, res[638].exec("x{1680}", 1617));
-assertEquals(null, res[638].exec("x{180e}", 1618));
-assertEquals(null, res[638].exec("x{2000}", 1619));
-assertEquals(null, res[638].exec("x{2001}     ", 1620));
-assertEquals(null, res[638].exec("** Failers", 1621));
-assertEquals(null, res[638].exec("x{2028}", 1622));
-assertEquals(null, res[638].exec("x{200d} ", 1623));
-assertEquals(null, res[638].exec("  x{660}x{661}x{662}ABC", 1624));
-assertEquals(null, res[638].exec("  x{660}x{661}x{662}ABC", 1625));
-assertEquals(null, res[639].exec("  x{660}x{661}x{662}ABC", 1626));
-assertEquals(null, res[640].exec("  x{660}x{661}x{662}ABC", 1627));
-assertEquals(null, res[641].exec("  x{660}x{661}x{662}ABC", 1628));
-assertEquals(null, res[642].exec("  x{660}x{661}x{662}ABC", 1629));
-assertEquals(null, res[643].exec("  x{660}x{661}x{662}ABC", 1630));
-assertEquals(null, res[644].exec("  x{660}x{661}x{662}ABC", 1631));
-assertEquals(null, res[645].exec("  x{660}x{661}x{662}ABC", 1632));
-assertEquals(null, res[646].exec("  x{660}x{661}x{662}ABC", 1633));
-assertEquals(null, res[647].exec("  x{660}x{661}x{662}ABC", 1634));
-assertEquals(null, res[647].exec("  x{660}x{661}x{662}ABC", 1635));
-assertEquals(null, res[647].exec("  x{660}x{661}x{662}ABC", 1636));
-assertEquals(null, res[647].exec("  ** Failers", 1637));
-assertEquals(null, res[647].exec("  x{660}x{661}x{662}ABC", 1638));
-assertEquals(null, res[648].exec("A", 1639));
-assertEquals(null, res[648].exec("ax{10a0}B ", 1640));
-assertEquals(null, res[648].exec("** Failers ", 1641));
-assertEquals(null, res[648].exec("a", 1642));
-assertEquals(null, res[648].exec("x{1d00}  ", 1643));
-assertEquals(null, res[649].exec("1234", 1644));
-assertEquals(null, res[649].exec("** Failers", 1645));
-assertEquals(null, res[649].exec("ABC ", 1646));
-assertEquals(null, res[650].exec("1234", 1647));
-assertEquals(null, res[650].exec("** Failers", 1648));
-assertEquals(null, res[650].exec("ABC ", 1649));
-assertEquals(null, res[650].exec("A2XYZ", 1650));
-assertEquals(null, res[650].exec("123A5XYZPQR", 1651));
-assertEquals(null, res[650].exec("ABAx{660}XYZpqr", 1652));
-assertEquals(null, res[650].exec("** Failers", 1653));
-assertEquals(null, res[650].exec("AXYZ", 1654));
-assertEquals(null, res[650].exec("XYZ     ", 1655));
-assertEquals(null, res[650].exec("1XYZ", 1656));
-assertEquals(null, res[650].exec("AB=XYZ.. ", 1657));
-assertEquals(null, res[650].exec("XYZ ", 1658));
-assertEquals(null, res[650].exec("** Failers", 1659));
-assertEquals(null, res[650].exec("WXYZ ", 1660));
-assertEquals(null, res[655].exec("1234", 1661));
-assertEquals(null, res[655].exec("1234", 1662));
-assertEquals(null, res[655].exec("12-34", 1663));
-assertEquals("{", res[655].exec("12+x{661}-34  "), 1664);
-assertEquals(null, res[655].exec("** Failers", 1665));
-assertEquals("d", res[655].exec("abcd  "), 1666);
-assertEquals("d", res[656].exec("abcd"), 1667);
-assertEquals(null, res[656].exec("** Failers", 1668));
-assertEquals(null, res[656].exec("1234", 1669));
-assertEquals(null, res[657].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1670));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[657].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1671);
-assertEquals(" ", res[657].exec(" "), 1672);
-assertEquals(null, res[657].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1673));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[657].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1674);
-assertEquals(null, res[658].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1675));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[658].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1676);
-assertEquals(null, res[659].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1677));
-assertEquals(null, res[659].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 1678));
-assertEquals(null, res[660].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1679));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[660].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1680);
-assertEquals(null, res[661].exec("a", 1681));
-assertEquals(null, res[661].exec("A ", 1682));
-assertEquals(null, res[662].exec("a", 1683));
-assertEquals(null, res[662].exec("A ", 1684));
-assertEquals(null, res[663].exec("A", 1685));
-assertEquals(null, res[663].exec("aZ", 1686));
-assertEquals(null, res[663].exec("** Failers", 1687));
-assertEquals(null, res[663].exec("abc   ", 1688));
-assertEquals(null, res[664].exec("A", 1689));
-assertEquals(null, res[664].exec("aZ", 1690));
-assertEquals(null, res[664].exec("** Failers", 1691));
-assertEquals(null, res[664].exec("abc   ", 1692));
-assertEquals(null, res[665].exec("a", 1693));
-assertEquals(null, res[665].exec("Az", 1694));
-assertEquals(null, res[665].exec("** Failers", 1695));
-assertEquals(null, res[665].exec("ABC   ", 1696));
-assertEquals(null, res[666].exec("a", 1697));
-assertEquals(null, res[666].exec("Az", 1698));
-assertEquals(null, res[666].exec("** Failers", 1699));
-assertEquals(null, res[666].exec("ABC   ", 1700));
-assertEquals(null, res[666].exec("x{c0}", 1701));
-assertEquals(null, res[666].exec("x{e0} ", 1702));
-assertEquals(null, res[666].exec("x{c0}", 1703));
-assertEquals(null, res[666].exec("x{e0} ", 1704));
-assertEquals(null, res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 1705));
-assertEquals(null, res[666].exec("** Failers", 1706));
-assertEquals(null, res[666].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 1707));
-assertEquals(null, res[666].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 1708));
-assertEquals(null, res[666].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 1709));
-assertEquals(null, res[666].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 1710));
-assertEquals(null, res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 1711));
-assertEquals(null, res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 1712));
-assertEquals(null, res[666].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 1713));
-assertEquals(null, res[666].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 1714));
-assertEquals(null, res[666].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 1715));
-assertEquals(null, res[666].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 1716));
-assertEquals(null, res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 1717));
-assertEquals(null, res[666].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}", 1718));
-assertEquals(null, res[666].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 1719));
-assertEquals(null, res[666].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 1720));
-assertEquals(null, res[666].exec("x{391}", 1721));
-assertEquals(null, res[666].exec("x{ff3a}", 1722));
-assertEquals(null, res[666].exec("x{3b1}", 1723));
-assertEquals(null, res[666].exec("x{ff5a}   ", 1724));
-assertEquals(null, res[666].exec("x{c0}", 1725));
-assertEquals(null, res[666].exec("x{e0} ", 1726));
-assertEquals(null, res[666].exec("x{104}", 1727));
-assertEquals(null, res[666].exec("x{105}", 1728));
-assertEquals(null, res[666].exec("x{109}  ", 1729));
-assertEquals(null, res[666].exec("** Failers", 1730));
-assertEquals(null, res[666].exec("x{100}", 1731));
-assertEquals(null, res[666].exec("x{10a} ", 1732));
-assertEquals(null, res[666].exec("Z", 1733));
-assertEquals(null, res[666].exec("z", 1734));
-assertEquals(null, res[666].exec("x{39c}", 1735));
-assertEquals(null, res[666].exec("x{178}", 1736));
-assertEquals(null, res[666].exec("|", 1737));
-assertEquals(null, res[666].exec("x{80}", 1738));
-assertEquals(null, res[666].exec("x{ff}", 1739));
-assertEquals(null, res[666].exec("x{100}", 1740));
-assertEquals(null, res[666].exec("x{101} ", 1741));
-assertEquals(null, res[666].exec("** Failers", 1742));
-assertEquals(null, res[666].exec("x{102}", 1743));
-assertEquals(null, res[666].exec("Y", 1744));
-assertEquals(null, res[666].exec("y           ", 1745));
-assertEquals(null, res[667].exec("A", 1746));
-assertEquals(null, res[667].exec("Ax{300}BC ", 1747));
-assertEquals(null, res[667].exec("Ax{300}x{301}x{302}BC ", 1748));
-assertEquals(null, res[667].exec("*** Failers", 1749));
-assertEquals(null, res[667].exec("x{300}  ", 1750));
-assertEquals("X", res[668].exec("X123"), 1751);
-assertEquals(null, res[668].exec("*** Failers", 1752));
-assertEquals(null, res[668].exec("AXYZ", 1753));
-assertEquals(null, res[669].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 1754));
-assertEquals(null, res[669].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 1755));
-assertEquals(null, res[670].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 1756));
-assertEquals(null, res[670].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 1757));
-assertEquals("A,,A", res[671].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 1758);
-assertEquals("A,,A", res[671].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 1759);
-assertEquals("A,,A", res[672].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 1760);
-assertEquals("A,,A", res[672].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 1761);
-assertEquals(null, res[673].exec("*** Failers", 1762));
-assertEquals(null, res[673].exec("Ax{300}x{301}x{302}", 1763));
-assertEquals(null, res[674].exec("Ax{300}x{301}Bx{300}X", 1764));
-assertEquals(null, res[674].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 1765));
-assertEquals(null, res[674].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 1766));
-assertEquals(null, res[674].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 1767));
-assertEquals(null, res[675].exec("Ax{300}x{301}Bx{300}X", 1768));
-assertEquals(null, res[675].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 1769));
-assertEquals(null, res[675].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 1770));
-assertEquals(null, res[675].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 1771));
-assertEquals(null, res[675].exec("x{2e81}x{3007}x{2f804}x{31a0}", 1772));
-assertEquals(null, res[675].exec("** Failers", 1773));
-assertEquals(null, res[675].exec("x{2e7f}  ", 1774));
-assertEquals(null, res[675].exec("x{3105}", 1775));
-assertEquals(null, res[675].exec("** Failers", 1776));
-assertEquals(null, res[675].exec("x{30ff}  ", 1777));
-assertEquals(null, res[676].exec("x{06e9}", 1778));
-assertEquals(null, res[676].exec("x{060b}", 1779));
-assertEquals(null, res[676].exec("** Failers", 1780));
-assertEquals(null, res[676].exec("Xx{06e9}   ", 1781));
-assertEquals(null, res[677].exec("x{2f800}", 1782));
-assertEquals(null, res[677].exec("** Failers", 1783));
-assertEquals(null, res[677].exec("x{a014}", 1784));
-assertEquals(null, res[677].exec("x{a4c6}   ", 1785));
-assertEquals(null, res[678].exec("AXYZ", 1786));
-assertEquals(null, res[678].exec("x{1234}XYZ ", 1787));
-assertEquals(null, res[678].exec("** Failers", 1788));
-assertEquals(null, res[678].exec("X  ", 1789));
-assertEquals(null, res[679].exec("** Failers", 1790));
-assertEquals(null, res[679].exec("AX", 1791));
-assertEquals(null, res[680].exec("XYZ", 1792));
-assertEquals(null, res[680].exec("AXYZ", 1793));
-assertEquals(null, res[680].exec("x{1234}XYZ ", 1794));
-assertEquals(null, res[680].exec("** Failers", 1795));
-assertEquals(null, res[680].exec("ABXYZ   ", 1796));
-assertEquals(null, res[681].exec("XYZ", 1797));
-assertEquals(null, res[681].exec("** Failers", 1798));
-assertEquals(null, res[681].exec("AXYZ", 1799));
-assertEquals(null, res[681].exec("x{1234}XYZ ", 1800));
-assertEquals(null, res[681].exec("ABXYZ   ", 1801));
-assertEquals(null, res[681].exec("AXYZ", 1802));
-assertEquals(null, res[681].exec("x{1234}XYZ", 1803));
-assertEquals(null, res[681].exec("Ax{1234}XYZ", 1804));
-assertEquals(null, res[681].exec("** Failers", 1805));
-assertEquals(null, res[681].exec("XYZ", 1806));
-assertEquals(null, res[681].exec("** Failers", 1807));
-assertEquals(null, res[681].exec("AXYZ", 1808));
-assertEquals(null, res[681].exec("x{1234}XYZ", 1809));
-assertEquals(null, res[681].exec("Ax{1234}XYZ", 1810));
-assertEquals(null, res[681].exec("XYZ", 1811));
-assertEquals(null, res[682].exec("XYZ", 1812));
-assertEquals(null, res[682].exec("AXYZ", 1813));
-assertEquals(null, res[682].exec("x{1234}XYZ", 1814));
-assertEquals(null, res[682].exec("Ax{1234}XYZ", 1815));
-assertEquals(null, res[682].exec("** Failers", 1816));
-assertEquals(null, res[683].exec("XYZ", 1817));
-assertEquals(null, res[683].exec("** Failers", 1818));
-assertEquals(null, res[683].exec("AXYZ", 1819));
-assertEquals(null, res[683].exec("x{1234}XYZ", 1820));
-assertEquals(null, res[683].exec("Ax{1234}XYZ", 1821));
-assertEquals("AX", res[684].exec("AXYZ"), 1822);
-assertEquals(null, res[684].exec("x{1234}XYZ ", 1823));
-assertEquals(null, res[684].exec("** Failers", 1824));
-assertEquals(null, res[684].exec("X  ", 1825));
-assertEquals(null, res[685].exec("** Failers", 1826));
-assertEquals("AX", res[685].exec("AX"), 1827);
-assertEquals("X", res[686].exec("XYZ"), 1828);
-assertEquals("AX", res[686].exec("AXYZ"), 1829);
-assertEquals(null, res[686].exec("x{1234}XYZ ", 1830));
-assertEquals(null, res[686].exec("** Failers", 1831));
-assertEquals(null, res[686].exec("ABXYZ   ", 1832));
-assertEquals("X", res[687].exec("XYZ"), 1833);
-assertEquals(null, res[687].exec("** Failers", 1834));
-assertEquals("AX", res[687].exec("AXYZ"), 1835);
-assertEquals(null, res[687].exec("x{1234}XYZ ", 1836));
-assertEquals(null, res[687].exec("ABXYZ   ", 1837));
-assertEquals("AX", res[688].exec("AXYZ"), 1838);
-assertEquals(null, res[688].exec("x{1234}XYZ", 1839));
-assertEquals(null, res[688].exec("Ax{1234}XYZ", 1840));
-assertEquals(null, res[688].exec("** Failers", 1841));
-assertEquals(null, res[688].exec("XYZ", 1842));
-assertEquals(null, res[689].exec("** Failers", 1843));
-assertEquals("AX", res[689].exec("AXYZ"), 1844);
-assertEquals(null, res[689].exec("x{1234}XYZ", 1845));
-assertEquals(null, res[689].exec("Ax{1234}XYZ", 1846));
-assertEquals(null, res[689].exec("XYZ", 1847));
-assertEquals("X", res[690].exec("XYZ"), 1848);
-assertEquals("AX", res[690].exec("AXYZ"), 1849);
-assertEquals(null, res[690].exec("x{1234}XYZ", 1850));
-assertEquals(null, res[690].exec("Ax{1234}XYZ", 1851));
-assertEquals(null, res[690].exec("** Failers", 1852));
-assertEquals("X", res[691].exec("XYZ"), 1853);
-assertEquals(null, res[691].exec("** Failers", 1854));
-assertEquals("AX", res[691].exec("AXYZ"), 1855);
-assertEquals(null, res[691].exec("x{1234}XYZ", 1856));
-assertEquals(null, res[691].exec("Ax{1234}XYZ", 1857));
-assertEquals(null, res[692].exec("abcdefgh", 1858));
-assertEquals(null, res[692].exec("x{1234}\n\x0dx{3456}xyz ", 1859));
-assertEquals(null, res[693].exec("abcdefgh", 1860));
-assertEquals(null, res[693].exec("x{1234}\n\x0dx{3456}xyz ", 1861));
-assertEquals(null, res[694].exec("** Failers", 1862));
-assertEquals(null, res[694].exec("abcdefgh", 1863));
-assertEquals(null, res[694].exec("x{1234}\n\x0dx{3456}xyz ", 1864));
-assertEquals(null, res[695].exec(" AXY", 1865));
-assertEquals(null, res[695].exec(" aXY", 1866));
-assertEquals(null, res[695].exec(" x{1c5}XY", 1867));
-assertEquals(null, res[695].exec(" ** Failers", 1868));
-assertEquals(null, res[695].exec(" x{1bb}XY", 1869));
-assertEquals(null, res[695].exec(" x{2b0}XY", 1870));
-assertEquals(null, res[695].exec(" !XY      ", 1871));
-assertEquals(null, res[696].exec(" AXY", 1872));
-assertEquals(null, res[696].exec(" aXY", 1873));
-assertEquals(null, res[696].exec(" x{1c5}XY", 1874));
-assertEquals(null, res[696].exec(" ** Failers", 1875));
-assertEquals(null, res[696].exec(" x{1bb}XY", 1876));
-assertEquals(null, res[696].exec(" x{2b0}XY", 1877));
-assertEquals(null, res[696].exec(" !XY      ", 1878));
-assertEquals(null, res[696].exec(" AXY", 1879));
-assertEquals(null, res[696].exec(" aXY", 1880));
-assertEquals(null, res[696].exec(" AbcdeXyz ", 1881));
-assertEquals(null, res[696].exec(" x{1c5}AbXY", 1882));
-assertEquals(null, res[696].exec(" abcDEXypqreXlmn ", 1883));
-assertEquals(null, res[696].exec(" ** Failers", 1884));
-assertEquals(null, res[696].exec(" x{1bb}XY", 1885));
-assertEquals(null, res[696].exec(" x{2b0}XY", 1886));
-assertEquals(null, res[696].exec(" !XY      ", 1887));
-assertEquals(null, res[697].exec(" AXY", 1888));
-assertEquals(null, res[697].exec(" aXY", 1889));
-assertEquals(null, res[697].exec(" AbcdeXyz ", 1890));
-assertEquals(null, res[697].exec(" x{1c5}AbXY", 1891));
-assertEquals(null, res[697].exec(" abcDEXypqreXlmn ", 1892));
-assertEquals(null, res[697].exec(" ** Failers", 1893));
-assertEquals(null, res[697].exec(" x{1bb}XY", 1894));
-assertEquals(null, res[697].exec(" x{2b0}XY", 1895));
-assertEquals(null, res[697].exec(" !XY      ", 1896));
-assertEquals(null, res[697].exec(" AXY", 1897));
-assertEquals(null, res[697].exec(" aXY", 1898));
-assertEquals(null, res[697].exec(" AbcdeXyz ", 1899));
-assertEquals(null, res[697].exec(" x{1c5}AbXY", 1900));
-assertEquals(null, res[697].exec(" abcDEXypqreXlmn ", 1901));
-assertEquals(null, res[697].exec(" ** Failers", 1902));
-assertEquals(null, res[697].exec(" x{1bb}XY", 1903));
-assertEquals(null, res[697].exec(" x{2b0}XY", 1904));
-assertEquals(null, res[697].exec(" !XY      ", 1905));
-assertEquals(null, res[698].exec(" AXY", 1906));
-assertEquals(null, res[698].exec(" aXY", 1907));
-assertEquals(null, res[698].exec(" AbcdeXyz ", 1908));
-assertEquals(null, res[698].exec(" x{1c5}AbXY", 1909));
-assertEquals(null, res[698].exec(" abcDEXypqreXlmn ", 1910));
-assertEquals(null, res[698].exec(" ** Failers", 1911));
-assertEquals(null, res[698].exec(" x{1bb}XY", 1912));
-assertEquals(null, res[698].exec(" x{2b0}XY", 1913));
-assertEquals(null, res[698].exec(" !XY      ", 1914));
-assertEquals(null, res[699].exec(" !XY", 1915));
-assertEquals(null, res[699].exec(" x{1bb}XY", 1916));
-assertEquals(null, res[699].exec(" x{2b0}XY", 1917));
-assertEquals(null, res[699].exec(" ** Failers", 1918));
-assertEquals(null, res[699].exec(" x{1c5}XY", 1919));
-assertEquals(null, res[699].exec(" AXY      ", 1920));
-assertEquals(null, res[700].exec(" !XY", 1921));
-assertEquals(null, res[700].exec(" x{1bb}XY", 1922));
-assertEquals(null, res[700].exec(" x{2b0}XY", 1923));
-assertEquals(null, res[700].exec(" ** Failers", 1924));
-assertEquals(null, res[700].exec(" x{1c5}XY", 1925));
-assertEquals(null, res[700].exec(" AXY      ", 1926));
-assertEquals(null, res[701].exec("\xa0!", 1927));
-assertEquals(null, res[701].exec("AabcabcYZ    ", 1928));
-assertEquals("L=abcX,L=abc,abc", res[702].exec("L=abcX"), 1929);
-assertEquals(null, res[702].exec("x{c0}", 1930));
-assertEquals(null, res[702].exec("x{e0} ", 1931));
-assertEquals(null, res[702].exec("x{c0}", 1932));
-assertEquals(null, res[702].exec("x{e0} ", 1933));
-assertEquals(null, res[703].exec("x{1b00}x{12000}x{7c0}x{a840}x{10900}", 1934));
-assertEquals(null, res[706].exec("123abcdefg", 1935));
-assertEquals(null, res[706].exec("123abc\xc4\xc5zz", 1936));
-assertEquals(null, res[710].exec("A\x80", 1937));
-assertEquals(null, res[725].exec("x{60e} ", 1938));
-assertEquals(null, res[725].exec("x{656} ", 1939));
-assertEquals(null, res[725].exec("x{657} ", 1940));
-assertEquals(null, res[725].exec("x{658} ", 1941));
-assertEquals(null, res[725].exec("x{659} ", 1942));
-assertEquals(null, res[725].exec("x{65a} ", 1943));
-assertEquals(null, res[725].exec("x{65b} ", 1944));
-assertEquals(null, res[725].exec("x{65c} ", 1945));
-assertEquals(null, res[725].exec("x{65d} ", 1946));
-assertEquals(null, res[725].exec("x{65e} ", 1947));
-assertEquals(null, res[725].exec("x{66a} ", 1948));
-assertEquals(null, res[725].exec("x{6e9} ", 1949));
-assertEquals(null, res[725].exec("x{6ef}", 1950));
-assertEquals(null, res[725].exec("x{6fa}  ", 1951));
-assertEquals(null, res[725].exec("** Failers", 1952));
-assertEquals(null, res[725].exec("x{600}", 1953));
-assertEquals(null, res[725].exec("x{650}", 1954));
-assertEquals(null, res[725].exec("x{651}  ", 1955));
-assertEquals(null, res[725].exec("x{652}  ", 1956));
-assertEquals(null, res[725].exec("x{653}  ", 1957));
-assertEquals(null, res[725].exec("x{654} ", 1958));
-assertEquals(null, res[725].exec("x{655} ", 1959));
-assertEquals(null, res[725].exec("x{65f}  ", 1960));
-assertEquals(null, res[726].exec("x{1d2b} ", 1961));
-assertEquals(null, res[727].exec("x{589}", 1962));
-assertEquals(null, res[727].exec("x{60c}", 1963));
-assertEquals(null, res[727].exec("x{61f}  ", 1964));
-assertEquals(null, res[727].exec("x{964}", 1965));
-assertEquals(null, res[727].exec("x{965}  ", 1966));
-assertEquals(null, res[727].exec("x{970}  ", 1967));
-assertEquals(null, res[728].exec("x{64b}", 1968));
-assertEquals(null, res[728].exec("x{654}", 1969));
-assertEquals(null, res[728].exec("x{655}", 1970));
-assertEquals(null, res[728].exec("x{200c} ", 1971));
-assertEquals(null, res[728].exec("** Failers", 1972));
-assertEquals(null, res[728].exec("x{64a}", 1973));
-assertEquals(null, res[728].exec("x{656}     ", 1974));
-assertEquals(null, res[729].exec("x{10450}", 1975));
-assertEquals(null, res[729].exec("x{1047f}", 1976));
-assertEquals(null, res[730].exec("x{10400}", 1977));
-assertEquals(null, res[730].exec("x{1044f}", 1978));
-assertEquals(null, res[731].exec("x{10480}", 1979));
-assertEquals(null, res[731].exec("x{1049d}", 1980));
-assertEquals(null, res[731].exec("x{104a0}", 1981));
-assertEquals(null, res[731].exec("x{104a9}", 1982));
-assertEquals(null, res[731].exec("** Failers", 1983));
-assertEquals(null, res[731].exec("x{1049e}", 1984));
-assertEquals(null, res[731].exec("x{1049f}", 1985));
-assertEquals(null, res[731].exec("x{104aa}           ", 1986));
-assertEquals(null, res[731].exec("\xe2\x80\xa8\xe2\x80\xa8", 1987));
-assertEquals(null, res[731].exec("x{2028}x{2028}x{2028}", 1988));
-assertEquals(null, res[732].exec("x{c0}x{e0}x{116}x{117}", 1989));
-assertEquals(null, res[732].exec("x{c0}x{e0}x{116}x{117}", 1990));
-assertEquals(null, res[733].exec("x{102A4}x{AA52}x{A91D}x{1C46}x{10283}x{1092E}x{1C6B}x{A93B}x{A8BF}x{1BA0}x{A50A}====", 1991));
-assertEquals(null, res[733].exec("x{a77d}x{1d79}", 1992));
-assertEquals(null, res[733].exec("x{1d79}x{a77d} ", 1993));
-assertEquals(null, res[733].exec("x{a77d}x{1d79}", 1994));
-assertEquals(null, res[733].exec("** Failers ", 1995));
-assertEquals(null, res[733].exec("x{1d79}x{a77d} ", 1996));
-assertEquals("AA,A", res[734].exec("AA"), 1997);
-assertEquals("Aa,A", res[734].exec("Aa"), 1998);
-assertEquals("aa,a", res[734].exec("aa"), 1999);
-assertEquals("aA,a", res[734].exec("aA"), 2000);
-assertEquals(null, res[734].exec("x{de}x{de}", 2001));
-assertEquals(null, res[734].exec("x{de}x{fe}", 2002));
-assertEquals(null, res[734].exec("x{fe}x{fe}", 2003));
-assertEquals(null, res[734].exec("x{fe}x{de}", 2004));
-assertEquals(null, res[734].exec("x{10a}x{10a}", 2005));
-assertEquals(null, res[734].exec("x{10a}x{10b}", 2006));
-assertEquals(null, res[734].exec("x{10b}x{10b}", 2007));
-assertEquals(null, res[734].exec("x{10b}x{10a}", 2008));
-assertEquals("abc", res[736].exec("abc"), 2009);
-assertEquals("abc", res[737].exec("abc"), 2010);
-assertEquals("abbbbc", res[737].exec("abbbbc"), 2011);
-assertEquals("ac", res[737].exec("ac"), 2012);
-assertEquals("abc", res[738].exec("abc"), 2013);
-assertEquals("abbbbbbc", res[738].exec("abbbbbbc"), 2014);
-assertEquals(null, res[738].exec("*** Failers ", 2015));
-assertEquals(null, res[738].exec("ac", 2016));
-assertEquals(null, res[738].exec("ab", 2017));
-assertEquals("a", res[739].exec("a"), 2018);
-assertEquals("aaaaaaaaaaaaaaaaa", res[739].exec("aaaaaaaaaaaaaaaaa"), 2019);
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[739].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa "), 2020);
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[739].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaF "), 2021);
-assertEquals("a,a", res[740].exec("a"), 2022);
-assertEquals("a,a", res[740].exec("abcd"), 2023);
-assertEquals("a,a", res[740].exec("african"), 2024);
-assertEquals("abc", res[741].exec("abcdef"), 2025);
-assertEquals(null, res[741].exec("*** Failers", 2026));
-assertEquals(null, res[741].exec("xyzabc", 2027));
-assertEquals(null, res[741].exec("xyz\nabc    ", 2028));
-assertEquals("abc", res[742].exec("abcdef"), 2029);
-assertEquals("abc", res[742].exec("xyz\nabc    "), 2030);
-assertEquals(null, res[742].exec("*** Failers", 2031));
-assertEquals(null, res[742].exec("xyzabc", 2032));
-assertEquals(null, res[743].exec("abcdef", 2033));
-assertEquals(null, res[743].exec("*** Failers", 2034));
-assertEquals(null, res[743].exec("xyzabc", 2035));
-assertEquals(null, res[743].exec("xyz\nabc    ", 2036));
-assertEquals(null, res[744].exec("abcdef", 2037));
-assertEquals(null, res[744].exec("*** Failers", 2038));
-assertEquals(null, res[744].exec("xyzabc", 2039));
-assertEquals(null, res[744].exec("xyz\nabc    ", 2040));
-assertEquals(null, res[745].exec("abcdef", 2041));
-assertEquals(null, res[745].exec("xyzabc>3", 2042));
-assertEquals(null, res[745].exec("*** Failers", 2043));
-assertEquals(null, res[745].exec("xyzabc    ", 2044));
-assertEquals(null, res[745].exec("xyzabc>2 ", 2045));
-assertEquals("x9yzz", res[746].exec("x9yzz"), 2046);
-assertEquals("x0y+z", res[746].exec("x0y+z"), 2047);
-assertEquals(null, res[746].exec("*** Failers", 2048));
-assertEquals(null, res[746].exec("xyz", 2049));
-assertEquals(null, res[746].exec("xxy0z     ", 2050));
-assertEquals("x yzz", res[747].exec("x yzz"), 2051);
-assertEquals("x y+z", res[747].exec("x y+z"), 2052);
-assertEquals(null, res[747].exec("*** Failers", 2053));
-assertEquals(null, res[747].exec("xyz", 2054));
-assertEquals(null, res[747].exec("xxyyz", 2055));
-assertEquals("xxy+z", res[748].exec("xxy+z"), 2056);
-assertEquals(null, res[748].exec("*** Failers", 2057));
-assertEquals(null, res[748].exec("xxy0z", 2058));
-assertEquals(null, res[748].exec("x+y+z         ", 2059));
-assertEquals("x+y", res[749].exec("x+y"), 2060);
-assertEquals("x-y", res[749].exec("x-y"), 2061);
-assertEquals(null, res[749].exec("*** Failers", 2062));
-assertEquals(null, res[749].exec("x\ny", 2063));
-assertEquals("x+y", res[750].exec("x+y"), 2064);
-assertEquals("x-y", res[750].exec("x-y"), 2065);
-assertEquals(null, res[750].exec("x\ny", 2066));
-assertEquals(null, res[750].exec("a+bc+dp+q", 2067));
-assertEquals(null, res[750].exec("a+bc\ndp+q", 2068));
-assertEquals(null, res[750].exec("x\nyp+q ", 2069));
-assertEquals(null, res[750].exec("*** Failers ", 2070));
-assertEquals(null, res[750].exec("a\nbc\ndp+q", 2071));
-assertEquals(null, res[750].exec("a+bc\ndp\nq", 2072));
-assertEquals(null, res[750].exec("x\nyp\nq ", 2073));
-assertEquals(null, res[751].exec("ba0", 2074));
-assertEquals(null, res[751].exec("*** Failers", 2075));
-assertEquals(null, res[751].exec("ba0\n", 2076));
-assertEquals(null, res[751].exec("ba0\ncd   ", 2077));
-assertEquals(null, res[752].exec("ba0", 2078));
-assertEquals(null, res[752].exec("*** Failers", 2079));
-assertEquals(null, res[752].exec("ba0\n", 2080));
-assertEquals(null, res[752].exec("ba0\ncd   ", 2081));
-assertEquals(null, res[753].exec("ba0", 2082));
-assertEquals(null, res[753].exec("ba0\n", 2083));
-assertEquals(null, res[753].exec("*** Failers", 2084));
-assertEquals(null, res[753].exec("ba0\ncd   ", 2085));
-assertEquals(null, res[754].exec("ba0", 2086));
-assertEquals(null, res[754].exec("ba0\n", 2087));
-assertEquals(null, res[754].exec("*** Failers", 2088));
-assertEquals(null, res[754].exec("ba0\ncd   ", 2089));
-assertEquals("a0", res[755].exec("ba0"), 2090);
-assertEquals(null, res[755].exec("ba0\n", 2091));
-assertEquals(null, res[755].exec("*** Failers", 2092));
-assertEquals(null, res[755].exec("ba0\ncd   ", 2093));
-assertEquals("a0", res[756].exec("ba0"), 2094);
-assertEquals("a0", res[756].exec("ba0\n"), 2095);
-assertEquals("a0", res[756].exec("ba0\ncd   "), 2096);
-assertEquals(null, res[756].exec("*** Failers", 2097));
-assertEquals("abc", res[757].exec("abc"), 2098);
-assertEquals("aBc", res[757].exec("aBc"), 2099);
-assertEquals("ABC", res[757].exec("ABC"), 2100);
-assertEquals("b", res[758].exec("abcd"), 2101);
-assertEquals("abz", res[759].exec("abz"), 2102);
-assertEquals("abb", res[759].exec("abbz"), 2103);
-assertEquals("az", res[759].exec("azz  "), 2104);
-assertEquals("yz", res[760].exec("ayzq"), 2105);
-assertEquals("xyz", res[760].exec("axyzq"), 2106);
-assertEquals("xxyz", res[760].exec("axxyz"), 2107);
-assertEquals("xxxyz", res[760].exec("axxxyzq"), 2108);
-assertEquals("xxxyz", res[760].exec("axxxxyzq"), 2109);
-assertEquals(null, res[760].exec("*** Failers", 2110));
-assertEquals(null, res[760].exec("ax", 2111));
-assertEquals(null, res[760].exec("axx     ", 2112));
-assertEquals(null, res[760].exec("  ", 2113));
-assertEquals("xxxyz", res[761].exec("axxxyzq"), 2114);
-assertEquals("xxxyz", res[761].exec("axxxxyzq"), 2115);
-assertEquals(null, res[761].exec("*** Failers", 2116));
-assertEquals(null, res[761].exec("ax", 2117));
-assertEquals(null, res[761].exec("axx     ", 2118));
-assertEquals(null, res[761].exec("ayzq", 2119));
-assertEquals(null, res[761].exec("axyzq", 2120));
-assertEquals(null, res[761].exec("axxyz", 2121));
-assertEquals(null, res[761].exec("  ", 2122));
-assertEquals("xxyz", res[762].exec("axxyz"), 2123);
-assertEquals("xxxyz", res[762].exec("axxxyzq"), 2124);
-assertEquals("xxxyz", res[762].exec("axxxxyzq"), 2125);
-assertEquals(null, res[762].exec("*** Failers", 2126));
-assertEquals(null, res[762].exec("ax", 2127));
-assertEquals(null, res[762].exec("axx     ", 2128));
-assertEquals(null, res[762].exec("ayzq", 2129));
-assertEquals(null, res[762].exec("axyzq", 2130));
-assertEquals(null, res[762].exec("  ", 2131));
-assertEquals("b", res[763].exec("bac"), 2132);
-assertEquals("bcdef", res[763].exec("bcdefax"), 2133);
-assertEquals("*** F", res[763].exec("*** Failers"), 2134);
-assertEquals("   ", res[763].exec("aaaaa   "), 2135);
-assertEquals("b", res[764].exec("bac"), 2136);
-assertEquals("bcdef", res[764].exec("bcdefax"), 2137);
-assertEquals("*** F", res[764].exec("*** Failers"), 2138);
-assertEquals("", res[764].exec("aaaaa   "), 2139);
-assertEquals("xyz", res[765].exec("xyz"), 2140);
-assertEquals("wxyz", res[765].exec("awxyza"), 2141);
-assertEquals("bcdef", res[765].exec("abcdefa"), 2142);
-assertEquals("bcdef", res[765].exec("abcdefghijk"), 2143);
-assertEquals("*** F", res[765].exec("*** Failers"), 2144);
-assertEquals(null, res[765].exec("axya", 2145));
-assertEquals(null, res[765].exec("axa", 2146));
-assertEquals("     ", res[765].exec("aaaaa         "), 2147);
-assertEquals("1234", res[766].exec("1234b567"), 2148);
-assertEquals("", res[766].exec("xyz"), 2149);
-assertEquals("a", res[767].exec("a1234b567"), 2150);
-assertEquals("xyz", res[767].exec("xyz"), 2151);
-assertEquals(" ", res[767].exec(" "), 2152);
-assertEquals("1234", res[768].exec("ab1234c56"), 2153);
-assertEquals(null, res[768].exec("*** Failers", 2154));
-assertEquals(null, res[768].exec("xyz", 2155));
-assertEquals("ab", res[769].exec("ab123c56"), 2156);
-assertEquals("*** Failers", res[769].exec("*** Failers"), 2157);
-assertEquals(null, res[769].exec("789", 2158));
-assertEquals("5A", res[770].exec("045ABC"), 2159);
-assertEquals("A", res[770].exec("ABC"), 2160);
-assertEquals(null, res[770].exec("*** Failers", 2161));
-assertEquals(null, res[770].exec("XYZ", 2162));
-assertEquals("A", res[771].exec("ABC"), 2163);
-assertEquals("BA", res[771].exec("BAC"), 2164);
-assertEquals("A", res[771].exec("9ABC             "), 2165);
-assertEquals(null, res[771].exec("*** Failers", 2166));
-assertEquals("aaaa", res[772].exec("aaaa"), 2167);
-assertEquals("xyz", res[773].exec("xyz"), 2168);
-assertEquals("ggggggggxyz", res[773].exec("ggggggggxyz"), 2169);
-assertEquals("abcdxyz", res[774].exec("abcdxyz"), 2170);
-assertEquals("axyz", res[774].exec("axyz"), 2171);
-assertEquals(null, res[774].exec("*** Failers", 2172));
-assertEquals(null, res[774].exec("xyz", 2173));
-assertEquals("xyz", res[775].exec("xyz"), 2174);
-assertEquals("cxyz", res[775].exec("cxyz       "), 2175);
-assertEquals("12X", res[776].exec("12X"), 2176);
-assertEquals("123X", res[776].exec("123X"), 2177);
-assertEquals(null, res[776].exec("*** Failers", 2178));
-assertEquals(null, res[776].exec("X", 2179));
-assertEquals(null, res[776].exec("1X", 2180));
-assertEquals(null, res[776].exec("1234X     ", 2181));
-assertEquals("a4", res[777].exec("a45"), 2182);
-assertEquals("b9", res[777].exec("b93"), 2183);
-assertEquals("c9", res[777].exec("c99z"), 2184);
-assertEquals("d0", res[777].exec("d04"), 2185);
-assertEquals(null, res[777].exec("*** Failers", 2186));
-assertEquals(null, res[777].exec("e45", 2187));
-assertEquals(null, res[777].exec("abcd      ", 2188));
-assertEquals(null, res[777].exec("abcd1234", 2189));
-assertEquals(null, res[777].exec("1234  ", 2190));
-assertEquals("a4", res[778].exec("a45"), 2191);
-assertEquals("b9", res[778].exec("b93"), 2192);
-assertEquals("c9", res[778].exec("c99z"), 2193);
-assertEquals("d0", res[778].exec("d04"), 2194);
-assertEquals("abcd1", res[778].exec("abcd1234"), 2195);
-assertEquals("1", res[778].exec("1234  "), 2196);
-assertEquals(null, res[778].exec("*** Failers", 2197));
-assertEquals(null, res[778].exec("e45", 2198));
-assertEquals(null, res[778].exec("abcd      ", 2199));
-assertEquals("a4", res[779].exec("a45"), 2200);
-assertEquals("b9", res[779].exec("b93"), 2201);
-assertEquals("c9", res[779].exec("c99z"), 2202);
-assertEquals("d0", res[779].exec("d04"), 2203);
-assertEquals("abcd1", res[779].exec("abcd1234"), 2204);
-assertEquals(null, res[779].exec("*** Failers", 2205));
-assertEquals(null, res[779].exec("1234  ", 2206));
-assertEquals(null, res[779].exec("e45", 2207));
-assertEquals(null, res[779].exec("abcd      ", 2208));
-assertEquals("aX", res[780].exec("aX"), 2209);
-assertEquals("aaX", res[780].exec("aaX "), 2210);
-assertEquals("a4", res[781].exec("a45"), 2211);
-assertEquals("b9", res[781].exec("b93"), 2212);
-assertEquals("c9", res[781].exec("c99z"), 2213);
-assertEquals("d0", res[781].exec("d04"), 2214);
-assertEquals("1", res[781].exec("1234  "), 2215);
-assertEquals(null, res[781].exec("*** Failers", 2216));
-assertEquals(null, res[781].exec("abcd1234", 2217));
-assertEquals(null, res[781].exec("e45", 2218));
-assertEquals("ab4", res[782].exec("ab45"), 2219);
-assertEquals("bcd9", res[782].exec("bcd93"), 2220);
-assertEquals(null, res[782].exec("*** Failers", 2221));
-assertEquals(null, res[782].exec("1234 ", 2222));
-assertEquals(null, res[782].exec("a36 ", 2223));
-assertEquals(null, res[782].exec("abcd1234", 2224));
-assertEquals(null, res[782].exec("ee45", 2225));
-assertEquals("abc4,abc", res[783].exec("abc45"), 2226);
-assertEquals("abcabcabc4,abc", res[783].exec("abcabcabc45"), 2227);
-assertEquals("4,", res[783].exec("42xyz "), 2228);
-assertEquals(null, res[783].exec("*** Failers", 2229));
-assertEquals("abc4,abc", res[784].exec("abc45"), 2230);
-assertEquals("abcabcabc4,abc", res[784].exec("abcabcabc45"), 2231);
-assertEquals(null, res[784].exec("*** Failers", 2232));
-assertEquals(null, res[784].exec("42xyz ", 2233));
-assertEquals("abc4,abc", res[785].exec("abc45"), 2234);
-assertEquals("4,", res[785].exec("42xyz "), 2235);
-assertEquals(null, res[785].exec("*** Failers", 2236));
-assertEquals(null, res[785].exec("abcabcabc45", 2237));
-assertEquals("abcabc4,abc", res[786].exec("abcabc45"), 2238);
-assertEquals("abcabcabc4,abc", res[786].exec("abcabcabc45"), 2239);
-assertEquals(null, res[786].exec("*** Failers", 2240));
-assertEquals(null, res[786].exec("abcabcabcabc45", 2241));
-assertEquals(null, res[786].exec("abc45", 2242));
-assertEquals(null, res[786].exec("42xyz ", 2243));
-assertEquals(null, res[786].exec("1abc2abc3456", 2244));
-assertEquals(null, res[786].exec("1abc2xyz3456 ", 2245));
-assertEquals("ab=ab,ab,ab", res[787].exec("ab=ab"), 2246);
-assertEquals("ab=ab,ab,ab", res[787].exec("ab=ab"), 2247);
-assertEquals(null, res[787].exec("abc", 2248));
-assertEquals(null, res[787].exec("a(b)c", 2249));
-assertEquals(null, res[787].exec("a(b(c))d  ", 2250));
-assertEquals(null, res[787].exec("*** Failers)", 2251));
-assertEquals(null, res[787].exec("a(b(c)d  ", 2252));
-assertEquals(null, res[787].exec(">abc>123<xyz<", 2253));
-assertEquals(null, res[787].exec(">abc>1(2)3<xyz<", 2254));
-assertEquals(null, res[787].exec(">abc>(1(2)3)<xyz<", 2255));
-assertEquals(null, res[787].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9876", 2256));
-assertEquals(null, res[787].exec("*** Failers ", 2257));
-assertEquals(null, res[787].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 2258));
-assertEquals(null, res[787].exec("<>", 2259));
-assertEquals(null, res[787].exec("<abcd>", 2260));
-assertEquals(null, res[787].exec("<abc <123> hij>", 2261));
-assertEquals(null, res[787].exec("<abc <def> hij>", 2262));
-assertEquals(null, res[787].exec("<abc<>def> ", 2263));
-assertEquals(null, res[787].exec("<abc<>      ", 2264));
-assertEquals(null, res[787].exec("*** Failers", 2265));
-assertEquals(null, res[787].exec("<abc", 2266));
-assertEquals(null, res[787].exec("abc:                          ", 2267));
-assertEquals(null, res[787].exec("12                             ", 2268));
-assertEquals(null, res[787].exec("*** Failers                     ", 2269));
-assertEquals(null, res[787].exec("123                       ", 2270));
-assertEquals(null, res[787].exec("xyz                        ", 2271));
-assertEquals(null, res[787].exec("                            ", 2272));
-assertEquals(null, res[787].exec("abc:                        ", 2273));
-assertEquals(null, res[787].exec("12         ", 2274));
-assertEquals(null, res[787].exec("*** Failers", 2275));
-assertEquals(null, res[787].exec("123", 2276));
-assertEquals(null, res[787].exec("xyz    ", 2277));
-assertEquals(null, res[788].exec("abcde:                          ", 2278));
-assertEquals(null, res[788].exec("*** Failers                     ", 2279));
-assertEquals(null, res[788].exec("abc.. ", 2280));
-assertEquals(null, res[788].exec("123                       ", 2281));
-assertEquals(null, res[788].exec("vwxyz                        ", 2282));
-assertEquals(null, res[788].exec("                            ", 2283));
-assertEquals(null, res[789].exec("12         ", 2284));
-assertEquals(null, res[789].exec("*** Failers", 2285));
-assertEquals(null, res[789].exec("abcde:", 2286));
-assertEquals(null, res[789].exec("abc..  ", 2287));
-assertEquals(null, res[789].exec("123", 2288));
-assertEquals(null, res[789].exec("vwxyz    ", 2289));
-assertEquals(null, res[789].exec("abc12345", 2290));
-assertEquals(null, res[789].exec("wxy123z", 2291));
-assertEquals(null, res[789].exec("*** Failers", 2292));
-assertEquals(null, res[789].exec("123abc", 2293));
-assertEquals(null, res[789].exec("123abc", 2294));
-assertEquals(null, res[789].exec("mno123456 ", 2295));
-assertEquals(null, res[789].exec("*** Failers", 2296));
-assertEquals(null, res[789].exec("abc12345", 2297));
-assertEquals(null, res[789].exec("wxy123z", 2298));
-assertEquals(null, res[789].exec("abcxyz", 2299));
-assertEquals(null, res[789].exec("123abcxyz999 ", 2300));
-assertEquals("abc", res[791].exec("abcdef"), 2301);
-assertEquals(null, res[791].exec("*** Failers", 2302));
-assertEquals("abc", res[791].exec("abcdefB  "), 2303);
-assertEquals(",", res[792].exec("bcd"), 2304);
-assertEquals("aaa,aaa", res[792].exec("aaabcd"), 2305);
-assertEquals(",", res[792].exec("xyz"), 2306);
-assertEquals(",", res[792].exec("xyzN  "), 2307);
-assertEquals(",", res[792].exec("*** Failers"), 2308);
-assertEquals(",", res[792].exec("bcdN   "), 2309);
-assertEquals("xyz", res[793].exec("xyz"), 2310);
-assertEquals(null, res[793].exec("xyz\n", 2311));
-assertEquals(null, res[793].exec("*** Failers", 2312));
-assertEquals(null, res[793].exec("xyzZ", 2313));
-assertEquals(null, res[793].exec("xyz\nZ    ", 2314));
-assertEquals("xyz", res[794].exec("xyz"), 2315);
-assertEquals("xyz", res[794].exec("xyz\n "), 2316);
-assertEquals("xyz", res[794].exec("abcxyz\npqr "), 2317);
-assertEquals("xyz", res[794].exec("abcxyz\npqrZ "), 2318);
-assertEquals("xyz", res[794].exec("xyz\nZ    "), 2319);
-assertEquals(null, res[794].exec("*** Failers", 2320));
-assertEquals(null, res[794].exec("xyzZ", 2321));
-assertEquals(null, res[795].exec("abcdef", 2322));
-assertEquals(null, res[795].exec("defabcxyz>3 ", 2323));
-assertEquals(null, res[795].exec("*** Failers ", 2324));
-assertEquals(null, res[795].exec("defabcxyz", 2325));
-assertEquals(null, res[796].exec("abP", 2326));
-assertEquals(null, res[796].exec("abcdeP", 2327));
-assertEquals("abcdef", res[796].exec("abcdefP"), 2328);
-assertEquals(null, res[796].exec("*** Failers", 2329));
-assertEquals(null, res[796].exec("abxP    ", 2330));
-assertEquals(null, res[797].exec("aP", 2331));
-assertEquals(null, res[797].exec("aaP", 2332));
-assertEquals(null, res[797].exec("aa2P ", 2333));
-assertEquals(null, res[797].exec("aaaP", 2334));
-assertEquals(null, res[797].exec("aaa23P ", 2335));
-assertEquals(null, res[797].exec("aaaa12345P", 2336));
-assertEquals("aa0z", res[797].exec("aa0zP"), 2337);
-assertEquals("aaaa4444444444444z", res[797].exec("aaaa4444444444444zP "), 2338);
-assertEquals(null, res[797].exec("*** Failers", 2339));
-assertEquals(null, res[797].exec("azP ", 2340));
-assertEquals(null, res[797].exec("aaaaaP ", 2341));
-assertEquals(null, res[797].exec("a56P ", 2342));
-assertEquals(null, res[799].exec("adfadadaklhlkalkajhlkjahdfasdfasdfladsfjkjPZ", 2343));
-assertEquals(null, res[799].exec("lkjhlkjhlkjhlkjhabbbbbbcdaefabbbbbbbefaPBZ", 2344));
-assertEquals(null, res[799].exec("cdabbbbbbbbPRBZ", 2345));
-assertEquals(null, res[799].exec("efabbbbbbbbbbbbbbbbPRBZ", 2346));
-assertEquals(null, res[799].exec("bbbbbbbbbbbbcdXyasdfadfPRBZ    ", 2347));
-assertEquals(null, res[799].exec("abc", 2348));
-assertEquals(null, res[799].exec("** Failers", 2349));
-assertEquals(null, res[799].exec("def  ", 2350));
-assertEquals("the quick brown fox", res[800].exec("the quick brown fox"), 2351);
-assertEquals(null, res[800].exec("The quick brown FOX", 2352));
-assertEquals("the quick brown fox", res[800].exec("What do you know about the quick brown fox?"), 2353);
-assertEquals(null, res[800].exec("What do you know about THE QUICK BROWN FOX?", 2354));
-assertEquals("the quick brown fox", res[801].exec("the quick brown fox"), 2355);
-assertEquals("The quick brown FOX", res[801].exec("The quick brown FOX"), 2356);
-assertEquals("the quick brown fox", res[801].exec("What do you know about the quick brown fox?"), 2357);
-assertEquals("THE QUICK BROWN FOX", res[801].exec("What do you know about THE QUICK BROWN FOX?"), 2358);
-assertEquals("abcd\x09\n\x0d\x0cae9;$\\?caxyz", res[802].exec("abcd\x09\n\x0d\x0cae9;$\\?caxyz"), 2359);
-assertEquals("abxyzpqrrrabbxyyyypqAzz", res[803].exec("abxyzpqrrrabbxyyyypqAzz"), 2360);
-assertEquals("abxyzpqrrrabbxyyyypqAzz", res[803].exec("abxyzpqrrrabbxyyyypqAzz"), 2361);
-assertEquals("aabxyzpqrrrabbxyyyypqAzz", res[803].exec("aabxyzpqrrrabbxyyyypqAzz"), 2362);
-assertEquals("aaabxyzpqrrrabbxyyyypqAzz", res[803].exec("aaabxyzpqrrrabbxyyyypqAzz"), 2363);
-assertEquals("aaaabxyzpqrrrabbxyyyypqAzz", res[803].exec("aaaabxyzpqrrrabbxyyyypqAzz"), 2364);
-assertEquals("abcxyzpqrrrabbxyyyypqAzz", res[803].exec("abcxyzpqrrrabbxyyyypqAzz"), 2365);
-assertEquals("aabcxyzpqrrrabbxyyyypqAzz", res[803].exec("aabcxyzpqrrrabbxyyyypqAzz"), 2366);
-assertEquals("aaabcxyzpqrrrabbxyyyypAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypAzz"), 2367);
-assertEquals("aaabcxyzpqrrrabbxyyyypqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqAzz"), 2368);
-assertEquals("aaabcxyzpqrrrabbxyyyypqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqAzz"), 2369);
-assertEquals("aaabcxyzpqrrrabbxyyyypqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqAzz"), 2370);
-assertEquals("aaabcxyzpqrrrabbxyyyypqqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqAzz"), 2371);
-assertEquals("aaabcxyzpqrrrabbxyyyypqqqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqqAzz"), 2372);
-assertEquals("aaabcxyzpqrrrabbxyyyypqqqqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqqqAzz"), 2373);
-assertEquals("aaaabcxyzpqrrrabbxyyyypqAzz", res[803].exec("aaaabcxyzpqrrrabbxyyyypqAzz"), 2374);
-assertEquals("abxyzzpqrrrabbxyyyypqAzz", res[803].exec("abxyzzpqrrrabbxyyyypqAzz"), 2375);
-assertEquals("aabxyzzzpqrrrabbxyyyypqAzz", res[803].exec("aabxyzzzpqrrrabbxyyyypqAzz"), 2376);
-assertEquals("aaabxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaabxyzzzzpqrrrabbxyyyypqAzz"), 2377);
-assertEquals("aaaabxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaaabxyzzzzpqrrrabbxyyyypqAzz"), 2378);
-assertEquals("abcxyzzpqrrrabbxyyyypqAzz", res[803].exec("abcxyzzpqrrrabbxyyyypqAzz"), 2379);
-assertEquals("aabcxyzzzpqrrrabbxyyyypqAzz", res[803].exec("aabcxyzzzpqrrrabbxyyyypqAzz"), 2380);
-assertEquals("aaabcxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaabcxyzzzzpqrrrabbxyyyypqAzz"), 2381);
-assertEquals("aaaabcxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaaabcxyzzzzpqrrrabbxyyyypqAzz"), 2382);
-assertEquals("aaaabcxyzzzzpqrrrabbbxyyyypqAzz", res[803].exec("aaaabcxyzzzzpqrrrabbbxyyyypqAzz"), 2383);
-assertEquals("aaaabcxyzzzzpqrrrabbbxyyyyypqAzz", res[803].exec("aaaabcxyzzzzpqrrrabbbxyyyyypqAzz"), 2384);
-assertEquals("aaabcxyzpqrrrabbxyyyypABzz", res[803].exec("aaabcxyzpqrrrabbxyyyypABzz"), 2385);
-assertEquals("aaabcxyzpqrrrabbxyyyypABBzz", res[803].exec("aaabcxyzpqrrrabbxyyyypABBzz"), 2386);
-assertEquals("aaabxyzpqrrrabbxyyyypqAzz", res[803].exec(">>>aaabxyzpqrrrabbxyyyypqAzz"), 2387);
-assertEquals("aaaabxyzpqrrrabbxyyyypqAzz", res[803].exec(">aaaabxyzpqrrrabbxyyyypqAzz"), 2388);
-assertEquals("abcxyzpqrrrabbxyyyypqAzz", res[803].exec(">>>>abcxyzpqrrrabbxyyyypqAzz"), 2389);
-assertEquals(null, res[803].exec("*** Failers", 2390));
-assertEquals(null, res[803].exec("abxyzpqrrabbxyyyypqAzz", 2391));
-assertEquals(null, res[803].exec("abxyzpqrrrrabbxyyyypqAzz", 2392));
-assertEquals(null, res[803].exec("abxyzpqrrrabxyyyypqAzz", 2393));
-assertEquals(null, res[803].exec("aaaabcxyzzzzpqrrrabbbxyyyyyypqAzz", 2394));
-assertEquals(null, res[803].exec("aaaabcxyzzzzpqrrrabbbxyyypqAzz", 2395));
-assertEquals(null, res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqqqqAzz", 2396));
-assertEquals("abczz,abc", res[804].exec("abczz"), 2397);
-assertEquals("abcabczz,abc", res[804].exec("abcabczz"), 2398);
-assertEquals(null, res[804].exec("*** Failers", 2399));
-assertEquals(null, res[804].exec("zz", 2400));
-assertEquals(null, res[804].exec("abcabcabczz", 2401));
-assertEquals(null, res[804].exec(">>abczz", 2402));
-assertEquals("bc,b", res[805].exec("bc"), 2403);
-assertEquals("bbc,b", res[805].exec("bbc"), 2404);
-assertEquals("bbbc,bb", res[805].exec("bbbc"), 2405);
-assertEquals("bac,a", res[805].exec("bac"), 2406);
-assertEquals("bbac,a", res[805].exec("bbac"), 2407);
-assertEquals("aac,a", res[805].exec("aac"), 2408);
-assertEquals("abbbbbbbbbbbc,bbbbbbbbbbb", res[805].exec("abbbbbbbbbbbc"), 2409);
-assertEquals("bbbbbbbbbbbac,a", res[805].exec("bbbbbbbbbbbac"), 2410);
-assertEquals(null, res[805].exec("*** Failers", 2411));
-assertEquals(null, res[805].exec("aaac", 2412));
-assertEquals(null, res[805].exec("abbbbbbbbbbbac", 2413));
-assertEquals("bc,b", res[806].exec("bc"), 2414);
-assertEquals("bbc,bb", res[806].exec("bbc"), 2415);
-assertEquals("bbbc,bbb", res[806].exec("bbbc"), 2416);
-assertEquals("bac,a", res[806].exec("bac"), 2417);
-assertEquals("bbac,a", res[806].exec("bbac"), 2418);
-assertEquals("aac,a", res[806].exec("aac"), 2419);
-assertEquals("abbbbbbbbbbbc,bbbbbbbbbbb", res[806].exec("abbbbbbbbbbbc"), 2420);
-assertEquals("bbbbbbbbbbbac,a", res[806].exec("bbbbbbbbbbbac"), 2421);
-assertEquals(null, res[806].exec("*** Failers", 2422));
-assertEquals(null, res[806].exec("aaac", 2423));
-assertEquals(null, res[806].exec("abbbbbbbbbbbac", 2424));
-assertEquals("bbc,bb", res[806].exec("bbc"), 2425);
-assertEquals("babc,ba", res[807].exec("babc"), 2426);
-assertEquals("bbabc,ba", res[807].exec("bbabc"), 2427);
-assertEquals("bababc,ba", res[807].exec("bababc"), 2428);
-assertEquals(null, res[807].exec("*** Failers", 2429));
-assertEquals(null, res[807].exec("bababbc", 2430));
-assertEquals(null, res[807].exec("babababc", 2431));
-assertEquals("babc,ba", res[808].exec("babc"), 2432);
-assertEquals("bbabc,ba", res[808].exec("bbabc"), 2433);
-assertEquals("bababc,ba", res[808].exec("bababc"), 2434);
-assertEquals(null, res[808].exec("*** Failers", 2435));
-assertEquals(null, res[808].exec("bababbc", 2436));
-assertEquals(null, res[808].exec("babababc", 2437));
+assertNull(res[443].exec("aaaa", 885));
+assertNull(res[443].exec("bacxxx", 886));
+assertNull(res[443].exec("bbaccxxx ", 887));
+assertNull(res[443].exec("bbbacccxx", 888));
+assertNull(res[443].exec("aaaa", 889));
+assertNull(res[443].exec("bacxxx", 890));
+assertNull(res[443].exec("bbaccxxx ", 891));
+assertNull(res[443].exec("bbbacccxx", 892));
+assertToStringEquals("a,a", res[444].exec("aaaa"), 893);
+assertNull(res[444].exec("bacxxx", 894));
+assertNull(res[444].exec("bbaccxxx ", 895));
+assertNull(res[444].exec("bbbacccxx", 896));
+assertToStringEquals("a,a", res[445].exec("aaaa"), 897);
+assertNull(res[445].exec("bacxxx", 898));
+assertNull(res[445].exec("bbaccxxx ", 899));
+assertNull(res[445].exec("bbbacccxx", 900));
+assertToStringEquals("a,a", res[446].exec("aaaa"), 901);
+assertNull(res[446].exec("bacxxx", 902));
+assertNull(res[446].exec("bbaccxxx ", 903));
+assertNull(res[446].exec("bbbacccxx", 904));
+assertToStringEquals("a,a,a", res[447].exec("aaaa"), 905);
+assertNull(res[447].exec("bacxxx", 906));
+assertNull(res[447].exec("bbaccxxx ", 907));
+assertNull(res[447].exec("bbbacccxx", 908));
+assertNull(res[449].exec("bacxxx", 909));
+assertNull(res[449].exec("XaaX", 910));
+assertNull(res[449].exec("XAAX ", 911));
+assertNull(res[449].exec("XaaX", 912));
+assertNull(res[449].exec("** Failers ", 913));
+assertNull(res[449].exec("XAAX ", 914));
+assertNull(res[449].exec("XaaX", 915));
+assertNull(res[449].exec("XAAX ", 916));
+assertNull(res[449].exec("xzxx", 917));
+assertNull(res[449].exec("yzyy ", 918));
+assertNull(res[449].exec("** Failers", 919));
+assertNull(res[449].exec("xxz  ", 920));
+assertToStringEquals("a,,,a", res[450].exec("cat"), 921);
+assertToStringEquals("a,,,a", res[451].exec("cat"), 922);
+assertToStringEquals("TA]", res[452].exec("The ACTA] comes "), 923);
+assertToStringEquals("TA]", res[453].exec("The ACTA] comes "), 924);
+assertNull(res[453].exec("abcbabc", 925));
+assertNull(res[453].exec("abcbabc", 926));
+assertNull(res[453].exec("abcbabc", 927));
+assertNull(res[453].exec("** Failers ", 928));
+assertNull(res[453].exec("abcXabc", 929));
+assertNull(res[453].exec("abcXabc", 930));
+assertNull(res[453].exec("** Failers ", 931));
+assertNull(res[453].exec("abcbabc", 932));
+assertNull(res[453].exec("xyzbabcxyz", 933));
+assertNull(res[456].exec("** Failers", 934));
+assertNull(res[456].exec("ab", 935));
+assertNull(res[457].exec("** Failers", 936));
+assertNull(res[457].exec("ab ", 937));
+assertNull(res[457].exec("** Failers", 938));
+assertNull(res[457].exec("ab ", 939));
+assertToStringEquals("aXb", res[458].exec("aXb"), 940);
+assertToStringEquals("a\nb", res[458].exec("a\nb "), 941);
+assertNull(res[458].exec("** Failers", 942));
+assertNull(res[458].exec("ab  ", 943));
+assertToStringEquals("aXb", res[459].exec("aXb"), 944);
+assertToStringEquals("a\nX\nXb", res[459].exec("a\nX\nXb "), 945);
+assertNull(res[459].exec("** Failers", 946));
+assertNull(res[459].exec("ab  ", 947));
+assertToStringEquals("acb", res[463].exec("acb"), 948);
+assertToStringEquals("ab", res[463].exec("ab"), 949);
+assertNull(res[463].exec("ax{100}b ", 950));
+assertNull(res[463].exec("*** Failers", 951));
+assertNull(res[463].exec("a\nb  ", 952));
+assertNull(res[464].exec("ax{4000}xyb ", 953));
+assertNull(res[464].exec("ax{4000}yb ", 954));
+assertNull(res[464].exec("ax{4000}x{100}yb ", 955));
+assertNull(res[464].exec("*** Failers", 956));
+assertNull(res[464].exec("ax{4000}b ", 957));
+assertNull(res[464].exec("ac\ncb ", 958));
+assertToStringEquals("a\xc0,,\xc0", res[465].exec("a\xc0\x88b"), 959);
+assertToStringEquals("ax,,x", res[466].exec("ax{100}b"), 960);
+assertToStringEquals("a\xc0\x88b,\xc0\x88,b", res[467].exec("a\xc0\x88b"), 961);
+assertToStringEquals("ax{100}b,x{100},b", res[468].exec("ax{100}b"), 962);
+assertToStringEquals("a\xc0\x92,\xc0,\x92", res[469].exec("a\xc0\x92bcd"), 963);
+assertToStringEquals("ax{,x,{", res[470].exec("ax{240}bcd"), 964);
+assertToStringEquals("a\xc0\x92,\xc0,\x92", res[471].exec("a\xc0\x92bcd"), 965);
+assertToStringEquals("ax{,x,{", res[472].exec("ax{240}bcd"), 966);
+assertToStringEquals("a\xc0,,\xc0", res[473].exec("a\xc0\x92bcd"), 967);
+assertToStringEquals("ax,,x", res[474].exec("ax{240}bcd"), 968);
+assertNull(res[475].exec("ax{1234}xyb ", 969));
+assertNull(res[475].exec("ax{1234}x{4321}yb ", 970));
+assertNull(res[475].exec("ax{1234}x{4321}x{3412}b ", 971));
+assertNull(res[475].exec("*** Failers", 972));
+assertNull(res[475].exec("ax{1234}b ", 973));
+assertNull(res[475].exec("ac\ncb ", 974));
+assertToStringEquals("ax{1234}xyb,x{1234}xy", res[476].exec("ax{1234}xyb "), 975);
+assertToStringEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[476].exec("ax{1234}x{4321}yb "), 976);
+assertToStringEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[476].exec("ax{1234}x{4321}x{3412}b "), 977);
+assertToStringEquals("axxxxbcdefghijb,xxxxbcdefghij", res[476].exec("axxxxbcdefghijb "), 978);
+assertToStringEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[476].exec("ax{1234}x{4321}x{3412}x{3421}b "), 979);
+assertNull(res[476].exec("*** Failers", 980));
+assertToStringEquals("ax{1234}b,x{1234}", res[476].exec("ax{1234}b "), 981);
+assertToStringEquals("ax{1234}xyb,x{1234}xy", res[477].exec("ax{1234}xyb "), 982);
+assertToStringEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[477].exec("ax{1234}x{4321}yb "), 983);
+assertToStringEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[477].exec("ax{1234}x{4321}x{3412}b "), 984);
+assertToStringEquals("axxxxb,xxxx", res[477].exec("axxxxbcdefghijb "), 985);
+assertToStringEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[477].exec("ax{1234}x{4321}x{3412}x{3421}b "), 986);
+assertNull(res[477].exec("*** Failers", 987));
+assertToStringEquals("ax{1234}b,x{1234}", res[477].exec("ax{1234}b "), 988);
+assertNull(res[478].exec("ax{1234}xyb ", 989));
+assertNull(res[478].exec("ax{1234}x{4321}yb ", 990));
+assertNull(res[478].exec("ax{1234}x{4321}x{3412}b ", 991));
+assertToStringEquals("axxxxb,xxxx", res[478].exec("axxxxbcdefghijb "), 992);
+assertNull(res[478].exec("ax{1234}x{4321}x{3412}x{3421}b ", 993));
+assertToStringEquals("axbxxb,xbxx", res[478].exec("axbxxbcdefghijb "), 994);
+assertToStringEquals("axxxxxb,xxxxx", res[478].exec("axxxxxbcdefghijb "), 995);
+assertNull(res[478].exec("*** Failers", 996));
+assertNull(res[478].exec("ax{1234}b ", 997));
+assertNull(res[478].exec("axxxxxxbcdefghijb ", 998));
+assertNull(res[479].exec("ax{1234}xyb ", 999));
+assertNull(res[479].exec("ax{1234}x{4321}yb ", 1000));
+assertNull(res[479].exec("ax{1234}x{4321}x{3412}b ", 1001));
+assertToStringEquals("axxxxb,xxxx", res[479].exec("axxxxbcdefghijb "), 1002);
+assertNull(res[479].exec("ax{1234}x{4321}x{3412}x{3421}b ", 1003));
+assertToStringEquals("axbxxb,xbxx", res[479].exec("axbxxbcdefghijb "), 1004);
+assertToStringEquals("axxxxxb,xxxxx", res[479].exec("axxxxxbcdefghijb "), 1005);
+assertNull(res[479].exec("*** Failers", 1006));
+assertNull(res[479].exec("ax{1234}b ", 1007));
+assertNull(res[479].exec("axxxxxxbcdefghijb ", 1008));
+assertNull(res[479].exec("*** Failers", 1009));
+assertNull(res[479].exec("x{100}", 1010));
+assertNull(res[479].exec("aXbcd", 1011));
+assertNull(res[479].exec("ax{100}bcd", 1012));
+assertNull(res[479].exec("ax{100000}bcd", 1013));
+assertNull(res[479].exec("x{100}x{100}x{100}b", 1014));
+assertNull(res[479].exec("*** Failers ", 1015));
+assertNull(res[479].exec("x{100}x{100}b", 1016));
+assertNull(res[479].exec("x{ab} ", 1017));
+assertNull(res[479].exec("\xc2\xab", 1018));
+assertNull(res[479].exec("*** Failers ", 1019));
+assertNull(res[479].exec("\x00{ab}", 1020));
+assertNull(res[479].exec("WXYZ", 1021));
+assertNull(res[479].exec("x{256}XYZ ", 1022));
+assertNull(res[479].exec("*** Failers", 1023));
+assertNull(res[479].exec("XYZ ", 1024));
+assertNull(res[480].exec("Xx{1234}", 1025));
+assertNull(res[481].exec("Xx{1234}YZ", 1026));
+assertToStringEquals("X", res[482].exec("XYZabcdce"), 1027);
+assertToStringEquals("X", res[483].exec("XYZabcde"), 1028);
+assertNull(res[484].exec("Xabcdefg   ", 1029));
+assertNull(res[484].exec("Xx{1234} ", 1030));
+assertNull(res[484].exec("Xx{1234}YZ", 1031));
+assertNull(res[484].exec("Xx{1234}x{512}  ", 1032));
+assertNull(res[484].exec("Xx{1234}x{512}YZ", 1033));
+assertNull(res[485].exec("Xabcdefg   ", 1034));
+assertNull(res[485].exec("Xx{1234} ", 1035));
+assertNull(res[485].exec("Xx{1234}YZ", 1036));
+assertNull(res[485].exec("Xx{1234}x{512}  ", 1037));
+assertToStringEquals("bcd", res[486].exec("bcd"), 1038);
+assertToStringEquals("00}", res[486].exec("x{100}aYx{256}Z "), 1039);
+assertToStringEquals("x{", res[487].exec("x{100}bc"), 1040);
+assertToStringEquals("x{100}bcA", res[488].exec("x{100}bcAa"), 1041);
+assertToStringEquals("x{", res[489].exec("x{100}bca"), 1042);
+assertToStringEquals("bcd", res[490].exec("bcd"), 1043);
+assertToStringEquals("00}", res[490].exec("x{100}aYx{256}Z "), 1044);
+assertToStringEquals("x{", res[491].exec("x{100}bc"), 1045);
+assertToStringEquals("x{100}bc", res[492].exec("x{100}bcAa"), 1046);
+assertToStringEquals("x{", res[493].exec("x{100}bca"), 1047);
+assertNull(res[493].exec("abcd", 1048));
+assertNull(res[493].exec("abcd", 1049));
+assertToStringEquals("x{", res[493].exec("x{100}x{100} "), 1050);
+assertToStringEquals("x{", res[493].exec("x{100}x{100} "), 1051);
+assertToStringEquals("x{", res[493].exec("x{100}x{100}x{100}x{100} "), 1052);
+assertNull(res[493].exec("abce", 1053));
+assertToStringEquals("x{", res[493].exec("x{100}x{100}x{100}x{100} "), 1054);
+assertNull(res[493].exec("abcdx{100}x{100}x{100}x{100} ", 1055));
+assertNull(res[493].exec("abcdx{100}x{100}x{100}x{100} ", 1056));
+assertNull(res[493].exec("abcdx{100}x{100}x{100}x{100} ", 1057));
+assertNull(res[493].exec("abcdx{100}x{100}x{100}XX", 1058));
+assertNull(res[493].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 1059));
+assertNull(res[493].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 1060));
+assertToStringEquals("Xy", res[493].exec("Xyyyax{100}x{100}bXzzz"), 1061);
+assertToStringEquals("X", res[496].exec("1X2"), 1062);
+assertToStringEquals("x", res[496].exec("1x{100}2 "), 1063);
+assertToStringEquals(">X", res[497].exec("> >X Y"), 1064);
+assertToStringEquals(">x", res[497].exec("> >x{100} Y"), 1065);
+assertToStringEquals("1", res[498].exec("x{100}3"), 1066);
+assertToStringEquals(" ", res[499].exec("x{100} X"), 1067);
+assertToStringEquals("abcd", res[500].exec("12abcd34"), 1068);
+assertToStringEquals("*** Failers", res[500].exec("*** Failers"), 1069);
+assertToStringEquals("  ", res[500].exec("1234  "), 1070);
+assertToStringEquals("abc", res[501].exec("12abcd34"), 1071);
+assertToStringEquals("ab", res[501].exec("12ab34"), 1072);
+assertToStringEquals("***", res[501].exec("*** Failers  "), 1073);
+assertNull(res[501].exec("1234", 1074));
+assertToStringEquals("  ", res[501].exec("12a34  "), 1075);
+assertToStringEquals("ab", res[502].exec("12abcd34"), 1076);
+assertToStringEquals("ab", res[502].exec("12ab34"), 1077);
+assertToStringEquals("**", res[502].exec("*** Failers  "), 1078);
+assertNull(res[502].exec("1234", 1079));
+assertToStringEquals("  ", res[502].exec("12a34  "), 1080);
+assertToStringEquals("12", res[503].exec("12abcd34"), 1081);
+assertNull(res[503].exec("*** Failers", 1082));
+assertToStringEquals("12", res[504].exec("12abcd34"), 1083);
+assertToStringEquals("123", res[504].exec("1234abcd"), 1084);
+assertNull(res[504].exec("*** Failers  ", 1085));
+assertNull(res[504].exec("1.4 ", 1086));
+assertToStringEquals("12", res[505].exec("12abcd34"), 1087);
+assertToStringEquals("12", res[505].exec("1234abcd"), 1088);
+assertNull(res[505].exec("*** Failers  ", 1089));
+assertNull(res[505].exec("1.4 ", 1090));
+assertToStringEquals("12abcd34", res[506].exec("12abcd34"), 1091);
+assertToStringEquals("***", res[506].exec("*** Failers"), 1092);
+assertNull(res[506].exec("     ", 1093));
+assertToStringEquals("12a", res[507].exec("12abcd34"), 1094);
+assertToStringEquals("123", res[507].exec("1234abcd"), 1095);
+assertToStringEquals("***", res[507].exec("*** Failers"), 1096);
+assertNull(res[507].exec("       ", 1097));
+assertToStringEquals("12", res[508].exec("12abcd34"), 1098);
+assertToStringEquals("12", res[508].exec("1234abcd"), 1099);
+assertToStringEquals("**", res[508].exec("*** Failers"), 1100);
+assertNull(res[508].exec("       ", 1101));
+assertToStringEquals(">      <", res[509].exec("12>      <34"), 1102);
+assertNull(res[509].exec("*** Failers", 1103));
+assertToStringEquals(">  <", res[510].exec("ab>  <cd"), 1104);
+assertToStringEquals(">   <", res[510].exec("ab>   <ce"), 1105);
+assertNull(res[510].exec("*** Failers", 1106));
+assertNull(res[510].exec("ab>    <cd ", 1107));
+assertToStringEquals(">  <", res[511].exec("ab>  <cd"), 1108);
+assertToStringEquals(">   <", res[511].exec("ab>   <ce"), 1109);
+assertNull(res[511].exec("*** Failers", 1110));
+assertNull(res[511].exec("ab>    <cd ", 1111));
+assertToStringEquals("12", res[512].exec("12      34"), 1112);
+assertToStringEquals("Failers", res[512].exec("*** Failers"), 1113);
+assertNull(res[512].exec("+++=*! ", 1114));
+assertToStringEquals("ab", res[513].exec("ab  cd"), 1115);
+assertToStringEquals("abc", res[513].exec("abcd ce"), 1116);
+assertToStringEquals("Fai", res[513].exec("*** Failers"), 1117);
+assertNull(res[513].exec("a.b.c", 1118));
+assertToStringEquals("ab", res[514].exec("ab  cd"), 1119);
+assertToStringEquals("ab", res[514].exec("abcd ce"), 1120);
+assertToStringEquals("Fa", res[514].exec("*** Failers"), 1121);
+assertNull(res[514].exec("a.b.c", 1122));
+assertToStringEquals("====", res[515].exec("12====34"), 1123);
+assertToStringEquals("*** ", res[515].exec("*** Failers"), 1124);
+assertToStringEquals(" ", res[515].exec("abcd "), 1125);
+assertToStringEquals("===", res[516].exec("ab====cd"), 1126);
+assertToStringEquals("==", res[516].exec("ab==cd"), 1127);
+assertToStringEquals("***", res[516].exec("*** Failers"), 1128);
+assertNull(res[516].exec("a.b.c", 1129));
+assertToStringEquals("==", res[517].exec("ab====cd"), 1130);
+assertToStringEquals("==", res[517].exec("ab==cd"), 1131);
+assertToStringEquals("**", res[517].exec("*** Failers"), 1132);
+assertNull(res[517].exec("a.b.c", 1133));
+assertNull(res[517].exec("x{100}", 1134));
+assertNull(res[517].exec("Zx{100}", 1135));
+assertNull(res[517].exec("x{100}Z", 1136));
+assertToStringEquals("**", res[517].exec("*** Failers "), 1137);
+assertNull(res[517].exec("Zx{100}", 1138));
+assertNull(res[517].exec("x{100}", 1139));
+assertNull(res[517].exec("x{100}Z", 1140));
+assertToStringEquals("**", res[517].exec("*** Failers "), 1141);
+assertNull(res[517].exec("abcx{200}X", 1142));
+assertNull(res[517].exec("abcx{100}X ", 1143));
+assertToStringEquals("**", res[517].exec("*** Failers"), 1144);
+assertToStringEquals("  ", res[517].exec("X  "), 1145);
+assertNull(res[517].exec("abcx{200}X", 1146));
+assertNull(res[517].exec("abcx{100}X ", 1147));
+assertNull(res[517].exec("abQX ", 1148));
+assertToStringEquals("**", res[517].exec("*** Failers"), 1149);
+assertToStringEquals("  ", res[517].exec("X  "), 1150);
+assertNull(res[517].exec("abcx{100}x{200}x{100}X", 1151));
+assertToStringEquals("**", res[517].exec("*** Failers"), 1152);
+assertNull(res[517].exec("abcx{200}X", 1153));
+assertToStringEquals("  ", res[517].exec("X  "), 1154);
+assertNull(res[517].exec("AX", 1155));
+assertNull(res[517].exec("x{150}X", 1156));
+assertNull(res[517].exec("x{500}X ", 1157));
+assertToStringEquals("**", res[517].exec("*** Failers"), 1158);
+assertNull(res[517].exec("x{100}X", 1159));
+assertToStringEquals("  ", res[517].exec("x{200}X   "), 1160);
+assertNull(res[517].exec("AX", 1161));
+assertNull(res[517].exec("x{150}X", 1162));
+assertNull(res[517].exec("x{500}X ", 1163));
+assertToStringEquals("**", res[517].exec("*** Failers"), 1164);
+assertNull(res[517].exec("x{100}X", 1165));
+assertToStringEquals("  ", res[517].exec("x{200}X   "), 1166);
+assertNull(res[517].exec("QX ", 1167));
+assertNull(res[517].exec("AX", 1168));
+assertNull(res[517].exec("x{500}X ", 1169));
+assertToStringEquals("**", res[517].exec("*** Failers"), 1170);
+assertNull(res[517].exec("x{100}X", 1171));
+assertNull(res[517].exec("x{150}X", 1172));
+assertToStringEquals("  ", res[517].exec("x{200}X   "), 1173);
+assertNull(res[518].exec("aXb", 1174));
+assertNull(res[518].exec("a\nb", 1175));
+assertNull(res[519].exec("aXb", 1176));
+assertNull(res[519].exec("a\nb", 1177));
+assertNull(res[519].exec("*** Failers ", 1178));
+assertNull(res[519].exec("ax{100}b ", 1179));
+assertNull(res[519].exec("z", 1180));
+assertNull(res[519].exec("Z ", 1181));
+assertNull(res[519].exec("x{100}", 1182));
+assertNull(res[519].exec("*** Failers", 1183));
+assertNull(res[519].exec("x{102}", 1184));
+assertNull(res[519].exec("y    ", 1185));
+assertToStringEquals("\xff", res[520].exec(">\xff<"), 1186);
+assertNull(res[521].exec(">x{ff}<", 1187));
+assertToStringEquals("X", res[522].exec("XYZ"), 1188);
+assertToStringEquals("X", res[523].exec("XYZ"), 1189);
+assertToStringEquals("x", res[523].exec("x{123} "), 1190);
+assertToStringEquals(",", res[528].exec("catac"), 1191);
+assertToStringEquals(",", res[528].exec("ax{256}a "), 1192);
+assertToStringEquals(",", res[528].exec("x{85}"), 1193);
+assertToStringEquals(",", res[528].exec("\u1234 "), 1194);
+assertToStringEquals(",", res[528].exec("\u1234 "), 1195);
+assertToStringEquals(",", res[528].exec("abcdefg"), 1196);
+assertToStringEquals(",", res[528].exec("ab"), 1197);
+assertToStringEquals(",", res[528].exec("a "), 1198);
+assertToStringEquals("Ax", res[529].exec("Ax{a3}BC"), 1199);
+assertToStringEquals("Ax", res[530].exec("Ax{a3}BC"), 1200);
+assertToStringEquals("}=", res[531].exec("+x{a3}== "), 1201);
+assertToStringEquals("}=", res[532].exec("+x{a3}== "), 1202);
+assertToStringEquals("x", res[533].exec("x{442}x{435}x{441}x{442}"), 1203);
+assertToStringEquals("x", res[534].exec("x{442}x{435}x{441}x{442}"), 1204);
+assertToStringEquals("x", res[535].exec("x{442}x{435}x{441}x{442}"), 1205);
+assertToStringEquals("x", res[536].exec("x{442}x{435}x{441}x{442}"), 1206);
+assertToStringEquals("{", res[537].exec("x{2442}x{2435}x{2441}x{2442}"), 1207);
+assertToStringEquals("{", res[538].exec("x{2442}x{2435}x{2441}x{2442}"), 1208);
+assertToStringEquals("abc\n\x0dx{442}x{435}x{441}x{442}xyz ", res[539].exec("abc\n\x0dx{442}x{435}x{441}x{442}xyz "), 1209);
+assertToStringEquals("x{442}x{435}x{441}x{442}", res[539].exec("x{442}x{435}x{441}x{442}"), 1210);
+assertToStringEquals("c d", res[540].exec("abc defx{442}x{443}xyz\npqr"), 1211);
+assertToStringEquals("c d", res[541].exec("abc defx{442}x{443}xyz\npqr"), 1212);
+assertNull(res[542].exec("+x{2442}", 1213));
+assertNull(res[543].exec("+x{2442}", 1214));
+assertNull(res[544].exec("Ax{442}", 1215));
+assertNull(res[545].exec("Ax{442}", 1216));
+assertNull(res[546].exec("Ax{442}", 1217));
+assertNull(res[547].exec("Ax{442}", 1218));
+assertNull(res[548].exec("\x19x{e01ff}", 1219));
+assertNull(res[549].exec("Ax{422}", 1220));
+assertNull(res[550].exec("x{19}x{e01ff}", 1221));
+assertNull(res[551].exec("Ax{442}", 1222));
+assertNull(res[552].exec("Ax{442}", 1223));
+assertNull(res[553].exec("ax{442}", 1224));
+assertNull(res[554].exec("+x{2442}", 1225));
+assertNull(res[555].exec("Mx{442}", 1226));
+assertToStringEquals("abc", res[556].exec("abc"), 1227);
+assertToStringEquals("abc", res[557].exec("abc"), 1228);
+assertToStringEquals("abc", res[558].exec("abc"), 1229);
+assertToStringEquals("abc", res[559].exec("abc"), 1230);
+assertNull(res[560].exec("x{100}ax{1234}bcd", 1231));
+assertNull(res[562].exec("x{0041}x{2262}x{0391}x{002e}", 1232));
+assertNull(res[562].exec("x{D55c}x{ad6d}x{C5B4} ", 1233));
+assertNull(res[562].exec("x{65e5}x{672c}x{8a9e}", 1234));
+assertToStringEquals("{861}X", res[563].exec("x{212ab}x{212ab}x{212ab}x{861}X"), 1235);
+assertToStringEquals("x{2", res[564].exec("x{212ab}x{212ab}x{212ab}x{861}"), 1236);
+assertToStringEquals("x{c", res[564].exec("x{c0}b"), 1237);
+assertToStringEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1238);
+assertToStringEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1239);
+assertToStringEquals("ax{", res[564].exec("ax{c0}ax{c0}aaa/ "), 1240);
+assertToStringEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1241);
+assertToStringEquals("ax{", res[564].exec("ax{c0}ax{c0}aaa/ "), 1242);
+assertToStringEquals("ax{", res[564].exec("ax{c0}aaaa/ "), 1243);
+assertToStringEquals("ax{", res[564].exec("ax{c0}ax{c0}aaa/ "), 1244);
+assertToStringEquals("Sho", res[564].exec("Should produce an error diagnostic"), 1245);
+assertNull(res[565].exec("Xx{1234}", 1246));
+assertNull(res[565].exec("X\nabc ", 1247));
+assertToStringEquals("b", res[566].exec("bar"), 1248);
+assertNull(res[566].exec("*** Failers", 1249));
+assertNull(res[566].exec("c", 1250));
+assertNull(res[566].exec("x{ff}", 1251));
+assertNull(res[566].exec("x{100}  ", 1252));
+assertToStringEquals("c", res[567].exec("c"), 1253);
+assertToStringEquals("x", res[567].exec("x{ff}"), 1254);
+assertToStringEquals("x", res[567].exec("x{100}  "), 1255);
+assertToStringEquals("*", res[567].exec("*** Failers "), 1256);
+assertNull(res[567].exec("aaa", 1257));
+assertToStringEquals("x", res[568].exec("x{f1}"), 1258);
+assertToStringEquals("x", res[568].exec("x{bf}"), 1259);
+assertToStringEquals("x", res[568].exec("x{100}"), 1260);
+assertToStringEquals("x", res[568].exec("x{1000}   "), 1261);
+assertToStringEquals("*", res[568].exec("*** Failers"), 1262);
+assertToStringEquals("x", res[568].exec("x{c0} "), 1263);
+assertToStringEquals("x", res[568].exec("x{f0} "), 1264);
+assertToStringEquals("1", res[568].exec("1234"), 1265);
+assertToStringEquals("\"", res[568].exec("\"1234\" "), 1266);
+assertToStringEquals("x", res[568].exec("x{100}1234"), 1267);
+assertToStringEquals("\"", res[568].exec("\"x{100}1234\"  "), 1268);
+assertToStringEquals("x", res[568].exec("x{100}x{100}12ab "), 1269);
+assertToStringEquals("x", res[568].exec("x{100}x{100}\"12\" "), 1270);
+assertToStringEquals("*", res[568].exec("*** Failers "), 1271);
+assertToStringEquals("x", res[568].exec("x{100}x{100}abcd"), 1272);
+assertToStringEquals("A", res[568].exec("A"), 1273);
+assertToStringEquals("x", res[568].exec("x{100}"), 1274);
+assertToStringEquals("Z", res[568].exec("Zx{100}"), 1275);
+assertToStringEquals("x", res[568].exec("x{100}Z"), 1276);
+assertToStringEquals("*", res[568].exec("*** Failers "), 1277);
+assertToStringEquals("Z", res[568].exec("Zx{100}"), 1278);
+assertToStringEquals("x", res[568].exec("x{100}"), 1279);
+assertToStringEquals("x", res[568].exec("x{100}Z"), 1280);
+assertToStringEquals("*", res[568].exec("*** Failers "), 1281);
+assertToStringEquals("x", res[568].exec("x{100}"), 1282);
+assertToStringEquals("x", res[568].exec("x{104}"), 1283);
+assertToStringEquals("*", res[568].exec("*** Failers"), 1284);
+assertToStringEquals("x", res[568].exec("x{105}"), 1285);
+assertToStringEquals("x", res[568].exec("x{ff}    "), 1286);
+assertToStringEquals("x", res[568].exec("x{100}"), 1287);
+assertToStringEquals("\u0100", res[568].exec("\u0100 "), 1288);
+assertToStringEquals("\xff", res[569].exec(">\xff<"), 1289);
+assertNull(res[570].exec(">x{ff}<", 1290));
+assertToStringEquals("\xd6", res[572].exec("\xd6 # Matches without Study"), 1291);
+assertToStringEquals("x", res[572].exec("x{d6}"), 1292);
+assertToStringEquals("\xd6", res[572].exec("\xd6 <-- Same with Study"), 1293);
+assertToStringEquals("x", res[572].exec("x{d6}"), 1294);
+assertToStringEquals("\xd6", res[572].exec("\xd6 # Matches without Study"), 1295);
+assertToStringEquals("x", res[572].exec("x{d6} "), 1296);
+assertToStringEquals("\xd6", res[572].exec("\xd6 <-- Same with Study"), 1297);
+assertToStringEquals("x", res[572].exec("x{d6} "), 1298);
+assertToStringEquals("\ufffd", res[572].exec("\ufffd]"), 1299);
+assertToStringEquals("\ufffd", res[572].exec("\ufffd"), 1300);
+assertToStringEquals("\ufffd", res[572].exec("\ufffd\ufffd\ufffd"), 1301);
+assertToStringEquals("\ufffd", res[572].exec("\ufffd\ufffd\ufffd?"), 1302);
+assertNull(res[573].exec("\xc0\x80", 1303));
+assertNull(res[573].exec("\xc1\x8f ", 1304));
+assertNull(res[573].exec("\xe0\x9f\x80", 1305));
+assertNull(res[573].exec("\xf0\x8f\x80\x80 ", 1306));
+assertNull(res[573].exec("\xf8\x87\x80\x80\x80  ", 1307));
+assertNull(res[573].exec("\xfc\x83\x80\x80\x80\x80", 1308));
+assertNull(res[573].exec("\xfe\x80\x80\x80\x80\x80  ", 1309));
+assertNull(res[573].exec("\xff\x80\x80\x80\x80\x80  ", 1310));
+assertNull(res[573].exec("\xc3\x8f", 1311));
+assertNull(res[573].exec("\xe0\xaf\x80", 1312));
+assertNull(res[573].exec("\xe1\x80\x80", 1313));
+assertNull(res[573].exec("\xf0\x9f\x80\x80 ", 1314));
+assertNull(res[573].exec("\xf1\x8f\x80\x80 ", 1315));
+assertNull(res[573].exec("\xf8\x88\x80\x80\x80  ", 1316));
+assertNull(res[573].exec("\xf9\x87\x80\x80\x80  ", 1317));
+assertNull(res[573].exec("\xfc\x84\x80\x80\x80\x80", 1318));
+assertNull(res[573].exec("\xfd\x83\x80\x80\x80\x80", 1319));
+assertNull(res[573].exec("?\xf8\x88\x80\x80\x80  ", 1320));
+assertNull(res[573].exec("?\xf9\x87\x80\x80\x80  ", 1321));
+assertNull(res[573].exec("?\xfc\x84\x80\x80\x80\x80", 1322));
+assertNull(res[573].exec("?\xfd\x83\x80\x80\x80\x80", 1323));
+assertToStringEquals(".", res[574].exec("A.B"), 1324);
+assertToStringEquals("{", res[574].exec("Ax{100}B "), 1325);
+assertToStringEquals("x", res[575].exec("x{100}X   "), 1326);
+assertToStringEquals("a", res[575].exec("ax{1234}b"), 1327);
+assertNull(res[577].exec("AxxB     ", 1328));
+assertToStringEquals("abc1", res[578].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 x{0085}abc7 x{2028}abc8 x{2029}abc9 JUNK"), 1329);
+assertToStringEquals("abc1", res[579].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6x{0085} abc7x{2028} abc8x{2029} abc9"), 1330);
+assertNull(res[580].exec("a\nb", 1331));
+assertNull(res[580].exec("a\x0db", 1332));
+assertNull(res[580].exec("a\x0d\nb", 1333));
+assertNull(res[580].exec("a\x0bb", 1334));
+assertNull(res[580].exec("a\x0cb", 1335));
+assertNull(res[580].exec("ax{85}b   ", 1336));
+assertNull(res[580].exec("ax{2028}b ", 1337));
+assertNull(res[580].exec("ax{2029}b ", 1338));
+assertNull(res[580].exec("** Failers", 1339));
+assertNull(res[580].exec("a\n\x0db    ", 1340));
+assertToStringEquals("ab", res[581].exec("ab"), 1341);
+assertNull(res[581].exec("a\nb", 1342));
+assertNull(res[581].exec("a\x0db", 1343));
+assertNull(res[581].exec("a\x0d\nb", 1344));
+assertNull(res[581].exec("a\x0bb", 1345));
+assertNull(res[581].exec("a\x0cx{2028}x{2029}b", 1346));
+assertNull(res[581].exec("ax{85}b   ", 1347));
+assertNull(res[581].exec("a\n\x0db    ", 1348));
+assertNull(res[581].exec("a\n\x0dx{85}\x0cb ", 1349));
+assertNull(res[582].exec("a\nb", 1350));
+assertNull(res[582].exec("a\x0db", 1351));
+assertNull(res[582].exec("a\x0d\nb", 1352));
+assertNull(res[582].exec("a\x0bb", 1353));
+assertNull(res[582].exec("a\x0cx{2028}x{2029}b", 1354));
+assertNull(res[582].exec("ax{85}b   ", 1355));
+assertNull(res[582].exec("a\n\x0db    ", 1356));
+assertNull(res[582].exec("a\n\x0dx{85}\x0cb ", 1357));
+assertNull(res[582].exec("** Failers", 1358));
+assertNull(res[582].exec("ab  ", 1359));
+assertNull(res[583].exec("a\nb", 1360));
+assertNull(res[583].exec("a\n\x0db", 1361));
+assertNull(res[583].exec("a\n\x0dx{85}b", 1362));
+assertNull(res[583].exec("a\x0d\n\x0d\nb ", 1363));
+assertNull(res[583].exec("a\x0d\n\x0d\n\x0d\nb ", 1364));
+assertNull(res[583].exec("a\n\x0d\n\x0db", 1365));
+assertNull(res[583].exec("a\n\n\x0d\nb ", 1366));
+assertNull(res[583].exec("** Failers", 1367));
+assertNull(res[583].exec("a\n\n\n\x0db", 1368));
+assertNull(res[583].exec("a\x0d", 1369));
+assertNull(res[584].exec("X X\n", 1370));
+assertNull(res[584].exec("X\x09X\x0b", 1371));
+assertNull(res[584].exec("** Failers", 1372));
+assertNull(res[584].exec("x{a0} X\n   ", 1373));
+assertNull(res[585].exec("\x09 x{a0}X\n\x0b\x0c\x0d\n", 1374));
+assertNull(res[585].exec("\x09 x{a0}\n\x0b\x0c\x0d\n", 1375));
+assertNull(res[585].exec("\x09 x{a0}\n\x0b\x0c", 1376));
+assertNull(res[585].exec("** Failers ", 1377));
+assertNull(res[585].exec("\x09 x{a0}\n\x0b", 1378));
+assertNull(res[585].exec(" ", 1379));
+assertNull(res[586].exec("x{3001}x{3000}x{2030}x{2028}", 1380));
+assertNull(res[586].exec("Xx{180e}Xx{85}", 1381));
+assertNull(res[586].exec("** Failers", 1382));
+assertNull(res[586].exec("x{2009} X\n   ", 1383));
+assertNull(res[587].exec("x{1680}x{180e}x{2007}Xx{2028}x{2029}\x0c\x0d\n", 1384));
+assertNull(res[587].exec("\x09x{205f}x{a0}\nx{2029}\x0cx{2028}\n", 1385));
+assertNull(res[587].exec("\x09 x{202f}\n\x0b\x0c", 1386));
+assertNull(res[587].exec("** Failers ", 1387));
+assertNull(res[587].exec("\x09x{200a}x{a0}x{2028}\x0b", 1388));
+assertNull(res[587].exec(" ", 1389));
+assertNull(res[588].exec(">x{1680}", 1390));
+assertNull(res[589].exec(">x{1680}x{180e}x{2000}x{2003}x{200a}x{202f}x{205f}x{3000}<", 1391));
+assertToStringEquals("x{1ec5} ", res[593].exec("x{1ec5} "), 1392);
+assertNull(res[594].exec("x{0}x{d7ff}x{e000}x{10ffff}", 1393));
+assertNull(res[594].exec("x{d800}", 1394));
+assertNull(res[594].exec("x{d800}?", 1395));
+assertNull(res[594].exec("x{da00}", 1396));
+assertNull(res[594].exec("x{da00}?", 1397));
+assertNull(res[594].exec("x{dfff}", 1398));
+assertNull(res[594].exec("x{dfff}?", 1399));
+assertNull(res[594].exec("x{110000}    ", 1400));
+assertNull(res[594].exec("x{110000}?    ", 1401));
+assertNull(res[594].exec("x{2000000} ", 1402));
+assertNull(res[594].exec("x{2000000}? ", 1403));
+assertNull(res[594].exec("x{7fffffff} ", 1404));
+assertNull(res[594].exec("x{7fffffff}? ", 1405));
+assertNull(res[595].exec("a\x0db", 1406));
+assertNull(res[595].exec("a\nb", 1407));
+assertNull(res[595].exec("a\x0d\nb", 1408));
+assertNull(res[595].exec("** Failers", 1409));
+assertNull(res[595].exec("ax{85}b", 1410));
+assertNull(res[595].exec("a\x0bb     ", 1411));
+assertNull(res[596].exec("a\x0db", 1412));
+assertNull(res[596].exec("a\nb", 1413));
+assertNull(res[596].exec("a\x0d\nb", 1414));
+assertNull(res[596].exec("ax{85}b", 1415));
+assertNull(res[596].exec("a\x0bb     ", 1416));
+assertNull(res[596].exec("** Failers ", 1417));
+assertNull(res[596].exec("ax{85}b<bsr_anycrlf>", 1418));
+assertNull(res[596].exec("a\x0bb<bsr_anycrlf>", 1419));
+assertNull(res[597].exec("a\x0db", 1420));
+assertNull(res[597].exec("a\nb", 1421));
+assertNull(res[597].exec("a\x0d\nb", 1422));
+assertNull(res[597].exec("** Failers", 1423));
+assertNull(res[597].exec("ax{85}b", 1424));
+assertNull(res[597].exec("a\x0bb     ", 1425));
+assertNull(res[598].exec("a\x0db", 1426));
+assertNull(res[598].exec("a\nb", 1427));
+assertNull(res[598].exec("a\x0d\nb", 1428));
+assertNull(res[598].exec("ax{85}b", 1429));
+assertNull(res[598].exec("a\x0bb     ", 1430));
+assertNull(res[598].exec("** Failers ", 1431));
+assertNull(res[598].exec("ax{85}b<bsr_anycrlf>", 1432));
+assertNull(res[598].exec("a\x0bb<bsr_anycrlf>", 1433));
+assertToStringEquals("QQQx{2029}ABCaXYZ=!bPQR", res[599].exec("QQQx{2029}ABCaXYZ=!bPQR"), 1434);
+assertNull(res[599].exec("** Failers", 1435));
+assertNull(res[599].exec("ax{2029}b", 1436));
+assertNull(res[599].exec("a\xe2\x80\xa9b ", 1437));
+assertNull(res[600].exec("ax{1234}b", 1438));
+assertToStringEquals("a\nb", res[600].exec("a\nb "), 1439);
+assertNull(res[600].exec("** Failers", 1440));
+assertNull(res[600].exec("ab  ", 1441));
+assertToStringEquals("aXb", res[601].exec("aXb"), 1442);
+assertToStringEquals("a\nX\nXx{1234}b", res[601].exec("a\nX\nXx{1234}b "), 1443);
+assertNull(res[601].exec("** Failers", 1444));
+assertNull(res[601].exec("ab  ", 1445));
+assertNull(res[601].exec("x{de}x{de}", 1446));
+assertNull(res[601].exec("x{123} ", 1447));
+assertToStringEquals("X", res[602].exec("Ax{1ec5}ABCXYZ"), 1448);
+assertNull(res[604].exec("x{c0}x{30f}x{660}x{66c}x{f01}x{1680}<", 1449));
+assertNull(res[604].exec("\npx{300}9!$ < ", 1450));
+assertNull(res[604].exec("** Failers ", 1451));
+assertNull(res[604].exec("apx{300}9!$ < ", 1452));
+assertNull(res[605].exec("X", 1453));
+assertNull(res[605].exec("** Failers ", 1454));
+assertNull(res[605].exec("", 1455));
+assertNull(res[606].exec("9", 1456));
+assertNull(res[606].exec("** Failers ", 1457));
+assertNull(res[606].exec("x{c0}", 1458));
+assertNull(res[607].exec("X", 1459));
+assertNull(res[607].exec("** Failers ", 1460));
+assertNull(res[607].exec("x{30f}", 1461));
+assertNull(res[608].exec("X", 1462));
+assertNull(res[608].exec("** Failers ", 1463));
+assertNull(res[608].exec("x{660}", 1464));
+assertNull(res[609].exec("X", 1465));
+assertNull(res[609].exec("** Failers ", 1466));
+assertNull(res[609].exec("x{66c}", 1467));
+assertNull(res[610].exec("X", 1468));
+assertNull(res[610].exec("** Failers ", 1469));
+assertNull(res[610].exec("x{f01}", 1470));
+assertNull(res[611].exec("X", 1471));
+assertNull(res[611].exec("** Failers ", 1472));
+assertNull(res[611].exec("x{1680}", 1473));
+assertNull(res[612].exec("x{017}", 1474));
+assertNull(res[612].exec("x{09f} ", 1475));
+assertNull(res[612].exec("** Failers", 1476));
+assertNull(res[612].exec("x{0600} ", 1477));
+assertNull(res[613].exec("x{601}", 1478));
+assertNull(res[613].exec("** Failers", 1479));
+assertNull(res[613].exec("x{09f} ", 1480));
+assertNull(res[614].exec("x{e0000}", 1481));
+assertNull(res[614].exec("** Failers", 1482));
+assertNull(res[614].exec("x{09f} ", 1483));
+assertNull(res[615].exec("x{f8ff}", 1484));
+assertNull(res[615].exec("** Failers", 1485));
+assertNull(res[615].exec("x{09f} ", 1486));
+assertNull(res[616].exec("?x{dfff}", 1487));
+assertNull(res[616].exec("** Failers", 1488));
+assertNull(res[616].exec("x{09f} ", 1489));
+assertNull(res[617].exec("a", 1490));
+assertNull(res[617].exec("** Failers ", 1491));
+assertNull(res[617].exec("Z", 1492));
+assertNull(res[617].exec("x{e000}  ", 1493));
+assertNull(res[618].exec("x{2b0}", 1494));
+assertNull(res[618].exec("** Failers", 1495));
+assertNull(res[618].exec("a ", 1496));
+assertNull(res[619].exec("x{1bb}", 1497));
+assertNull(res[619].exec("x{3400}", 1498));
+assertNull(res[619].exec("x{3401}", 1499));
+assertNull(res[619].exec("x{4d00}", 1500));
+assertNull(res[619].exec("x{4db4}", 1501));
+assertNull(res[619].exec("x{4db5}     ", 1502));
+assertNull(res[619].exec("** Failers", 1503));
+assertNull(res[619].exec("a ", 1504));
+assertNull(res[619].exec("x{2b0}", 1505));
+assertNull(res[619].exec("x{4db6} ", 1506));
+assertNull(res[620].exec("x{1c5}", 1507));
+assertNull(res[620].exec("** Failers", 1508));
+assertNull(res[620].exec("a ", 1509));
+assertNull(res[620].exec("x{2b0}", 1510));
+assertNull(res[621].exec("A", 1511));
+assertNull(res[621].exec("** Failers", 1512));
+assertNull(res[621].exec("x{2b0}", 1513));
+assertNull(res[622].exec("x{903}", 1514));
+assertNull(res[622].exec("** Failers", 1515));
+assertNull(res[622].exec("X", 1516));
+assertNull(res[622].exec("x{300}", 1517));
+assertNull(res[622].exec("   ", 1518));
+assertNull(res[623].exec("x{488}", 1519));
+assertNull(res[623].exec("** Failers", 1520));
+assertNull(res[623].exec("X", 1521));
+assertNull(res[623].exec("x{903}", 1522));
+assertNull(res[623].exec("x{300}", 1523));
+assertNull(res[624].exec("x{300}", 1524));
+assertNull(res[624].exec("** Failers", 1525));
+assertNull(res[624].exec("X", 1526));
+assertNull(res[624].exec("x{903}", 1527));
+assertNull(res[624].exec("0123456789x{660}x{661}x{662}x{663}x{664}x{665}x{666}x{667}x{668}x{669}x{66a}", 1528));
+assertNull(res[624].exec("x{6f0}x{6f1}x{6f2}x{6f3}x{6f4}x{6f5}x{6f6}x{6f7}x{6f8}x{6f9}x{6fa}", 1529));
+assertNull(res[624].exec("x{966}x{967}x{968}x{969}x{96a}x{96b}x{96c}x{96d}x{96e}x{96f}x{970}", 1530));
+assertNull(res[624].exec("** Failers", 1531));
+assertNull(res[624].exec("X", 1532));
+assertNull(res[625].exec("x{16ee}", 1533));
+assertNull(res[625].exec("** Failers", 1534));
+assertNull(res[625].exec("X", 1535));
+assertNull(res[625].exec("x{966}", 1536));
+assertNull(res[626].exec("x{b2}", 1537));
+assertNull(res[626].exec("x{b3}", 1538));
+assertNull(res[626].exec("** Failers", 1539));
+assertNull(res[626].exec("X", 1540));
+assertNull(res[626].exec("x{16ee}", 1541));
+assertNull(res[627].exec("_", 1542));
+assertNull(res[627].exec("x{203f}", 1543));
+assertNull(res[627].exec("** Failers", 1544));
+assertNull(res[627].exec("X", 1545));
+assertNull(res[627].exec("-", 1546));
+assertNull(res[627].exec("x{58a}", 1547));
+assertNull(res[628].exec("-", 1548));
+assertNull(res[628].exec("x{58a}", 1549));
+assertNull(res[628].exec("** Failers", 1550));
+assertNull(res[628].exec("X", 1551));
+assertNull(res[628].exec("x{203f}", 1552));
+assertNull(res[629].exec(")", 1553));
+assertNull(res[629].exec("]", 1554));
+assertNull(res[629].exec("}", 1555));
+assertNull(res[629].exec("x{f3b}", 1556));
+assertNull(res[629].exec("** Failers", 1557));
+assertNull(res[629].exec("X", 1558));
+assertNull(res[629].exec("x{203f}", 1559));
+assertNull(res[629].exec("(", 1560));
+assertNull(res[629].exec("[", 1561));
+assertNull(res[629].exec("{", 1562));
+assertNull(res[629].exec("x{f3c}", 1563));
+assertNull(res[630].exec("x{bb}", 1564));
+assertNull(res[630].exec("x{2019}", 1565));
+assertNull(res[630].exec("** Failers", 1566));
+assertNull(res[630].exec("X", 1567));
+assertNull(res[630].exec("x{203f}", 1568));
+assertNull(res[631].exec("x{ab}", 1569));
+assertNull(res[631].exec("x{2018}", 1570));
+assertNull(res[631].exec("** Failers", 1571));
+assertNull(res[631].exec("X", 1572));
+assertNull(res[631].exec("x{203f}", 1573));
+assertNull(res[632].exec("!", 1574));
+assertNull(res[632].exec("x{37e}", 1575));
+assertNull(res[632].exec("** Failers", 1576));
+assertNull(res[632].exec("X", 1577));
+assertNull(res[632].exec("x{203f}", 1578));
+assertNull(res[633].exec("(", 1579));
+assertNull(res[633].exec("[", 1580));
+assertNull(res[633].exec("{", 1581));
+assertNull(res[633].exec("x{f3c}", 1582));
+assertNull(res[633].exec("** Failers", 1583));
+assertNull(res[633].exec("X", 1584));
+assertNull(res[633].exec(")", 1585));
+assertNull(res[633].exec("]", 1586));
+assertNull(res[633].exec("}", 1587));
+assertNull(res[633].exec("x{f3b}", 1588));
+assertNull(res[633].exec("$x{a2}x{a3}x{a4}x{a5}x{a6}", 1589));
+assertNull(res[633].exec("x{9f2}", 1590));
+assertNull(res[633].exec("** Failers", 1591));
+assertNull(res[633].exec("X", 1592));
+assertNull(res[633].exec("x{2c2}", 1593));
+assertNull(res[634].exec("x{2c2}", 1594));
+assertNull(res[634].exec("** Failers", 1595));
+assertNull(res[634].exec("X", 1596));
+assertNull(res[634].exec("x{9f2}", 1597));
+assertNull(res[634].exec("+<|~x{ac}x{2044}", 1598));
+assertNull(res[634].exec("** Failers", 1599));
+assertNull(res[634].exec("X", 1600));
+assertNull(res[634].exec("x{9f2}", 1601));
+assertNull(res[635].exec("x{a6}", 1602));
+assertNull(res[635].exec("x{482} ", 1603));
+assertNull(res[635].exec("** Failers", 1604));
+assertNull(res[635].exec("X", 1605));
+assertNull(res[635].exec("x{9f2}", 1606));
+assertNull(res[636].exec("x{2028}", 1607));
+assertNull(res[636].exec("** Failers", 1608));
+assertNull(res[636].exec("X", 1609));
+assertNull(res[636].exec("x{2029}", 1610));
+assertNull(res[637].exec("x{2029}", 1611));
+assertNull(res[637].exec("** Failers", 1612));
+assertNull(res[637].exec("X", 1613));
+assertNull(res[637].exec("x{2028}", 1614));
+assertNull(res[638].exec("\\ \\", 1615));
+assertNull(res[638].exec("x{a0}", 1616));
+assertNull(res[638].exec("x{1680}", 1617));
+assertNull(res[638].exec("x{180e}", 1618));
+assertNull(res[638].exec("x{2000}", 1619));
+assertNull(res[638].exec("x{2001}     ", 1620));
+assertNull(res[638].exec("** Failers", 1621));
+assertNull(res[638].exec("x{2028}", 1622));
+assertNull(res[638].exec("x{200d} ", 1623));
+assertNull(res[638].exec("  x{660}x{661}x{662}ABC", 1624));
+assertNull(res[638].exec("  x{660}x{661}x{662}ABC", 1625));
+assertNull(res[639].exec("  x{660}x{661}x{662}ABC", 1626));
+assertNull(res[640].exec("  x{660}x{661}x{662}ABC", 1627));
+assertNull(res[641].exec("  x{660}x{661}x{662}ABC", 1628));
+assertNull(res[642].exec("  x{660}x{661}x{662}ABC", 1629));
+assertNull(res[643].exec("  x{660}x{661}x{662}ABC", 1630));
+assertNull(res[644].exec("  x{660}x{661}x{662}ABC", 1631));
+assertNull(res[645].exec("  x{660}x{661}x{662}ABC", 1632));
+assertNull(res[646].exec("  x{660}x{661}x{662}ABC", 1633));
+assertNull(res[647].exec("  x{660}x{661}x{662}ABC", 1634));
+assertNull(res[647].exec("  x{660}x{661}x{662}ABC", 1635));
+assertNull(res[647].exec("  x{660}x{661}x{662}ABC", 1636));
+assertNull(res[647].exec("  ** Failers", 1637));
+assertNull(res[647].exec("  x{660}x{661}x{662}ABC", 1638));
+assertNull(res[648].exec("A", 1639));
+assertNull(res[648].exec("ax{10a0}B ", 1640));
+assertNull(res[648].exec("** Failers ", 1641));
+assertNull(res[648].exec("a", 1642));
+assertNull(res[648].exec("x{1d00}  ", 1643));
+assertNull(res[649].exec("1234", 1644));
+assertNull(res[649].exec("** Failers", 1645));
+assertNull(res[649].exec("ABC ", 1646));
+assertNull(res[650].exec("1234", 1647));
+assertNull(res[650].exec("** Failers", 1648));
+assertNull(res[650].exec("ABC ", 1649));
+assertNull(res[650].exec("A2XYZ", 1650));
+assertNull(res[650].exec("123A5XYZPQR", 1651));
+assertNull(res[650].exec("ABAx{660}XYZpqr", 1652));
+assertNull(res[650].exec("** Failers", 1653));
+assertNull(res[650].exec("AXYZ", 1654));
+assertNull(res[650].exec("XYZ     ", 1655));
+assertNull(res[650].exec("1XYZ", 1656));
+assertNull(res[650].exec("AB=XYZ.. ", 1657));
+assertNull(res[650].exec("XYZ ", 1658));
+assertNull(res[650].exec("** Failers", 1659));
+assertNull(res[650].exec("WXYZ ", 1660));
+assertNull(res[655].exec("1234", 1661));
+assertNull(res[655].exec("1234", 1662));
+assertNull(res[655].exec("12-34", 1663));
+assertToStringEquals("{", res[655].exec("12+x{661}-34  "), 1664);
+assertNull(res[655].exec("** Failers", 1665));
+assertToStringEquals("d", res[655].exec("abcd  "), 1666);
+assertToStringEquals("d", res[656].exec("abcd"), 1667);
+assertNull(res[656].exec("** Failers", 1668));
+assertNull(res[656].exec("1234", 1669));
+assertNull(res[657].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1670));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[657].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1671);
+assertToStringEquals(" ", res[657].exec(" "), 1672);
+assertNull(res[657].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1673));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[657].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1674);
+assertNull(res[658].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1675));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[658].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1676);
+assertNull(res[659].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1677));
+assertNull(res[659].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 1678));
+assertNull(res[660].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 1679));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[660].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 1680);
+assertNull(res[661].exec("a", 1681));
+assertNull(res[661].exec("A ", 1682));
+assertNull(res[662].exec("a", 1683));
+assertNull(res[662].exec("A ", 1684));
+assertNull(res[663].exec("A", 1685));
+assertNull(res[663].exec("aZ", 1686));
+assertNull(res[663].exec("** Failers", 1687));
+assertNull(res[663].exec("abc   ", 1688));
+assertNull(res[664].exec("A", 1689));
+assertNull(res[664].exec("aZ", 1690));
+assertNull(res[664].exec("** Failers", 1691));
+assertNull(res[664].exec("abc   ", 1692));
+assertNull(res[665].exec("a", 1693));
+assertNull(res[665].exec("Az", 1694));
+assertNull(res[665].exec("** Failers", 1695));
+assertNull(res[665].exec("ABC   ", 1696));
+assertNull(res[666].exec("a", 1697));
+assertNull(res[666].exec("Az", 1698));
+assertNull(res[666].exec("** Failers", 1699));
+assertNull(res[666].exec("ABC   ", 1700));
+assertNull(res[666].exec("x{c0}", 1701));
+assertNull(res[666].exec("x{e0} ", 1702));
+assertNull(res[666].exec("x{c0}", 1703));
+assertNull(res[666].exec("x{e0} ", 1704));
+assertNull(res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 1705));
+assertNull(res[666].exec("** Failers", 1706));
+assertNull(res[666].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 1707));
+assertNull(res[666].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 1708));
+assertNull(res[666].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 1709));
+assertNull(res[666].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 1710));
+assertNull(res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 1711));
+assertNull(res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 1712));
+assertNull(res[666].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 1713));
+assertNull(res[666].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 1714));
+assertNull(res[666].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 1715));
+assertNull(res[666].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 1716));
+assertNull(res[666].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 1717));
+assertNull(res[666].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}", 1718));
+assertNull(res[666].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 1719));
+assertNull(res[666].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 1720));
+assertNull(res[666].exec("x{391}", 1721));
+assertNull(res[666].exec("x{ff3a}", 1722));
+assertNull(res[666].exec("x{3b1}", 1723));
+assertNull(res[666].exec("x{ff5a}   ", 1724));
+assertNull(res[666].exec("x{c0}", 1725));
+assertNull(res[666].exec("x{e0} ", 1726));
+assertNull(res[666].exec("x{104}", 1727));
+assertNull(res[666].exec("x{105}", 1728));
+assertNull(res[666].exec("x{109}  ", 1729));
+assertNull(res[666].exec("** Failers", 1730));
+assertNull(res[666].exec("x{100}", 1731));
+assertNull(res[666].exec("x{10a} ", 1732));
+assertNull(res[666].exec("Z", 1733));
+assertNull(res[666].exec("z", 1734));
+assertNull(res[666].exec("x{39c}", 1735));
+assertNull(res[666].exec("x{178}", 1736));
+assertNull(res[666].exec("|", 1737));
+assertNull(res[666].exec("x{80}", 1738));
+assertNull(res[666].exec("x{ff}", 1739));
+assertNull(res[666].exec("x{100}", 1740));
+assertNull(res[666].exec("x{101} ", 1741));
+assertNull(res[666].exec("** Failers", 1742));
+assertNull(res[666].exec("x{102}", 1743));
+assertNull(res[666].exec("Y", 1744));
+assertNull(res[666].exec("y           ", 1745));
+assertNull(res[667].exec("A", 1746));
+assertNull(res[667].exec("Ax{300}BC ", 1747));
+assertNull(res[667].exec("Ax{300}x{301}x{302}BC ", 1748));
+assertNull(res[667].exec("*** Failers", 1749));
+assertNull(res[667].exec("x{300}  ", 1750));
+assertToStringEquals("X", res[668].exec("X123"), 1751);
+assertNull(res[668].exec("*** Failers", 1752));
+assertNull(res[668].exec("AXYZ", 1753));
+assertNull(res[669].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 1754));
+assertNull(res[669].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 1755));
+assertNull(res[670].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 1756));
+assertNull(res[670].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 1757));
+assertToStringEquals("A,,A", res[671].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 1758);
+assertToStringEquals("A,,A", res[671].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 1759);
+assertToStringEquals("A,,A", res[672].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 1760);
+assertToStringEquals("A,,A", res[672].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 1761);
+assertNull(res[673].exec("*** Failers", 1762));
+assertNull(res[673].exec("Ax{300}x{301}x{302}", 1763));
+assertNull(res[674].exec("Ax{300}x{301}Bx{300}X", 1764));
+assertNull(res[674].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 1765));
+assertNull(res[674].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 1766));
+assertNull(res[674].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 1767));
+assertNull(res[675].exec("Ax{300}x{301}Bx{300}X", 1768));
+assertNull(res[675].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 1769));
+assertNull(res[675].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 1770));
+assertNull(res[675].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 1771));
+assertNull(res[675].exec("x{2e81}x{3007}x{2f804}x{31a0}", 1772));
+assertNull(res[675].exec("** Failers", 1773));
+assertNull(res[675].exec("x{2e7f}  ", 1774));
+assertNull(res[675].exec("x{3105}", 1775));
+assertNull(res[675].exec("** Failers", 1776));
+assertNull(res[675].exec("x{30ff}  ", 1777));
+assertNull(res[676].exec("x{06e9}", 1778));
+assertNull(res[676].exec("x{060b}", 1779));
+assertNull(res[676].exec("** Failers", 1780));
+assertNull(res[676].exec("Xx{06e9}   ", 1781));
+assertNull(res[677].exec("x{2f800}", 1782));
+assertNull(res[677].exec("** Failers", 1783));
+assertNull(res[677].exec("x{a014}", 1784));
+assertNull(res[677].exec("x{a4c6}   ", 1785));
+assertNull(res[678].exec("AXYZ", 1786));
+assertNull(res[678].exec("x{1234}XYZ ", 1787));
+assertNull(res[678].exec("** Failers", 1788));
+assertNull(res[678].exec("X  ", 1789));
+assertNull(res[679].exec("** Failers", 1790));
+assertNull(res[679].exec("AX", 1791));
+assertNull(res[680].exec("XYZ", 1792));
+assertNull(res[680].exec("AXYZ", 1793));
+assertNull(res[680].exec("x{1234}XYZ ", 1794));
+assertNull(res[680].exec("** Failers", 1795));
+assertNull(res[680].exec("ABXYZ   ", 1796));
+assertNull(res[681].exec("XYZ", 1797));
+assertNull(res[681].exec("** Failers", 1798));
+assertNull(res[681].exec("AXYZ", 1799));
+assertNull(res[681].exec("x{1234}XYZ ", 1800));
+assertNull(res[681].exec("ABXYZ   ", 1801));
+assertNull(res[681].exec("AXYZ", 1802));
+assertNull(res[681].exec("x{1234}XYZ", 1803));
+assertNull(res[681].exec("Ax{1234}XYZ", 1804));
+assertNull(res[681].exec("** Failers", 1805));
+assertNull(res[681].exec("XYZ", 1806));
+assertNull(res[681].exec("** Failers", 1807));
+assertNull(res[681].exec("AXYZ", 1808));
+assertNull(res[681].exec("x{1234}XYZ", 1809));
+assertNull(res[681].exec("Ax{1234}XYZ", 1810));
+assertNull(res[681].exec("XYZ", 1811));
+assertNull(res[682].exec("XYZ", 1812));
+assertNull(res[682].exec("AXYZ", 1813));
+assertNull(res[682].exec("x{1234}XYZ", 1814));
+assertNull(res[682].exec("Ax{1234}XYZ", 1815));
+assertNull(res[682].exec("** Failers", 1816));
+assertNull(res[683].exec("XYZ", 1817));
+assertNull(res[683].exec("** Failers", 1818));
+assertNull(res[683].exec("AXYZ", 1819));
+assertNull(res[683].exec("x{1234}XYZ", 1820));
+assertNull(res[683].exec("Ax{1234}XYZ", 1821));
+assertToStringEquals("AX", res[684].exec("AXYZ"), 1822);
+assertNull(res[684].exec("x{1234}XYZ ", 1823));
+assertNull(res[684].exec("** Failers", 1824));
+assertNull(res[684].exec("X  ", 1825));
+assertNull(res[685].exec("** Failers", 1826));
+assertToStringEquals("AX", res[685].exec("AX"), 1827);
+assertToStringEquals("X", res[686].exec("XYZ"), 1828);
+assertToStringEquals("AX", res[686].exec("AXYZ"), 1829);
+assertNull(res[686].exec("x{1234}XYZ ", 1830));
+assertNull(res[686].exec("** Failers", 1831));
+assertNull(res[686].exec("ABXYZ   ", 1832));
+assertToStringEquals("X", res[687].exec("XYZ"), 1833);
+assertNull(res[687].exec("** Failers", 1834));
+assertToStringEquals("AX", res[687].exec("AXYZ"), 1835);
+assertNull(res[687].exec("x{1234}XYZ ", 1836));
+assertNull(res[687].exec("ABXYZ   ", 1837));
+assertToStringEquals("AX", res[688].exec("AXYZ"), 1838);
+assertNull(res[688].exec("x{1234}XYZ", 1839));
+assertNull(res[688].exec("Ax{1234}XYZ", 1840));
+assertNull(res[688].exec("** Failers", 1841));
+assertNull(res[688].exec("XYZ", 1842));
+assertNull(res[689].exec("** Failers", 1843));
+assertToStringEquals("AX", res[689].exec("AXYZ"), 1844);
+assertNull(res[689].exec("x{1234}XYZ", 1845));
+assertNull(res[689].exec("Ax{1234}XYZ", 1846));
+assertNull(res[689].exec("XYZ", 1847));
+assertToStringEquals("X", res[690].exec("XYZ"), 1848);
+assertToStringEquals("AX", res[690].exec("AXYZ"), 1849);
+assertNull(res[690].exec("x{1234}XYZ", 1850));
+assertNull(res[690].exec("Ax{1234}XYZ", 1851));
+assertNull(res[690].exec("** Failers", 1852));
+assertToStringEquals("X", res[691].exec("XYZ"), 1853);
+assertNull(res[691].exec("** Failers", 1854));
+assertToStringEquals("AX", res[691].exec("AXYZ"), 1855);
+assertNull(res[691].exec("x{1234}XYZ", 1856));
+assertNull(res[691].exec("Ax{1234}XYZ", 1857));
+assertNull(res[692].exec("abcdefgh", 1858));
+assertNull(res[692].exec("x{1234}\n\x0dx{3456}xyz ", 1859));
+assertNull(res[693].exec("abcdefgh", 1860));
+assertNull(res[693].exec("x{1234}\n\x0dx{3456}xyz ", 1861));
+assertNull(res[694].exec("** Failers", 1862));
+assertNull(res[694].exec("abcdefgh", 1863));
+assertNull(res[694].exec("x{1234}\n\x0dx{3456}xyz ", 1864));
+assertNull(res[695].exec(" AXY", 1865));
+assertNull(res[695].exec(" aXY", 1866));
+assertNull(res[695].exec(" x{1c5}XY", 1867));
+assertNull(res[695].exec(" ** Failers", 1868));
+assertNull(res[695].exec(" x{1bb}XY", 1869));
+assertNull(res[695].exec(" x{2b0}XY", 1870));
+assertNull(res[695].exec(" !XY      ", 1871));
+assertNull(res[696].exec(" AXY", 1872));
+assertNull(res[696].exec(" aXY", 1873));
+assertNull(res[696].exec(" x{1c5}XY", 1874));
+assertNull(res[696].exec(" ** Failers", 1875));
+assertNull(res[696].exec(" x{1bb}XY", 1876));
+assertNull(res[696].exec(" x{2b0}XY", 1877));
+assertNull(res[696].exec(" !XY      ", 1878));
+assertNull(res[696].exec(" AXY", 1879));
+assertNull(res[696].exec(" aXY", 1880));
+assertNull(res[696].exec(" AbcdeXyz ", 1881));
+assertNull(res[696].exec(" x{1c5}AbXY", 1882));
+assertNull(res[696].exec(" abcDEXypqreXlmn ", 1883));
+assertNull(res[696].exec(" ** Failers", 1884));
+assertNull(res[696].exec(" x{1bb}XY", 1885));
+assertNull(res[696].exec(" x{2b0}XY", 1886));
+assertNull(res[696].exec(" !XY      ", 1887));
+assertNull(res[697].exec(" AXY", 1888));
+assertNull(res[697].exec(" aXY", 1889));
+assertNull(res[697].exec(" AbcdeXyz ", 1890));
+assertNull(res[697].exec(" x{1c5}AbXY", 1891));
+assertNull(res[697].exec(" abcDEXypqreXlmn ", 1892));
+assertNull(res[697].exec(" ** Failers", 1893));
+assertNull(res[697].exec(" x{1bb}XY", 1894));
+assertNull(res[697].exec(" x{2b0}XY", 1895));
+assertNull(res[697].exec(" !XY      ", 1896));
+assertNull(res[697].exec(" AXY", 1897));
+assertNull(res[697].exec(" aXY", 1898));
+assertNull(res[697].exec(" AbcdeXyz ", 1899));
+assertNull(res[697].exec(" x{1c5}AbXY", 1900));
+assertNull(res[697].exec(" abcDEXypqreXlmn ", 1901));
+assertNull(res[697].exec(" ** Failers", 1902));
+assertNull(res[697].exec(" x{1bb}XY", 1903));
+assertNull(res[697].exec(" x{2b0}XY", 1904));
+assertNull(res[697].exec(" !XY      ", 1905));
+assertNull(res[698].exec(" AXY", 1906));
+assertNull(res[698].exec(" aXY", 1907));
+assertNull(res[698].exec(" AbcdeXyz ", 1908));
+assertNull(res[698].exec(" x{1c5}AbXY", 1909));
+assertNull(res[698].exec(" abcDEXypqreXlmn ", 1910));
+assertNull(res[698].exec(" ** Failers", 1911));
+assertNull(res[698].exec(" x{1bb}XY", 1912));
+assertNull(res[698].exec(" x{2b0}XY", 1913));
+assertNull(res[698].exec(" !XY      ", 1914));
+assertNull(res[699].exec(" !XY", 1915));
+assertNull(res[699].exec(" x{1bb}XY", 1916));
+assertNull(res[699].exec(" x{2b0}XY", 1917));
+assertNull(res[699].exec(" ** Failers", 1918));
+assertNull(res[699].exec(" x{1c5}XY", 1919));
+assertNull(res[699].exec(" AXY      ", 1920));
+assertNull(res[700].exec(" !XY", 1921));
+assertNull(res[700].exec(" x{1bb}XY", 1922));
+assertNull(res[700].exec(" x{2b0}XY", 1923));
+assertNull(res[700].exec(" ** Failers", 1924));
+assertNull(res[700].exec(" x{1c5}XY", 1925));
+assertNull(res[700].exec(" AXY      ", 1926));
+assertNull(res[701].exec("\xa0!", 1927));
+assertNull(res[701].exec("AabcabcYZ    ", 1928));
+assertToStringEquals("L=abcX,L=abc,abc", res[702].exec("L=abcX"), 1929);
+assertNull(res[702].exec("x{c0}", 1930));
+assertNull(res[702].exec("x{e0} ", 1931));
+assertNull(res[702].exec("x{c0}", 1932));
+assertNull(res[702].exec("x{e0} ", 1933));
+assertNull(res[703].exec("x{1b00}x{12000}x{7c0}x{a840}x{10900}", 1934));
+assertNull(res[706].exec("123abcdefg", 1935));
+assertNull(res[706].exec("123abc\xc4\xc5zz", 1936));
+assertNull(res[710].exec("A\x80", 1937));
+assertNull(res[725].exec("x{60e} ", 1938));
+assertNull(res[725].exec("x{656} ", 1939));
+assertNull(res[725].exec("x{657} ", 1940));
+assertNull(res[725].exec("x{658} ", 1941));
+assertNull(res[725].exec("x{659} ", 1942));
+assertNull(res[725].exec("x{65a} ", 1943));
+assertNull(res[725].exec("x{65b} ", 1944));
+assertNull(res[725].exec("x{65c} ", 1945));
+assertNull(res[725].exec("x{65d} ", 1946));
+assertNull(res[725].exec("x{65e} ", 1947));
+assertNull(res[725].exec("x{66a} ", 1948));
+assertNull(res[725].exec("x{6e9} ", 1949));
+assertNull(res[725].exec("x{6ef}", 1950));
+assertNull(res[725].exec("x{6fa}  ", 1951));
+assertNull(res[725].exec("** Failers", 1952));
+assertNull(res[725].exec("x{600}", 1953));
+assertNull(res[725].exec("x{650}", 1954));
+assertNull(res[725].exec("x{651}  ", 1955));
+assertNull(res[725].exec("x{652}  ", 1956));
+assertNull(res[725].exec("x{653}  ", 1957));
+assertNull(res[725].exec("x{654} ", 1958));
+assertNull(res[725].exec("x{655} ", 1959));
+assertNull(res[725].exec("x{65f}  ", 1960));
+assertNull(res[726].exec("x{1d2b} ", 1961));
+assertNull(res[727].exec("x{589}", 1962));
+assertNull(res[727].exec("x{60c}", 1963));
+assertNull(res[727].exec("x{61f}  ", 1964));
+assertNull(res[727].exec("x{964}", 1965));
+assertNull(res[727].exec("x{965}  ", 1966));
+assertNull(res[727].exec("x{970}  ", 1967));
+assertNull(res[728].exec("x{64b}", 1968));
+assertNull(res[728].exec("x{654}", 1969));
+assertNull(res[728].exec("x{655}", 1970));
+assertNull(res[728].exec("x{200c} ", 1971));
+assertNull(res[728].exec("** Failers", 1972));
+assertNull(res[728].exec("x{64a}", 1973));
+assertNull(res[728].exec("x{656}     ", 1974));
+assertNull(res[729].exec("x{10450}", 1975));
+assertNull(res[729].exec("x{1047f}", 1976));
+assertNull(res[730].exec("x{10400}", 1977));
+assertNull(res[730].exec("x{1044f}", 1978));
+assertNull(res[731].exec("x{10480}", 1979));
+assertNull(res[731].exec("x{1049d}", 1980));
+assertNull(res[731].exec("x{104a0}", 1981));
+assertNull(res[731].exec("x{104a9}", 1982));
+assertNull(res[731].exec("** Failers", 1983));
+assertNull(res[731].exec("x{1049e}", 1984));
+assertNull(res[731].exec("x{1049f}", 1985));
+assertNull(res[731].exec("x{104aa}           ", 1986));
+assertNull(res[731].exec("\xe2\x80\xa8\xe2\x80\xa8", 1987));
+assertNull(res[731].exec("x{2028}x{2028}x{2028}", 1988));
+assertNull(res[732].exec("x{c0}x{e0}x{116}x{117}", 1989));
+assertNull(res[732].exec("x{c0}x{e0}x{116}x{117}", 1990));
+assertNull(res[733].exec("x{102A4}x{AA52}x{A91D}x{1C46}x{10283}x{1092E}x{1C6B}x{A93B}x{A8BF}x{1BA0}x{A50A}====", 1991));
+assertNull(res[733].exec("x{a77d}x{1d79}", 1992));
+assertNull(res[733].exec("x{1d79}x{a77d} ", 1993));
+assertNull(res[733].exec("x{a77d}x{1d79}", 1994));
+assertNull(res[733].exec("** Failers ", 1995));
+assertNull(res[733].exec("x{1d79}x{a77d} ", 1996));
+assertToStringEquals("AA,A", res[734].exec("AA"), 1997);
+assertToStringEquals("Aa,A", res[734].exec("Aa"), 1998);
+assertToStringEquals("aa,a", res[734].exec("aa"), 1999);
+assertToStringEquals("aA,a", res[734].exec("aA"), 2000);
+assertNull(res[734].exec("x{de}x{de}", 2001));
+assertNull(res[734].exec("x{de}x{fe}", 2002));
+assertNull(res[734].exec("x{fe}x{fe}", 2003));
+assertNull(res[734].exec("x{fe}x{de}", 2004));
+assertNull(res[734].exec("x{10a}x{10a}", 2005));
+assertNull(res[734].exec("x{10a}x{10b}", 2006));
+assertNull(res[734].exec("x{10b}x{10b}", 2007));
+assertNull(res[734].exec("x{10b}x{10a}", 2008));
+assertToStringEquals("abc", res[736].exec("abc"), 2009);
+assertToStringEquals("abc", res[737].exec("abc"), 2010);
+assertToStringEquals("abbbbc", res[737].exec("abbbbc"), 2011);
+assertToStringEquals("ac", res[737].exec("ac"), 2012);
+assertToStringEquals("abc", res[738].exec("abc"), 2013);
+assertToStringEquals("abbbbbbc", res[738].exec("abbbbbbc"), 2014);
+assertNull(res[738].exec("*** Failers ", 2015));
+assertNull(res[738].exec("ac", 2016));
+assertNull(res[738].exec("ab", 2017));
+assertToStringEquals("a", res[739].exec("a"), 2018);
+assertToStringEquals("aaaaaaaaaaaaaaaaa", res[739].exec("aaaaaaaaaaaaaaaaa"), 2019);
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[739].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa "), 2020);
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[739].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaF "), 2021);
+assertToStringEquals("a,a", res[740].exec("a"), 2022);
+assertToStringEquals("a,a", res[740].exec("abcd"), 2023);
+assertToStringEquals("a,a", res[740].exec("african"), 2024);
+assertToStringEquals("abc", res[741].exec("abcdef"), 2025);
+assertNull(res[741].exec("*** Failers", 2026));
+assertNull(res[741].exec("xyzabc", 2027));
+assertNull(res[741].exec("xyz\nabc    ", 2028));
+assertToStringEquals("abc", res[742].exec("abcdef"), 2029);
+assertToStringEquals("abc", res[742].exec("xyz\nabc    "), 2030);
+assertNull(res[742].exec("*** Failers", 2031));
+assertNull(res[742].exec("xyzabc", 2032));
+assertNull(res[743].exec("abcdef", 2033));
+assertNull(res[743].exec("*** Failers", 2034));
+assertNull(res[743].exec("xyzabc", 2035));
+assertNull(res[743].exec("xyz\nabc    ", 2036));
+assertNull(res[744].exec("abcdef", 2037));
+assertNull(res[744].exec("*** Failers", 2038));
+assertNull(res[744].exec("xyzabc", 2039));
+assertNull(res[744].exec("xyz\nabc    ", 2040));
+assertNull(res[745].exec("abcdef", 2041));
+assertNull(res[745].exec("xyzabc>3", 2042));
+assertNull(res[745].exec("*** Failers", 2043));
+assertNull(res[745].exec("xyzabc    ", 2044));
+assertNull(res[745].exec("xyzabc>2 ", 2045));
+assertToStringEquals("x9yzz", res[746].exec("x9yzz"), 2046);
+assertToStringEquals("x0y+z", res[746].exec("x0y+z"), 2047);
+assertNull(res[746].exec("*** Failers", 2048));
+assertNull(res[746].exec("xyz", 2049));
+assertNull(res[746].exec("xxy0z     ", 2050));
+assertToStringEquals("x yzz", res[747].exec("x yzz"), 2051);
+assertToStringEquals("x y+z", res[747].exec("x y+z"), 2052);
+assertNull(res[747].exec("*** Failers", 2053));
+assertNull(res[747].exec("xyz", 2054));
+assertNull(res[747].exec("xxyyz", 2055));
+assertToStringEquals("xxy+z", res[748].exec("xxy+z"), 2056);
+assertNull(res[748].exec("*** Failers", 2057));
+assertNull(res[748].exec("xxy0z", 2058));
+assertNull(res[748].exec("x+y+z         ", 2059));
+assertToStringEquals("x+y", res[749].exec("x+y"), 2060);
+assertToStringEquals("x-y", res[749].exec("x-y"), 2061);
+assertNull(res[749].exec("*** Failers", 2062));
+assertNull(res[749].exec("x\ny", 2063));
+assertToStringEquals("x+y", res[750].exec("x+y"), 2064);
+assertToStringEquals("x-y", res[750].exec("x-y"), 2065);
+assertNull(res[750].exec("x\ny", 2066));
+assertNull(res[750].exec("a+bc+dp+q", 2067));
+assertNull(res[750].exec("a+bc\ndp+q", 2068));
+assertNull(res[750].exec("x\nyp+q ", 2069));
+assertNull(res[750].exec("*** Failers ", 2070));
+assertNull(res[750].exec("a\nbc\ndp+q", 2071));
+assertNull(res[750].exec("a+bc\ndp\nq", 2072));
+assertNull(res[750].exec("x\nyp\nq ", 2073));
+assertNull(res[751].exec("ba0", 2074));
+assertNull(res[751].exec("*** Failers", 2075));
+assertNull(res[751].exec("ba0\n", 2076));
+assertNull(res[751].exec("ba0\ncd   ", 2077));
+assertNull(res[752].exec("ba0", 2078));
+assertNull(res[752].exec("*** Failers", 2079));
+assertNull(res[752].exec("ba0\n", 2080));
+assertNull(res[752].exec("ba0\ncd   ", 2081));
+assertNull(res[753].exec("ba0", 2082));
+assertNull(res[753].exec("ba0\n", 2083));
+assertNull(res[753].exec("*** Failers", 2084));
+assertNull(res[753].exec("ba0\ncd   ", 2085));
+assertNull(res[754].exec("ba0", 2086));
+assertNull(res[754].exec("ba0\n", 2087));
+assertNull(res[754].exec("*** Failers", 2088));
+assertNull(res[754].exec("ba0\ncd   ", 2089));
+assertToStringEquals("a0", res[755].exec("ba0"), 2090);
+assertNull(res[755].exec("ba0\n", 2091));
+assertNull(res[755].exec("*** Failers", 2092));
+assertNull(res[755].exec("ba0\ncd   ", 2093));
+assertToStringEquals("a0", res[756].exec("ba0"), 2094);
+assertToStringEquals("a0", res[756].exec("ba0\n"), 2095);
+assertToStringEquals("a0", res[756].exec("ba0\ncd   "), 2096);
+assertNull(res[756].exec("*** Failers", 2097));
+assertToStringEquals("abc", res[757].exec("abc"), 2098);
+assertToStringEquals("aBc", res[757].exec("aBc"), 2099);
+assertToStringEquals("ABC", res[757].exec("ABC"), 2100);
+assertToStringEquals("b", res[758].exec("abcd"), 2101);
+assertToStringEquals("abz", res[759].exec("abz"), 2102);
+assertToStringEquals("abb", res[759].exec("abbz"), 2103);
+assertToStringEquals("az", res[759].exec("azz  "), 2104);
+assertToStringEquals("yz", res[760].exec("ayzq"), 2105);
+assertToStringEquals("xyz", res[760].exec("axyzq"), 2106);
+assertToStringEquals("xxyz", res[760].exec("axxyz"), 2107);
+assertToStringEquals("xxxyz", res[760].exec("axxxyzq"), 2108);
+assertToStringEquals("xxxyz", res[760].exec("axxxxyzq"), 2109);
+assertNull(res[760].exec("*** Failers", 2110));
+assertNull(res[760].exec("ax", 2111));
+assertNull(res[760].exec("axx     ", 2112));
+assertNull(res[760].exec("  ", 2113));
+assertToStringEquals("xxxyz", res[761].exec("axxxyzq"), 2114);
+assertToStringEquals("xxxyz", res[761].exec("axxxxyzq"), 2115);
+assertNull(res[761].exec("*** Failers", 2116));
+assertNull(res[761].exec("ax", 2117));
+assertNull(res[761].exec("axx     ", 2118));
+assertNull(res[761].exec("ayzq", 2119));
+assertNull(res[761].exec("axyzq", 2120));
+assertNull(res[761].exec("axxyz", 2121));
+assertNull(res[761].exec("  ", 2122));
+assertToStringEquals("xxyz", res[762].exec("axxyz"), 2123);
+assertToStringEquals("xxxyz", res[762].exec("axxxyzq"), 2124);
+assertToStringEquals("xxxyz", res[762].exec("axxxxyzq"), 2125);
+assertNull(res[762].exec("*** Failers", 2126));
+assertNull(res[762].exec("ax", 2127));
+assertNull(res[762].exec("axx     ", 2128));
+assertNull(res[762].exec("ayzq", 2129));
+assertNull(res[762].exec("axyzq", 2130));
+assertNull(res[762].exec("  ", 2131));
+assertToStringEquals("b", res[763].exec("bac"), 2132);
+assertToStringEquals("bcdef", res[763].exec("bcdefax"), 2133);
+assertToStringEquals("*** F", res[763].exec("*** Failers"), 2134);
+assertToStringEquals("   ", res[763].exec("aaaaa   "), 2135);
+assertToStringEquals("b", res[764].exec("bac"), 2136);
+assertToStringEquals("bcdef", res[764].exec("bcdefax"), 2137);
+assertToStringEquals("*** F", res[764].exec("*** Failers"), 2138);
+assertToStringEquals("", res[764].exec("aaaaa   "), 2139);
+assertToStringEquals("xyz", res[765].exec("xyz"), 2140);
+assertToStringEquals("wxyz", res[765].exec("awxyza"), 2141);
+assertToStringEquals("bcdef", res[765].exec("abcdefa"), 2142);
+assertToStringEquals("bcdef", res[765].exec("abcdefghijk"), 2143);
+assertToStringEquals("*** F", res[765].exec("*** Failers"), 2144);
+assertNull(res[765].exec("axya", 2145));
+assertNull(res[765].exec("axa", 2146));
+assertToStringEquals("     ", res[765].exec("aaaaa         "), 2147);
+assertToStringEquals("1234", res[766].exec("1234b567"), 2148);
+assertToStringEquals("", res[766].exec("xyz"), 2149);
+assertToStringEquals("a", res[767].exec("a1234b567"), 2150);
+assertToStringEquals("xyz", res[767].exec("xyz"), 2151);
+assertToStringEquals(" ", res[767].exec(" "), 2152);
+assertToStringEquals("1234", res[768].exec("ab1234c56"), 2153);
+assertNull(res[768].exec("*** Failers", 2154));
+assertNull(res[768].exec("xyz", 2155));
+assertToStringEquals("ab", res[769].exec("ab123c56"), 2156);
+assertToStringEquals("*** Failers", res[769].exec("*** Failers"), 2157);
+assertNull(res[769].exec("789", 2158));
+assertToStringEquals("5A", res[770].exec("045ABC"), 2159);
+assertToStringEquals("A", res[770].exec("ABC"), 2160);
+assertNull(res[770].exec("*** Failers", 2161));
+assertNull(res[770].exec("XYZ", 2162));
+assertToStringEquals("A", res[771].exec("ABC"), 2163);
+assertToStringEquals("BA", res[771].exec("BAC"), 2164);
+assertToStringEquals("A", res[771].exec("9ABC             "), 2165);
+assertNull(res[771].exec("*** Failers", 2166));
+assertToStringEquals("aaaa", res[772].exec("aaaa"), 2167);
+assertToStringEquals("xyz", res[773].exec("xyz"), 2168);
+assertToStringEquals("ggggggggxyz", res[773].exec("ggggggggxyz"), 2169);
+assertToStringEquals("abcdxyz", res[774].exec("abcdxyz"), 2170);
+assertToStringEquals("axyz", res[774].exec("axyz"), 2171);
+assertNull(res[774].exec("*** Failers", 2172));
+assertNull(res[774].exec("xyz", 2173));
+assertToStringEquals("xyz", res[775].exec("xyz"), 2174);
+assertToStringEquals("cxyz", res[775].exec("cxyz       "), 2175);
+assertToStringEquals("12X", res[776].exec("12X"), 2176);
+assertToStringEquals("123X", res[776].exec("123X"), 2177);
+assertNull(res[776].exec("*** Failers", 2178));
+assertNull(res[776].exec("X", 2179));
+assertNull(res[776].exec("1X", 2180));
+assertNull(res[776].exec("1234X     ", 2181));
+assertToStringEquals("a4", res[777].exec("a45"), 2182);
+assertToStringEquals("b9", res[777].exec("b93"), 2183);
+assertToStringEquals("c9", res[777].exec("c99z"), 2184);
+assertToStringEquals("d0", res[777].exec("d04"), 2185);
+assertNull(res[777].exec("*** Failers", 2186));
+assertNull(res[777].exec("e45", 2187));
+assertNull(res[777].exec("abcd      ", 2188));
+assertNull(res[777].exec("abcd1234", 2189));
+assertNull(res[777].exec("1234  ", 2190));
+assertToStringEquals("a4", res[778].exec("a45"), 2191);
+assertToStringEquals("b9", res[778].exec("b93"), 2192);
+assertToStringEquals("c9", res[778].exec("c99z"), 2193);
+assertToStringEquals("d0", res[778].exec("d04"), 2194);
+assertToStringEquals("abcd1", res[778].exec("abcd1234"), 2195);
+assertToStringEquals("1", res[778].exec("1234  "), 2196);
+assertNull(res[778].exec("*** Failers", 2197));
+assertNull(res[778].exec("e45", 2198));
+assertNull(res[778].exec("abcd      ", 2199));
+assertToStringEquals("a4", res[779].exec("a45"), 2200);
+assertToStringEquals("b9", res[779].exec("b93"), 2201);
+assertToStringEquals("c9", res[779].exec("c99z"), 2202);
+assertToStringEquals("d0", res[779].exec("d04"), 2203);
+assertToStringEquals("abcd1", res[779].exec("abcd1234"), 2204);
+assertNull(res[779].exec("*** Failers", 2205));
+assertNull(res[779].exec("1234  ", 2206));
+assertNull(res[779].exec("e45", 2207));
+assertNull(res[779].exec("abcd      ", 2208));
+assertToStringEquals("aX", res[780].exec("aX"), 2209);
+assertToStringEquals("aaX", res[780].exec("aaX "), 2210);
+assertToStringEquals("a4", res[781].exec("a45"), 2211);
+assertToStringEquals("b9", res[781].exec("b93"), 2212);
+assertToStringEquals("c9", res[781].exec("c99z"), 2213);
+assertToStringEquals("d0", res[781].exec("d04"), 2214);
+assertToStringEquals("1", res[781].exec("1234  "), 2215);
+assertNull(res[781].exec("*** Failers", 2216));
+assertNull(res[781].exec("abcd1234", 2217));
+assertNull(res[781].exec("e45", 2218));
+assertToStringEquals("ab4", res[782].exec("ab45"), 2219);
+assertToStringEquals("bcd9", res[782].exec("bcd93"), 2220);
+assertNull(res[782].exec("*** Failers", 2221));
+assertNull(res[782].exec("1234 ", 2222));
+assertNull(res[782].exec("a36 ", 2223));
+assertNull(res[782].exec("abcd1234", 2224));
+assertNull(res[782].exec("ee45", 2225));
+assertToStringEquals("abc4,abc", res[783].exec("abc45"), 2226);
+assertToStringEquals("abcabcabc4,abc", res[783].exec("abcabcabc45"), 2227);
+assertToStringEquals("4,", res[783].exec("42xyz "), 2228);
+assertNull(res[783].exec("*** Failers", 2229));
+assertToStringEquals("abc4,abc", res[784].exec("abc45"), 2230);
+assertToStringEquals("abcabcabc4,abc", res[784].exec("abcabcabc45"), 2231);
+assertNull(res[784].exec("*** Failers", 2232));
+assertNull(res[784].exec("42xyz ", 2233));
+assertToStringEquals("abc4,abc", res[785].exec("abc45"), 2234);
+assertToStringEquals("4,", res[785].exec("42xyz "), 2235);
+assertNull(res[785].exec("*** Failers", 2236));
+assertNull(res[785].exec("abcabcabc45", 2237));
+assertToStringEquals("abcabc4,abc", res[786].exec("abcabc45"), 2238);
+assertToStringEquals("abcabcabc4,abc", res[786].exec("abcabcabc45"), 2239);
+assertNull(res[786].exec("*** Failers", 2240));
+assertNull(res[786].exec("abcabcabcabc45", 2241));
+assertNull(res[786].exec("abc45", 2242));
+assertNull(res[786].exec("42xyz ", 2243));
+assertNull(res[786].exec("1abc2abc3456", 2244));
+assertNull(res[786].exec("1abc2xyz3456 ", 2245));
+assertToStringEquals("ab=ab,ab,ab", res[787].exec("ab=ab"), 2246);
+assertToStringEquals("ab=ab,ab,ab", res[787].exec("ab=ab"), 2247);
+assertNull(res[787].exec("abc", 2248));
+assertNull(res[787].exec("a(b)c", 2249));
+assertNull(res[787].exec("a(b(c))d  ", 2250));
+assertNull(res[787].exec("*** Failers)", 2251));
+assertNull(res[787].exec("a(b(c)d  ", 2252));
+assertNull(res[787].exec(">abc>123<xyz<", 2253));
+assertNull(res[787].exec(">abc>1(2)3<xyz<", 2254));
+assertNull(res[787].exec(">abc>(1(2)3)<xyz<", 2255));
+assertNull(res[787].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9876", 2256));
+assertNull(res[787].exec("*** Failers ", 2257));
+assertNull(res[787].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 2258));
+assertNull(res[787].exec("<>", 2259));
+assertNull(res[787].exec("<abcd>", 2260));
+assertNull(res[787].exec("<abc <123> hij>", 2261));
+assertNull(res[787].exec("<abc <def> hij>", 2262));
+assertNull(res[787].exec("<abc<>def> ", 2263));
+assertNull(res[787].exec("<abc<>      ", 2264));
+assertNull(res[787].exec("*** Failers", 2265));
+assertNull(res[787].exec("<abc", 2266));
+assertNull(res[787].exec("abc:                          ", 2267));
+assertNull(res[787].exec("12                             ", 2268));
+assertNull(res[787].exec("*** Failers                     ", 2269));
+assertNull(res[787].exec("123                       ", 2270));
+assertNull(res[787].exec("xyz                        ", 2271));
+assertNull(res[787].exec("                            ", 2272));
+assertNull(res[787].exec("abc:                        ", 2273));
+assertNull(res[787].exec("12         ", 2274));
+assertNull(res[787].exec("*** Failers", 2275));
+assertNull(res[787].exec("123", 2276));
+assertNull(res[787].exec("xyz    ", 2277));
+assertNull(res[788].exec("abcde:                          ", 2278));
+assertNull(res[788].exec("*** Failers                     ", 2279));
+assertNull(res[788].exec("abc.. ", 2280));
+assertNull(res[788].exec("123                       ", 2281));
+assertNull(res[788].exec("vwxyz                        ", 2282));
+assertNull(res[788].exec("                            ", 2283));
+assertNull(res[789].exec("12         ", 2284));
+assertNull(res[789].exec("*** Failers", 2285));
+assertNull(res[789].exec("abcde:", 2286));
+assertNull(res[789].exec("abc..  ", 2287));
+assertNull(res[789].exec("123", 2288));
+assertNull(res[789].exec("vwxyz    ", 2289));
+assertNull(res[789].exec("abc12345", 2290));
+assertNull(res[789].exec("wxy123z", 2291));
+assertNull(res[789].exec("*** Failers", 2292));
+assertNull(res[789].exec("123abc", 2293));
+assertNull(res[789].exec("123abc", 2294));
+assertNull(res[789].exec("mno123456 ", 2295));
+assertNull(res[789].exec("*** Failers", 2296));
+assertNull(res[789].exec("abc12345", 2297));
+assertNull(res[789].exec("wxy123z", 2298));
+assertNull(res[789].exec("abcxyz", 2299));
+assertNull(res[789].exec("123abcxyz999 ", 2300));
+assertToStringEquals("abc", res[791].exec("abcdef"), 2301);
+assertNull(res[791].exec("*** Failers", 2302));
+assertToStringEquals("abc", res[791].exec("abcdefB  "), 2303);
+assertToStringEquals(",", res[792].exec("bcd"), 2304);
+assertToStringEquals("aaa,aaa", res[792].exec("aaabcd"), 2305);
+assertToStringEquals(",", res[792].exec("xyz"), 2306);
+assertToStringEquals(",", res[792].exec("xyzN  "), 2307);
+assertToStringEquals(",", res[792].exec("*** Failers"), 2308);
+assertToStringEquals(",", res[792].exec("bcdN   "), 2309);
+assertToStringEquals("xyz", res[793].exec("xyz"), 2310);
+assertNull(res[793].exec("xyz\n", 2311));
+assertNull(res[793].exec("*** Failers", 2312));
+assertNull(res[793].exec("xyzZ", 2313));
+assertNull(res[793].exec("xyz\nZ    ", 2314));
+assertToStringEquals("xyz", res[794].exec("xyz"), 2315);
+assertToStringEquals("xyz", res[794].exec("xyz\n "), 2316);
+assertToStringEquals("xyz", res[794].exec("abcxyz\npqr "), 2317);
+assertToStringEquals("xyz", res[794].exec("abcxyz\npqrZ "), 2318);
+assertToStringEquals("xyz", res[794].exec("xyz\nZ    "), 2319);
+assertNull(res[794].exec("*** Failers", 2320));
+assertNull(res[794].exec("xyzZ", 2321));
+assertNull(res[795].exec("abcdef", 2322));
+assertNull(res[795].exec("defabcxyz>3 ", 2323));
+assertNull(res[795].exec("*** Failers ", 2324));
+assertNull(res[795].exec("defabcxyz", 2325));
+assertNull(res[796].exec("abP", 2326));
+assertNull(res[796].exec("abcdeP", 2327));
+assertToStringEquals("abcdef", res[796].exec("abcdefP"), 2328);
+assertNull(res[796].exec("*** Failers", 2329));
+assertNull(res[796].exec("abxP    ", 2330));
+assertNull(res[797].exec("aP", 2331));
+assertNull(res[797].exec("aaP", 2332));
+assertNull(res[797].exec("aa2P ", 2333));
+assertNull(res[797].exec("aaaP", 2334));
+assertNull(res[797].exec("aaa23P ", 2335));
+assertNull(res[797].exec("aaaa12345P", 2336));
+assertToStringEquals("aa0z", res[797].exec("aa0zP"), 2337);
+assertToStringEquals("aaaa4444444444444z", res[797].exec("aaaa4444444444444zP "), 2338);
+assertNull(res[797].exec("*** Failers", 2339));
+assertNull(res[797].exec("azP ", 2340));
+assertNull(res[797].exec("aaaaaP ", 2341));
+assertNull(res[797].exec("a56P ", 2342));
+assertNull(res[799].exec("adfadadaklhlkalkajhlkjahdfasdfasdfladsfjkjPZ", 2343));
+assertNull(res[799].exec("lkjhlkjhlkjhlkjhabbbbbbcdaefabbbbbbbefaPBZ", 2344));
+assertNull(res[799].exec("cdabbbbbbbbPRBZ", 2345));
+assertNull(res[799].exec("efabbbbbbbbbbbbbbbbPRBZ", 2346));
+assertNull(res[799].exec("bbbbbbbbbbbbcdXyasdfadfPRBZ    ", 2347));
+assertNull(res[799].exec("abc", 2348));
+assertNull(res[799].exec("** Failers", 2349));
+assertNull(res[799].exec("def  ", 2350));
+assertToStringEquals("the quick brown fox", res[800].exec("the quick brown fox"), 2351);
+assertNull(res[800].exec("The quick brown FOX", 2352));
+assertToStringEquals("the quick brown fox", res[800].exec("What do you know about the quick brown fox?"), 2353);
+assertNull(res[800].exec("What do you know about THE QUICK BROWN FOX?", 2354));
+assertToStringEquals("the quick brown fox", res[801].exec("the quick brown fox"), 2355);
+assertToStringEquals("The quick brown FOX", res[801].exec("The quick brown FOX"), 2356);
+assertToStringEquals("the quick brown fox", res[801].exec("What do you know about the quick brown fox?"), 2357);
+assertToStringEquals("THE QUICK BROWN FOX", res[801].exec("What do you know about THE QUICK BROWN FOX?"), 2358);
+assertToStringEquals("abcd\x09\n\x0d\x0cae9;$\\?caxyz", res[802].exec("abcd\x09\n\x0d\x0cae9;$\\?caxyz"), 2359);
+assertToStringEquals("abxyzpqrrrabbxyyyypqAzz", res[803].exec("abxyzpqrrrabbxyyyypqAzz"), 2360);
+assertToStringEquals("abxyzpqrrrabbxyyyypqAzz", res[803].exec("abxyzpqrrrabbxyyyypqAzz"), 2361);
+assertToStringEquals("aabxyzpqrrrabbxyyyypqAzz", res[803].exec("aabxyzpqrrrabbxyyyypqAzz"), 2362);
+assertToStringEquals("aaabxyzpqrrrabbxyyyypqAzz", res[803].exec("aaabxyzpqrrrabbxyyyypqAzz"), 2363);
+assertToStringEquals("aaaabxyzpqrrrabbxyyyypqAzz", res[803].exec("aaaabxyzpqrrrabbxyyyypqAzz"), 2364);
+assertToStringEquals("abcxyzpqrrrabbxyyyypqAzz", res[803].exec("abcxyzpqrrrabbxyyyypqAzz"), 2365);
+assertToStringEquals("aabcxyzpqrrrabbxyyyypqAzz", res[803].exec("aabcxyzpqrrrabbxyyyypqAzz"), 2366);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypAzz"), 2367);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqAzz"), 2368);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqAzz"), 2369);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqAzz"), 2370);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypqqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqAzz"), 2371);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypqqqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqqAzz"), 2372);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypqqqqqqAzz", res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqqqAzz"), 2373);
+assertToStringEquals("aaaabcxyzpqrrrabbxyyyypqAzz", res[803].exec("aaaabcxyzpqrrrabbxyyyypqAzz"), 2374);
+assertToStringEquals("abxyzzpqrrrabbxyyyypqAzz", res[803].exec("abxyzzpqrrrabbxyyyypqAzz"), 2375);
+assertToStringEquals("aabxyzzzpqrrrabbxyyyypqAzz", res[803].exec("aabxyzzzpqrrrabbxyyyypqAzz"), 2376);
+assertToStringEquals("aaabxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaabxyzzzzpqrrrabbxyyyypqAzz"), 2377);
+assertToStringEquals("aaaabxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaaabxyzzzzpqrrrabbxyyyypqAzz"), 2378);
+assertToStringEquals("abcxyzzpqrrrabbxyyyypqAzz", res[803].exec("abcxyzzpqrrrabbxyyyypqAzz"), 2379);
+assertToStringEquals("aabcxyzzzpqrrrabbxyyyypqAzz", res[803].exec("aabcxyzzzpqrrrabbxyyyypqAzz"), 2380);
+assertToStringEquals("aaabcxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaabcxyzzzzpqrrrabbxyyyypqAzz"), 2381);
+assertToStringEquals("aaaabcxyzzzzpqrrrabbxyyyypqAzz", res[803].exec("aaaabcxyzzzzpqrrrabbxyyyypqAzz"), 2382);
+assertToStringEquals("aaaabcxyzzzzpqrrrabbbxyyyypqAzz", res[803].exec("aaaabcxyzzzzpqrrrabbbxyyyypqAzz"), 2383);
+assertToStringEquals("aaaabcxyzzzzpqrrrabbbxyyyyypqAzz", res[803].exec("aaaabcxyzzzzpqrrrabbbxyyyyypqAzz"), 2384);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypABzz", res[803].exec("aaabcxyzpqrrrabbxyyyypABzz"), 2385);
+assertToStringEquals("aaabcxyzpqrrrabbxyyyypABBzz", res[803].exec("aaabcxyzpqrrrabbxyyyypABBzz"), 2386);
+assertToStringEquals("aaabxyzpqrrrabbxyyyypqAzz", res[803].exec(">>>aaabxyzpqrrrabbxyyyypqAzz"), 2387);
+assertToStringEquals("aaaabxyzpqrrrabbxyyyypqAzz", res[803].exec(">aaaabxyzpqrrrabbxyyyypqAzz"), 2388);
+assertToStringEquals("abcxyzpqrrrabbxyyyypqAzz", res[803].exec(">>>>abcxyzpqrrrabbxyyyypqAzz"), 2389);
+assertNull(res[803].exec("*** Failers", 2390));
+assertNull(res[803].exec("abxyzpqrrabbxyyyypqAzz", 2391));
+assertNull(res[803].exec("abxyzpqrrrrabbxyyyypqAzz", 2392));
+assertNull(res[803].exec("abxyzpqrrrabxyyyypqAzz", 2393));
+assertNull(res[803].exec("aaaabcxyzzzzpqrrrabbbxyyyyyypqAzz", 2394));
+assertNull(res[803].exec("aaaabcxyzzzzpqrrrabbbxyyypqAzz", 2395));
+assertNull(res[803].exec("aaabcxyzpqrrrabbxyyyypqqqqqqqAzz", 2396));
+assertToStringEquals("abczz,abc", res[804].exec("abczz"), 2397);
+assertToStringEquals("abcabczz,abc", res[804].exec("abcabczz"), 2398);
+assertNull(res[804].exec("*** Failers", 2399));
+assertNull(res[804].exec("zz", 2400));
+assertNull(res[804].exec("abcabcabczz", 2401));
+assertNull(res[804].exec(">>abczz", 2402));
+assertToStringEquals("bc,b", res[805].exec("bc"), 2403);
+assertToStringEquals("bbc,b", res[805].exec("bbc"), 2404);
+assertToStringEquals("bbbc,bb", res[805].exec("bbbc"), 2405);
+assertToStringEquals("bac,a", res[805].exec("bac"), 2406);
+assertToStringEquals("bbac,a", res[805].exec("bbac"), 2407);
+assertToStringEquals("aac,a", res[805].exec("aac"), 2408);
+assertToStringEquals("abbbbbbbbbbbc,bbbbbbbbbbb", res[805].exec("abbbbbbbbbbbc"), 2409);
+assertToStringEquals("bbbbbbbbbbbac,a", res[805].exec("bbbbbbbbbbbac"), 2410);
+assertNull(res[805].exec("*** Failers", 2411));
+assertNull(res[805].exec("aaac", 2412));
+assertNull(res[805].exec("abbbbbbbbbbbac", 2413));
+assertToStringEquals("bc,b", res[806].exec("bc"), 2414);
+assertToStringEquals("bbc,bb", res[806].exec("bbc"), 2415);
+assertToStringEquals("bbbc,bbb", res[806].exec("bbbc"), 2416);
+assertToStringEquals("bac,a", res[806].exec("bac"), 2417);
+assertToStringEquals("bbac,a", res[806].exec("bbac"), 2418);
+assertToStringEquals("aac,a", res[806].exec("aac"), 2419);
+assertToStringEquals("abbbbbbbbbbbc,bbbbbbbbbbb", res[806].exec("abbbbbbbbbbbc"), 2420);
+assertToStringEquals("bbbbbbbbbbbac,a", res[806].exec("bbbbbbbbbbbac"), 2421);
+assertNull(res[806].exec("*** Failers", 2422));
+assertNull(res[806].exec("aaac", 2423));
+assertNull(res[806].exec("abbbbbbbbbbbac", 2424));
+assertToStringEquals("bbc,bb", res[806].exec("bbc"), 2425);
+assertToStringEquals("babc,ba", res[807].exec("babc"), 2426);
+assertToStringEquals("bbabc,ba", res[807].exec("bbabc"), 2427);
+assertToStringEquals("bababc,ba", res[807].exec("bababc"), 2428);
+assertNull(res[807].exec("*** Failers", 2429));
+assertNull(res[807].exec("bababbc", 2430));
+assertNull(res[807].exec("babababc", 2431));
+assertToStringEquals("babc,ba", res[808].exec("babc"), 2432);
+assertToStringEquals("bbabc,ba", res[808].exec("bbabc"), 2433);
+assertToStringEquals("bababc,ba", res[808].exec("bababc"), 2434);
+assertNull(res[808].exec("*** Failers", 2435));
+assertNull(res[808].exec("bababbc", 2436));
+assertNull(res[808].exec("babababc", 2437));
 assertThrows("var re = /^\\ca\\cA\\c[\\c{\\c:/;", 2438);
-assertEquals(null, res[808].exec("\x01\x01e;z", 2439));
-assertEquals("a", res[809].exec("athing"), 2440);
-assertEquals("b", res[809].exec("bthing"), 2441);
-assertEquals("]", res[809].exec("]thing"), 2442);
-assertEquals("c", res[809].exec("cthing"), 2443);
-assertEquals("d", res[809].exec("dthing"), 2444);
-assertEquals("e", res[809].exec("ething"), 2445);
-assertEquals(null, res[809].exec("*** Failers", 2446));
-assertEquals(null, res[809].exec("fthing", 2447));
-assertEquals(null, res[809].exec("[thing", 2448));
-assertEquals(null, res[809].exec("\\thing", 2449));
-assertEquals(null, res[810].exec("]thing", 2450));
-assertEquals(null, res[810].exec("cthing", 2451));
-assertEquals(null, res[810].exec("dthing", 2452));
-assertEquals(null, res[810].exec("ething", 2453));
-assertEquals(null, res[810].exec("*** Failers", 2454));
-assertEquals(null, res[810].exec("athing", 2455));
-assertEquals(null, res[810].exec("fthing", 2456));
-assertEquals("f", res[811].exec("fthing"), 2457);
-assertEquals("[", res[811].exec("[thing"), 2458);
-assertEquals("\\", res[811].exec("\\thing"), 2459);
-assertEquals("*", res[811].exec("*** Failers"), 2460);
-assertEquals(null, res[811].exec("athing", 2461));
-assertEquals(null, res[811].exec("bthing", 2462));
-assertEquals(null, res[811].exec("]thing", 2463));
-assertEquals(null, res[811].exec("cthing", 2464));
-assertEquals(null, res[811].exec("dthing", 2465));
-assertEquals(null, res[811].exec("ething", 2466));
-assertEquals(null, res[812].exec("athing", 2467));
-assertEquals(null, res[812].exec("fthing", 2468));
-assertEquals(null, res[812].exec("*** Failers", 2469));
-assertEquals(null, res[812].exec("]thing", 2470));
-assertEquals(null, res[812].exec("cthing", 2471));
-assertEquals(null, res[812].exec("dthing", 2472));
-assertEquals(null, res[812].exec("ething", 2473));
-assertEquals(null, res[812].exec("\ufffd", 2474));
-assertEquals(null, res[812].exec("\ufffd", 2475));
-assertEquals("0", res[813].exec("0"), 2476);
-assertEquals("1", res[813].exec("1"), 2477);
-assertEquals("2", res[813].exec("2"), 2478);
-assertEquals("3", res[813].exec("3"), 2479);
-assertEquals("4", res[813].exec("4"), 2480);
-assertEquals("5", res[813].exec("5"), 2481);
-assertEquals("6", res[813].exec("6"), 2482);
-assertEquals("7", res[813].exec("7"), 2483);
-assertEquals("8", res[813].exec("8"), 2484);
-assertEquals("9", res[813].exec("9"), 2485);
-assertEquals("10", res[813].exec("10"), 2486);
-assertEquals("100", res[813].exec("100"), 2487);
-assertEquals(null, res[813].exec("*** Failers", 2488));
-assertEquals(null, res[813].exec("abc", 2489));
-assertEquals("enter", res[814].exec("enter"), 2490);
-assertEquals("inter", res[814].exec("inter"), 2491);
-assertEquals("uponter", res[814].exec("uponter"), 2492);
-assertEquals("xxx0", res[815].exec("xxx0"), 2493);
-assertEquals("xxx1234", res[815].exec("xxx1234"), 2494);
-assertEquals(null, res[815].exec("*** Failers", 2495));
-assertEquals(null, res[815].exec("xxx", 2496));
-assertEquals("x123", res[816].exec("x123"), 2497);
-assertEquals("xx123", res[816].exec("xx123"), 2498);
-assertEquals("123456", res[816].exec("123456"), 2499);
-assertEquals(null, res[816].exec("*** Failers", 2500));
-assertEquals(null, res[816].exec("123", 2501));
-assertEquals("x1234", res[816].exec("x1234"), 2502);
-assertEquals("x123", res[817].exec("x123"), 2503);
-assertEquals("xx123", res[817].exec("xx123"), 2504);
-assertEquals("123456", res[817].exec("123456"), 2505);
-assertEquals(null, res[817].exec("*** Failers", 2506));
-assertEquals(null, res[817].exec("123", 2507));
-assertEquals("x1234", res[817].exec("x1234"), 2508);
-assertEquals("abc!pqr=apquxz.ixr.zzz.ac.uk,abc,pqr", res[818].exec("abc!pqr=apquxz.ixr.zzz.ac.uk"), 2509);
-assertEquals(null, res[818].exec("*** Failers", 2510));
-assertEquals(null, res[818].exec("!pqr=apquxz.ixr.zzz.ac.uk", 2511));
-assertEquals(null, res[818].exec("abc!=apquxz.ixr.zzz.ac.uk", 2512));
-assertEquals(null, res[818].exec("abc!pqr=apquxz:ixr.zzz.ac.uk", 2513));
-assertEquals(null, res[818].exec("abc!pqr=apquxz.ixr.zzz.ac.ukk", 2514));
-assertEquals(":", res[819].exec("Well, we need a colon: somewhere"), 2515);
-assertEquals(null, res[819].exec("*** Fail if we don't", 2516));
-assertEquals("0abc,0abc", res[820].exec("0abc"), 2517);
-assertEquals("abc,abc", res[820].exec("abc"), 2518);
-assertEquals("fed,fed", res[820].exec("fed"), 2519);
-assertEquals("E,E", res[820].exec("E"), 2520);
-assertEquals("::,::", res[820].exec("::"), 2521);
-assertEquals("5f03:12C0::932e,5f03:12C0::932e", res[820].exec("5f03:12C0::932e"), 2522);
-assertEquals("def,def", res[820].exec("fed def"), 2523);
-assertEquals("ff,ff", res[820].exec("Any old stuff"), 2524);
-assertEquals(null, res[820].exec("*** Failers", 2525));
-assertEquals(null, res[820].exec("0zzz", 2526));
-assertEquals(null, res[820].exec("gzzz", 2527));
-assertEquals(null, res[820].exec("fed ", 2528));
-assertEquals(null, res[820].exec("Any old rubbish", 2529));
-assertEquals(".1.2.3,1,2,3", res[821].exec(".1.2.3"), 2530);
-assertEquals("A.12.123.0,12,123,0", res[821].exec("A.12.123.0"), 2531);
-assertEquals(null, res[821].exec("*** Failers", 2532));
-assertEquals(null, res[821].exec(".1.2.3333", 2533));
-assertEquals(null, res[821].exec("1.2.3", 2534));
-assertEquals(null, res[821].exec("1234.2.3", 2535));
-assertEquals("1 IN SOA non-sp1 non-sp2(,1,non-sp1,non-sp2", res[822].exec("1 IN SOA non-sp1 non-sp2("), 2536);
-assertEquals("1    IN    SOA    non-sp1    non-sp2   (,1,non-sp1,non-sp2", res[822].exec("1    IN    SOA    non-sp1    non-sp2   ("), 2537);
-assertEquals(null, res[822].exec("*** Failers", 2538));
-assertEquals(null, res[822].exec("1IN SOA non-sp1 non-sp2(", 2539));
-assertEquals("a.,", res[823].exec("a."), 2540);
-assertEquals("Z.,", res[823].exec("Z."), 2541);
-assertEquals("2.,", res[823].exec("2."), 2542);
-assertEquals("ab-c.pq-r.,.pq-r", res[823].exec("ab-c.pq-r."), 2543);
-assertEquals("sxk.zzz.ac.uk.,.uk", res[823].exec("sxk.zzz.ac.uk."), 2544);
-assertEquals("x-.y-.,.y-", res[823].exec("x-.y-."), 2545);
-assertEquals(null, res[823].exec("*** Failers", 2546));
-assertEquals(null, res[823].exec("-abc.peq.", 2547));
-assertEquals("*.a,,,", res[824].exec("*.a"), 2548);
-assertEquals("*.b0-a,0-a,,", res[824].exec("*.b0-a"), 2549);
-assertEquals("*.c3-b.c,3-b,.c,", res[824].exec("*.c3-b.c"), 2550);
-assertEquals("*.c-a.b-c,-a,.b-c,-c", res[824].exec("*.c-a.b-c"), 2551);
-assertEquals(null, res[824].exec("*** Failers", 2552));
-assertEquals(null, res[824].exec("*.0", 2553));
-assertEquals(null, res[824].exec("*.a-", 2554));
-assertEquals(null, res[824].exec("*.a-b.c-", 2555));
-assertEquals(null, res[824].exec("*.c-a.0-c", 2556));
-assertEquals("abde,de,abd,e", res[825].exec("abde"), 2557);
-assertEquals("abdf,,abd,f", res[826].exec("abdf"), 2558);
-assertEquals("ab,abcd,cd,ab", res[827].exec("abcd"), 2559);
-assertEquals("a.b.c.d,.d", res[828].exec("a.b.c.d"), 2560);
-assertEquals("A.B.C.D,.D", res[828].exec("A.B.C.D"), 2561);
-assertEquals("a.b.c.1.2.3.C,.C", res[828].exec("a.b.c.1.2.3.C"), 2562);
-assertEquals("\"1234\",", res[829].exec("\"1234\""), 2563);
-assertEquals("\"abcd\" ;,;", res[829].exec("\"abcd\" ;"), 2564);
-assertEquals("\"\" ; rhubarb,; rhubarb", res[829].exec("\"\" ; rhubarb"), 2565);
-assertEquals(null, res[829].exec("*** Failers", 2566));
-assertEquals(null, res[829].exec("\"1234\" : things", 2567));
-assertEquals(null, res[830].exec("\\", 2568));
-assertEquals(null, res[830].exec("*** Failers", 2569));
-assertEquals("ab c", res[831].exec("ab c"), 2570);
-assertEquals(null, res[831].exec("*** Failers", 2571));
-assertEquals(null, res[831].exec("abc", 2572));
-assertEquals(null, res[831].exec("ab cde", 2573));
-assertEquals("ab c", res[831].exec("ab c"), 2574);
-assertEquals(null, res[831].exec("*** Failers", 2575));
-assertEquals(null, res[831].exec("abc", 2576));
-assertEquals(null, res[831].exec("ab cde", 2577));
-assertEquals("a bcd", res[832].exec("a bcd"), 2578);
-assertEquals(null, res[832].exec("a b d", 2579));
-assertEquals(null, res[832].exec("*** Failers", 2580));
-assertEquals(null, res[832].exec("abcd", 2581));
-assertEquals(null, res[832].exec("ab d", 2582));
-assertEquals("abcdefhijklm,abc,bc,c,def,ef,f,hij,ij,j,klm,lm,m", res[833].exec("abcdefhijklm"), 2583);
-assertEquals("abcdefhijklm,bc,c,ef,f,ij,j,lm,m", res[834].exec("abcdefhijklm"), 2584);
-assertEquals(null, res[835].exec("a+ Z0+\x08\n\x1d\x12", 2585));
-assertEquals(null, res[835].exec(".^$(*+)|{?,?}", 2586));
-assertEquals("z", res[836].exec("z"), 2587);
-assertEquals("az", res[836].exec("az"), 2588);
-assertEquals("aaaz", res[836].exec("aaaz"), 2589);
-assertEquals("a", res[836].exec("a"), 2590);
-assertEquals("aa", res[836].exec("aa"), 2591);
-assertEquals("aaaa", res[836].exec("aaaa"), 2592);
-assertEquals("a", res[836].exec("a+"), 2593);
-assertEquals("aa", res[836].exec("aa+"), 2594);
-assertEquals("z", res[837].exec("z"), 2595);
-assertEquals("a", res[837].exec("az"), 2596);
-assertEquals("a", res[837].exec("aaaz"), 2597);
-assertEquals("a", res[837].exec("a"), 2598);
-assertEquals("a", res[837].exec("aa"), 2599);
-assertEquals("a", res[837].exec("aaaa"), 2600);
-assertEquals("a", res[837].exec("a+"), 2601);
-assertEquals("a", res[837].exec("aa+"), 2602);
-assertEquals("az", res[838].exec("az"), 2603);
-assertEquals("aaaz", res[838].exec("aaaz"), 2604);
-assertEquals("aa", res[838].exec("aa"), 2605);
-assertEquals("aaaa", res[838].exec("aaaa"), 2606);
-assertEquals("aa", res[838].exec("aa+"), 2607);
-assertEquals("az", res[839].exec("az"), 2608);
-assertEquals("aa", res[839].exec("aaaz"), 2609);
-assertEquals("aa", res[839].exec("aa"), 2610);
-assertEquals("aa", res[839].exec("aaaa"), 2611);
-assertEquals("aa", res[839].exec("aa+"), 2612);
-assertEquals("1234567890", res[840].exec("1234567890"), 2613);
-assertEquals("12345678ab", res[840].exec("12345678ab"), 2614);
-assertEquals("12345678__", res[840].exec("12345678__"), 2615);
-assertEquals(null, res[840].exec("*** Failers", 2616));
-assertEquals(null, res[840].exec("1234567", 2617));
-assertEquals("uoie", res[841].exec("uoie"), 2618);
-assertEquals("1234", res[841].exec("1234"), 2619);
-assertEquals("12345", res[841].exec("12345"), 2620);
-assertEquals("aaaaa", res[841].exec("aaaaa"), 2621);
-assertEquals(null, res[841].exec("*** Failers", 2622));
-assertEquals(null, res[841].exec("123456", 2623));
-assertEquals("uoie", res[842].exec("uoie"), 2624);
-assertEquals("1234", res[842].exec("1234"), 2625);
-assertEquals("1234", res[842].exec("12345"), 2626);
-assertEquals("aaaa", res[842].exec("aaaaa"), 2627);
-assertEquals("1234", res[842].exec("123456"), 2628);
-assertEquals("From abcd  Mon Sep 01 12:33,abcd", res[843].exec("From abcd  Mon Sep 01 12:33:02 1997"), 2629);
-assertEquals("From abcd  Mon Sep 01 12:33,Sep ", res[844].exec("From abcd  Mon Sep 01 12:33:02 1997"), 2630);
-assertEquals("From abcd  Mon Sep  1 12:33,Sep  ", res[844].exec("From abcd  Mon Sep  1 12:33:02 1997"), 2631);
-assertEquals(null, res[844].exec("*** Failers", 2632));
-assertEquals(null, res[844].exec("From abcd  Sep 01 12:33:02 1997", 2633));
-assertEquals(null, res[845].exec("12\n34", 2634));
-assertEquals(null, res[845].exec("12\x0d34", 2635));
-assertEquals("brown", res[846].exec("the quick brown\x09 fox"), 2636);
-assertEquals("foolish see?,lish see?", res[847].exec("foobar is foolish see?"), 2637);
-assertEquals("rowbar etc, etc", res[848].exec("foobar crowbar etc"), 2638);
-assertEquals("barrel,rel", res[848].exec("barrel"), 2639);
-assertEquals("2barrel,rel", res[848].exec("2barrel"), 2640);
-assertEquals("A barrel,rel", res[848].exec("A barrel"), 2641);
-assertEquals("abc,abc", res[849].exec("abc456"), 2642);
-assertEquals(null, res[849].exec("*** Failers", 2643));
-assertEquals(null, res[849].exec("abc123", 2644));
-assertEquals("1234", res[850].exec("1234"), 2645);
-assertEquals("1234", res[851].exec("1234"), 2646);
-assertEquals("abcd", res[852].exec("abcd"), 2647);
-assertEquals("abcd", res[853].exec("abcd"), 2648);
-assertEquals("abc", res[854].exec("the abc"), 2649);
-assertEquals(null, res[854].exec("*** Failers", 2650));
-assertEquals(null, res[854].exec("abc", 2651));
-assertEquals("abc", res[855].exec("abc"), 2652);
-assertEquals(null, res[855].exec("*** Failers", 2653));
-assertEquals(null, res[855].exec("the abc", 2654));
-assertEquals("aabb,b", res[856].exec("aabbbbb"), 2655);
-assertEquals("aabbbbb,abbbbb", res[857].exec("aabbbbb"), 2656);
-assertEquals("aa,a", res[858].exec("aabbbbb"), 2657);
-assertEquals("aabb,b", res[859].exec("aabbbbb"), 2658);
-assertEquals("Alan Other <user@dom.ain>", res[860].exec("Alan Other <user@dom.ain>"), 2659);
-assertEquals("user@dom.ain", res[860].exec("<user@dom.ain>"), 2660);
-assertEquals("user@dom.ain", res[860].exec("user@dom.ain"), 2661);
-assertEquals("\"A. Other\" <user.1234@dom.ain> (a comment)", res[860].exec("\"A. Other\" <user.1234@dom.ain> (a comment)"), 2662);
-assertEquals(" Other <user.1234@dom.ain> (a comment)", res[860].exec("A. Other <user.1234@dom.ain> (a comment)"), 2663);
-assertEquals("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay", res[860].exec("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay"), 2664);
-assertEquals("user@some.where", res[860].exec("A missing angle <user@some.where"), 2665);
-assertEquals(null, res[860].exec("*** Failers", 2666));
-assertEquals(null, res[860].exec("The quick brown fox", 2667));
-assertEquals("Alan Other <user@dom.ain>", res[861].exec("Alan Other <user@dom.ain>"), 2668);
-assertEquals("user@dom.ain", res[861].exec("<user@dom.ain>"), 2669);
-assertEquals("user@dom.ain", res[861].exec("user@dom.ain"), 2670);
-assertEquals("\"A. Other\" <user.1234@dom.ain>", res[861].exec("\"A. Other\" <user.1234@dom.ain> (a comment)"), 2671);
-assertEquals(" Other <user.1234@dom.ain>", res[861].exec("A. Other <user.1234@dom.ain> (a comment)"), 2672);
-assertEquals("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay", res[861].exec("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay"), 2673);
-assertEquals("user@some.where", res[861].exec("A missing angle <user@some.where"), 2674);
-assertEquals(null, res[861].exec("*** Failers", 2675));
-assertEquals(null, res[861].exec("The quick brown fox", 2676));
-assertEquals(null, res[861].exec("abc\x00def\x00pqr\x00xyz\x000AB", 2677));
-assertEquals(null, res[861].exec("abc456 abc\x00def\x00pqr\x00xyz\x000ABCDE", 2678));
-assertEquals("abc\x0def\x00pqr\x000xyz\x0000AB", res[862].exec("abc\x0def\x00pqr\x000xyz\x0000AB"), 2679);
-assertEquals("abc\x0def\x00pqr\x000xyz\x0000AB", res[862].exec("abc456 abc\x0def\x00pqr\x000xyz\x0000ABCDE"), 2680);
-assertEquals("\x00", res[863].exec("\x00A"), 2681);
-assertEquals("\x01", res[863].exec("\x01B"), 2682);
-assertEquals("\x1f", res[863].exec("\x1fC"), 2683);
-assertEquals("\x00\x00\x00\x00", res[864].exec("\x00\x00\x00\x00"), 2684);
-assertEquals(null, res[865].exec("The Ax0x0Z", 2685));
-assertEquals(null, res[865].exec("An A\x00x0\x00Z", 2686));
-assertEquals(null, res[865].exec("*** Failers", 2687));
-assertEquals(null, res[865].exec("A\x00Z", 2688));
-assertEquals(null, res[865].exec("A\x00x0\x00x0Z", 2689));
-assertEquals(" ", res[866].exec(" abc"), 2690);
-assertEquals("\x0c", res[866].exec("\x0cabc"), 2691);
-assertEquals("\n", res[866].exec("\nabc"), 2692);
-assertEquals("\x0d", res[866].exec("\x0dabc"), 2693);
-assertEquals("\x09", res[866].exec("\x09abc"), 2694);
-assertEquals(null, res[866].exec("*** Failers", 2695));
-assertEquals(null, res[866].exec("abc", 2696));
-assertEquals("abc", res[867].exec("abc"), 2697);
-assertEquals("abbbbc", res[868].exec("abbbbc"), 2698);
-assertEquals("abbbc", res[868].exec("abbbc"), 2699);
-assertEquals("abbc", res[868].exec("abbc"), 2700);
-assertEquals(null, res[868].exec("*** Failers", 2701));
-assertEquals(null, res[868].exec("abc", 2702));
-assertEquals(null, res[868].exec("abbbbbc", 2703));
-assertEquals("track1.title:TBlah blah blah,track1,title,Blah blah blah", res[869].exec("track1.title:TBlah blah blah"), 2704);
-assertEquals("track1.title:TBlah blah blah,track1,title,Blah blah blah", res[870].exec("track1.title:TBlah blah blah"), 2705);
-assertEquals("track1.title:TBlah blah blah,track1,title,Blah blah blah", res[871].exec("track1.title:TBlah blah blah"), 2706);
-assertEquals("WXY_^abc", res[872].exec("WXY_^abc"), 2707);
-assertEquals(null, res[872].exec("*** Failers", 2708));
-assertEquals(null, res[872].exec("wxy", 2709));
-assertEquals("WXY_^abc", res[873].exec("WXY_^abc"), 2710);
-assertEquals("wxy_^ABC", res[873].exec("wxy_^ABC"), 2711);
-assertEquals("WXY_^abc", res[874].exec("WXY_^abc"), 2712);
-assertEquals("wxy_^ABC", res[874].exec("wxy_^ABC"), 2713);
-assertEquals("abc", res[875].exec("abc"), 2714);
-assertEquals("abc", res[875].exec("qqq\nabc"), 2715);
-assertEquals("abc", res[875].exec("abc\nzzz"), 2716);
-assertEquals("abc", res[875].exec("qqq\nabc\nzzz"), 2717);
-assertEquals("abc", res[876].exec("abc"), 2718);
-assertEquals(null, res[876].exec("*** Failers", 2719));
-assertEquals(null, res[876].exec("qqq\nabc", 2720));
-assertEquals(null, res[876].exec("abc\nzzz", 2721));
-assertEquals(null, res[876].exec("qqq\nabc\nzzz", 2722));
-assertEquals(null, res[877].exec("abc", 2723));
-assertEquals(null, res[877].exec("abc\n ", 2724));
-assertEquals(null, res[877].exec("*** Failers", 2725));
-assertEquals(null, res[877].exec("qqq\nabc", 2726));
-assertEquals(null, res[877].exec("abc\nzzz", 2727));
-assertEquals(null, res[877].exec("qqq\nabc\nzzz", 2728));
-assertEquals(null, res[878].exec("abc\ndef", 2729));
-assertEquals(null, res[879].exec("*** Failers", 2730));
-assertEquals(null, res[879].exec("abc\ndef", 2731));
-assertEquals("b", res[880].exec("b::c"), 2732);
-assertEquals("::", res[880].exec("c::b"), 2733);
-assertEquals("az-", res[881].exec("az-"), 2734);
-assertEquals("a", res[881].exec("*** Failers"), 2735);
-assertEquals(null, res[881].exec("b", 2736));
-assertEquals("za-", res[882].exec("za-"), 2737);
-assertEquals("a", res[882].exec("*** Failers"), 2738);
-assertEquals(null, res[882].exec("b", 2739));
-assertEquals("a-z", res[883].exec("a-z"), 2740);
-assertEquals("a", res[883].exec("*** Failers"), 2741);
-assertEquals(null, res[883].exec("b", 2742));
-assertEquals("abcdxyz", res[884].exec("abcdxyz"), 2743);
-assertEquals("12-34", res[885].exec("12-34"), 2744);
-assertEquals(null, res[885].exec("*** Failers", 2745));
-assertEquals(null, res[885].exec("aaa", 2746));
-assertEquals("12-34z", res[886].exec("12-34z"), 2747);
-assertEquals(null, res[886].exec("*** Failers", 2748));
-assertEquals(null, res[886].exec("aaa", 2749));
-assertEquals("\\", res[887].exec("\\\\"), 2750);
-assertEquals(" Z", res[888].exec("the Zoo"), 2751);
-assertEquals(null, res[888].exec("*** Failers", 2752));
-assertEquals(null, res[888].exec("Zulu", 2753));
-assertEquals("ab{3cd", res[889].exec("ab{3cd"), 2754);
-assertEquals("ab{3,cd", res[890].exec("ab{3,cd"), 2755);
-assertEquals("ab{3,4a}cd", res[891].exec("ab{3,4a}cd"), 2756);
-assertEquals("{4,5a}bc", res[892].exec("{4,5a}bc"), 2757);
-assertEquals(null, res[893].exec("a\x0db", 2758));
-assertEquals(null, res[893].exec("*** Failers", 2759));
-assertEquals(null, res[893].exec("a\nb", 2760));
-assertEquals("abc", res[894].exec("abc"), 2761);
-assertEquals(null, res[894].exec("abc\n", 2762));
-assertEquals(null, res[894].exec("*** Failers", 2763));
-assertEquals(null, res[894].exec("abc\ndef", 2764));
-assertEquals("abcS,abc", res[895].exec("abcS"), 2765);
-assertEquals("abc\x93,abc", res[896].exec("abc\x93"), 2766);
-assertEquals("abc\xd3,abc", res[897].exec("abc\xd3"), 2767);
-assertEquals("abc@,abc", res[898].exec("abc@"), 2768);
-assertEquals("abc@,abc", res[898].exec("abc@"), 2769);
-assertEquals("abc@,abc", res[898].exec("abc@0"), 2770);
-assertEquals("abc@,abc", res[898].exec("abc@0"), 2771);
-assertEquals("abc@,abc", res[898].exec("abc@0"), 2772);
-assertEquals("abc@,abc", res[898].exec("abc@0"), 2773);
-assertEquals("abc@,abc", res[898].exec("abc@0"), 2774);
-assertEquals("abc@,abc", res[898].exec("abc@0"), 2775);
-assertEquals(null, res[899].exec("abc\x0081", 2776));
-assertEquals(null, res[899].exec("abc\x0081", 2777));
-assertEquals(null, res[900].exec("abc\x0091", 2778));
-assertEquals(null, res[900].exec("abc\x0091", 2779));
-assertEquals("abcdefghijk\nS,a,b,c,d,e,f,g,h,i,j,k", res[901].exec("abcdefghijk\nS"), 2780);
-assertEquals("abidef", res[902].exec("abidef"), 2781);
-assertEquals("bc", res[903].exec("bc"), 2782);
-assertEquals("xyz,,", res[904].exec("xyz"), 2783);
-assertEquals("abc\x08de", res[905].exec("abc\x08de"), 2784);
-assertEquals("abc\x01de", res[906].exec("abc\x01de"), 2785);
-assertEquals("abc\x01de,abc", res[907].exec("abc\x01de"), 2786);
-assertEquals(null, res[907].exec("a\nb", 2787));
-assertEquals("baNOTcccc,b,a,NOT,cccc", res[908].exec("baNOTccccd"), 2788);
-assertEquals("baNOTccc,b,a,NOT,ccc", res[908].exec("baNOTcccd"), 2789);
-assertEquals("baNOTcc,b,a,NO,Tcc", res[908].exec("baNOTccd"), 2790);
-assertEquals("baccc,b,a,,ccc", res[908].exec("bacccd"), 2791);
-assertEquals("*** Failers,*,*,* Fail,ers", res[908].exec("*** Failers"), 2792);
-assertEquals(null, res[908].exec("anything", 2793));
-assertEquals(null, res[908].exec("b\x08c   ", 2794));
-assertEquals(null, res[908].exec("baccd", 2795));
-assertEquals("A", res[909].exec("Abc"), 2796);
-assertEquals("b", res[910].exec("Abc "), 2797);
-assertEquals("AAA", res[911].exec("AAAaAbc"), 2798);
-assertEquals("bc ", res[912].exec("AAAaAbc "), 2799);
-assertEquals("bbb\nccc", res[913].exec("bbb\nccc"), 2800);
-assertEquals("c", res[914].exec("abc"), 2801);
-assertEquals("s", res[914].exec("*** Failers"), 2802);
-assertEquals(" ", res[914].exec("abk   "), 2803);
-assertEquals("abc", res[915].exec("abc"), 2804);
-assertEquals("bc", res[915].exec("kbc"), 2805);
-assertEquals("bc ", res[915].exec("kabc "), 2806);
-assertEquals("ers", res[915].exec("*** Failers"), 2807);
-assertEquals(null, res[915].exec("abk", 2808));
-assertEquals(null, res[915].exec("akb", 2809));
-assertEquals(null, res[915].exec("akk ", 2810));
-assertEquals("12345678@a.b.c.d", res[916].exec("12345678@a.b.c.d"), 2811);
-assertEquals("123456789@x.y.z", res[916].exec("123456789@x.y.z"), 2812);
-assertEquals(null, res[916].exec("*** Failers", 2813));
-assertEquals(null, res[916].exec("12345678@x.y.uk", 2814));
-assertEquals(null, res[916].exec("1234567@a.b.c.d       ", 2815));
-assertEquals("b", res[917].exec("aaaabcd"), 2816);
-assertEquals("A", res[917].exec("aaAabcd "), 2817);
-assertEquals("b", res[918].exec("aaaabcd"), 2818);
-assertEquals("b", res[918].exec("aaAabcd "), 2819);
-assertEquals("b", res[919].exec("aaaabcd"), 2820);
-assertEquals("A", res[919].exec("aaAabcd "), 2821);
-assertEquals("b", res[920].exec("aaaabcd"), 2822);
-assertEquals("b", res[920].exec("aaAabcd "), 2823);
-assertEquals("PSTAIREISLL", res[922].exec("xxxxxxxxxxxPSTAIREISLLxxxxxxxxx"), 2824);
-assertEquals("PSTAIREISLL", res[923].exec("xxxxxxxxxxxPSTAIREISLLxxxxxxxxx"), 2825);
-assertEquals(".230003938,.23", res[924].exec("1.230003938"), 2826);
-assertEquals(".875000282,.875", res[924].exec("1.875000282   "), 2827);
-assertEquals(".235,.23", res[924].exec("1.235  "), 2828);
-assertEquals(null, res[924].exec("              ", 2829));
-assertEquals(".23,.23,", res[925].exec("1.230003938      "), 2830);
-assertEquals(".875,.875,5", res[925].exec("1.875000282"), 2831);
-assertEquals(null, res[925].exec("*** Failers ", 2832));
-assertEquals(null, res[925].exec("1.235 ", 2833));
+assertNull(res[808].exec("\x01\x01e;z", 2439));
+assertToStringEquals("a", res[809].exec("athing"), 2440);
+assertToStringEquals("b", res[809].exec("bthing"), 2441);
+assertToStringEquals("]", res[809].exec("]thing"), 2442);
+assertToStringEquals("c", res[809].exec("cthing"), 2443);
+assertToStringEquals("d", res[809].exec("dthing"), 2444);
+assertToStringEquals("e", res[809].exec("ething"), 2445);
+assertNull(res[809].exec("*** Failers", 2446));
+assertNull(res[809].exec("fthing", 2447));
+assertNull(res[809].exec("[thing", 2448));
+assertNull(res[809].exec("\\thing", 2449));
+assertNull(res[810].exec("]thing", 2450));
+assertNull(res[810].exec("cthing", 2451));
+assertNull(res[810].exec("dthing", 2452));
+assertNull(res[810].exec("ething", 2453));
+assertNull(res[810].exec("*** Failers", 2454));
+assertNull(res[810].exec("athing", 2455));
+assertNull(res[810].exec("fthing", 2456));
+assertToStringEquals("f", res[811].exec("fthing"), 2457);
+assertToStringEquals("[", res[811].exec("[thing"), 2458);
+assertToStringEquals("\\", res[811].exec("\\thing"), 2459);
+assertToStringEquals("*", res[811].exec("*** Failers"), 2460);
+assertNull(res[811].exec("athing", 2461));
+assertNull(res[811].exec("bthing", 2462));
+assertNull(res[811].exec("]thing", 2463));
+assertNull(res[811].exec("cthing", 2464));
+assertNull(res[811].exec("dthing", 2465));
+assertNull(res[811].exec("ething", 2466));
+assertNull(res[812].exec("athing", 2467));
+assertNull(res[812].exec("fthing", 2468));
+assertNull(res[812].exec("*** Failers", 2469));
+assertNull(res[812].exec("]thing", 2470));
+assertNull(res[812].exec("cthing", 2471));
+assertNull(res[812].exec("dthing", 2472));
+assertNull(res[812].exec("ething", 2473));
+assertNull(res[812].exec("\ufffd", 2474));
+assertNull(res[812].exec("\ufffd", 2475));
+assertToStringEquals("0", res[813].exec("0"), 2476);
+assertToStringEquals("1", res[813].exec("1"), 2477);
+assertToStringEquals("2", res[813].exec("2"), 2478);
+assertToStringEquals("3", res[813].exec("3"), 2479);
+assertToStringEquals("4", res[813].exec("4"), 2480);
+assertToStringEquals("5", res[813].exec("5"), 2481);
+assertToStringEquals("6", res[813].exec("6"), 2482);
+assertToStringEquals("7", res[813].exec("7"), 2483);
+assertToStringEquals("8", res[813].exec("8"), 2484);
+assertToStringEquals("9", res[813].exec("9"), 2485);
+assertToStringEquals("10", res[813].exec("10"), 2486);
+assertToStringEquals("100", res[813].exec("100"), 2487);
+assertNull(res[813].exec("*** Failers", 2488));
+assertNull(res[813].exec("abc", 2489));
+assertToStringEquals("enter", res[814].exec("enter"), 2490);
+assertToStringEquals("inter", res[814].exec("inter"), 2491);
+assertToStringEquals("uponter", res[814].exec("uponter"), 2492);
+assertToStringEquals("xxx0", res[815].exec("xxx0"), 2493);
+assertToStringEquals("xxx1234", res[815].exec("xxx1234"), 2494);
+assertNull(res[815].exec("*** Failers", 2495));
+assertNull(res[815].exec("xxx", 2496));
+assertToStringEquals("x123", res[816].exec("x123"), 2497);
+assertToStringEquals("xx123", res[816].exec("xx123"), 2498);
+assertToStringEquals("123456", res[816].exec("123456"), 2499);
+assertNull(res[816].exec("*** Failers", 2500));
+assertNull(res[816].exec("123", 2501));
+assertToStringEquals("x1234", res[816].exec("x1234"), 2502);
+assertToStringEquals("x123", res[817].exec("x123"), 2503);
+assertToStringEquals("xx123", res[817].exec("xx123"), 2504);
+assertToStringEquals("123456", res[817].exec("123456"), 2505);
+assertNull(res[817].exec("*** Failers", 2506));
+assertNull(res[817].exec("123", 2507));
+assertToStringEquals("x1234", res[817].exec("x1234"), 2508);
+assertToStringEquals("abc!pqr=apquxz.ixr.zzz.ac.uk,abc,pqr", res[818].exec("abc!pqr=apquxz.ixr.zzz.ac.uk"), 2509);
+assertNull(res[818].exec("*** Failers", 2510));
+assertNull(res[818].exec("!pqr=apquxz.ixr.zzz.ac.uk", 2511));
+assertNull(res[818].exec("abc!=apquxz.ixr.zzz.ac.uk", 2512));
+assertNull(res[818].exec("abc!pqr=apquxz:ixr.zzz.ac.uk", 2513));
+assertNull(res[818].exec("abc!pqr=apquxz.ixr.zzz.ac.ukk", 2514));
+assertToStringEquals(":", res[819].exec("Well, we need a colon: somewhere"), 2515);
+assertNull(res[819].exec("*** Fail if we don't", 2516));
+assertToStringEquals("0abc,0abc", res[820].exec("0abc"), 2517);
+assertToStringEquals("abc,abc", res[820].exec("abc"), 2518);
+assertToStringEquals("fed,fed", res[820].exec("fed"), 2519);
+assertToStringEquals("E,E", res[820].exec("E"), 2520);
+assertToStringEquals("::,::", res[820].exec("::"), 2521);
+assertToStringEquals("5f03:12C0::932e,5f03:12C0::932e", res[820].exec("5f03:12C0::932e"), 2522);
+assertToStringEquals("def,def", res[820].exec("fed def"), 2523);
+assertToStringEquals("ff,ff", res[820].exec("Any old stuff"), 2524);
+assertNull(res[820].exec("*** Failers", 2525));
+assertNull(res[820].exec("0zzz", 2526));
+assertNull(res[820].exec("gzzz", 2527));
+assertNull(res[820].exec("fed ", 2528));
+assertNull(res[820].exec("Any old rubbish", 2529));
+assertToStringEquals(".1.2.3,1,2,3", res[821].exec(".1.2.3"), 2530);
+assertToStringEquals("A.12.123.0,12,123,0", res[821].exec("A.12.123.0"), 2531);
+assertNull(res[821].exec("*** Failers", 2532));
+assertNull(res[821].exec(".1.2.3333", 2533));
+assertNull(res[821].exec("1.2.3", 2534));
+assertNull(res[821].exec("1234.2.3", 2535));
+assertToStringEquals("1 IN SOA non-sp1 non-sp2(,1,non-sp1,non-sp2", res[822].exec("1 IN SOA non-sp1 non-sp2("), 2536);
+assertToStringEquals("1    IN    SOA    non-sp1    non-sp2   (,1,non-sp1,non-sp2", res[822].exec("1    IN    SOA    non-sp1    non-sp2   ("), 2537);
+assertNull(res[822].exec("*** Failers", 2538));
+assertNull(res[822].exec("1IN SOA non-sp1 non-sp2(", 2539));
+assertToStringEquals("a.,", res[823].exec("a."), 2540);
+assertToStringEquals("Z.,", res[823].exec("Z."), 2541);
+assertToStringEquals("2.,", res[823].exec("2."), 2542);
+assertToStringEquals("ab-c.pq-r.,.pq-r", res[823].exec("ab-c.pq-r."), 2543);
+assertToStringEquals("sxk.zzz.ac.uk.,.uk", res[823].exec("sxk.zzz.ac.uk."), 2544);
+assertToStringEquals("x-.y-.,.y-", res[823].exec("x-.y-."), 2545);
+assertNull(res[823].exec("*** Failers", 2546));
+assertNull(res[823].exec("-abc.peq.", 2547));
+assertToStringEquals("*.a,,,", res[824].exec("*.a"), 2548);
+assertToStringEquals("*.b0-a,0-a,,", res[824].exec("*.b0-a"), 2549);
+assertToStringEquals("*.c3-b.c,3-b,.c,", res[824].exec("*.c3-b.c"), 2550);
+assertToStringEquals("*.c-a.b-c,-a,.b-c,-c", res[824].exec("*.c-a.b-c"), 2551);
+assertNull(res[824].exec("*** Failers", 2552));
+assertNull(res[824].exec("*.0", 2553));
+assertNull(res[824].exec("*.a-", 2554));
+assertNull(res[824].exec("*.a-b.c-", 2555));
+assertNull(res[824].exec("*.c-a.0-c", 2556));
+assertToStringEquals("abde,de,abd,e", res[825].exec("abde"), 2557);
+assertToStringEquals("abdf,,abd,f", res[826].exec("abdf"), 2558);
+assertToStringEquals("ab,abcd,cd,ab", res[827].exec("abcd"), 2559);
+assertToStringEquals("a.b.c.d,.d", res[828].exec("a.b.c.d"), 2560);
+assertToStringEquals("A.B.C.D,.D", res[828].exec("A.B.C.D"), 2561);
+assertToStringEquals("a.b.c.1.2.3.C,.C", res[828].exec("a.b.c.1.2.3.C"), 2562);
+assertToStringEquals("\"1234\",", res[829].exec("\"1234\""), 2563);
+assertToStringEquals("\"abcd\" ;,;", res[829].exec("\"abcd\" ;"), 2564);
+assertToStringEquals("\"\" ; rhubarb,; rhubarb", res[829].exec("\"\" ; rhubarb"), 2565);
+assertNull(res[829].exec("*** Failers", 2566));
+assertNull(res[829].exec("\"1234\" : things", 2567));
+assertNull(res[830].exec("\\", 2568));
+assertNull(res[830].exec("*** Failers", 2569));
+assertToStringEquals("ab c", res[831].exec("ab c"), 2570);
+assertNull(res[831].exec("*** Failers", 2571));
+assertNull(res[831].exec("abc", 2572));
+assertNull(res[831].exec("ab cde", 2573));
+assertToStringEquals("ab c", res[831].exec("ab c"), 2574);
+assertNull(res[831].exec("*** Failers", 2575));
+assertNull(res[831].exec("abc", 2576));
+assertNull(res[831].exec("ab cde", 2577));
+assertToStringEquals("a bcd", res[832].exec("a bcd"), 2578);
+assertNull(res[832].exec("a b d", 2579));
+assertNull(res[832].exec("*** Failers", 2580));
+assertNull(res[832].exec("abcd", 2581));
+assertNull(res[832].exec("ab d", 2582));
+assertToStringEquals("abcdefhijklm,abc,bc,c,def,ef,f,hij,ij,j,klm,lm,m", res[833].exec("abcdefhijklm"), 2583);
+assertToStringEquals("abcdefhijklm,bc,c,ef,f,ij,j,lm,m", res[834].exec("abcdefhijklm"), 2584);
+assertNull(res[835].exec("a+ Z0+\x08\n\x1d\x12", 2585));
+assertNull(res[835].exec(".^$(*+)|{?,?}", 2586));
+assertToStringEquals("z", res[836].exec("z"), 2587);
+assertToStringEquals("az", res[836].exec("az"), 2588);
+assertToStringEquals("aaaz", res[836].exec("aaaz"), 2589);
+assertToStringEquals("a", res[836].exec("a"), 2590);
+assertToStringEquals("aa", res[836].exec("aa"), 2591);
+assertToStringEquals("aaaa", res[836].exec("aaaa"), 2592);
+assertToStringEquals("a", res[836].exec("a+"), 2593);
+assertToStringEquals("aa", res[836].exec("aa+"), 2594);
+assertToStringEquals("z", res[837].exec("z"), 2595);
+assertToStringEquals("a", res[837].exec("az"), 2596);
+assertToStringEquals("a", res[837].exec("aaaz"), 2597);
+assertToStringEquals("a", res[837].exec("a"), 2598);
+assertToStringEquals("a", res[837].exec("aa"), 2599);
+assertToStringEquals("a", res[837].exec("aaaa"), 2600);
+assertToStringEquals("a", res[837].exec("a+"), 2601);
+assertToStringEquals("a", res[837].exec("aa+"), 2602);
+assertToStringEquals("az", res[838].exec("az"), 2603);
+assertToStringEquals("aaaz", res[838].exec("aaaz"), 2604);
+assertToStringEquals("aa", res[838].exec("aa"), 2605);
+assertToStringEquals("aaaa", res[838].exec("aaaa"), 2606);
+assertToStringEquals("aa", res[838].exec("aa+"), 2607);
+assertToStringEquals("az", res[839].exec("az"), 2608);
+assertToStringEquals("aa", res[839].exec("aaaz"), 2609);
+assertToStringEquals("aa", res[839].exec("aa"), 2610);
+assertToStringEquals("aa", res[839].exec("aaaa"), 2611);
+assertToStringEquals("aa", res[839].exec("aa+"), 2612);
+assertToStringEquals("1234567890", res[840].exec("1234567890"), 2613);
+assertToStringEquals("12345678ab", res[840].exec("12345678ab"), 2614);
+assertToStringEquals("12345678__", res[840].exec("12345678__"), 2615);
+assertNull(res[840].exec("*** Failers", 2616));
+assertNull(res[840].exec("1234567", 2617));
+assertToStringEquals("uoie", res[841].exec("uoie"), 2618);
+assertToStringEquals("1234", res[841].exec("1234"), 2619);
+assertToStringEquals("12345", res[841].exec("12345"), 2620);
+assertToStringEquals("aaaaa", res[841].exec("aaaaa"), 2621);
+assertNull(res[841].exec("*** Failers", 2622));
+assertNull(res[841].exec("123456", 2623));
+assertToStringEquals("uoie", res[842].exec("uoie"), 2624);
+assertToStringEquals("1234", res[842].exec("1234"), 2625);
+assertToStringEquals("1234", res[842].exec("12345"), 2626);
+assertToStringEquals("aaaa", res[842].exec("aaaaa"), 2627);
+assertToStringEquals("1234", res[842].exec("123456"), 2628);
+assertToStringEquals("From abcd  Mon Sep 01 12:33,abcd", res[843].exec("From abcd  Mon Sep 01 12:33:02 1997"), 2629);
+assertToStringEquals("From abcd  Mon Sep 01 12:33,Sep ", res[844].exec("From abcd  Mon Sep 01 12:33:02 1997"), 2630);
+assertToStringEquals("From abcd  Mon Sep  1 12:33,Sep  ", res[844].exec("From abcd  Mon Sep  1 12:33:02 1997"), 2631);
+assertNull(res[844].exec("*** Failers", 2632));
+assertNull(res[844].exec("From abcd  Sep 01 12:33:02 1997", 2633));
+assertNull(res[845].exec("12\n34", 2634));
+assertNull(res[845].exec("12\x0d34", 2635));
+assertToStringEquals("brown", res[846].exec("the quick brown\x09 fox"), 2636);
+assertToStringEquals("foolish see?,lish see?", res[847].exec("foobar is foolish see?"), 2637);
+assertToStringEquals("rowbar etc, etc", res[848].exec("foobar crowbar etc"), 2638);
+assertToStringEquals("barrel,rel", res[848].exec("barrel"), 2639);
+assertToStringEquals("2barrel,rel", res[848].exec("2barrel"), 2640);
+assertToStringEquals("A barrel,rel", res[848].exec("A barrel"), 2641);
+assertToStringEquals("abc,abc", res[849].exec("abc456"), 2642);
+assertNull(res[849].exec("*** Failers", 2643));
+assertNull(res[849].exec("abc123", 2644));
+assertToStringEquals("1234", res[850].exec("1234"), 2645);
+assertToStringEquals("1234", res[851].exec("1234"), 2646);
+assertToStringEquals("abcd", res[852].exec("abcd"), 2647);
+assertToStringEquals("abcd", res[853].exec("abcd"), 2648);
+assertToStringEquals("abc", res[854].exec("the abc"), 2649);
+assertNull(res[854].exec("*** Failers", 2650));
+assertNull(res[854].exec("abc", 2651));
+assertToStringEquals("abc", res[855].exec("abc"), 2652);
+assertNull(res[855].exec("*** Failers", 2653));
+assertNull(res[855].exec("the abc", 2654));
+assertToStringEquals("aabb,b", res[856].exec("aabbbbb"), 2655);
+assertToStringEquals("aabbbbb,abbbbb", res[857].exec("aabbbbb"), 2656);
+assertToStringEquals("aa,a", res[858].exec("aabbbbb"), 2657);
+assertToStringEquals("aabb,b", res[859].exec("aabbbbb"), 2658);
+assertToStringEquals("Alan Other <user@dom.ain>", res[860].exec("Alan Other <user@dom.ain>"), 2659);
+assertToStringEquals("user@dom.ain", res[860].exec("<user@dom.ain>"), 2660);
+assertToStringEquals("user@dom.ain", res[860].exec("user@dom.ain"), 2661);
+assertToStringEquals("\"A. Other\" <user.1234@dom.ain> (a comment)", res[860].exec("\"A. Other\" <user.1234@dom.ain> (a comment)"), 2662);
+assertToStringEquals(" Other <user.1234@dom.ain> (a comment)", res[860].exec("A. Other <user.1234@dom.ain> (a comment)"), 2663);
+assertToStringEquals("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay", res[860].exec("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay"), 2664);
+assertToStringEquals("user@some.where", res[860].exec("A missing angle <user@some.where"), 2665);
+assertNull(res[860].exec("*** Failers", 2666));
+assertNull(res[860].exec("The quick brown fox", 2667));
+assertToStringEquals("Alan Other <user@dom.ain>", res[861].exec("Alan Other <user@dom.ain>"), 2668);
+assertToStringEquals("user@dom.ain", res[861].exec("<user@dom.ain>"), 2669);
+assertToStringEquals("user@dom.ain", res[861].exec("user@dom.ain"), 2670);
+assertToStringEquals("\"A. Other\" <user.1234@dom.ain>", res[861].exec("\"A. Other\" <user.1234@dom.ain> (a comment)"), 2671);
+assertToStringEquals(" Other <user.1234@dom.ain>", res[861].exec("A. Other <user.1234@dom.ain> (a comment)"), 2672);
+assertToStringEquals("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay", res[861].exec("\"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"@x400-re.lay"), 2673);
+assertToStringEquals("user@some.where", res[861].exec("A missing angle <user@some.where"), 2674);
+assertNull(res[861].exec("*** Failers", 2675));
+assertNull(res[861].exec("The quick brown fox", 2676));
+assertNull(res[861].exec("abc\x00def\x00pqr\x00xyz\x000AB", 2677));
+assertNull(res[861].exec("abc456 abc\x00def\x00pqr\x00xyz\x000ABCDE", 2678));
+assertToStringEquals("abc\x0def\x00pqr\x000xyz\x0000AB", res[862].exec("abc\x0def\x00pqr\x000xyz\x0000AB"), 2679);
+assertToStringEquals("abc\x0def\x00pqr\x000xyz\x0000AB", res[862].exec("abc456 abc\x0def\x00pqr\x000xyz\x0000ABCDE"), 2680);
+assertToStringEquals("\x00", res[863].exec("\x00A"), 2681);
+assertToStringEquals("\x01", res[863].exec("\x01B"), 2682);
+assertToStringEquals("\x1f", res[863].exec("\x1fC"), 2683);
+assertToStringEquals("\x00\x00\x00\x00", res[864].exec("\x00\x00\x00\x00"), 2684);
+assertNull(res[865].exec("The Ax0x0Z", 2685));
+assertNull(res[865].exec("An A\x00x0\x00Z", 2686));
+assertNull(res[865].exec("*** Failers", 2687));
+assertNull(res[865].exec("A\x00Z", 2688));
+assertNull(res[865].exec("A\x00x0\x00x0Z", 2689));
+assertToStringEquals(" ", res[866].exec(" abc"), 2690);
+assertToStringEquals("\x0c", res[866].exec("\x0cabc"), 2691);
+assertToStringEquals("\n", res[866].exec("\nabc"), 2692);
+assertToStringEquals("\x0d", res[866].exec("\x0dabc"), 2693);
+assertToStringEquals("\x09", res[866].exec("\x09abc"), 2694);
+assertNull(res[866].exec("*** Failers", 2695));
+assertNull(res[866].exec("abc", 2696));
+assertToStringEquals("abc", res[867].exec("abc"), 2697);
+assertToStringEquals("abbbbc", res[868].exec("abbbbc"), 2698);
+assertToStringEquals("abbbc", res[868].exec("abbbc"), 2699);
+assertToStringEquals("abbc", res[868].exec("abbc"), 2700);
+assertNull(res[868].exec("*** Failers", 2701));
+assertNull(res[868].exec("abc", 2702));
+assertNull(res[868].exec("abbbbbc", 2703));
+assertToStringEquals("track1.title:TBlah blah blah,track1,title,Blah blah blah", res[869].exec("track1.title:TBlah blah blah"), 2704);
+assertToStringEquals("track1.title:TBlah blah blah,track1,title,Blah blah blah", res[870].exec("track1.title:TBlah blah blah"), 2705);
+assertToStringEquals("track1.title:TBlah blah blah,track1,title,Blah blah blah", res[871].exec("track1.title:TBlah blah blah"), 2706);
+assertToStringEquals("WXY_^abc", res[872].exec("WXY_^abc"), 2707);
+assertNull(res[872].exec("*** Failers", 2708));
+assertNull(res[872].exec("wxy", 2709));
+assertToStringEquals("WXY_^abc", res[873].exec("WXY_^abc"), 2710);
+assertToStringEquals("wxy_^ABC", res[873].exec("wxy_^ABC"), 2711);
+assertToStringEquals("WXY_^abc", res[874].exec("WXY_^abc"), 2712);
+assertToStringEquals("wxy_^ABC", res[874].exec("wxy_^ABC"), 2713);
+assertToStringEquals("abc", res[875].exec("abc"), 2714);
+assertToStringEquals("abc", res[875].exec("qqq\nabc"), 2715);
+assertToStringEquals("abc", res[875].exec("abc\nzzz"), 2716);
+assertToStringEquals("abc", res[875].exec("qqq\nabc\nzzz"), 2717);
+assertToStringEquals("abc", res[876].exec("abc"), 2718);
+assertNull(res[876].exec("*** Failers", 2719));
+assertNull(res[876].exec("qqq\nabc", 2720));
+assertNull(res[876].exec("abc\nzzz", 2721));
+assertNull(res[876].exec("qqq\nabc\nzzz", 2722));
+assertNull(res[877].exec("abc", 2723));
+assertNull(res[877].exec("abc\n ", 2724));
+assertNull(res[877].exec("*** Failers", 2725));
+assertNull(res[877].exec("qqq\nabc", 2726));
+assertNull(res[877].exec("abc\nzzz", 2727));
+assertNull(res[877].exec("qqq\nabc\nzzz", 2728));
+assertNull(res[878].exec("abc\ndef", 2729));
+assertNull(res[879].exec("*** Failers", 2730));
+assertNull(res[879].exec("abc\ndef", 2731));
+assertToStringEquals("b", res[880].exec("b::c"), 2732);
+assertToStringEquals("::", res[880].exec("c::b"), 2733);
+assertToStringEquals("az-", res[881].exec("az-"), 2734);
+assertToStringEquals("a", res[881].exec("*** Failers"), 2735);
+assertNull(res[881].exec("b", 2736));
+assertToStringEquals("za-", res[882].exec("za-"), 2737);
+assertToStringEquals("a", res[882].exec("*** Failers"), 2738);
+assertNull(res[882].exec("b", 2739));
+assertToStringEquals("a-z", res[883].exec("a-z"), 2740);
+assertToStringEquals("a", res[883].exec("*** Failers"), 2741);
+assertNull(res[883].exec("b", 2742));
+assertToStringEquals("abcdxyz", res[884].exec("abcdxyz"), 2743);
+assertToStringEquals("12-34", res[885].exec("12-34"), 2744);
+assertNull(res[885].exec("*** Failers", 2745));
+assertNull(res[885].exec("aaa", 2746));
+assertToStringEquals("12-34z", res[886].exec("12-34z"), 2747);
+assertNull(res[886].exec("*** Failers", 2748));
+assertNull(res[886].exec("aaa", 2749));
+assertToStringEquals("\\", res[887].exec("\\\\"), 2750);
+assertToStringEquals(" Z", res[888].exec("the Zoo"), 2751);
+assertNull(res[888].exec("*** Failers", 2752));
+assertNull(res[888].exec("Zulu", 2753));
+assertToStringEquals("ab{3cd", res[889].exec("ab{3cd"), 2754);
+assertToStringEquals("ab{3,cd", res[890].exec("ab{3,cd"), 2755);
+assertToStringEquals("ab{3,4a}cd", res[891].exec("ab{3,4a}cd"), 2756);
+assertToStringEquals("{4,5a}bc", res[892].exec("{4,5a}bc"), 2757);
+assertNull(res[893].exec("a\x0db", 2758));
+assertNull(res[893].exec("*** Failers", 2759));
+assertNull(res[893].exec("a\nb", 2760));
+assertToStringEquals("abc", res[894].exec("abc"), 2761);
+assertNull(res[894].exec("abc\n", 2762));
+assertNull(res[894].exec("*** Failers", 2763));
+assertNull(res[894].exec("abc\ndef", 2764));
+assertToStringEquals("abcS,abc", res[895].exec("abcS"), 2765);
+assertToStringEquals("abc\x93,abc", res[896].exec("abc\x93"), 2766);
+assertToStringEquals("abc\xd3,abc", res[897].exec("abc\xd3"), 2767);
+assertToStringEquals("abc@,abc", res[898].exec("abc@"), 2768);
+assertToStringEquals("abc@,abc", res[898].exec("abc@"), 2769);
+assertToStringEquals("abc@,abc", res[898].exec("abc@0"), 2770);
+assertToStringEquals("abc@,abc", res[898].exec("abc@0"), 2771);
+assertToStringEquals("abc@,abc", res[898].exec("abc@0"), 2772);
+assertToStringEquals("abc@,abc", res[898].exec("abc@0"), 2773);
+assertToStringEquals("abc@,abc", res[898].exec("abc@0"), 2774);
+assertToStringEquals("abc@,abc", res[898].exec("abc@0"), 2775);
+assertNull(res[899].exec("abc\x0081", 2776));
+assertNull(res[899].exec("abc\x0081", 2777));
+assertNull(res[900].exec("abc\x0091", 2778));
+assertNull(res[900].exec("abc\x0091", 2779));
+assertToStringEquals("abcdefghijk\nS,a,b,c,d,e,f,g,h,i,j,k", res[901].exec("abcdefghijk\nS"), 2780);
+assertToStringEquals("abidef", res[902].exec("abidef"), 2781);
+assertToStringEquals("bc", res[903].exec("bc"), 2782);
+assertToStringEquals("xyz,,", res[904].exec("xyz"), 2783);
+assertToStringEquals("abc\x08de", res[905].exec("abc\x08de"), 2784);
+assertToStringEquals("abc\x01de", res[906].exec("abc\x01de"), 2785);
+assertToStringEquals("abc\x01de,abc", res[907].exec("abc\x01de"), 2786);
+assertNull(res[907].exec("a\nb", 2787));
+assertToStringEquals("baNOTcccc,b,a,NOT,cccc", res[908].exec("baNOTccccd"), 2788);
+assertToStringEquals("baNOTccc,b,a,NOT,ccc", res[908].exec("baNOTcccd"), 2789);
+assertToStringEquals("baNOTcc,b,a,NO,Tcc", res[908].exec("baNOTccd"), 2790);
+assertToStringEquals("baccc,b,a,,ccc", res[908].exec("bacccd"), 2791);
+assertToStringEquals("*** Failers,*,*,* Fail,ers", res[908].exec("*** Failers"), 2792);
+assertNull(res[908].exec("anything", 2793));
+assertNull(res[908].exec("b\x08c   ", 2794));
+assertNull(res[908].exec("baccd", 2795));
+assertToStringEquals("A", res[909].exec("Abc"), 2796);
+assertToStringEquals("b", res[910].exec("Abc "), 2797);
+assertToStringEquals("AAA", res[911].exec("AAAaAbc"), 2798);
+assertToStringEquals("bc ", res[912].exec("AAAaAbc "), 2799);
+assertToStringEquals("bbb\nccc", res[913].exec("bbb\nccc"), 2800);
+assertToStringEquals("c", res[914].exec("abc"), 2801);
+assertToStringEquals("s", res[914].exec("*** Failers"), 2802);
+assertToStringEquals(" ", res[914].exec("abk   "), 2803);
+assertToStringEquals("abc", res[915].exec("abc"), 2804);
+assertToStringEquals("bc", res[915].exec("kbc"), 2805);
+assertToStringEquals("bc ", res[915].exec("kabc "), 2806);
+assertToStringEquals("ers", res[915].exec("*** Failers"), 2807);
+assertNull(res[915].exec("abk", 2808));
+assertNull(res[915].exec("akb", 2809));
+assertNull(res[915].exec("akk ", 2810));
+assertToStringEquals("12345678@a.b.c.d", res[916].exec("12345678@a.b.c.d"), 2811);
+assertToStringEquals("123456789@x.y.z", res[916].exec("123456789@x.y.z"), 2812);
+assertNull(res[916].exec("*** Failers", 2813));
+assertNull(res[916].exec("12345678@x.y.uk", 2814));
+assertNull(res[916].exec("1234567@a.b.c.d       ", 2815));
+assertToStringEquals("b", res[917].exec("aaaabcd"), 2816);
+assertToStringEquals("A", res[917].exec("aaAabcd "), 2817);
+assertToStringEquals("b", res[918].exec("aaaabcd"), 2818);
+assertToStringEquals("b", res[918].exec("aaAabcd "), 2819);
+assertToStringEquals("b", res[919].exec("aaaabcd"), 2820);
+assertToStringEquals("A", res[919].exec("aaAabcd "), 2821);
+assertToStringEquals("b", res[920].exec("aaaabcd"), 2822);
+assertToStringEquals("b", res[920].exec("aaAabcd "), 2823);
+assertToStringEquals("PSTAIREISLL", res[922].exec("xxxxxxxxxxxPSTAIREISLLxxxxxxxxx"), 2824);
+assertToStringEquals("PSTAIREISLL", res[923].exec("xxxxxxxxxxxPSTAIREISLLxxxxxxxxx"), 2825);
+assertToStringEquals(".230003938,.23", res[924].exec("1.230003938"), 2826);
+assertToStringEquals(".875000282,.875", res[924].exec("1.875000282   "), 2827);
+assertToStringEquals(".235,.23", res[924].exec("1.235  "), 2828);
+assertNull(res[924].exec("              ", 2829));
+assertToStringEquals(".23,.23,", res[925].exec("1.230003938      "), 2830);
+assertToStringEquals(".875,.875,5", res[925].exec("1.875000282"), 2831);
+assertNull(res[925].exec("*** Failers ", 2832));
+assertNull(res[925].exec("1.235 ", 2833));
 assertThrows("var re = /a(?)b/;", 2834);
-assertEquals(null, res[925].exec("ab ", 2835));
-assertEquals("foo table,foo,table", res[926].exec("Food is on the foo table"), 2836);
-assertEquals("food is under the bar in the bar,d is under the bar in the ", res[927].exec("The food is under the bar in the barn."), 2837);
-assertEquals("food is under the bar,d is under the ", res[928].exec("The food is under the bar in the barn."), 2838);
-assertEquals("I have 2 numbers: 53147,I have 2 numbers: 53147,", res[929].exec("I have 2 numbers: 53147"), 2839);
-assertEquals("I have 2 numbers: 53147,I have 2 numbers: 5314,7", res[930].exec("I have 2 numbers: 53147"), 2840);
-assertEquals(",,", res[931].exec("I have 2 numbers: 53147"), 2841);
-assertEquals("I have 2,I have ,2", res[932].exec("I have 2 numbers: 53147"), 2842);
-assertEquals("I have 2 numbers: 53147,I have 2 numbers: 5314,7", res[933].exec("I have 2 numbers: 53147"), 2843);
-assertEquals("I have 2 numbers: 53147,I have 2 numbers: ,53147", res[934].exec("I have 2 numbers: 53147"), 2844);
-assertEquals("I have 2 numbers: 53147,I have 2 numbers: ,53147", res[935].exec("I have 2 numbers: 53147"), 2845);
-assertEquals("I have 2 numbers: 53147,I have 2 numbers: ,53147", res[936].exec("I have 2 numbers: 53147"), 2846);
-assertEquals("AB", res[937].exec("ABC123"), 2847);
-assertEquals(" ", res[937].exec(" "), 2848);
-assertEquals("ABC,ABC", res[938].exec("ABC445"), 2849);
-assertEquals(null, res[938].exec("*** Failers", 2850));
-assertEquals(null, res[938].exec("ABC123", 2851));
-assertEquals("W46]", res[939].exec("W46]789 "), 2852);
-assertEquals("-46]", res[939].exec("-46]789"), 2853);
-assertEquals(null, res[939].exec("*** Failers", 2854));
-assertEquals(null, res[939].exec("Wall", 2855));
-assertEquals(null, res[939].exec("Zebra", 2856));
-assertEquals(null, res[939].exec("42", 2857));
-assertEquals(null, res[939].exec("[abcd] ", 2858));
-assertEquals(null, res[939].exec("]abcd[", 2859));
-assertEquals(null, res[939].exec("   ", 2860));
-assertEquals("W", res[940].exec("W46]789 "), 2861);
-assertEquals("W", res[940].exec("Wall"), 2862);
-assertEquals("Z", res[940].exec("Zebra"), 2863);
-assertEquals("X", res[940].exec("Xylophone  "), 2864);
-assertEquals("4", res[940].exec("42"), 2865);
-assertEquals("[", res[940].exec("[abcd] "), 2866);
-assertEquals("]", res[940].exec("]abcd["), 2867);
-assertEquals("\\", res[940].exec("\\backslash "), 2868);
-assertEquals(null, res[940].exec("*** Failers", 2869));
-assertEquals(null, res[940].exec("-46]789", 2870));
-assertEquals(null, res[940].exec("well", 2871));
-assertEquals("01/01/2000", res[941].exec("01/01/2000"), 2872);
-assertEquals(",", res[944].exec("bcd"), 2873);
-assertEquals(",", res[944].exec("abc"), 2874);
-assertEquals(",", res[944].exec("aab     "), 2875);
-assertEquals(",", res[945].exec("bcd"), 2876);
-assertEquals("a,a", res[945].exec("abc"), 2877);
-assertEquals("a,a", res[945].exec("aab  "), 2878);
-assertEquals(",", res[946].exec("bcd"), 2879);
-assertEquals("a,a", res[946].exec("abc"), 2880);
-assertEquals("aa,a", res[946].exec("aab  "), 2881);
-assertEquals(",", res[947].exec("bcd"), 2882);
-assertEquals("a,a", res[947].exec("abc"), 2883);
-assertEquals("aa,a", res[947].exec("aab"), 2884);
-assertEquals("aaa,a", res[947].exec("aaa   "), 2885);
-assertEquals(",", res[948].exec("bcd"), 2886);
-assertEquals("a,a", res[948].exec("abc"), 2887);
-assertEquals("aa,a", res[948].exec("aab"), 2888);
-assertEquals("aaa,a", res[948].exec("aaa"), 2889);
-assertEquals("aaaaaaaa,a", res[948].exec("aaaaaaaa    "), 2890);
-assertEquals(null, res[949].exec("bcd", 2891));
-assertEquals("a,a", res[949].exec("abc"), 2892);
-assertEquals("a,a", res[949].exec("aab  "), 2893);
-assertEquals(null, res[950].exec("bcd", 2894));
-assertEquals("a,a", res[950].exec("abc"), 2895);
-assertEquals("aa,a", res[950].exec("aab  "), 2896);
-assertEquals(null, res[951].exec("bcd", 2897));
-assertEquals("a,a", res[951].exec("abc"), 2898);
-assertEquals("aa,a", res[951].exec("aab"), 2899);
-assertEquals("aaa,a", res[951].exec("aaa   "), 2900);
-assertEquals(null, res[952].exec("bcd", 2901));
-assertEquals("a,a", res[952].exec("abc"), 2902);
-assertEquals("aa,a", res[952].exec("aab"), 2903);
-assertEquals("aaa,a", res[952].exec("aaa"), 2904);
-assertEquals("aaaaaaaa,a", res[952].exec("aaaaaaaa    "), 2905);
-assertEquals("bib.gif", res[953].exec("borfle\nbib.gif\nno"), 2906);
-assertEquals("bib.gif", res[954].exec("borfle\nbib.gif\nno"), 2907);
-assertEquals("bib.gif", res[955].exec("borfle\nbib.gif\nno"), 2908);
-assertEquals("bib.gif", res[956].exec("borfle\nbib.gif\nno"), 2909);
-assertEquals("bib.gif", res[957].exec("borfle\nbib.gif\nno"), 2910);
-assertEquals("no", res[958].exec("borfle\nbib.gif\nno"), 2911);
-assertEquals("borfle", res[959].exec("borfle\nbib.gif\nno"), 2912);
-assertEquals("no", res[960].exec("borfle\nbib.gif\nno"), 2913);
-assertEquals("borfle", res[961].exec("borfle\nbib.gif\nno"), 2914);
-assertEquals("", res[962].exec("borfle\nbib.gif\nno\n"), 2915);
-assertEquals("borfle", res[963].exec("borfle\nbib.gif\nno\n"), 2916);
-assertEquals("", res[964].exec("borfle\nbib.gif\nno\n"), 2917);
-assertEquals("borfle", res[965].exec("borfle\nbib.gif\nno\n"), 2918);
-assertEquals("1234X,1234X", res[966].exec("abcde\n1234Xyz"), 2919);
-assertEquals("B,B", res[966].exec("BarFoo "), 2920);
-assertEquals(null, res[966].exec("*** Failers", 2921));
-assertEquals(null, res[966].exec("abcde\nBar  ", 2922));
-assertEquals("1234X,1234X", res[967].exec("abcde\n1234Xyz"), 2923);
-assertEquals("B,B", res[967].exec("BarFoo "), 2924);
-assertEquals("B,B", res[967].exec("abcde\nBar  "), 2925);
-assertEquals("1234X,1234X", res[968].exec("abcde\n1234Xyz"), 2926);
-assertEquals("B,B", res[968].exec("BarFoo "), 2927);
-assertEquals(null, res[968].exec("*** Failers", 2928));
-assertEquals(null, res[968].exec("abcde\nBar  ", 2929));
-assertEquals("1234X,1234X", res[969].exec("abcde\n1234Xyz"), 2930);
-assertEquals("B,B", res[969].exec("BarFoo "), 2931);
-assertEquals("B,B", res[969].exec("abcde\nBar  "), 2932);
-assertEquals("1234X,1234X", res[969].exec("abcde\n1234Xyz"), 2933);
-assertEquals("B,B", res[969].exec("BarFoo "), 2934);
-assertEquals(null, res[969].exec("*** Failers ", 2935));
-assertEquals("B,B", res[969].exec("abcde\nBar  "), 2936);
-assertEquals("1234X,1234X", res[969].exec("abcde\n1234Xyz"), 2937);
-assertEquals("B,B", res[969].exec("BarFoo "), 2938);
-assertEquals(null, res[969].exec("*** Failers ", 2939));
-assertEquals("B,B", res[969].exec("abcde\nBar  "), 2940);
-assertEquals(null, res[970].exec("**** Failers", 2941));
-assertEquals(null, res[970].exec("abc\nB", 2942));
-assertEquals(null, res[970].exec(" ", 2943));
-assertEquals(null, res[970].exec("abc\nB", 2944));
-assertEquals(null, res[970].exec("abc\nB", 2945));
-assertEquals(null, res[970].exec(" ", 2946));
-assertEquals(null, res[970].exec("abc\nB", 2947));
-assertEquals(null, res[970].exec("abc\nB", 2948));
-assertEquals("B", res[970].exec("B\n"), 2949);
-assertEquals("123456654321", res[971].exec("123456654321"), 2950);
-assertEquals("123456654321", res[972].exec("123456654321 "), 2951);
-assertEquals("123456654321", res[973].exec("123456654321"), 2952);
-assertEquals("abcabcabcabc", res[974].exec("abcabcabcabc"), 2953);
-assertEquals("abcabcabcabc", res[975].exec("abcabcabcabc"), 2954);
-assertEquals("abcabcabcabc,c", res[976].exec("abcabcabcabc "), 2955);
-assertEquals("n", res[977].exec("n"), 2956);
-assertEquals(null, res[977].exec("*** Failers ", 2957));
-assertEquals(null, res[977].exec("z ", 2958));
-assertEquals("abcd", res[978].exec("abcd"), 2959);
-assertEquals(null, res[978].exec("*** Failers", 2960));
-assertEquals(null, res[978].exec("abce  ", 2961));
-assertEquals("abe", res[979].exec("abe"), 2962);
-assertEquals(null, res[979].exec("*** Failers", 2963));
-assertEquals(null, res[979].exec("abcde ", 2964));
-assertEquals("abd,", res[980].exec("abd"), 2965);
-assertEquals(null, res[980].exec("*** Failers", 2966));
-assertEquals(null, res[980].exec("abcd   ", 2967));
-assertEquals("a,", res[981].exec("a"), 2968);
-assertEquals("ab,b", res[981].exec("ab"), 2969);
-assertEquals("abbbb,bbbb", res[981].exec("abbbb"), 2970);
-assertEquals("a,", res[981].exec("*** Failers"), 2971);
-assertEquals(null, res[981].exec("bbbbb    ", 2972));
-assertEquals("abe", res[982].exec("abe"), 2973);
-assertEquals(null, res[982].exec("*** Failers", 2974));
-assertEquals(null, res[982].exec("ab1e   ", 2975));
-assertEquals("\"quick\",quick", res[983].exec("the \"quick\" brown fox"), 2976);
-assertEquals("\"the \\\"quick\\\" brown fox\", brown fox", res[983].exec("\"the \\\"quick\\\" brown fox\" "), 2977);
-assertEquals("", res[984].exec("abc"), 2978);
-assertEquals("", res[985].exec("abc "), 2979);
-assertEquals("", res[986].exec("abc "), 2980);
+assertNull(res[925].exec("ab ", 2835));
+assertToStringEquals("foo table,foo,table", res[926].exec("Food is on the foo table"), 2836);
+assertToStringEquals("food is under the bar in the bar,d is under the bar in the ", res[927].exec("The food is under the bar in the barn."), 2837);
+assertToStringEquals("food is under the bar,d is under the ", res[928].exec("The food is under the bar in the barn."), 2838);
+assertToStringEquals("I have 2 numbers: 53147,I have 2 numbers: 53147,", res[929].exec("I have 2 numbers: 53147"), 2839);
+assertToStringEquals("I have 2 numbers: 53147,I have 2 numbers: 5314,7", res[930].exec("I have 2 numbers: 53147"), 2840);
+assertToStringEquals(",,", res[931].exec("I have 2 numbers: 53147"), 2841);
+assertToStringEquals("I have 2,I have ,2", res[932].exec("I have 2 numbers: 53147"), 2842);
+assertToStringEquals("I have 2 numbers: 53147,I have 2 numbers: 5314,7", res[933].exec("I have 2 numbers: 53147"), 2843);
+assertToStringEquals("I have 2 numbers: 53147,I have 2 numbers: ,53147", res[934].exec("I have 2 numbers: 53147"), 2844);
+assertToStringEquals("I have 2 numbers: 53147,I have 2 numbers: ,53147", res[935].exec("I have 2 numbers: 53147"), 2845);
+assertToStringEquals("I have 2 numbers: 53147,I have 2 numbers: ,53147", res[936].exec("I have 2 numbers: 53147"), 2846);
+assertToStringEquals("AB", res[937].exec("ABC123"), 2847);
+assertToStringEquals(" ", res[937].exec(" "), 2848);
+assertToStringEquals("ABC,ABC", res[938].exec("ABC445"), 2849);
+assertNull(res[938].exec("*** Failers", 2850));
+assertNull(res[938].exec("ABC123", 2851));
+assertToStringEquals("W46]", res[939].exec("W46]789 "), 2852);
+assertToStringEquals("-46]", res[939].exec("-46]789"), 2853);
+assertNull(res[939].exec("*** Failers", 2854));
+assertNull(res[939].exec("Wall", 2855));
+assertNull(res[939].exec("Zebra", 2856));
+assertNull(res[939].exec("42", 2857));
+assertNull(res[939].exec("[abcd] ", 2858));
+assertNull(res[939].exec("]abcd[", 2859));
+assertNull(res[939].exec("   ", 2860));
+assertToStringEquals("W", res[940].exec("W46]789 "), 2861);
+assertToStringEquals("W", res[940].exec("Wall"), 2862);
+assertToStringEquals("Z", res[940].exec("Zebra"), 2863);
+assertToStringEquals("X", res[940].exec("Xylophone  "), 2864);
+assertToStringEquals("4", res[940].exec("42"), 2865);
+assertToStringEquals("[", res[940].exec("[abcd] "), 2866);
+assertToStringEquals("]", res[940].exec("]abcd["), 2867);
+assertToStringEquals("\\", res[940].exec("\\backslash "), 2868);
+assertNull(res[940].exec("*** Failers", 2869));
+assertNull(res[940].exec("-46]789", 2870));
+assertNull(res[940].exec("well", 2871));
+assertToStringEquals("01/01/2000", res[941].exec("01/01/2000"), 2872);
+assertToStringEquals(",", res[944].exec("bcd"), 2873);
+assertToStringEquals(",", res[944].exec("abc"), 2874);
+assertToStringEquals(",", res[944].exec("aab     "), 2875);
+assertToStringEquals(",", res[945].exec("bcd"), 2876);
+assertToStringEquals("a,a", res[945].exec("abc"), 2877);
+assertToStringEquals("a,a", res[945].exec("aab  "), 2878);
+assertToStringEquals(",", res[946].exec("bcd"), 2879);
+assertToStringEquals("a,a", res[946].exec("abc"), 2880);
+assertToStringEquals("aa,a", res[946].exec("aab  "), 2881);
+assertToStringEquals(",", res[947].exec("bcd"), 2882);
+assertToStringEquals("a,a", res[947].exec("abc"), 2883);
+assertToStringEquals("aa,a", res[947].exec("aab"), 2884);
+assertToStringEquals("aaa,a", res[947].exec("aaa   "), 2885);
+assertToStringEquals(",", res[948].exec("bcd"), 2886);
+assertToStringEquals("a,a", res[948].exec("abc"), 2887);
+assertToStringEquals("aa,a", res[948].exec("aab"), 2888);
+assertToStringEquals("aaa,a", res[948].exec("aaa"), 2889);
+assertToStringEquals("aaaaaaaa,a", res[948].exec("aaaaaaaa    "), 2890);
+assertNull(res[949].exec("bcd", 2891));
+assertToStringEquals("a,a", res[949].exec("abc"), 2892);
+assertToStringEquals("a,a", res[949].exec("aab  "), 2893);
+assertNull(res[950].exec("bcd", 2894));
+assertToStringEquals("a,a", res[950].exec("abc"), 2895);
+assertToStringEquals("aa,a", res[950].exec("aab  "), 2896);
+assertNull(res[951].exec("bcd", 2897));
+assertToStringEquals("a,a", res[951].exec("abc"), 2898);
+assertToStringEquals("aa,a", res[951].exec("aab"), 2899);
+assertToStringEquals("aaa,a", res[951].exec("aaa   "), 2900);
+assertNull(res[952].exec("bcd", 2901));
+assertToStringEquals("a,a", res[952].exec("abc"), 2902);
+assertToStringEquals("aa,a", res[952].exec("aab"), 2903);
+assertToStringEquals("aaa,a", res[952].exec("aaa"), 2904);
+assertToStringEquals("aaaaaaaa,a", res[952].exec("aaaaaaaa    "), 2905);
+assertToStringEquals("bib.gif", res[953].exec("borfle\nbib.gif\nno"), 2906);
+assertToStringEquals("bib.gif", res[954].exec("borfle\nbib.gif\nno"), 2907);
+assertToStringEquals("bib.gif", res[955].exec("borfle\nbib.gif\nno"), 2908);
+assertToStringEquals("bib.gif", res[956].exec("borfle\nbib.gif\nno"), 2909);
+assertToStringEquals("bib.gif", res[957].exec("borfle\nbib.gif\nno"), 2910);
+assertToStringEquals("no", res[958].exec("borfle\nbib.gif\nno"), 2911);
+assertToStringEquals("borfle", res[959].exec("borfle\nbib.gif\nno"), 2912);
+assertToStringEquals("no", res[960].exec("borfle\nbib.gif\nno"), 2913);
+assertToStringEquals("borfle", res[961].exec("borfle\nbib.gif\nno"), 2914);
+assertToStringEquals("", res[962].exec("borfle\nbib.gif\nno\n"), 2915);
+assertToStringEquals("borfle", res[963].exec("borfle\nbib.gif\nno\n"), 2916);
+assertToStringEquals("", res[964].exec("borfle\nbib.gif\nno\n"), 2917);
+assertToStringEquals("borfle", res[965].exec("borfle\nbib.gif\nno\n"), 2918);
+assertToStringEquals("1234X,1234X", res[966].exec("abcde\n1234Xyz"), 2919);
+assertToStringEquals("B,B", res[966].exec("BarFoo "), 2920);
+assertNull(res[966].exec("*** Failers", 2921));
+assertNull(res[966].exec("abcde\nBar  ", 2922));
+assertToStringEquals("1234X,1234X", res[967].exec("abcde\n1234Xyz"), 2923);
+assertToStringEquals("B,B", res[967].exec("BarFoo "), 2924);
+assertToStringEquals("B,B", res[967].exec("abcde\nBar  "), 2925);
+assertToStringEquals("1234X,1234X", res[968].exec("abcde\n1234Xyz"), 2926);
+assertToStringEquals("B,B", res[968].exec("BarFoo "), 2927);
+assertNull(res[968].exec("*** Failers", 2928));
+assertNull(res[968].exec("abcde\nBar  ", 2929));
+assertToStringEquals("1234X,1234X", res[969].exec("abcde\n1234Xyz"), 2930);
+assertToStringEquals("B,B", res[969].exec("BarFoo "), 2931);
+assertToStringEquals("B,B", res[969].exec("abcde\nBar  "), 2932);
+assertToStringEquals("1234X,1234X", res[969].exec("abcde\n1234Xyz"), 2933);
+assertToStringEquals("B,B", res[969].exec("BarFoo "), 2934);
+assertNull(res[969].exec("*** Failers ", 2935));
+assertToStringEquals("B,B", res[969].exec("abcde\nBar  "), 2936);
+assertToStringEquals("1234X,1234X", res[969].exec("abcde\n1234Xyz"), 2937);
+assertToStringEquals("B,B", res[969].exec("BarFoo "), 2938);
+assertNull(res[969].exec("*** Failers ", 2939));
+assertToStringEquals("B,B", res[969].exec("abcde\nBar  "), 2940);
+assertNull(res[970].exec("**** Failers", 2941));
+assertNull(res[970].exec("abc\nB", 2942));
+assertNull(res[970].exec(" ", 2943));
+assertNull(res[970].exec("abc\nB", 2944));
+assertNull(res[970].exec("abc\nB", 2945));
+assertNull(res[970].exec(" ", 2946));
+assertNull(res[970].exec("abc\nB", 2947));
+assertNull(res[970].exec("abc\nB", 2948));
+assertToStringEquals("B", res[970].exec("B\n"), 2949);
+assertToStringEquals("123456654321", res[971].exec("123456654321"), 2950);
+assertToStringEquals("123456654321", res[972].exec("123456654321 "), 2951);
+assertToStringEquals("123456654321", res[973].exec("123456654321"), 2952);
+assertToStringEquals("abcabcabcabc", res[974].exec("abcabcabcabc"), 2953);
+assertToStringEquals("abcabcabcabc", res[975].exec("abcabcabcabc"), 2954);
+assertToStringEquals("abcabcabcabc,c", res[976].exec("abcabcabcabc "), 2955);
+assertToStringEquals("n", res[977].exec("n"), 2956);
+assertNull(res[977].exec("*** Failers ", 2957));
+assertNull(res[977].exec("z ", 2958));
+assertToStringEquals("abcd", res[978].exec("abcd"), 2959);
+assertNull(res[978].exec("*** Failers", 2960));
+assertNull(res[978].exec("abce  ", 2961));
+assertToStringEquals("abe", res[979].exec("abe"), 2962);
+assertNull(res[979].exec("*** Failers", 2963));
+assertNull(res[979].exec("abcde ", 2964));
+assertToStringEquals("abd,", res[980].exec("abd"), 2965);
+assertNull(res[980].exec("*** Failers", 2966));
+assertNull(res[980].exec("abcd   ", 2967));
+assertToStringEquals("a,", res[981].exec("a"), 2968);
+assertToStringEquals("ab,b", res[981].exec("ab"), 2969);
+assertToStringEquals("abbbb,bbbb", res[981].exec("abbbb"), 2970);
+assertToStringEquals("a,", res[981].exec("*** Failers"), 2971);
+assertNull(res[981].exec("bbbbb    ", 2972));
+assertToStringEquals("abe", res[982].exec("abe"), 2973);
+assertNull(res[982].exec("*** Failers", 2974));
+assertNull(res[982].exec("ab1e   ", 2975));
+assertToStringEquals("\"quick\",quick", res[983].exec("the \"quick\" brown fox"), 2976);
+assertToStringEquals("\"the \\\"quick\\\" brown fox\", brown fox", res[983].exec("\"the \\\"quick\\\" brown fox\" "), 2977);
+assertToStringEquals("", res[984].exec("abc"), 2978);
+assertToStringEquals("", res[985].exec("abc "), 2979);
+assertToStringEquals("", res[986].exec("abc "), 2980);
 assertThrows("var re = //;", 2981);
-assertEquals("", res[986].exec("abc"), 2982);
-assertEquals("acb", res[988].exec("acb"), 2983);
-assertEquals("a\nb", res[988].exec("a\nb"), 2984);
-assertEquals("acb", res[989].exec("acb"), 2985);
-assertEquals(null, res[989].exec("*** Failers ", 2986));
-assertEquals(null, res[989].exec("a\nb   ", 2987));
-assertEquals("acb", res[990].exec("acb"), 2988);
-assertEquals("a\nb", res[990].exec("a\nb  "), 2989);
-assertEquals("acb", res[991].exec("acb"), 2990);
-assertEquals(null, res[991].exec("a\nb  ", 2991));
-assertEquals("bac,a", res[992].exec("bac"), 2992);
-assertEquals("bbac,a", res[992].exec("bbac"), 2993);
-assertEquals("bbbac,a", res[992].exec("bbbac"), 2994);
-assertEquals("bbbbac,a", res[992].exec("bbbbac"), 2995);
-assertEquals("bbbbbac,a", res[992].exec("bbbbbac "), 2996);
-assertEquals("bac,a", res[993].exec("bac"), 2997);
-assertEquals("bbac,a", res[993].exec("bbac"), 2998);
-assertEquals("bbbac,a", res[993].exec("bbbac"), 2999);
-assertEquals("bbbbac,a", res[993].exec("bbbbac"), 3000);
-assertEquals("bbbbbac,a", res[993].exec("bbbbbac "), 3001);
-assertEquals("x", res[994].exec("x\nb\n"), 3002);
-assertEquals("x", res[994].exec("a\x08x\n  "), 3003);
-assertEquals(null, res[995].exec("\x00{ab} ", 3004));
-assertEquals("CD,", res[996].exec("CD "), 3005);
-assertEquals("CD,", res[997].exec("CD "), 3006);
-assertEquals(null, res[997].exec("foo", 3007));
-assertEquals(null, res[997].exec("catfood", 3008));
-assertEquals(null, res[997].exec("arfootle", 3009));
-assertEquals(null, res[997].exec("rfoosh", 3010));
-assertEquals(null, res[997].exec("*** Failers", 3011));
-assertEquals(null, res[997].exec("barfoo", 3012));
-assertEquals(null, res[997].exec("towbarfoo", 3013));
-assertEquals(null, res[997].exec("catfood", 3014));
-assertEquals(null, res[997].exec("*** Failers", 3015));
-assertEquals(null, res[997].exec("foo", 3016));
-assertEquals(null, res[997].exec("barfoo", 3017));
-assertEquals(null, res[997].exec("towbarfoo", 3018));
-assertEquals(null, res[997].exec("fooabar", 3019));
-assertEquals(null, res[997].exec("*** Failers", 3020));
-assertEquals(null, res[997].exec("bar", 3021));
-assertEquals(null, res[997].exec("foobbar", 3022));
-assertEquals(null, res[997].exec("  ", 3023));
-assertEquals(null, res[998].exec("abc", 3024));
-assertEquals(null, res[998].exec("*** Failers", 3025));
-assertEquals(null, res[998].exec("abc\n   ", 3026));
-assertEquals(null, res[998].exec("qqq\nabc", 3027));
-assertEquals(null, res[998].exec("abc\nzzz", 3028));
-assertEquals(null, res[998].exec("qqq\nabc\nzzz", 3029));
-assertEquals(null, res[998].exec("/this/is/a/very/long/line/in/deed/with/very/many/slashes/in/it/you/see/", 3030));
-assertEquals(null, res[998].exec("/this/is/a/very/long/line/in/deed/with/very/many/slashes/in/and/foo", 3031));
-assertEquals(null, res[998].exec("1.230003938", 3032));
-assertEquals(null, res[998].exec("1.875000282", 3033));
-assertEquals(null, res[998].exec("*** Failers ", 3034));
-assertEquals(null, res[998].exec("1.235 ", 3035));
-assertEquals(null, res[998].exec("now is the time for all good men to come to the aid of the party", 3036));
-assertEquals(null, res[998].exec("*** Failers", 3037));
-assertEquals(null, res[998].exec("this is not a line with only words and spaces!", 3038));
-assertEquals("12345a,12345,a", res[999].exec("12345a"), 3039);
-assertEquals("12345,1234,5", res[999].exec("12345+ "), 3040);
-assertEquals("12345a,12345,a", res[999].exec("12345a"), 3041);
-assertEquals(null, res[999].exec("*** Failers", 3042));
-assertEquals("12345,1234,5", res[999].exec("12345+ "), 3043);
-assertEquals(null, res[999].exec("aaab", 3044));
-assertEquals(null, res[999].exec("aaab", 3045));
-assertEquals(null, res[999].exec("aaab", 3046));
-assertEquals(null, res[999].exec("aaabbbccc", 3047));
-assertEquals(null, res[999].exec("aaabbbbccccd", 3048));
-assertEquals("aaabbbbcccc,ccc", res[1000].exec("aaabbbbccccd"), 3049);
-assertEquals("abc,b", res[1000].exec("((abc(ade)ufh()()x"), 3050);
-assertEquals(null, res[1000].exec("", 3051));
-assertEquals("abc,b", res[1000].exec("(abc)"), 3052);
-assertEquals("abc,b", res[1000].exec("(abc(def)xyz)"), 3053);
-assertEquals(null, res[1000].exec("*** Failers", 3054));
-assertEquals(null, res[1000].exec("ab", 3055));
-assertEquals(null, res[1000].exec("Ab", 3056));
-assertEquals(null, res[1000].exec("*** Failers ", 3057));
-assertEquals(null, res[1000].exec("aB", 3058));
-assertEquals(null, res[1000].exec("AB", 3059));
-assertEquals(null, res[1000].exec("    ", 3060));
-assertEquals("bc,b", res[1000].exec("a bcd e"), 3061);
-assertEquals(null, res[1000].exec("*** Failers", 3062));
-assertEquals("c,", res[1000].exec("a b cd e"), 3063);
-assertEquals("abc,b", res[1000].exec("abcd e   "), 3064);
-assertEquals("bc,b", res[1000].exec("a bcde "), 3065);
-assertEquals("bc,b", res[1000].exec("a bcde f"), 3066);
-assertEquals(null, res[1000].exec("*** Failers", 3067));
-assertEquals("abc,b", res[1000].exec("abcdef  "), 3068);
-assertEquals("abc,b", res[1000].exec("abc"), 3069);
-assertEquals("c,", res[1000].exec("aBc"), 3070);
-assertEquals(null, res[1000].exec("*** Failers", 3071));
-assertEquals(null, res[1000].exec("abC", 3072));
-assertEquals(null, res[1000].exec("aBC  ", 3073));
-assertEquals("bc,b", res[1000].exec("Abc"), 3074);
-assertEquals("c,", res[1000].exec("ABc"), 3075);
-assertEquals(null, res[1000].exec("ABC", 3076));
-assertEquals(null, res[1000].exec("AbC", 3077));
-assertEquals(null, res[1000].exec("", 3078));
-assertEquals("abc,b", res[1000].exec("abc"), 3079);
-assertEquals("c,", res[1000].exec("aBc"), 3080);
-assertEquals(null, res[1000].exec("*** Failers ", 3081));
-assertEquals(null, res[1000].exec("ABC", 3082));
-assertEquals(null, res[1000].exec("abC", 3083));
-assertEquals(null, res[1000].exec("aBC", 3084));
-assertEquals(null, res[1000].exec("", 3085));
-assertEquals("c,", res[1000].exec("aBc"), 3086);
-assertEquals("c,", res[1000].exec("aBBc"), 3087);
-assertEquals(null, res[1000].exec("*** Failers ", 3088));
-assertEquals(null, res[1000].exec("aBC", 3089));
-assertEquals(null, res[1000].exec("aBBC", 3090));
-assertEquals(null, res[1000].exec("", 3091));
-assertEquals("abc,b", res[1000].exec("abcd"), 3092);
-assertEquals(null, res[1000].exec("abCd", 3093));
-assertEquals(null, res[1000].exec("*** Failers", 3094));
-assertEquals(null, res[1000].exec("aBCd", 3095));
-assertEquals("abc,b", res[1000].exec("abcD     "), 3096);
-assertEquals(null, res[1000].exec("", 3097));
-assertEquals(null, res[1000].exec("more than million", 3098));
-assertEquals(null, res[1000].exec("more than MILLION", 3099));
-assertEquals(null, res[1000].exec("more \n than Million ", 3100));
-assertEquals(null, res[1000].exec("*** Failers", 3101));
-assertEquals(null, res[1000].exec("MORE THAN MILLION    ", 3102));
-assertEquals(null, res[1000].exec("more \n than \n million ", 3103));
-assertEquals(null, res[1000].exec("more than million", 3104));
-assertEquals(null, res[1000].exec("more than MILLION", 3105));
-assertEquals(null, res[1000].exec("more \n than Million ", 3106));
-assertEquals(null, res[1000].exec("*** Failers", 3107));
-assertEquals(null, res[1000].exec("MORE THAN MILLION    ", 3108));
-assertEquals(null, res[1000].exec("more \n than \n million ", 3109));
-assertEquals(null, res[1000].exec("", 3110));
-assertEquals("abc,b", res[1000].exec("abc"), 3111);
-assertEquals("bc,b", res[1000].exec("aBbc"), 3112);
-assertEquals("c,", res[1000].exec("aBBc "), 3113);
-assertEquals(null, res[1000].exec("*** Failers", 3114));
-assertEquals("bc,b", res[1000].exec("Abc"), 3115);
-assertEquals(null, res[1000].exec("abAb    ", 3116));
-assertEquals(null, res[1000].exec("abbC ", 3117));
-assertEquals(null, res[1000].exec("", 3118));
-assertEquals("abc,b", res[1000].exec("abc"), 3119);
-assertEquals("c,", res[1000].exec("aBc"), 3120);
-assertEquals(null, res[1000].exec("*** Failers", 3121));
-assertEquals(null, res[1000].exec("Ab ", 3122));
-assertEquals(null, res[1000].exec("abC", 3123));
-assertEquals(null, res[1000].exec("aBC     ", 3124));
-assertEquals(null, res[1000].exec("", 3125));
-assertEquals("c,", res[1000].exec("abxxc"), 3126);
-assertEquals("c,", res[1000].exec("aBxxc"), 3127);
-assertEquals(null, res[1000].exec("*** Failers", 3128));
-assertEquals("c,", res[1000].exec("Abxxc"), 3129);
-assertEquals("c,", res[1000].exec("ABxxc"), 3130);
-assertEquals(null, res[1000].exec("abxxC      ", 3131));
-assertEquals("abc,b", res[1000].exec("abc:"), 3132);
-assertEquals(null, res[1000].exec("12", 3133));
-assertEquals(null, res[1000].exec("*** Failers", 3134));
-assertEquals(null, res[1000].exec("123", 3135));
-assertEquals(null, res[1000].exec("xyz    ", 3136));
-assertEquals("abc,b", res[1000].exec("abc:"), 3137);
-assertEquals(null, res[1000].exec("12", 3138));
-assertEquals(null, res[1000].exec("*** Failers", 3139));
-assertEquals(null, res[1000].exec("123", 3140));
-assertEquals(null, res[1000].exec("xyz    ", 3141));
-assertEquals(null, res[1000].exec("", 3142));
-assertEquals(null, res[1000].exec("foobar", 3143));
-assertEquals("c,", res[1000].exec("cat"), 3144);
-assertEquals("c,", res[1000].exec("fcat"), 3145);
-assertEquals("c,", res[1000].exec("focat   "), 3146);
-assertEquals(null, res[1000].exec("*** Failers", 3147));
-assertEquals("c,", res[1000].exec("foocat  "), 3148);
-assertEquals(null, res[1000].exec("foobar", 3149));
-assertEquals("c,", res[1000].exec("cat"), 3150);
-assertEquals("c,", res[1000].exec("fcat"), 3151);
-assertEquals("c,", res[1000].exec("focat   "), 3152);
-assertEquals(null, res[1000].exec("*** Failers", 3153));
-assertEquals("c,", res[1000].exec("foocat  "), 3154);
-assertEquals(null, res[1000].exec("a", 3155));
-assertEquals(null, res[1000].exec("aa", 3156));
-assertEquals(null, res[1000].exec("aaaa", 3157));
-assertEquals(null, res[1000].exec("", 3158));
-assertEquals("abc,abc", res[1001].exec("abc"), 3159);
-assertEquals("abcabc,abc", res[1001].exec("abcabc"), 3160);
-assertEquals("abcabcabc,abc", res[1001].exec("abcabcabc"), 3161);
-assertEquals(",", res[1001].exec("xyz      "), 3162);
-assertEquals("a,a", res[1002].exec("a"), 3163);
-assertEquals("aaaaa,aaaaa", res[1002].exec("aaaaa "), 3164);
-assertEquals("a,a", res[1003].exec("a"), 3165);
-assertEquals("b,b", res[1003].exec("b"), 3166);
-assertEquals("ababab,ababab", res[1003].exec("ababab"), 3167);
-assertEquals("aaaab,aaaab", res[1003].exec("aaaabcde"), 3168);
-assertEquals("bbbb,bbbb", res[1003].exec("bbbb    "), 3169);
-assertEquals("b,b", res[1004].exec("b"), 3170);
-assertEquals("bbbb,bbbb", res[1004].exec("bbbb"), 3171);
-assertEquals(",", res[1004].exec("aaa   "), 3172);
-assertEquals("cccc,cccc", res[1005].exec("cccc"), 3173);
-assertEquals(",", res[1005].exec("abab  "), 3174);
-assertEquals("a,a", res[1006].exec("a"), 3175);
-assertEquals("aaaa,a", res[1006].exec("aaaa "), 3176);
-assertEquals("a,a", res[1007].exec("a"), 3177);
-assertEquals("b,b", res[1007].exec("b"), 3178);
-assertEquals("abab,b", res[1007].exec("abab"), 3179);
-assertEquals("baba,a", res[1007].exec("baba   "), 3180);
-assertEquals("b,b", res[1008].exec("b"), 3181);
-assertEquals("bbbb,b", res[1008].exec("bbbb"), 3182);
-assertEquals(",", res[1008].exec("aaa   "), 3183);
-assertEquals("c,c", res[1009].exec("c"), 3184);
-assertEquals("cccc,c", res[1009].exec("cccc"), 3185);
-assertEquals(",", res[1009].exec("baba   "), 3186);
-assertEquals(",", res[1009].exec("a"), 3187);
-assertEquals(",", res[1009].exec("aaabcde "), 3188);
-assertEquals(",", res[1009].exec("aaaaa"), 3189);
-assertEquals(",", res[1009].exec("aabbaa "), 3190);
-assertEquals(",", res[1009].exec("aaaaa"), 3191);
-assertEquals(",", res[1009].exec("aabbaa "), 3192);
-assertEquals("12-sep-98,8", res[1009].exec("12-sep-98"), 3193);
-assertEquals("12-09-98,8", res[1009].exec("12-09-98"), 3194);
-assertEquals("*** F,F", res[1009].exec("*** Failers"), 3195);
-assertEquals("sep-12-98,8", res[1009].exec("sep-12-98"), 3196);
-assertEquals("    , ", res[1009].exec("    "), 3197);
-assertEquals("s,s", res[1009].exec("saturday"), 3198);
-assertEquals("sund,d", res[1009].exec("sunday"), 3199);
-assertEquals("S,S", res[1009].exec("Saturday"), 3200);
-assertEquals("Sund,d", res[1009].exec("Sunday"), 3201);
-assertEquals("SATURDAY,Y", res[1009].exec("SATURDAY"), 3202);
-assertEquals("SUNDAY,Y", res[1009].exec("SUNDAY"), 3203);
-assertEquals("SunD,D", res[1009].exec("SunDay"), 3204);
-assertEquals(",", res[1009].exec("abcx"), 3205);
-assertEquals(",", res[1009].exec("aBCx"), 3206);
-assertEquals(",", res[1009].exec("bbx"), 3207);
-assertEquals("BBx,x", res[1009].exec("BBx"), 3208);
-assertEquals("*** F,F", res[1009].exec("*** Failers"), 3209);
-assertEquals(",", res[1009].exec("abcX"), 3210);
-assertEquals(",", res[1009].exec("aBCX"), 3211);
-assertEquals(",", res[1009].exec("bbX"), 3212);
-assertEquals("BBX               , ", res[1009].exec("BBX               "), 3213);
-assertEquals(",", res[1009].exec("ac"), 3214);
-assertEquals(",", res[1009].exec("aC"), 3215);
-assertEquals(",", res[1009].exec("bD"), 3216);
-assertEquals("eleph,h", res[1009].exec("elephant"), 3217);
-assertEquals("Europe , ", res[1009].exec("Europe "), 3218);
-assertEquals("frog,g", res[1009].exec("frog"), 3219);
-assertEquals("Fr,r", res[1009].exec("France"), 3220);
-assertEquals("*** F,F", res[1009].exec("*** Failers"), 3221);
-assertEquals("Afric,c", res[1009].exec("Africa     "), 3222);
-assertEquals(",", res[1009].exec("ab"), 3223);
-assertEquals(",", res[1009].exec("aBd"), 3224);
-assertEquals("xy,y", res[1009].exec("xy"), 3225);
-assertEquals("xY,Y", res[1009].exec("xY"), 3226);
-assertEquals("ze,e", res[1009].exec("zebra"), 3227);
-assertEquals("Z,Z", res[1009].exec("Zambesi"), 3228);
-assertEquals("*** F,F", res[1009].exec("*** Failers"), 3229);
-assertEquals(",", res[1009].exec("aCD  "), 3230);
-assertEquals("XY  , ", res[1009].exec("XY  "), 3231);
-assertEquals("foo\n,\n", res[1009].exec("foo\nbar"), 3232);
-assertEquals("*** F,F", res[1009].exec("*** Failers"), 3233);
-assertEquals(",", res[1009].exec("bar"), 3234);
-assertEquals(",", res[1009].exec("baz\nbar   "), 3235);
-assertEquals(",", res[1009].exec("barbaz"), 3236);
-assertEquals(",", res[1009].exec("barbarbaz "), 3237);
-assertEquals("koo,o", res[1009].exec("koobarbaz "), 3238);
-assertEquals("*** F,F", res[1009].exec("*** Failers"), 3239);
-assertEquals(",", res[1009].exec("baz"), 3240);
-assertEquals("foo,o", res[1009].exec("foobarbaz "), 3241);
-assertEquals("abc", res[1012].exec("abc"), 3242);
-assertEquals("abc", res[1012].exec("xabcy"), 3243);
-assertEquals("abc", res[1012].exec("ababc"), 3244);
-assertEquals(null, res[1012].exec("*** Failers", 3245));
-assertEquals(null, res[1012].exec("xbc", 3246));
-assertEquals(null, res[1012].exec("axc", 3247));
-assertEquals(null, res[1012].exec("abx", 3248));
-assertEquals("abc", res[1013].exec("abc"), 3249);
-assertEquals("abc", res[1014].exec("abc"), 3250);
-assertEquals("abbc", res[1014].exec("abbc"), 3251);
-assertEquals("abbbbc", res[1014].exec("abbbbc"), 3252);
-assertEquals("a", res[1015].exec("abbbbc"), 3253);
-assertEquals("abbb", res[1016].exec("abbbbc"), 3254);
-assertEquals("abbbbc", res[1017].exec("abbbbc"), 3255);
-assertEquals("abbc", res[1018].exec("abbc"), 3256);
-assertEquals(null, res[1018].exec("*** Failers", 3257));
-assertEquals(null, res[1018].exec("abc", 3258));
-assertEquals(null, res[1018].exec("abq", 3259));
-assertEquals("abbbbc", res[1020].exec("abbbbc"), 3260);
-assertEquals("abbbbc", res[1021].exec("abbbbc"), 3261);
-assertEquals("abbbbc", res[1022].exec("abbbbc"), 3262);
-assertEquals("abbbbc", res[1023].exec("abbbbc"), 3263);
-assertEquals(null, res[1024].exec("*** Failers", 3264));
-assertEquals(null, res[1024].exec("abq", 3265));
-assertEquals(null, res[1024].exec("abbbbc", 3266));
-assertEquals("abbc", res[1025].exec("abbc"), 3267);
-assertEquals("abc", res[1025].exec("abc"), 3268);
-assertEquals("abc", res[1026].exec("abc"), 3269);
-assertEquals("abc", res[1028].exec("abc"), 3270);
-assertEquals("abc", res[1029].exec("abc"), 3271);
-assertEquals("abc", res[1030].exec("abc"), 3272);
-assertEquals(null, res[1030].exec("*** Failers", 3273));
-assertEquals(null, res[1030].exec("abbbbc", 3274));
-assertEquals(null, res[1030].exec("abcc", 3275));
-assertEquals("abc", res[1031].exec("abcc"), 3276);
-assertEquals("abc", res[1033].exec("aabc"), 3277);
-assertEquals(null, res[1033].exec("*** Failers", 3278));
-assertEquals("abc", res[1033].exec("aabc"), 3279);
-assertEquals(null, res[1033].exec("aabcd", 3280));
-assertEquals("", res[1034].exec("abc"), 3281);
-assertEquals("", res[1035].exec("abc"), 3282);
-assertEquals("abc", res[1036].exec("abc"), 3283);
-assertEquals("axc", res[1036].exec("axc"), 3284);
-assertEquals("axyzc", res[1037].exec("axyzc"), 3285);
-assertEquals("abd", res[1038].exec("abd"), 3286);
-assertEquals(null, res[1038].exec("*** Failers", 3287));
-assertEquals(null, res[1038].exec("axyzd", 3288));
-assertEquals(null, res[1038].exec("abc", 3289));
-assertEquals("ace", res[1039].exec("ace"), 3290);
-assertEquals("ac", res[1040].exec("aac"), 3291);
-assertEquals("a-", res[1041].exec("a-"), 3292);
-assertEquals("a-", res[1042].exec("a-"), 3293);
-assertEquals("a]", res[1043].exec("a]"), 3294);
-assertEquals(null, res[1044].exec("a]b", 3295));
-assertEquals("aed", res[1045].exec("aed"), 3296);
-assertEquals(null, res[1045].exec("*** Failers", 3297));
-assertEquals(null, res[1045].exec("abd", 3298));
-assertEquals(null, res[1045].exec("abd", 3299));
-assertEquals("adc", res[1046].exec("adc"), 3300);
-assertEquals(null, res[1047].exec("adc", 3301));
-assertEquals(null, res[1047].exec("*** Failers", 3302));
-assertEquals(null, res[1047].exec("a-c", 3303));
-assertEquals(null, res[1047].exec("a]c", 3304));
-assertEquals("a", res[1048].exec("a-"), 3305);
-assertEquals("a", res[1048].exec("-a"), 3306);
-assertEquals("a", res[1048].exec("-a-"), 3307);
-assertEquals(null, res[1049].exec("*** Failers", 3308));
-assertEquals(null, res[1049].exec("xy", 3309));
-assertEquals(null, res[1049].exec("yz", 3310));
-assertEquals(null, res[1049].exec("xyz", 3311));
-assertEquals("a", res[1050].exec("*** Failers"), 3312);
-assertEquals(null, res[1050].exec("a-", 3313));
-assertEquals(null, res[1050].exec("-a", 3314));
-assertEquals(null, res[1050].exec("-a-", 3315));
-assertEquals("y", res[1051].exec("xy"), 3316);
-assertEquals("y", res[1052].exec("yz"), 3317);
-assertEquals("y", res[1053].exec("xyz"), 3318);
-assertEquals("a", res[1054].exec("a"), 3319);
-assertEquals("-", res[1055].exec("-"), 3320);
-assertEquals("*", res[1055].exec("*** Failers"), 3321);
-assertEquals("-", res[1055].exec("-"), 3322);
-assertEquals(null, res[1055].exec("a", 3323));
-assertEquals("a b", res[1056].exec("a b"), 3324);
-assertEquals("a-b", res[1057].exec("a-b"), 3325);
-assertEquals(null, res[1057].exec("*** Failers", 3326));
-assertEquals("a-b", res[1057].exec("a-b"), 3327);
-assertEquals(null, res[1057].exec("a b", 3328));
-assertEquals("1", res[1058].exec("1"), 3329);
-assertEquals("-", res[1059].exec("-"), 3330);
-assertEquals("*", res[1059].exec("*** Failers"), 3331);
-assertEquals("-", res[1059].exec("-"), 3332);
-assertEquals(null, res[1059].exec("1", 3333));
-assertEquals("a", res[1060].exec("a"), 3334);
-assertEquals("-", res[1061].exec("-"), 3335);
-assertEquals("*", res[1061].exec("*** Failers"), 3336);
-assertEquals("-", res[1061].exec("-"), 3337);
-assertEquals(null, res[1061].exec("a", 3338));
-assertEquals("a b", res[1062].exec("a b"), 3339);
-assertEquals("a-b", res[1063].exec("a-b"), 3340);
-assertEquals(null, res[1063].exec("*** Failers", 3341));
-assertEquals("a-b", res[1063].exec("a-b"), 3342);
-assertEquals(null, res[1063].exec("a b", 3343));
-assertEquals("1", res[1064].exec("1"), 3344);
-assertEquals("-", res[1065].exec("-"), 3345);
-assertEquals("*", res[1065].exec("*** Failers"), 3346);
-assertEquals("-", res[1065].exec("-"), 3347);
-assertEquals(null, res[1065].exec("1", 3348));
-assertEquals("ab", res[1066].exec("abc"), 3349);
-assertEquals("ab", res[1066].exec("abcd"), 3350);
-assertEquals("ef,", res[1067].exec("def"), 3351);
-assertEquals("a(b", res[1069].exec("a(b"), 3352);
-assertEquals(null, res[1069].exec("ab", 3353));
-assertEquals(null, res[1069].exec("a((b", 3354));
-assertEquals(null, res[1070].exec("a\x08", 3355));
-assertEquals("a,a,a", res[1071].exec("abc"), 3356);
-assertEquals("abc,a,c", res[1072].exec("abc"), 3357);
-assertEquals("abc", res[1073].exec("aabbabc"), 3358);
-assertEquals("abc", res[1074].exec("aabbabc"), 3359);
-assertEquals("abc", res[1075].exec("abcabc"), 3360);
-assertEquals("ab,b", res[1076].exec("ab"), 3361);
-assertEquals("ab,b", res[1077].exec("ab"), 3362);
-assertEquals("ab,b", res[1078].exec("ab"), 3363);
-assertEquals("ab,b", res[1079].exec("ab"), 3364);
-assertEquals("a,a", res[1080].exec("ab"), 3365);
-assertEquals("a,a", res[1081].exec("ab"), 3366);
-assertEquals("cde", res[1082].exec("cde"), 3367);
-assertEquals(null, res[1083].exec("*** Failers", 3368));
-assertEquals(null, res[1083].exec("b", 3369));
-assertEquals("abbbcd,c", res[1085].exec("abbbcd"), 3370);
-assertEquals("abcd,a", res[1086].exec("abcd"), 3371);
-assertEquals("e", res[1087].exec("e"), 3372);
-assertEquals("ef,e", res[1088].exec("ef"), 3373);
-assertEquals("abcdefg", res[1089].exec("abcdefg"), 3374);
-assertEquals("ab", res[1090].exec("xabyabbbz"), 3375);
-assertEquals("a", res[1090].exec("xayabbbz"), 3376);
-assertEquals("cde,cd", res[1091].exec("abcde"), 3377);
-assertEquals("hij", res[1092].exec("hij"), 3378);
-assertEquals("ef,", res[1094].exec("abcdef"), 3379);
-assertEquals("bcd,b", res[1095].exec("abcd"), 3380);
-assertEquals("abc,a", res[1096].exec("abc"), 3381);
-assertEquals("abc,bc", res[1097].exec("abc"), 3382);
-assertEquals("abcd,bc,d", res[1098].exec("abcd"), 3383);
-assertEquals("abcd,bc,d", res[1099].exec("abcd"), 3384);
-assertEquals("abcd,b,cd", res[1100].exec("abcd"), 3385);
-assertEquals("adcdcde", res[1101].exec("adcdcde"), 3386);
-assertEquals(null, res[1102].exec("*** Failers", 3387));
-assertEquals(null, res[1102].exec("abcde", 3388));
-assertEquals(null, res[1102].exec("adcdcde", 3389));
-assertEquals("abc,ab", res[1103].exec("abc"), 3390);
-assertEquals("abcd,abc,a,b,d", res[1104].exec("abcd"), 3391);
-assertEquals("alpha", res[1105].exec("alpha"), 3392);
-assertEquals("bh,", res[1106].exec("abh"), 3393);
-assertEquals("effgz,effgz,", res[1107].exec("effgz"), 3394);
-assertEquals("ij,ij,j", res[1107].exec("ij"), 3395);
-assertEquals("effgz,effgz,", res[1107].exec("reffgz"), 3396);
-assertEquals(null, res[1107].exec("*** Failers", 3397));
-assertEquals(null, res[1107].exec("effg", 3398));
-assertEquals(null, res[1107].exec("bcdd", 3399));
-assertEquals("a,a,a,a,a,a,a,a,a,a,a", res[1108].exec("a"), 3400);
-assertEquals("a,a,a,a,a,a,a,a,a,a", res[1109].exec("a"), 3401);
-assertEquals(null, res[1110].exec("*** Failers", 3402));
-assertEquals(null, res[1110].exec("aa", 3403));
-assertEquals(null, res[1110].exec("uh-uh", 3404));
-assertEquals("multiple words", res[1111].exec("multiple words, yeah"), 3405);
-assertEquals("abcde,ab,de", res[1112].exec("abcde"), 3406);
-assertEquals("(a, b),a,b", res[1113].exec("(a, b)"), 3407);
-assertEquals("abcd", res[1115].exec("abcd"), 3408);
-assertEquals("abcd,bc", res[1116].exec("abcd"), 3409);
-assertEquals("ac", res[1117].exec("ac"), 3410);
-assertEquals("ABC", res[1118].exec("ABC"), 3411);
-assertEquals("ABC", res[1118].exec("XABCY"), 3412);
-assertEquals("ABC", res[1118].exec("ABABC"), 3413);
-assertEquals(null, res[1118].exec("*** Failers", 3414));
-assertEquals(null, res[1118].exec("aaxabxbaxbbx", 3415));
-assertEquals(null, res[1118].exec("XBC", 3416));
-assertEquals(null, res[1118].exec("AXC", 3417));
-assertEquals(null, res[1118].exec("ABX", 3418));
-assertEquals("ABC", res[1119].exec("ABC"), 3419);
-assertEquals("ABC", res[1120].exec("ABC"), 3420);
-assertEquals("ABBC", res[1120].exec("ABBC"), 3421);
-assertEquals("ABBBBC", res[1121].exec("ABBBBC"), 3422);
-assertEquals("ABBBBC", res[1122].exec("ABBBBC"), 3423);
-assertEquals("ABBC", res[1123].exec("ABBC"), 3424);
-assertEquals(null, res[1124].exec("*** Failers", 3425));
-assertEquals(null, res[1124].exec("ABC", 3426));
-assertEquals(null, res[1124].exec("ABQ", 3427));
-assertEquals("ABBBBC", res[1126].exec("ABBBBC"), 3428);
-assertEquals("ABBBBC", res[1127].exec("ABBBBC"), 3429);
-assertEquals("ABBBBC", res[1128].exec("ABBBBC"), 3430);
-assertEquals("ABBBBC", res[1129].exec("ABBBBC"), 3431);
-assertEquals(null, res[1130].exec("*** Failers", 3432));
-assertEquals(null, res[1130].exec("ABQ", 3433));
-assertEquals(null, res[1130].exec("ABBBBC", 3434));
-assertEquals("ABBC", res[1131].exec("ABBC"), 3435);
-assertEquals("ABC", res[1131].exec("ABC"), 3436);
-assertEquals("ABC", res[1132].exec("ABC"), 3437);
-assertEquals("ABC", res[1134].exec("ABC"), 3438);
-assertEquals("ABC", res[1135].exec("ABC"), 3439);
-assertEquals("ABC", res[1136].exec("ABC"), 3440);
-assertEquals(null, res[1136].exec("*** Failers", 3441));
-assertEquals(null, res[1136].exec("ABBBBC", 3442));
-assertEquals(null, res[1136].exec("ABCC", 3443));
-assertEquals("ABC", res[1137].exec("ABCC"), 3444);
-assertEquals("ABC", res[1139].exec("AABC"), 3445);
-assertEquals("", res[1140].exec("ABC"), 3446);
-assertEquals("", res[1141].exec("ABC"), 3447);
-assertEquals("ABC", res[1142].exec("ABC"), 3448);
-assertEquals("AXC", res[1142].exec("AXC"), 3449);
-assertEquals("AXYZC", res[1143].exec("AXYZC"), 3450);
-assertEquals(null, res[1144].exec("*** Failers", 3451));
-assertEquals("AABC", res[1144].exec("AABC"), 3452);
-assertEquals(null, res[1144].exec("AXYZD", 3453));
-assertEquals("ABD", res[1145].exec("ABD"), 3454);
-assertEquals("ACE", res[1146].exec("ACE"), 3455);
-assertEquals(null, res[1146].exec("*** Failers", 3456));
-assertEquals(null, res[1146].exec("ABC", 3457));
-assertEquals(null, res[1146].exec("ABD", 3458));
-assertEquals("AC", res[1147].exec("AAC"), 3459);
-assertEquals("A-", res[1148].exec("A-"), 3460);
-assertEquals("A-", res[1149].exec("A-"), 3461);
-assertEquals("A]", res[1150].exec("A]"), 3462);
-assertEquals(null, res[1151].exec("A]B", 3463));
-assertEquals("AED", res[1152].exec("AED"), 3464);
-assertEquals("ADC", res[1153].exec("ADC"), 3465);
-assertEquals(null, res[1153].exec("*** Failers", 3466));
-assertEquals(null, res[1153].exec("ABD", 3467));
-assertEquals(null, res[1153].exec("A-C", 3468));
-assertEquals(null, res[1154].exec("ADC", 3469));
-assertEquals("AB", res[1155].exec("ABC"), 3470);
-assertEquals("AB", res[1155].exec("ABCD"), 3471);
-assertEquals("EF,", res[1156].exec("DEF"), 3472);
-assertEquals(null, res[1157].exec("*** Failers", 3473));
-assertEquals(null, res[1157].exec("A]C", 3474));
-assertEquals(null, res[1157].exec("B", 3475));
-assertEquals("A(B", res[1158].exec("A(B"), 3476);
-assertEquals(null, res[1158].exec("AB", 3477));
-assertEquals(null, res[1158].exec("A((B", 3478));
-assertEquals(null, res[1159].exec("AB", 3479));
-assertEquals("A,A,A", res[1160].exec("ABC"), 3480);
-assertEquals("ABC,A,C", res[1161].exec("ABC"), 3481);
-assertEquals("ABC", res[1162].exec("AABBABC"), 3482);
-assertEquals("ABC", res[1163].exec("AABBABC"), 3483);
-assertEquals("ABC", res[1164].exec("ABCABC"), 3484);
-assertEquals("ABC", res[1165].exec("ABCABC"), 3485);
-assertEquals("ABC", res[1166].exec("ABCABC"), 3486);
-assertEquals("AB,B", res[1167].exec("AB"), 3487);
-assertEquals("AB,B", res[1168].exec("AB"), 3488);
-assertEquals("AB,B", res[1169].exec("AB"), 3489);
-assertEquals("AB,B", res[1170].exec("AB"), 3490);
-assertEquals("A,A", res[1171].exec("AB"), 3491);
-assertEquals("A,A", res[1172].exec("AB"), 3492);
-assertEquals(",", res[1173].exec("AB"), 3493);
-assertEquals("CDE", res[1174].exec("CDE"), 3494);
-assertEquals("ABBBCD,C", res[1177].exec("ABBBCD"), 3495);
-assertEquals("ABCD,A", res[1178].exec("ABCD"), 3496);
-assertEquals("E", res[1179].exec("E"), 3497);
-assertEquals("EF,E", res[1180].exec("EF"), 3498);
-assertEquals("ABCDEFG", res[1181].exec("ABCDEFG"), 3499);
-assertEquals("AB", res[1182].exec("XABYABBBZ"), 3500);
-assertEquals("A", res[1182].exec("XAYABBBZ"), 3501);
-assertEquals("CDE,CD", res[1183].exec("ABCDE"), 3502);
-assertEquals("HIJ", res[1184].exec("HIJ"), 3503);
-assertEquals(null, res[1185].exec("ABCDE", 3504));
-assertEquals("EF,", res[1186].exec("ABCDEF"), 3505);
-assertEquals("BCD,B", res[1187].exec("ABCD"), 3506);
-assertEquals("ABC,A", res[1188].exec("ABC"), 3507);
-assertEquals("ABC,BC", res[1189].exec("ABC"), 3508);
-assertEquals("ABCD,BC,D", res[1190].exec("ABCD"), 3509);
-assertEquals("ABCD,BC,D", res[1191].exec("ABCD"), 3510);
-assertEquals("ABCD,B,CD", res[1192].exec("ABCD"), 3511);
-assertEquals("ADCDCDE", res[1193].exec("ADCDCDE"), 3512);
-assertEquals("ABC,AB", res[1195].exec("ABC"), 3513);
-assertEquals("ABCD,ABC,A,B,D", res[1196].exec("ABCD"), 3514);
-assertEquals("ALPHA", res[1197].exec("ALPHA"), 3515);
-assertEquals("BH,", res[1198].exec("ABH"), 3516);
-assertEquals("EFFGZ,EFFGZ,", res[1199].exec("EFFGZ"), 3517);
-assertEquals("IJ,IJ,J", res[1199].exec("IJ"), 3518);
-assertEquals("EFFGZ,EFFGZ,", res[1199].exec("REFFGZ"), 3519);
-assertEquals(null, res[1199].exec("*** Failers", 3520));
-assertEquals(null, res[1199].exec("ADCDCDE", 3521));
-assertEquals(null, res[1199].exec("EFFG", 3522));
-assertEquals(null, res[1199].exec("BCDD", 3523));
-assertEquals("A,A,A,A,A,A,A,A,A,A,A", res[1200].exec("A"), 3524);
-assertEquals("A,A,A,A,A,A,A,A,A,A", res[1201].exec("A"), 3525);
-assertEquals("A,A", res[1202].exec("A"), 3526);
-assertEquals("C,C", res[1203].exec("C"), 3527);
-assertEquals(null, res[1204].exec("*** Failers", 3528));
-assertEquals(null, res[1204].exec("AA", 3529));
-assertEquals(null, res[1204].exec("UH-UH", 3530));
-assertEquals("MULTIPLE WORDS", res[1205].exec("MULTIPLE WORDS, YEAH"), 3531);
-assertEquals("ABCDE,AB,DE", res[1206].exec("ABCDE"), 3532);
-assertEquals("(A, B),A,B", res[1207].exec("(A, B)"), 3533);
-assertEquals("ABCD", res[1209].exec("ABCD"), 3534);
-assertEquals("ABCD,BC", res[1210].exec("ABCD"), 3535);
-assertEquals("AC", res[1211].exec("AC"), 3536);
-assertEquals("ad", res[1212].exec("abad"), 3537);
-assertEquals("ad", res[1213].exec("abad"), 3538);
-assertEquals("ad", res[1214].exec("abad"), 3539);
-assertEquals("ace,e", res[1215].exec("ace"), 3540);
-assertEquals("ace,e", res[1216].exec("ace"), 3541);
-assertEquals("ace,e", res[1217].exec("ace"), 3542);
-assertEquals("acd,d", res[1217].exec("acdbcdbe"), 3543);
-assertEquals("acdbcdbe,e", res[1218].exec("acdbcdbe"), 3544);
-assertEquals("acdb,b", res[1219].exec("acdbcdbe"), 3545);
-assertEquals("acdbcdb,b", res[1220].exec("acdbcdbe"), 3546);
-assertEquals("acdbcd,d", res[1221].exec("acdbcdbe"), 3547);
-assertEquals("foobar,bar,,bar", res[1222].exec("foobar"), 3548);
-assertEquals("acdbcdbe,e", res[1223].exec("acdbcdbe"), 3549);
-assertEquals("acdbcdbe,e", res[1224].exec("acdbcdbe"), 3550);
-assertEquals("acdbcdbe,e", res[1225].exec("acdbcdbe"), 3551);
-assertEquals("acdbcdb,b", res[1226].exec("acdbcdbe"), 3552);
-assertEquals("acdbcdbe,e", res[1227].exec("acdbcdbe"), 3553);
-assertEquals("acdbcdb,b", res[1228].exec("acdbcdbe"), 3554);
-assertEquals("ace,c,e", res[1229].exec("ace"), 3555);
-assertEquals("AB,A", res[1230].exec("AB"), 3556);
-assertEquals(".,.,", res[1231].exec("."), 3557);
-assertEquals("<&", res[1232].exec("<&OUT"), 3558);
-assertEquals("foobar,,,,b,a,r", res[1233].exec("foobar"), 3559);
-assertEquals(",,,,,,", res[1233].exec("ab"), 3560);
-assertEquals(",,,,,,", res[1233].exec("*** Failers"), 3561);
-assertEquals(",,,,,,", res[1233].exec("cb"), 3562);
-assertEquals(",,,,,,", res[1233].exec("b"), 3563);
-assertEquals(",,,,,,", res[1233].exec("ab"), 3564);
-assertEquals(",,,,,,", res[1233].exec("b"), 3565);
-assertEquals(",,,,,,", res[1233].exec("b"), 3566);
-assertEquals("aba", res[1234].exec("aba"), 3567);
-assertEquals("a", res[1235].exec("aba"), 3568);
-assertEquals(",", res[1236].exec("abc"), 3569);
-assertEquals("aax,a", res[1237].exec("aax"), 3570);
-assertEquals("aax,a,a", res[1238].exec("aax"), 3571);
-assertEquals("aax,a,a", res[1239].exec("aax"), 3572);
-assertEquals("ab,", res[1240].exec("cab"), 3573);
-assertEquals("ab,", res[1241].exec("cab"), 3574);
-assertEquals("ab,", res[1241].exec("ab"), 3575);
-assertEquals("ab,", res[1241].exec("ab"), 3576);
-assertEquals(null, res[1241].exec("Ab", 3577));
-assertEquals(null, res[1241].exec("Ab", 3578));
-assertEquals(null, res[1241].exec("*** Failers", 3579));
-assertEquals(null, res[1241].exec("cb", 3580));
-assertEquals(null, res[1241].exec("aB", 3581));
-assertEquals("ab,", res[1241].exec("ab"), 3582);
-assertEquals("ab,", res[1241].exec("ab"), 3583);
-assertEquals(null, res[1241].exec("Ab", 3584));
-assertEquals(null, res[1241].exec("Ab", 3585));
-assertEquals(null, res[1241].exec("*** Failers", 3586));
-assertEquals(null, res[1241].exec("aB", 3587));
-assertEquals(null, res[1241].exec("aB", 3588));
-assertEquals("ab,", res[1241].exec("ab"), 3589);
-assertEquals("ab,", res[1241].exec("ab"), 3590);
-assertEquals(null, res[1241].exec("aB", 3591));
-assertEquals(null, res[1241].exec("aB", 3592));
-assertEquals(null, res[1241].exec("*** Failers", 3593));
-assertEquals(null, res[1241].exec("aB", 3594));
-assertEquals(null, res[1241].exec("Ab", 3595));
-assertEquals(null, res[1241].exec("aB", 3596));
-assertEquals(null, res[1241].exec("aB", 3597));
-assertEquals(null, res[1241].exec("*** Failers", 3598));
-assertEquals(null, res[1241].exec("Ab", 3599));
-assertEquals(null, res[1241].exec("AB", 3600));
-assertEquals("ab,", res[1241].exec("ab"), 3601);
-assertEquals("ab,", res[1241].exec("ab"), 3602);
-assertEquals(null, res[1241].exec("aB", 3603));
-assertEquals(null, res[1241].exec("aB", 3604));
-assertEquals(null, res[1241].exec("*** Failers", 3605));
-assertEquals(null, res[1241].exec("AB", 3606));
-assertEquals(null, res[1241].exec("Ab", 3607));
-assertEquals(null, res[1241].exec("aB", 3608));
-assertEquals(null, res[1241].exec("aB", 3609));
-assertEquals(null, res[1241].exec("*** Failers", 3610));
-assertEquals(null, res[1241].exec("Ab", 3611));
-assertEquals(null, res[1241].exec("AB", 3612));
-assertEquals(null, res[1241].exec("*** Failers", 3613));
-assertEquals(null, res[1241].exec("AB", 3614));
-assertEquals(null, res[1241].exec("a\nB", 3615));
-assertEquals(null, res[1241].exec("a\nB", 3616));
-assertEquals("cabbbb", res[1242].exec("cabbbb"), 3617);
-assertEquals("caaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", res[1243].exec("caaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), 3618);
-assertEquals("foobar1234baz", res[1244].exec("foobar1234baz"), 3619);
-assertEquals("x~~,~~", res[1245].exec("x~~"), 3620);
-assertEquals("aaac", res[1246].exec("aaac"), 3621);
-assertEquals("aaac", res[1247].exec("aaac"), 3622);
-assertEquals(null, res[1247].exec("*** Failers", 3623));
-assertEquals(null, res[1247].exec("B\nB", 3624));
-assertEquals(null, res[1247].exec("dbcb", 3625));
-assertEquals(null, res[1247].exec("dbaacb", 3626));
-assertEquals(null, res[1247].exec("dbaacb", 3627));
-assertEquals(null, res[1247].exec("cdaccb", 3628));
-assertEquals(null, res[1248].exec("*** Failers", 3629));
-assertEquals(null, res[1248].exec("dbcb", 3630));
-assertEquals(null, res[1248].exec("a--", 3631));
-assertEquals(null, res[1248].exec("a\nb\nc\n", 3632));
-assertEquals(null, res[1248].exec("a\nb\nc\n", 3633));
-assertEquals(null, res[1248].exec("a\nb\n", 3634));
-assertEquals(null, res[1248].exec("a\nb\n", 3635));
-assertEquals(null, res[1248].exec("a\nb\n", 3636));
-assertEquals(null, res[1248].exec("a\nb\n", 3637));
-assertEquals(null, res[1248].exec("a\nb\nc\n", 3638));
-assertEquals(null, res[1248].exec("a\nb\nc\n", 3639));
-assertEquals(null, res[1248].exec("a\nb\nc\n", 3640));
-assertEquals(null, res[1248].exec("a\nb\nc\n", 3641));
-assertEquals(null, res[1250].exec("*** Failers", 3642));
-assertEquals(null, res[1250].exec("a\nb\nc\n", 3643));
-assertEquals(null, res[1250].exec("a\nb\nc\n", 3644));
-assertEquals(null, res[1250].exec("a\nb\nc\n", 3645));
-assertEquals(null, res[1250].exec("a", 3646));
-assertEquals(null, res[1250].exec("*** Failers", 3647));
-assertEquals(null, res[1250].exec("a", 3648));
-assertEquals(null, res[1250].exec("a", 3649));
-assertEquals(null, res[1250].exec("a", 3650));
-assertEquals("one:,one:", res[1251].exec("one:"), 3651);
-assertEquals(null, res[1251].exec("a", 3652));
-assertEquals("abcd,,abcd", res[1252].exec("abcd"), 3653);
-assertEquals("xy:z:::abcd,xy:z:::,abcd", res[1252].exec("xy:z:::abcd"), 3654);
-assertEquals("aexyc,c", res[1253].exec("aexycd"), 3655);
-assertEquals("aab,aa", res[1254].exec("caab"), 3656);
-assertEquals("abcd,,abcd", res[1255].exec("abcd"), 3657);
-assertEquals("xy:z:::abcd,xy:z:::,abcd", res[1255].exec("xy:z:::abcd"), 3658);
-assertEquals("Failers,,Failers", res[1255].exec("*** Failers"), 3659);
-assertEquals(null, res[1255].exec("abcd:", 3660));
-assertEquals(null, res[1255].exec("abcd:", 3661));
-assertEquals("aexyc,c", res[1256].exec("aexycd"), 3662);
-assertEquals(null, res[1257].exec("aaab", 3663));
-assertEquals(":[,:[", res[1258].exec("a:[b]:"), 3664);
-assertEquals("=[,=[", res[1259].exec("a=[b]="), 3665);
-assertEquals(".[,.[", res[1260].exec("a.[b]."), 3666);
-assertEquals(null, res[1260].exec("aaab", 3667));
-assertEquals(null, res[1260].exec("aaab", 3668));
-assertEquals(null, res[1260].exec("((abc(ade)ufh()()x", 3669));
-assertEquals(null, res[1261].exec("*** Failers", 3670));
-assertEquals(null, res[1261].exec("aaab", 3671));
-assertEquals(null, res[1261].exec("a\nb\n", 3672));
-assertEquals(null, res[1262].exec("a\nb\n", 3673));
-assertEquals(null, res[1264].exec("a\nb", 3674));
-assertEquals(null, res[1265].exec("a\nb", 3675));
-assertEquals(null, res[1265].exec("*** Failers", 3676));
-assertEquals(null, res[1265].exec("alphabetabcd", 3677));
-assertEquals(null, res[1265].exec("endingwxyz", 3678));
-assertEquals(null, res[1265].exec("*** Failers", 3679));
-assertEquals(null, res[1265].exec("a rather long string that doesn't end with one of them", 3680));
-assertEquals(null, res[1265].exec("word cat dog elephant mussel cow horse canary baboon snake shark otherword", 3681));
-assertEquals(null, res[1265].exec("word cat dog elephant mussel cow horse canary baboon snake shark", 3682));
-assertEquals(null, res[1265].exec("word cat dog elephant mussel cow horse canary baboon snake shark the quick brown fox and the lazy dog and several other words getting close to thirty by now I hope", 3683));
-assertEquals(null, res[1265].exec("999foo", 3684));
-assertEquals(null, res[1265].exec("123999foo ", 3685));
-assertEquals(null, res[1265].exec("*** Failers", 3686));
-assertEquals(null, res[1265].exec("123abcfoo", 3687));
-assertEquals(null, res[1265].exec("999foo", 3688));
-assertEquals(null, res[1265].exec("123999foo ", 3689));
-assertEquals(null, res[1265].exec("*** Failers", 3690));
-assertEquals(null, res[1265].exec("123abcfoo", 3691));
-assertEquals(null, res[1265].exec("123abcfoo", 3692));
-assertEquals(null, res[1265].exec("123456foo ", 3693));
-assertEquals(null, res[1265].exec("*** Failers", 3694));
-assertEquals(null, res[1265].exec("123999foo  ", 3695));
-assertEquals(null, res[1265].exec("123abcfoo   ", 3696));
-assertEquals(null, res[1265].exec("123456foo ", 3697));
-assertEquals(null, res[1265].exec("*** Failers", 3698));
-assertEquals(null, res[1265].exec("123999foo  ", 3699));
-assertEquals("ZA,A,", res[1266].exec("ZABCDEFG"), 3700);
-assertEquals("ZA,A,", res[1267].exec("ZABCDEFG"), 3701);
-assertEquals("ZA,A,,", res[1268].exec("ZABCDEFG"), 3702);
-assertEquals("ZA,A,,", res[1268].exec("ZABCDEFG"), 3703);
-assertEquals("ZA,A,,", res[1268].exec("ZABCDEFG"), 3704);
-assertEquals("a", res[1269].exec("abbab"), 3705);
-assertEquals("", res[1269].exec("abcde"), 3706);
-assertEquals("", res[1269].exec("-things"), 3707);
-assertEquals("", res[1269].exec("0digit"), 3708);
-assertEquals("", res[1269].exec("*** Failers"), 3709);
-assertEquals("", res[1269].exec("bcdef    "), 3710);
-assertEquals("a", res[1270].exec("abcde"), 3711);
-assertEquals("-", res[1270].exec("-things"), 3712);
-assertEquals("0", res[1270].exec("0digit"), 3713);
-assertEquals(null, res[1270].exec("*** Failers", 3714));
-assertEquals(null, res[1270].exec("bcdef    ", 3715));
-assertEquals(null, res[1271].exec("> \x09\n\x0c\x0d\x0b<", 3716));
-assertEquals(null, res[1271].exec(" ", 3717));
-assertEquals(null, res[1272].exec("> \x09\n\x0c\x0d\x0b<", 3718));
-assertEquals(null, res[1272].exec(" ", 3719));
-assertEquals(" \x09\n\x0c\x0d\x0b", res[1273].exec("> \x09\n\x0c\x0d\x0b<"), 3720);
-assertEquals(" ", res[1273].exec(" "), 3721);
-assertEquals(" \x09\n\x0c\x0d\x0b", res[1274].exec("> \x09\n\x0c\x0d\x0b<"), 3722);
-assertEquals(" ", res[1274].exec(" "), 3723);
-assertEquals(null, res[1275].exec("ab", 3724));
-assertEquals(null, res[1278].exec("abcabcabc", 3725));
-assertEquals(null, res[1278].exec("abc(*+|abc ", 3726));
-assertEquals(null, res[1279].exec("abc abcabc", 3727));
-assertEquals(null, res[1279].exec("*** Failers", 3728));
-assertEquals(null, res[1279].exec("abcabcabc  ", 3729));
-assertEquals(null, res[1280].exec("abc#not comment\n    literal     ", 3730));
-assertEquals(null, res[1281].exec("abc#not comment\n    literal     ", 3731));
-assertEquals(null, res[1282].exec("abc#not comment\n    literal     ", 3732));
-assertEquals(null, res[1283].exec("abc#not comment\n    literal     ", 3733));
-assertEquals(null, res[1284].exec("abc\\$xyz", 3734));
-assertEquals(null, res[1285].exec("abc$xyz", 3735));
-assertEquals(null, res[1286].exec("abc", 3736));
-assertEquals(null, res[1286].exec("*** Failers", 3737));
-assertEquals(null, res[1286].exec("xyzabc  ", 3738));
-assertEquals(null, res[1287].exec("abc1abc2xyzabc3", 3739));
-assertEquals("abc1", res[1288].exec("abc1abc2xyzabc3 "), 3740);
-assertEquals(null, res[1288].exec("XabcdY", 3741));
-assertEquals(null, res[1288].exec("*** Failers ", 3742));
-assertEquals(null, res[1288].exec("Xa b c d Y ", 3743));
-assertEquals("abcY", res[1288].exec("XabcY"), 3744);
-assertEquals(null, res[1288].exec("AxyzB ", 3745));
-assertEquals(null, res[1288].exec("XabCY", 3746));
-assertEquals(null, res[1288].exec("*** Failers", 3747));
-assertEquals("abcY", res[1288].exec("XabcY  "), 3748);
-assertEquals(null, res[1288].exec("abCE", 3749));
-assertEquals(null, res[1288].exec("DE", 3750));
-assertEquals(null, res[1288].exec("*** Failers", 3751));
-assertEquals("abcE", res[1288].exec("abcE"), 3752);
-assertEquals(null, res[1288].exec("abCe  ", 3753));
-assertEquals(null, res[1288].exec("dE", 3754));
-assertEquals(null, res[1288].exec("De    ", 3755));
-assertEquals(null, res[1289].exec("z", 3756));
-assertEquals(null, res[1289].exec("a", 3757));
-assertEquals(null, res[1289].exec("-", 3758));
-assertEquals(null, res[1289].exec("d", 3759));
-assertEquals(null, res[1289].exec("] ", 3760));
-assertEquals(null, res[1289].exec("*** Failers", 3761));
-assertEquals(null, res[1289].exec("b     ", 3762));
-assertEquals("z", res[1290].exec("z"), 3763);
-assertEquals("C", res[1290].exec("C "), 3764);
-assertEquals("M", res[1291].exec("M "), 3765);
-assertEquals(null, res[1292].exec("", 3766));
-assertEquals(null, res[1292].exec("REGular", 3767));
-assertEquals(null, res[1292].exec("regulaer", 3768));
-assertEquals(null, res[1292].exec("Regex  ", 3769));
-assertEquals(null, res[1292].exec("regul\ufffdr ", 3770));
-assertEquals(null, res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3771));
-assertEquals(null, res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3772));
-assertEquals(null, res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3773));
-assertEquals(null, res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3774));
-assertEquals(null, res[1292].exec("\x84XAZXB", 3775));
-assertEquals(null, res[1292].exec("123a", 3776));
-assertEquals(null, res[1292].exec("ac", 3777));
-assertEquals("b,", res[1292].exec("bbbbc"), 3778);
-assertEquals("ab,a", res[1292].exec("abc"), 3779);
-assertEquals(null, res[1292].exec("*** Failers", 3780));
-assertEquals("b,", res[1292].exec("bca"), 3781);
-assertEquals(null, res[1292].exec("", 3782));
-assertEquals("ab,a", res[1292].exec("abc"), 3783);
-assertEquals(null, res[1292].exec("*** Failers", 3784));
-assertEquals("b,", res[1292].exec("bca"), 3785);
-assertEquals("ab,a", res[1292].exec("abc"), 3786);
-assertEquals(null, res[1292].exec("*** Failers", 3787));
-assertEquals(null, res[1292].exec("def  ", 3788));
-assertEquals(null, res[1292].exec("", 3789));
-assertEquals("ab,a", res[1292].exec("abc"), 3790);
-assertEquals(null, res[1292].exec("*** Failers", 3791));
-assertEquals(null, res[1292].exec("def  ", 3792));
-assertEquals(null, res[1292].exec("", 3793));
-assertEquals("line\nbreak", res[1293].exec("this is a line\nbreak"), 3794);
-assertEquals("line\nbreak", res[1293].exec("line one\nthis is a line\nbreak in the second line "), 3795);
-assertEquals("line\nbreak", res[1294].exec("this is a line\nbreak"), 3796);
-assertEquals(null, res[1294].exec("** Failers ", 3797));
-assertEquals("line\nbreak", res[1294].exec("line one\nthis is a line\nbreak in the second line "), 3798);
-assertEquals("line\nbreak", res[1295].exec("this is a line\nbreak"), 3799);
-assertEquals(null, res[1295].exec("** Failers ", 3800));
-assertEquals("line\nbreak", res[1295].exec("line one\nthis is a line\nbreak in the second line "), 3801);
-assertEquals(null, res[1296].exec("123P", 3802));
-assertEquals(null, res[1296].exec("a4PR", 3803));
-assertEquals(null, res[1297].exec("123P", 3804));
-assertEquals(null, res[1297].exec("4PR", 3805));
-assertEquals("", res[1298].exec("a\nb\nc\n"), 3806);
-assertEquals("", res[1298].exec(" "), 3807);
-assertEquals("", res[1298].exec("A\nC\nC\n "), 3808);
-assertEquals("", res[1298].exec("AB"), 3809);
-assertEquals("", res[1298].exec("aB  "), 3810);
-assertEquals("", res[1298].exec("AB"), 3811);
-assertEquals("", res[1298].exec("aB  "), 3812);
-assertEquals("", res[1298].exec("AB"), 3813);
-assertEquals("", res[1298].exec("aB  "), 3814);
-assertEquals("", res[1298].exec("AB"), 3815);
-assertEquals("", res[1298].exec("aB  "), 3816);
-assertEquals("Content-Type:xxxxxyyy ", res[1299].exec("Content-Type:xxxxxyyy "), 3817);
-assertEquals("Content-Type:xxxxxyyyz", res[1300].exec("Content-Type:xxxxxyyyz"), 3818);
-assertEquals("Content-Type:xxxyyy ", res[1301].exec("Content-Type:xxxyyy "), 3819);
-assertEquals("Content-Type:xxxyyyz", res[1302].exec("Content-Type:xxxyyyz"), 3820);
-assertEquals("abc", res[1303].exec("xyz\nabc"), 3821);
-assertEquals("abc", res[1303].exec("xyz\nabc<lf>"), 3822);
-assertEquals("abc", res[1303].exec("xyz\x0d\nabc<lf>"), 3823);
-assertEquals("abc", res[1303].exec("xyz\x0dabc<cr>"), 3824);
-assertEquals("abc", res[1303].exec("xyz\x0d\nabc<crlf>"), 3825);
-assertEquals(null, res[1303].exec("** Failers ", 3826));
-assertEquals("abc", res[1303].exec("xyz\nabc<cr>"), 3827);
-assertEquals("abc", res[1303].exec("xyz\x0d\nabc<cr>"), 3828);
-assertEquals("abc", res[1303].exec("xyz\nabc<crlf>"), 3829);
-assertEquals("abc", res[1303].exec("xyz\x0dabc<crlf>"), 3830);
-assertEquals("abc", res[1303].exec("xyz\x0dabc<lf>"), 3831);
-assertEquals("abc", res[1304].exec("xyzabc"), 3832);
-assertEquals("abc", res[1304].exec("xyzabc\n "), 3833);
-assertEquals("abc", res[1304].exec("xyzabc\npqr "), 3834);
-assertEquals("abc", res[1304].exec("xyzabc\x0d<cr> "), 3835);
-assertEquals("abc", res[1304].exec("xyzabc\x0dpqr<cr> "), 3836);
-assertEquals("abc", res[1304].exec("xyzabc\x0d\n<crlf> "), 3837);
-assertEquals("abc", res[1304].exec("xyzabc\x0d\npqr<crlf> "), 3838);
-assertEquals(null, res[1304].exec("** Failers", 3839));
-assertEquals("abc", res[1304].exec("xyzabc\x0d "), 3840);
-assertEquals("abc", res[1304].exec("xyzabc\x0dpqr "), 3841);
-assertEquals("abc", res[1304].exec("xyzabc\x0d\n "), 3842);
-assertEquals("abc", res[1304].exec("xyzabc\x0d\npqr "), 3843);
-assertEquals("abc", res[1305].exec("xyz\x0dabcdef"), 3844);
-assertEquals("abc", res[1305].exec("xyz\nabcdef<lf>"), 3845);
-assertEquals(null, res[1305].exec("** Failers  ", 3846));
-assertEquals("abc", res[1305].exec("xyz\nabcdef"), 3847);
-assertEquals(null, res[1305].exec("   ", 3848));
-assertEquals("abc", res[1306].exec("xyz\nabcdef"), 3849);
-assertEquals("abc", res[1306].exec("xyz\x0dabcdef<cr>"), 3850);
-assertEquals(null, res[1306].exec("** Failers  ", 3851));
-assertEquals("abc", res[1306].exec("xyz\x0dabcdef"), 3852);
-assertEquals(null, res[1306].exec("   ", 3853));
-assertEquals("abc", res[1307].exec("xyz\x0d\nabcdef"), 3854);
-assertEquals("abc", res[1307].exec("xyz\x0dabcdef<cr>"), 3855);
-assertEquals(null, res[1307].exec("** Failers  ", 3856));
-assertEquals("abc", res[1307].exec("xyz\x0dabcdef"), 3857);
-assertEquals("abc", res[1308].exec("abc\ndef"), 3858);
-assertEquals("abc", res[1308].exec("abc\x0ddef"), 3859);
-assertEquals("abc", res[1308].exec("abc\x0d\ndef"), 3860);
-assertEquals("<cr>abc", res[1308].exec("<cr>abc\ndef"), 3861);
-assertEquals("<cr>abc", res[1308].exec("<cr>abc\x0ddef"), 3862);
-assertEquals("<cr>abc", res[1308].exec("<cr>abc\x0d\ndef"), 3863);
-assertEquals("<crlf>abc", res[1308].exec("<crlf>abc\ndef"), 3864);
-assertEquals("<crlf>abc", res[1308].exec("<crlf>abc\x0ddef"), 3865);
-assertEquals("<crlf>abc", res[1308].exec("<crlf>abc\x0d\ndef"), 3866);
-assertEquals(null, res[1309].exec("abc\ndef", 3867));
-assertEquals(null, res[1309].exec("abc\x0ddef", 3868));
-assertEquals(null, res[1309].exec("abc\x0d\ndef", 3869));
-assertEquals("abc=xyz\\,", res[1310].exec("abc=xyz\\\npqr"), 3870);
-assertEquals("aaaa,a,", res[1311].exec("aaaa"), 3871);
-assertEquals("aaaa", res[1312].exec("aaaa"), 3872);
-assertEquals("aaaa,a,", res[1313].exec("aaaa"), 3873);
-assertEquals("aaaa", res[1314].exec("aaaa"), 3874);
-assertEquals(null, res[1317].exec("a\x0db", 3875));
-assertEquals(null, res[1317].exec("a\nb<cr> ", 3876));
-assertEquals(null, res[1317].exec("** Failers", 3877));
-assertEquals(null, res[1317].exec("a\nb", 3878));
-assertEquals(null, res[1317].exec("a\nb<any>", 3879));
-assertEquals(null, res[1317].exec("a\x0db<cr>   ", 3880));
-assertEquals(null, res[1317].exec("a\x0db<any>   ", 3881));
-assertEquals("abc1", res[1318].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 \x85abc7 JUNK"), 3882);
-assertEquals("abc1", res[1319].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6\x85 abc9"), 3883);
-assertEquals(null, res[1320].exec("a\nb", 3884));
-assertEquals(null, res[1320].exec("a\x0db", 3885));
-assertEquals(null, res[1320].exec("a\x0d\nb", 3886));
-assertEquals(null, res[1320].exec("a\x0bb", 3887));
-assertEquals(null, res[1320].exec("a\x0cb", 3888));
-assertEquals(null, res[1320].exec("a\x85b   ", 3889));
-assertEquals(null, res[1320].exec("** Failers", 3890));
-assertEquals(null, res[1320].exec("a\n\x0db    ", 3891));
-assertEquals("ab", res[1321].exec("ab"), 3892);
-assertEquals(null, res[1321].exec("a\nb", 3893));
-assertEquals(null, res[1321].exec("a\x0db", 3894));
-assertEquals(null, res[1321].exec("a\x0d\nb", 3895));
-assertEquals(null, res[1321].exec("a\x0bb", 3896));
-assertEquals(null, res[1321].exec("a\x0cb", 3897));
-assertEquals(null, res[1321].exec("a\x85b   ", 3898));
-assertEquals(null, res[1321].exec("a\n\x0db    ", 3899));
-assertEquals(null, res[1321].exec("a\n\x0d\x85\x0cb ", 3900));
-assertEquals(null, res[1322].exec("a\nb", 3901));
-assertEquals(null, res[1322].exec("a\x0db", 3902));
-assertEquals(null, res[1322].exec("a\x0d\nb", 3903));
-assertEquals(null, res[1322].exec("a\x0bb", 3904));
-assertEquals(null, res[1322].exec("a\x0cb", 3905));
-assertEquals(null, res[1322].exec("a\x85b   ", 3906));
-assertEquals(null, res[1322].exec("a\n\x0db    ", 3907));
-assertEquals(null, res[1322].exec("a\n\x0d\x85\x0cb ", 3908));
-assertEquals(null, res[1322].exec("** Failers", 3909));
-assertEquals(null, res[1322].exec("ab  ", 3910));
-assertEquals(null, res[1323].exec("a\nb", 3911));
-assertEquals(null, res[1323].exec("a\n\x0db", 3912));
-assertEquals(null, res[1323].exec("a\n\x0d\x85b", 3913));
-assertEquals(null, res[1323].exec("a\x0d\n\x0d\nb ", 3914));
-assertEquals(null, res[1323].exec("a\x0d\n\x0d\n\x0d\nb ", 3915));
-assertEquals(null, res[1323].exec("a\n\x0d\n\x0db", 3916));
-assertEquals(null, res[1323].exec("a\n\n\x0d\nb ", 3917));
-assertEquals(null, res[1323].exec("** Failers", 3918));
-assertEquals(null, res[1323].exec("a\n\n\n\x0db", 3919));
-assertEquals(null, res[1323].exec("a\x0d", 3920));
-assertEquals("aRb", res[1324].exec("aRb"), 3921);
-assertEquals(null, res[1324].exec("** Failers", 3922));
-assertEquals(null, res[1324].exec("a\nb  ", 3923));
-assertEquals("afoo", res[1325].exec("afoo"), 3924);
-assertEquals(null, res[1325].exec("** Failers ", 3925));
-assertEquals(null, res[1325].exec("\x0d\nfoo ", 3926));
-assertEquals(null, res[1325].exec("\nfoo ", 3927));
-assertEquals("afoo", res[1326].exec("afoo"), 3928);
-assertEquals(null, res[1326].exec("\nfoo ", 3929));
-assertEquals(null, res[1326].exec("** Failers ", 3930));
-assertEquals(null, res[1326].exec("\x0d\nfoo ", 3931));
-assertEquals("afoo", res[1327].exec("afoo"), 3932);
-assertEquals(null, res[1327].exec("** Failers ", 3933));
-assertEquals(null, res[1327].exec("\nfoo ", 3934));
-assertEquals(null, res[1327].exec("\x0d\nfoo ", 3935));
-assertEquals("afoo", res[1328].exec("afoo"), 3936);
-assertEquals(null, res[1328].exec("\x0d\nfoo ", 3937));
-assertEquals(null, res[1328].exec("\nfoo ", 3938));
-assertEquals("", res[1329].exec("abc\x0d\x0dxyz"), 3939);
-assertEquals("", res[1329].exec("abc\n\x0dxyz  "), 3940);
-assertEquals(null, res[1329].exec("** Failers ", 3941));
-assertEquals("", res[1329].exec("abc\x0d\nxyz"), 3942);
-assertEquals("X", res[1330].exec("XABC"), 3943);
-assertEquals(null, res[1330].exec("** Failers ", 3944));
-assertEquals("X", res[1330].exec("XABCB"), 3945);
-assertEquals(null, res[1330].exec("abc\x0d\n\x0d\n", 3946));
-assertEquals(null, res[1330].exec("abc\x0d\n\x0d\n", 3947));
-assertEquals(null, res[1330].exec("abc\x0d\n\x0d\n", 3948));
+assertToStringEquals("", res[986].exec("abc"), 2982);
+assertToStringEquals("acb", res[988].exec("acb"), 2983);
+assertToStringEquals("a\nb", res[988].exec("a\nb"), 2984);
+assertToStringEquals("acb", res[989].exec("acb"), 2985);
+assertNull(res[989].exec("*** Failers ", 2986));
+assertNull(res[989].exec("a\nb   ", 2987));
+assertToStringEquals("acb", res[990].exec("acb"), 2988);
+assertToStringEquals("a\nb", res[990].exec("a\nb  "), 2989);
+assertToStringEquals("acb", res[991].exec("acb"), 2990);
+assertNull(res[991].exec("a\nb  ", 2991));
+assertToStringEquals("bac,a", res[992].exec("bac"), 2992);
+assertToStringEquals("bbac,a", res[992].exec("bbac"), 2993);
+assertToStringEquals("bbbac,a", res[992].exec("bbbac"), 2994);
+assertToStringEquals("bbbbac,a", res[992].exec("bbbbac"), 2995);
+assertToStringEquals("bbbbbac,a", res[992].exec("bbbbbac "), 2996);
+assertToStringEquals("bac,a", res[993].exec("bac"), 2997);
+assertToStringEquals("bbac,a", res[993].exec("bbac"), 2998);
+assertToStringEquals("bbbac,a", res[993].exec("bbbac"), 2999);
+assertToStringEquals("bbbbac,a", res[993].exec("bbbbac"), 3000);
+assertToStringEquals("bbbbbac,a", res[993].exec("bbbbbac "), 3001);
+assertToStringEquals("x", res[994].exec("x\nb\n"), 3002);
+assertToStringEquals("x", res[994].exec("a\x08x\n  "), 3003);
+assertNull(res[995].exec("\x00{ab} ", 3004));
+assertToStringEquals("CD,", res[996].exec("CD "), 3005);
+assertToStringEquals("CD,", res[997].exec("CD "), 3006);
+assertNull(res[997].exec("foo", 3007));
+assertNull(res[997].exec("catfood", 3008));
+assertNull(res[997].exec("arfootle", 3009));
+assertNull(res[997].exec("rfoosh", 3010));
+assertNull(res[997].exec("*** Failers", 3011));
+assertNull(res[997].exec("barfoo", 3012));
+assertNull(res[997].exec("towbarfoo", 3013));
+assertNull(res[997].exec("catfood", 3014));
+assertNull(res[997].exec("*** Failers", 3015));
+assertNull(res[997].exec("foo", 3016));
+assertNull(res[997].exec("barfoo", 3017));
+assertNull(res[997].exec("towbarfoo", 3018));
+assertNull(res[997].exec("fooabar", 3019));
+assertNull(res[997].exec("*** Failers", 3020));
+assertNull(res[997].exec("bar", 3021));
+assertNull(res[997].exec("foobbar", 3022));
+assertNull(res[997].exec("  ", 3023));
+assertNull(res[998].exec("abc", 3024));
+assertNull(res[998].exec("*** Failers", 3025));
+assertNull(res[998].exec("abc\n   ", 3026));
+assertNull(res[998].exec("qqq\nabc", 3027));
+assertNull(res[998].exec("abc\nzzz", 3028));
+assertNull(res[998].exec("qqq\nabc\nzzz", 3029));
+assertNull(res[998].exec("/this/is/a/very/long/line/in/deed/with/very/many/slashes/in/it/you/see/", 3030));
+assertNull(res[998].exec("/this/is/a/very/long/line/in/deed/with/very/many/slashes/in/and/foo", 3031));
+assertNull(res[998].exec("1.230003938", 3032));
+assertNull(res[998].exec("1.875000282", 3033));
+assertNull(res[998].exec("*** Failers ", 3034));
+assertNull(res[998].exec("1.235 ", 3035));
+assertNull(res[998].exec("now is the time for all good men to come to the aid of the party", 3036));
+assertNull(res[998].exec("*** Failers", 3037));
+assertNull(res[998].exec("this is not a line with only words and spaces!", 3038));
+assertToStringEquals("12345a,12345,a", res[999].exec("12345a"), 3039);
+assertToStringEquals("12345,1234,5", res[999].exec("12345+ "), 3040);
+assertToStringEquals("12345a,12345,a", res[999].exec("12345a"), 3041);
+assertNull(res[999].exec("*** Failers", 3042));
+assertToStringEquals("12345,1234,5", res[999].exec("12345+ "), 3043);
+assertNull(res[999].exec("aaab", 3044));
+assertNull(res[999].exec("aaab", 3045));
+assertNull(res[999].exec("aaab", 3046));
+assertNull(res[999].exec("aaabbbccc", 3047));
+assertNull(res[999].exec("aaabbbbccccd", 3048));
+assertToStringEquals("aaabbbbcccc,ccc", res[1000].exec("aaabbbbccccd"), 3049);
+assertToStringEquals("abc,b", res[1000].exec("((abc(ade)ufh()()x"), 3050);
+assertNull(res[1000].exec("", 3051));
+assertToStringEquals("abc,b", res[1000].exec("(abc)"), 3052);
+assertToStringEquals("abc,b", res[1000].exec("(abc(def)xyz)"), 3053);
+assertNull(res[1000].exec("*** Failers", 3054));
+assertNull(res[1000].exec("ab", 3055));
+assertNull(res[1000].exec("Ab", 3056));
+assertNull(res[1000].exec("*** Failers ", 3057));
+assertNull(res[1000].exec("aB", 3058));
+assertNull(res[1000].exec("AB", 3059));
+assertNull(res[1000].exec("    ", 3060));
+assertToStringEquals("bc,b", res[1000].exec("a bcd e"), 3061);
+assertNull(res[1000].exec("*** Failers", 3062));
+assertToStringEquals("c,", res[1000].exec("a b cd e"), 3063);
+assertToStringEquals("abc,b", res[1000].exec("abcd e   "), 3064);
+assertToStringEquals("bc,b", res[1000].exec("a bcde "), 3065);
+assertToStringEquals("bc,b", res[1000].exec("a bcde f"), 3066);
+assertNull(res[1000].exec("*** Failers", 3067));
+assertToStringEquals("abc,b", res[1000].exec("abcdef  "), 3068);
+assertToStringEquals("abc,b", res[1000].exec("abc"), 3069);
+assertToStringEquals("c,", res[1000].exec("aBc"), 3070);
+assertNull(res[1000].exec("*** Failers", 3071));
+assertNull(res[1000].exec("abC", 3072));
+assertNull(res[1000].exec("aBC  ", 3073));
+assertToStringEquals("bc,b", res[1000].exec("Abc"), 3074);
+assertToStringEquals("c,", res[1000].exec("ABc"), 3075);
+assertNull(res[1000].exec("ABC", 3076));
+assertNull(res[1000].exec("AbC", 3077));
+assertNull(res[1000].exec("", 3078));
+assertToStringEquals("abc,b", res[1000].exec("abc"), 3079);
+assertToStringEquals("c,", res[1000].exec("aBc"), 3080);
+assertNull(res[1000].exec("*** Failers ", 3081));
+assertNull(res[1000].exec("ABC", 3082));
+assertNull(res[1000].exec("abC", 3083));
+assertNull(res[1000].exec("aBC", 3084));
+assertNull(res[1000].exec("", 3085));
+assertToStringEquals("c,", res[1000].exec("aBc"), 3086);
+assertToStringEquals("c,", res[1000].exec("aBBc"), 3087);
+assertNull(res[1000].exec("*** Failers ", 3088));
+assertNull(res[1000].exec("aBC", 3089));
+assertNull(res[1000].exec("aBBC", 3090));
+assertNull(res[1000].exec("", 3091));
+assertToStringEquals("abc,b", res[1000].exec("abcd"), 3092);
+assertNull(res[1000].exec("abCd", 3093));
+assertNull(res[1000].exec("*** Failers", 3094));
+assertNull(res[1000].exec("aBCd", 3095));
+assertToStringEquals("abc,b", res[1000].exec("abcD     "), 3096);
+assertNull(res[1000].exec("", 3097));
+assertNull(res[1000].exec("more than million", 3098));
+assertNull(res[1000].exec("more than MILLION", 3099));
+assertNull(res[1000].exec("more \n than Million ", 3100));
+assertNull(res[1000].exec("*** Failers", 3101));
+assertNull(res[1000].exec("MORE THAN MILLION    ", 3102));
+assertNull(res[1000].exec("more \n than \n million ", 3103));
+assertNull(res[1000].exec("more than million", 3104));
+assertNull(res[1000].exec("more than MILLION", 3105));
+assertNull(res[1000].exec("more \n than Million ", 3106));
+assertNull(res[1000].exec("*** Failers", 3107));
+assertNull(res[1000].exec("MORE THAN MILLION    ", 3108));
+assertNull(res[1000].exec("more \n than \n million ", 3109));
+assertNull(res[1000].exec("", 3110));
+assertToStringEquals("abc,b", res[1000].exec("abc"), 3111);
+assertToStringEquals("bc,b", res[1000].exec("aBbc"), 3112);
+assertToStringEquals("c,", res[1000].exec("aBBc "), 3113);
+assertNull(res[1000].exec("*** Failers", 3114));
+assertToStringEquals("bc,b", res[1000].exec("Abc"), 3115);
+assertNull(res[1000].exec("abAb    ", 3116));
+assertNull(res[1000].exec("abbC ", 3117));
+assertNull(res[1000].exec("", 3118));
+assertToStringEquals("abc,b", res[1000].exec("abc"), 3119);
+assertToStringEquals("c,", res[1000].exec("aBc"), 3120);
+assertNull(res[1000].exec("*** Failers", 3121));
+assertNull(res[1000].exec("Ab ", 3122));
+assertNull(res[1000].exec("abC", 3123));
+assertNull(res[1000].exec("aBC     ", 3124));
+assertNull(res[1000].exec("", 3125));
+assertToStringEquals("c,", res[1000].exec("abxxc"), 3126);
+assertToStringEquals("c,", res[1000].exec("aBxxc"), 3127);
+assertNull(res[1000].exec("*** Failers", 3128));
+assertToStringEquals("c,", res[1000].exec("Abxxc"), 3129);
+assertToStringEquals("c,", res[1000].exec("ABxxc"), 3130);
+assertNull(res[1000].exec("abxxC      ", 3131));
+assertToStringEquals("abc,b", res[1000].exec("abc:"), 3132);
+assertNull(res[1000].exec("12", 3133));
+assertNull(res[1000].exec("*** Failers", 3134));
+assertNull(res[1000].exec("123", 3135));
+assertNull(res[1000].exec("xyz    ", 3136));
+assertToStringEquals("abc,b", res[1000].exec("abc:"), 3137);
+assertNull(res[1000].exec("12", 3138));
+assertNull(res[1000].exec("*** Failers", 3139));
+assertNull(res[1000].exec("123", 3140));
+assertNull(res[1000].exec("xyz    ", 3141));
+assertNull(res[1000].exec("", 3142));
+assertNull(res[1000].exec("foobar", 3143));
+assertToStringEquals("c,", res[1000].exec("cat"), 3144);
+assertToStringEquals("c,", res[1000].exec("fcat"), 3145);
+assertToStringEquals("c,", res[1000].exec("focat   "), 3146);
+assertNull(res[1000].exec("*** Failers", 3147));
+assertToStringEquals("c,", res[1000].exec("foocat  "), 3148);
+assertNull(res[1000].exec("foobar", 3149));
+assertToStringEquals("c,", res[1000].exec("cat"), 3150);
+assertToStringEquals("c,", res[1000].exec("fcat"), 3151);
+assertToStringEquals("c,", res[1000].exec("focat   "), 3152);
+assertNull(res[1000].exec("*** Failers", 3153));
+assertToStringEquals("c,", res[1000].exec("foocat  "), 3154);
+assertNull(res[1000].exec("a", 3155));
+assertNull(res[1000].exec("aa", 3156));
+assertNull(res[1000].exec("aaaa", 3157));
+assertNull(res[1000].exec("", 3158));
+assertToStringEquals("abc,abc", res[1001].exec("abc"), 3159);
+assertToStringEquals("abcabc,abc", res[1001].exec("abcabc"), 3160);
+assertToStringEquals("abcabcabc,abc", res[1001].exec("abcabcabc"), 3161);
+assertToStringEquals(",", res[1001].exec("xyz      "), 3162);
+assertToStringEquals("a,a", res[1002].exec("a"), 3163);
+assertToStringEquals("aaaaa,aaaaa", res[1002].exec("aaaaa "), 3164);
+assertToStringEquals("a,a", res[1003].exec("a"), 3165);
+assertToStringEquals("b,b", res[1003].exec("b"), 3166);
+assertToStringEquals("ababab,ababab", res[1003].exec("ababab"), 3167);
+assertToStringEquals("aaaab,aaaab", res[1003].exec("aaaabcde"), 3168);
+assertToStringEquals("bbbb,bbbb", res[1003].exec("bbbb    "), 3169);
+assertToStringEquals("b,b", res[1004].exec("b"), 3170);
+assertToStringEquals("bbbb,bbbb", res[1004].exec("bbbb"), 3171);
+assertToStringEquals(",", res[1004].exec("aaa   "), 3172);
+assertToStringEquals("cccc,cccc", res[1005].exec("cccc"), 3173);
+assertToStringEquals(",", res[1005].exec("abab  "), 3174);
+assertToStringEquals("a,a", res[1006].exec("a"), 3175);
+assertToStringEquals("aaaa,a", res[1006].exec("aaaa "), 3176);
+assertToStringEquals("a,a", res[1007].exec("a"), 3177);
+assertToStringEquals("b,b", res[1007].exec("b"), 3178);
+assertToStringEquals("abab,b", res[1007].exec("abab"), 3179);
+assertToStringEquals("baba,a", res[1007].exec("baba   "), 3180);
+assertToStringEquals("b,b", res[1008].exec("b"), 3181);
+assertToStringEquals("bbbb,b", res[1008].exec("bbbb"), 3182);
+assertToStringEquals(",", res[1008].exec("aaa   "), 3183);
+assertToStringEquals("c,c", res[1009].exec("c"), 3184);
+assertToStringEquals("cccc,c", res[1009].exec("cccc"), 3185);
+assertToStringEquals(",", res[1009].exec("baba   "), 3186);
+assertToStringEquals(",", res[1009].exec("a"), 3187);
+assertToStringEquals(",", res[1009].exec("aaabcde "), 3188);
+assertToStringEquals(",", res[1009].exec("aaaaa"), 3189);
+assertToStringEquals(",", res[1009].exec("aabbaa "), 3190);
+assertToStringEquals(",", res[1009].exec("aaaaa"), 3191);
+assertToStringEquals(",", res[1009].exec("aabbaa "), 3192);
+assertToStringEquals("12-sep-98,8", res[1009].exec("12-sep-98"), 3193);
+assertToStringEquals("12-09-98,8", res[1009].exec("12-09-98"), 3194);
+assertToStringEquals("*** F,F", res[1009].exec("*** Failers"), 3195);
+assertToStringEquals("sep-12-98,8", res[1009].exec("sep-12-98"), 3196);
+assertToStringEquals("    , ", res[1009].exec("    "), 3197);
+assertToStringEquals("s,s", res[1009].exec("saturday"), 3198);
+assertToStringEquals("sund,d", res[1009].exec("sunday"), 3199);
+assertToStringEquals("S,S", res[1009].exec("Saturday"), 3200);
+assertToStringEquals("Sund,d", res[1009].exec("Sunday"), 3201);
+assertToStringEquals("SATURDAY,Y", res[1009].exec("SATURDAY"), 3202);
+assertToStringEquals("SUNDAY,Y", res[1009].exec("SUNDAY"), 3203);
+assertToStringEquals("SunD,D", res[1009].exec("SunDay"), 3204);
+assertToStringEquals(",", res[1009].exec("abcx"), 3205);
+assertToStringEquals(",", res[1009].exec("aBCx"), 3206);
+assertToStringEquals(",", res[1009].exec("bbx"), 3207);
+assertToStringEquals("BBx,x", res[1009].exec("BBx"), 3208);
+assertToStringEquals("*** F,F", res[1009].exec("*** Failers"), 3209);
+assertToStringEquals(",", res[1009].exec("abcX"), 3210);
+assertToStringEquals(",", res[1009].exec("aBCX"), 3211);
+assertToStringEquals(",", res[1009].exec("bbX"), 3212);
+assertToStringEquals("BBX               , ", res[1009].exec("BBX               "), 3213);
+assertToStringEquals(",", res[1009].exec("ac"), 3214);
+assertToStringEquals(",", res[1009].exec("aC"), 3215);
+assertToStringEquals(",", res[1009].exec("bD"), 3216);
+assertToStringEquals("eleph,h", res[1009].exec("elephant"), 3217);
+assertToStringEquals("Europe , ", res[1009].exec("Europe "), 3218);
+assertToStringEquals("frog,g", res[1009].exec("frog"), 3219);
+assertToStringEquals("Fr,r", res[1009].exec("France"), 3220);
+assertToStringEquals("*** F,F", res[1009].exec("*** Failers"), 3221);
+assertToStringEquals("Afric,c", res[1009].exec("Africa     "), 3222);
+assertToStringEquals(",", res[1009].exec("ab"), 3223);
+assertToStringEquals(",", res[1009].exec("aBd"), 3224);
+assertToStringEquals("xy,y", res[1009].exec("xy"), 3225);
+assertToStringEquals("xY,Y", res[1009].exec("xY"), 3226);
+assertToStringEquals("ze,e", res[1009].exec("zebra"), 3227);
+assertToStringEquals("Z,Z", res[1009].exec("Zambesi"), 3228);
+assertToStringEquals("*** F,F", res[1009].exec("*** Failers"), 3229);
+assertToStringEquals(",", res[1009].exec("aCD  "), 3230);
+assertToStringEquals("XY  , ", res[1009].exec("XY  "), 3231);
+assertToStringEquals("foo\n,\n", res[1009].exec("foo\nbar"), 3232);
+assertToStringEquals("*** F,F", res[1009].exec("*** Failers"), 3233);
+assertToStringEquals(",", res[1009].exec("bar"), 3234);
+assertToStringEquals(",", res[1009].exec("baz\nbar   "), 3235);
+assertToStringEquals(",", res[1009].exec("barbaz"), 3236);
+assertToStringEquals(",", res[1009].exec("barbarbaz "), 3237);
+assertToStringEquals("koo,o", res[1009].exec("koobarbaz "), 3238);
+assertToStringEquals("*** F,F", res[1009].exec("*** Failers"), 3239);
+assertToStringEquals(",", res[1009].exec("baz"), 3240);
+assertToStringEquals("foo,o", res[1009].exec("foobarbaz "), 3241);
+assertToStringEquals("abc", res[1012].exec("abc"), 3242);
+assertToStringEquals("abc", res[1012].exec("xabcy"), 3243);
+assertToStringEquals("abc", res[1012].exec("ababc"), 3244);
+assertNull(res[1012].exec("*** Failers", 3245));
+assertNull(res[1012].exec("xbc", 3246));
+assertNull(res[1012].exec("axc", 3247));
+assertNull(res[1012].exec("abx", 3248));
+assertToStringEquals("abc", res[1013].exec("abc"), 3249);
+assertToStringEquals("abc", res[1014].exec("abc"), 3250);
+assertToStringEquals("abbc", res[1014].exec("abbc"), 3251);
+assertToStringEquals("abbbbc", res[1014].exec("abbbbc"), 3252);
+assertToStringEquals("a", res[1015].exec("abbbbc"), 3253);
+assertToStringEquals("abbb", res[1016].exec("abbbbc"), 3254);
+assertToStringEquals("abbbbc", res[1017].exec("abbbbc"), 3255);
+assertToStringEquals("abbc", res[1018].exec("abbc"), 3256);
+assertNull(res[1018].exec("*** Failers", 3257));
+assertNull(res[1018].exec("abc", 3258));
+assertNull(res[1018].exec("abq", 3259));
+assertToStringEquals("abbbbc", res[1020].exec("abbbbc"), 3260);
+assertToStringEquals("abbbbc", res[1021].exec("abbbbc"), 3261);
+assertToStringEquals("abbbbc", res[1022].exec("abbbbc"), 3262);
+assertToStringEquals("abbbbc", res[1023].exec("abbbbc"), 3263);
+assertNull(res[1024].exec("*** Failers", 3264));
+assertNull(res[1024].exec("abq", 3265));
+assertNull(res[1024].exec("abbbbc", 3266));
+assertToStringEquals("abbc", res[1025].exec("abbc"), 3267);
+assertToStringEquals("abc", res[1025].exec("abc"), 3268);
+assertToStringEquals("abc", res[1026].exec("abc"), 3269);
+assertToStringEquals("abc", res[1028].exec("abc"), 3270);
+assertToStringEquals("abc", res[1029].exec("abc"), 3271);
+assertToStringEquals("abc", res[1030].exec("abc"), 3272);
+assertNull(res[1030].exec("*** Failers", 3273));
+assertNull(res[1030].exec("abbbbc", 3274));
+assertNull(res[1030].exec("abcc", 3275));
+assertToStringEquals("abc", res[1031].exec("abcc"), 3276);
+assertToStringEquals("abc", res[1033].exec("aabc"), 3277);
+assertNull(res[1033].exec("*** Failers", 3278));
+assertToStringEquals("abc", res[1033].exec("aabc"), 3279);
+assertNull(res[1033].exec("aabcd", 3280));
+assertToStringEquals("", res[1034].exec("abc"), 3281);
+assertToStringEquals("", res[1035].exec("abc"), 3282);
+assertToStringEquals("abc", res[1036].exec("abc"), 3283);
+assertToStringEquals("axc", res[1036].exec("axc"), 3284);
+assertToStringEquals("axyzc", res[1037].exec("axyzc"), 3285);
+assertToStringEquals("abd", res[1038].exec("abd"), 3286);
+assertNull(res[1038].exec("*** Failers", 3287));
+assertNull(res[1038].exec("axyzd", 3288));
+assertNull(res[1038].exec("abc", 3289));
+assertToStringEquals("ace", res[1039].exec("ace"), 3290);
+assertToStringEquals("ac", res[1040].exec("aac"), 3291);
+assertToStringEquals("a-", res[1041].exec("a-"), 3292);
+assertToStringEquals("a-", res[1042].exec("a-"), 3293);
+assertToStringEquals("a]", res[1043].exec("a]"), 3294);
+assertNull(res[1044].exec("a]b", 3295));
+assertToStringEquals("aed", res[1045].exec("aed"), 3296);
+assertNull(res[1045].exec("*** Failers", 3297));
+assertNull(res[1045].exec("abd", 3298));
+assertNull(res[1045].exec("abd", 3299));
+assertToStringEquals("adc", res[1046].exec("adc"), 3300);
+assertNull(res[1047].exec("adc", 3301));
+assertNull(res[1047].exec("*** Failers", 3302));
+assertNull(res[1047].exec("a-c", 3303));
+assertNull(res[1047].exec("a]c", 3304));
+assertToStringEquals("a", res[1048].exec("a-"), 3305);
+assertToStringEquals("a", res[1048].exec("-a"), 3306);
+assertToStringEquals("a", res[1048].exec("-a-"), 3307);
+assertNull(res[1049].exec("*** Failers", 3308));
+assertNull(res[1049].exec("xy", 3309));
+assertNull(res[1049].exec("yz", 3310));
+assertNull(res[1049].exec("xyz", 3311));
+assertToStringEquals("a", res[1050].exec("*** Failers"), 3312);
+assertNull(res[1050].exec("a-", 3313));
+assertNull(res[1050].exec("-a", 3314));
+assertNull(res[1050].exec("-a-", 3315));
+assertToStringEquals("y", res[1051].exec("xy"), 3316);
+assertToStringEquals("y", res[1052].exec("yz"), 3317);
+assertToStringEquals("y", res[1053].exec("xyz"), 3318);
+assertToStringEquals("a", res[1054].exec("a"), 3319);
+assertToStringEquals("-", res[1055].exec("-"), 3320);
+assertToStringEquals("*", res[1055].exec("*** Failers"), 3321);
+assertToStringEquals("-", res[1055].exec("-"), 3322);
+assertNull(res[1055].exec("a", 3323));
+assertToStringEquals("a b", res[1056].exec("a b"), 3324);
+assertToStringEquals("a-b", res[1057].exec("a-b"), 3325);
+assertNull(res[1057].exec("*** Failers", 3326));
+assertToStringEquals("a-b", res[1057].exec("a-b"), 3327);
+assertNull(res[1057].exec("a b", 3328));
+assertToStringEquals("1", res[1058].exec("1"), 3329);
+assertToStringEquals("-", res[1059].exec("-"), 3330);
+assertToStringEquals("*", res[1059].exec("*** Failers"), 3331);
+assertToStringEquals("-", res[1059].exec("-"), 3332);
+assertNull(res[1059].exec("1", 3333));
+assertToStringEquals("a", res[1060].exec("a"), 3334);
+assertToStringEquals("-", res[1061].exec("-"), 3335);
+assertToStringEquals("*", res[1061].exec("*** Failers"), 3336);
+assertToStringEquals("-", res[1061].exec("-"), 3337);
+assertNull(res[1061].exec("a", 3338));
+assertToStringEquals("a b", res[1062].exec("a b"), 3339);
+assertToStringEquals("a-b", res[1063].exec("a-b"), 3340);
+assertNull(res[1063].exec("*** Failers", 3341));
+assertToStringEquals("a-b", res[1063].exec("a-b"), 3342);
+assertNull(res[1063].exec("a b", 3343));
+assertToStringEquals("1", res[1064].exec("1"), 3344);
+assertToStringEquals("-", res[1065].exec("-"), 3345);
+assertToStringEquals("*", res[1065].exec("*** Failers"), 3346);
+assertToStringEquals("-", res[1065].exec("-"), 3347);
+assertNull(res[1065].exec("1", 3348));
+assertToStringEquals("ab", res[1066].exec("abc"), 3349);
+assertToStringEquals("ab", res[1066].exec("abcd"), 3350);
+assertToStringEquals("ef,", res[1067].exec("def"), 3351);
+assertToStringEquals("a(b", res[1069].exec("a(b"), 3352);
+assertNull(res[1069].exec("ab", 3353));
+assertNull(res[1069].exec("a((b", 3354));
+assertNull(res[1070].exec("a\x08", 3355));
+assertToStringEquals("a,a,a", res[1071].exec("abc"), 3356);
+assertToStringEquals("abc,a,c", res[1072].exec("abc"), 3357);
+assertToStringEquals("abc", res[1073].exec("aabbabc"), 3358);
+assertToStringEquals("abc", res[1074].exec("aabbabc"), 3359);
+assertToStringEquals("abc", res[1075].exec("abcabc"), 3360);
+assertToStringEquals("ab,b", res[1076].exec("ab"), 3361);
+assertToStringEquals("ab,b", res[1077].exec("ab"), 3362);
+assertToStringEquals("ab,b", res[1078].exec("ab"), 3363);
+assertToStringEquals("ab,b", res[1079].exec("ab"), 3364);
+assertToStringEquals("a,a", res[1080].exec("ab"), 3365);
+assertToStringEquals("a,a", res[1081].exec("ab"), 3366);
+assertToStringEquals("cde", res[1082].exec("cde"), 3367);
+assertNull(res[1083].exec("*** Failers", 3368));
+assertNull(res[1083].exec("b", 3369));
+assertToStringEquals("abbbcd,c", res[1085].exec("abbbcd"), 3370);
+assertToStringEquals("abcd,a", res[1086].exec("abcd"), 3371);
+assertToStringEquals("e", res[1087].exec("e"), 3372);
+assertToStringEquals("ef,e", res[1088].exec("ef"), 3373);
+assertToStringEquals("abcdefg", res[1089].exec("abcdefg"), 3374);
+assertToStringEquals("ab", res[1090].exec("xabyabbbz"), 3375);
+assertToStringEquals("a", res[1090].exec("xayabbbz"), 3376);
+assertToStringEquals("cde,cd", res[1091].exec("abcde"), 3377);
+assertToStringEquals("hij", res[1092].exec("hij"), 3378);
+assertToStringEquals("ef,", res[1094].exec("abcdef"), 3379);
+assertToStringEquals("bcd,b", res[1095].exec("abcd"), 3380);
+assertToStringEquals("abc,a", res[1096].exec("abc"), 3381);
+assertToStringEquals("abc,bc", res[1097].exec("abc"), 3382);
+assertToStringEquals("abcd,bc,d", res[1098].exec("abcd"), 3383);
+assertToStringEquals("abcd,bc,d", res[1099].exec("abcd"), 3384);
+assertToStringEquals("abcd,b,cd", res[1100].exec("abcd"), 3385);
+assertToStringEquals("adcdcde", res[1101].exec("adcdcde"), 3386);
+assertNull(res[1102].exec("*** Failers", 3387));
+assertNull(res[1102].exec("abcde", 3388));
+assertNull(res[1102].exec("adcdcde", 3389));
+assertToStringEquals("abc,ab", res[1103].exec("abc"), 3390);
+assertToStringEquals("abcd,abc,a,b,d", res[1104].exec("abcd"), 3391);
+assertToStringEquals("alpha", res[1105].exec("alpha"), 3392);
+assertToStringEquals("bh,", res[1106].exec("abh"), 3393);
+assertToStringEquals("effgz,effgz,", res[1107].exec("effgz"), 3394);
+assertToStringEquals("ij,ij,j", res[1107].exec("ij"), 3395);
+assertToStringEquals("effgz,effgz,", res[1107].exec("reffgz"), 3396);
+assertNull(res[1107].exec("*** Failers", 3397));
+assertNull(res[1107].exec("effg", 3398));
+assertNull(res[1107].exec("bcdd", 3399));
+assertToStringEquals("a,a,a,a,a,a,a,a,a,a,a", res[1108].exec("a"), 3400);
+assertToStringEquals("a,a,a,a,a,a,a,a,a,a", res[1109].exec("a"), 3401);
+assertNull(res[1110].exec("*** Failers", 3402));
+assertNull(res[1110].exec("aa", 3403));
+assertNull(res[1110].exec("uh-uh", 3404));
+assertToStringEquals("multiple words", res[1111].exec("multiple words, yeah"), 3405);
+assertToStringEquals("abcde,ab,de", res[1112].exec("abcde"), 3406);
+assertToStringEquals("(a, b),a,b", res[1113].exec("(a, b)"), 3407);
+assertToStringEquals("abcd", res[1115].exec("abcd"), 3408);
+assertToStringEquals("abcd,bc", res[1116].exec("abcd"), 3409);
+assertToStringEquals("ac", res[1117].exec("ac"), 3410);
+assertToStringEquals("ABC", res[1118].exec("ABC"), 3411);
+assertToStringEquals("ABC", res[1118].exec("XABCY"), 3412);
+assertToStringEquals("ABC", res[1118].exec("ABABC"), 3413);
+assertNull(res[1118].exec("*** Failers", 3414));
+assertNull(res[1118].exec("aaxabxbaxbbx", 3415));
+assertNull(res[1118].exec("XBC", 3416));
+assertNull(res[1118].exec("AXC", 3417));
+assertNull(res[1118].exec("ABX", 3418));
+assertToStringEquals("ABC", res[1119].exec("ABC"), 3419);
+assertToStringEquals("ABC", res[1120].exec("ABC"), 3420);
+assertToStringEquals("ABBC", res[1120].exec("ABBC"), 3421);
+assertToStringEquals("ABBBBC", res[1121].exec("ABBBBC"), 3422);
+assertToStringEquals("ABBBBC", res[1122].exec("ABBBBC"), 3423);
+assertToStringEquals("ABBC", res[1123].exec("ABBC"), 3424);
+assertNull(res[1124].exec("*** Failers", 3425));
+assertNull(res[1124].exec("ABC", 3426));
+assertNull(res[1124].exec("ABQ", 3427));
+assertToStringEquals("ABBBBC", res[1126].exec("ABBBBC"), 3428);
+assertToStringEquals("ABBBBC", res[1127].exec("ABBBBC"), 3429);
+assertToStringEquals("ABBBBC", res[1128].exec("ABBBBC"), 3430);
+assertToStringEquals("ABBBBC", res[1129].exec("ABBBBC"), 3431);
+assertNull(res[1130].exec("*** Failers", 3432));
+assertNull(res[1130].exec("ABQ", 3433));
+assertNull(res[1130].exec("ABBBBC", 3434));
+assertToStringEquals("ABBC", res[1131].exec("ABBC"), 3435);
+assertToStringEquals("ABC", res[1131].exec("ABC"), 3436);
+assertToStringEquals("ABC", res[1132].exec("ABC"), 3437);
+assertToStringEquals("ABC", res[1134].exec("ABC"), 3438);
+assertToStringEquals("ABC", res[1135].exec("ABC"), 3439);
+assertToStringEquals("ABC", res[1136].exec("ABC"), 3440);
+assertNull(res[1136].exec("*** Failers", 3441));
+assertNull(res[1136].exec("ABBBBC", 3442));
+assertNull(res[1136].exec("ABCC", 3443));
+assertToStringEquals("ABC", res[1137].exec("ABCC"), 3444);
+assertToStringEquals("ABC", res[1139].exec("AABC"), 3445);
+assertToStringEquals("", res[1140].exec("ABC"), 3446);
+assertToStringEquals("", res[1141].exec("ABC"), 3447);
+assertToStringEquals("ABC", res[1142].exec("ABC"), 3448);
+assertToStringEquals("AXC", res[1142].exec("AXC"), 3449);
+assertToStringEquals("AXYZC", res[1143].exec("AXYZC"), 3450);
+assertNull(res[1144].exec("*** Failers", 3451));
+assertToStringEquals("AABC", res[1144].exec("AABC"), 3452);
+assertNull(res[1144].exec("AXYZD", 3453));
+assertToStringEquals("ABD", res[1145].exec("ABD"), 3454);
+assertToStringEquals("ACE", res[1146].exec("ACE"), 3455);
+assertNull(res[1146].exec("*** Failers", 3456));
+assertNull(res[1146].exec("ABC", 3457));
+assertNull(res[1146].exec("ABD", 3458));
+assertToStringEquals("AC", res[1147].exec("AAC"), 3459);
+assertToStringEquals("A-", res[1148].exec("A-"), 3460);
+assertToStringEquals("A-", res[1149].exec("A-"), 3461);
+assertToStringEquals("A]", res[1150].exec("A]"), 3462);
+assertNull(res[1151].exec("A]B", 3463));
+assertToStringEquals("AED", res[1152].exec("AED"), 3464);
+assertToStringEquals("ADC", res[1153].exec("ADC"), 3465);
+assertNull(res[1153].exec("*** Failers", 3466));
+assertNull(res[1153].exec("ABD", 3467));
+assertNull(res[1153].exec("A-C", 3468));
+assertNull(res[1154].exec("ADC", 3469));
+assertToStringEquals("AB", res[1155].exec("ABC"), 3470);
+assertToStringEquals("AB", res[1155].exec("ABCD"), 3471);
+assertToStringEquals("EF,", res[1156].exec("DEF"), 3472);
+assertNull(res[1157].exec("*** Failers", 3473));
+assertNull(res[1157].exec("A]C", 3474));
+assertNull(res[1157].exec("B", 3475));
+assertToStringEquals("A(B", res[1158].exec("A(B"), 3476);
+assertNull(res[1158].exec("AB", 3477));
+assertNull(res[1158].exec("A((B", 3478));
+assertNull(res[1159].exec("AB", 3479));
+assertToStringEquals("A,A,A", res[1160].exec("ABC"), 3480);
+assertToStringEquals("ABC,A,C", res[1161].exec("ABC"), 3481);
+assertToStringEquals("ABC", res[1162].exec("AABBABC"), 3482);
+assertToStringEquals("ABC", res[1163].exec("AABBABC"), 3483);
+assertToStringEquals("ABC", res[1164].exec("ABCABC"), 3484);
+assertToStringEquals("ABC", res[1165].exec("ABCABC"), 3485);
+assertToStringEquals("ABC", res[1166].exec("ABCABC"), 3486);
+assertToStringEquals("AB,B", res[1167].exec("AB"), 3487);
+assertToStringEquals("AB,B", res[1168].exec("AB"), 3488);
+assertToStringEquals("AB,B", res[1169].exec("AB"), 3489);
+assertToStringEquals("AB,B", res[1170].exec("AB"), 3490);
+assertToStringEquals("A,A", res[1171].exec("AB"), 3491);
+assertToStringEquals("A,A", res[1172].exec("AB"), 3492);
+assertToStringEquals(",", res[1173].exec("AB"), 3493);
+assertToStringEquals("CDE", res[1174].exec("CDE"), 3494);
+assertToStringEquals("ABBBCD,C", res[1177].exec("ABBBCD"), 3495);
+assertToStringEquals("ABCD,A", res[1178].exec("ABCD"), 3496);
+assertToStringEquals("E", res[1179].exec("E"), 3497);
+assertToStringEquals("EF,E", res[1180].exec("EF"), 3498);
+assertToStringEquals("ABCDEFG", res[1181].exec("ABCDEFG"), 3499);
+assertToStringEquals("AB", res[1182].exec("XABYABBBZ"), 3500);
+assertToStringEquals("A", res[1182].exec("XAYABBBZ"), 3501);
+assertToStringEquals("CDE,CD", res[1183].exec("ABCDE"), 3502);
+assertToStringEquals("HIJ", res[1184].exec("HIJ"), 3503);
+assertNull(res[1185].exec("ABCDE", 3504));
+assertToStringEquals("EF,", res[1186].exec("ABCDEF"), 3505);
+assertToStringEquals("BCD,B", res[1187].exec("ABCD"), 3506);
+assertToStringEquals("ABC,A", res[1188].exec("ABC"), 3507);
+assertToStringEquals("ABC,BC", res[1189].exec("ABC"), 3508);
+assertToStringEquals("ABCD,BC,D", res[1190].exec("ABCD"), 3509);
+assertToStringEquals("ABCD,BC,D", res[1191].exec("ABCD"), 3510);
+assertToStringEquals("ABCD,B,CD", res[1192].exec("ABCD"), 3511);
+assertToStringEquals("ADCDCDE", res[1193].exec("ADCDCDE"), 3512);
+assertToStringEquals("ABC,AB", res[1195].exec("ABC"), 3513);
+assertToStringEquals("ABCD,ABC,A,B,D", res[1196].exec("ABCD"), 3514);
+assertToStringEquals("ALPHA", res[1197].exec("ALPHA"), 3515);
+assertToStringEquals("BH,", res[1198].exec("ABH"), 3516);
+assertToStringEquals("EFFGZ,EFFGZ,", res[1199].exec("EFFGZ"), 3517);
+assertToStringEquals("IJ,IJ,J", res[1199].exec("IJ"), 3518);
+assertToStringEquals("EFFGZ,EFFGZ,", res[1199].exec("REFFGZ"), 3519);
+assertNull(res[1199].exec("*** Failers", 3520));
+assertNull(res[1199].exec("ADCDCDE", 3521));
+assertNull(res[1199].exec("EFFG", 3522));
+assertNull(res[1199].exec("BCDD", 3523));
+assertToStringEquals("A,A,A,A,A,A,A,A,A,A,A", res[1200].exec("A"), 3524);
+assertToStringEquals("A,A,A,A,A,A,A,A,A,A", res[1201].exec("A"), 3525);
+assertToStringEquals("A,A", res[1202].exec("A"), 3526);
+assertToStringEquals("C,C", res[1203].exec("C"), 3527);
+assertNull(res[1204].exec("*** Failers", 3528));
+assertNull(res[1204].exec("AA", 3529));
+assertNull(res[1204].exec("UH-UH", 3530));
+assertToStringEquals("MULTIPLE WORDS", res[1205].exec("MULTIPLE WORDS, YEAH"), 3531);
+assertToStringEquals("ABCDE,AB,DE", res[1206].exec("ABCDE"), 3532);
+assertToStringEquals("(A, B),A,B", res[1207].exec("(A, B)"), 3533);
+assertToStringEquals("ABCD", res[1209].exec("ABCD"), 3534);
+assertToStringEquals("ABCD,BC", res[1210].exec("ABCD"), 3535);
+assertToStringEquals("AC", res[1211].exec("AC"), 3536);
+assertToStringEquals("ad", res[1212].exec("abad"), 3537);
+assertToStringEquals("ad", res[1213].exec("abad"), 3538);
+assertToStringEquals("ad", res[1214].exec("abad"), 3539);
+assertToStringEquals("ace,e", res[1215].exec("ace"), 3540);
+assertToStringEquals("ace,e", res[1216].exec("ace"), 3541);
+assertToStringEquals("ace,e", res[1217].exec("ace"), 3542);
+assertToStringEquals("acd,d", res[1217].exec("acdbcdbe"), 3543);
+assertToStringEquals("acdbcdbe,e", res[1218].exec("acdbcdbe"), 3544);
+assertToStringEquals("acdb,b", res[1219].exec("acdbcdbe"), 3545);
+assertToStringEquals("acdbcdb,b", res[1220].exec("acdbcdbe"), 3546);
+assertToStringEquals("acdbcd,d", res[1221].exec("acdbcdbe"), 3547);
+assertToStringEquals("foobar,bar,,bar", res[1222].exec("foobar"), 3548);
+assertToStringEquals("acdbcdbe,e", res[1223].exec("acdbcdbe"), 3549);
+assertToStringEquals("acdbcdbe,e", res[1224].exec("acdbcdbe"), 3550);
+assertToStringEquals("acdbcdbe,e", res[1225].exec("acdbcdbe"), 3551);
+assertToStringEquals("acdbcdb,b", res[1226].exec("acdbcdbe"), 3552);
+assertToStringEquals("acdbcdbe,e", res[1227].exec("acdbcdbe"), 3553);
+assertToStringEquals("acdbcdb,b", res[1228].exec("acdbcdbe"), 3554);
+assertToStringEquals("ace,c,e", res[1229].exec("ace"), 3555);
+assertToStringEquals("AB,A", res[1230].exec("AB"), 3556);
+assertToStringEquals(".,.,", res[1231].exec("."), 3557);
+assertToStringEquals("<&", res[1232].exec("<&OUT"), 3558);
+assertToStringEquals("foobar,,,,b,a,r", res[1233].exec("foobar"), 3559);
+assertToStringEquals(",,,,,,", res[1233].exec("ab"), 3560);
+assertToStringEquals(",,,,,,", res[1233].exec("*** Failers"), 3561);
+assertToStringEquals(",,,,,,", res[1233].exec("cb"), 3562);
+assertToStringEquals(",,,,,,", res[1233].exec("b"), 3563);
+assertToStringEquals(",,,,,,", res[1233].exec("ab"), 3564);
+assertToStringEquals(",,,,,,", res[1233].exec("b"), 3565);
+assertToStringEquals(",,,,,,", res[1233].exec("b"), 3566);
+assertToStringEquals("aba", res[1234].exec("aba"), 3567);
+assertToStringEquals("a", res[1235].exec("aba"), 3568);
+assertToStringEquals(",", res[1236].exec("abc"), 3569);
+assertToStringEquals("aax,a", res[1237].exec("aax"), 3570);
+assertToStringEquals("aax,a,a", res[1238].exec("aax"), 3571);
+assertToStringEquals("aax,a,a", res[1239].exec("aax"), 3572);
+assertToStringEquals("ab,", res[1240].exec("cab"), 3573);
+assertToStringEquals("ab,", res[1241].exec("cab"), 3574);
+assertToStringEquals("ab,", res[1241].exec("ab"), 3575);
+assertToStringEquals("ab,", res[1241].exec("ab"), 3576);
+assertNull(res[1241].exec("Ab", 3577));
+assertNull(res[1241].exec("Ab", 3578));
+assertNull(res[1241].exec("*** Failers", 3579));
+assertNull(res[1241].exec("cb", 3580));
+assertNull(res[1241].exec("aB", 3581));
+assertToStringEquals("ab,", res[1241].exec("ab"), 3582);
+assertToStringEquals("ab,", res[1241].exec("ab"), 3583);
+assertNull(res[1241].exec("Ab", 3584));
+assertNull(res[1241].exec("Ab", 3585));
+assertNull(res[1241].exec("*** Failers", 3586));
+assertNull(res[1241].exec("aB", 3587));
+assertNull(res[1241].exec("aB", 3588));
+assertToStringEquals("ab,", res[1241].exec("ab"), 3589);
+assertToStringEquals("ab,", res[1241].exec("ab"), 3590);
+assertNull(res[1241].exec("aB", 3591));
+assertNull(res[1241].exec("aB", 3592));
+assertNull(res[1241].exec("*** Failers", 3593));
+assertNull(res[1241].exec("aB", 3594));
+assertNull(res[1241].exec("Ab", 3595));
+assertNull(res[1241].exec("aB", 3596));
+assertNull(res[1241].exec("aB", 3597));
+assertNull(res[1241].exec("*** Failers", 3598));
+assertNull(res[1241].exec("Ab", 3599));
+assertNull(res[1241].exec("AB", 3600));
+assertToStringEquals("ab,", res[1241].exec("ab"), 3601);
+assertToStringEquals("ab,", res[1241].exec("ab"), 3602);
+assertNull(res[1241].exec("aB", 3603));
+assertNull(res[1241].exec("aB", 3604));
+assertNull(res[1241].exec("*** Failers", 3605));
+assertNull(res[1241].exec("AB", 3606));
+assertNull(res[1241].exec("Ab", 3607));
+assertNull(res[1241].exec("aB", 3608));
+assertNull(res[1241].exec("aB", 3609));
+assertNull(res[1241].exec("*** Failers", 3610));
+assertNull(res[1241].exec("Ab", 3611));
+assertNull(res[1241].exec("AB", 3612));
+assertNull(res[1241].exec("*** Failers", 3613));
+assertNull(res[1241].exec("AB", 3614));
+assertNull(res[1241].exec("a\nB", 3615));
+assertNull(res[1241].exec("a\nB", 3616));
+assertToStringEquals("cabbbb", res[1242].exec("cabbbb"), 3617);
+assertToStringEquals("caaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", res[1243].exec("caaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), 3618);
+assertToStringEquals("foobar1234baz", res[1244].exec("foobar1234baz"), 3619);
+assertToStringEquals("x~~,~~", res[1245].exec("x~~"), 3620);
+assertToStringEquals("aaac", res[1246].exec("aaac"), 3621);
+assertToStringEquals("aaac", res[1247].exec("aaac"), 3622);
+assertNull(res[1247].exec("*** Failers", 3623));
+assertNull(res[1247].exec("B\nB", 3624));
+assertNull(res[1247].exec("dbcb", 3625));
+assertNull(res[1247].exec("dbaacb", 3626));
+assertNull(res[1247].exec("dbaacb", 3627));
+assertNull(res[1247].exec("cdaccb", 3628));
+assertNull(res[1248].exec("*** Failers", 3629));
+assertNull(res[1248].exec("dbcb", 3630));
+assertNull(res[1248].exec("a--", 3631));
+assertNull(res[1248].exec("a\nb\nc\n", 3632));
+assertNull(res[1248].exec("a\nb\nc\n", 3633));
+assertNull(res[1248].exec("a\nb\n", 3634));
+assertNull(res[1248].exec("a\nb\n", 3635));
+assertNull(res[1248].exec("a\nb\n", 3636));
+assertNull(res[1248].exec("a\nb\n", 3637));
+assertNull(res[1248].exec("a\nb\nc\n", 3638));
+assertNull(res[1248].exec("a\nb\nc\n", 3639));
+assertNull(res[1248].exec("a\nb\nc\n", 3640));
+assertNull(res[1248].exec("a\nb\nc\n", 3641));
+assertNull(res[1250].exec("*** Failers", 3642));
+assertNull(res[1250].exec("a\nb\nc\n", 3643));
+assertNull(res[1250].exec("a\nb\nc\n", 3644));
+assertNull(res[1250].exec("a\nb\nc\n", 3645));
+assertNull(res[1250].exec("a", 3646));
+assertNull(res[1250].exec("*** Failers", 3647));
+assertNull(res[1250].exec("a", 3648));
+assertNull(res[1250].exec("a", 3649));
+assertNull(res[1250].exec("a", 3650));
+assertToStringEquals("one:,one:", res[1251].exec("one:"), 3651);
+assertNull(res[1251].exec("a", 3652));
+assertToStringEquals("abcd,,abcd", res[1252].exec("abcd"), 3653);
+assertToStringEquals("xy:z:::abcd,xy:z:::,abcd", res[1252].exec("xy:z:::abcd"), 3654);
+assertToStringEquals("aexyc,c", res[1253].exec("aexycd"), 3655);
+assertToStringEquals("aab,aa", res[1254].exec("caab"), 3656);
+assertToStringEquals("abcd,,abcd", res[1255].exec("abcd"), 3657);
+assertToStringEquals("xy:z:::abcd,xy:z:::,abcd", res[1255].exec("xy:z:::abcd"), 3658);
+assertToStringEquals("Failers,,Failers", res[1255].exec("*** Failers"), 3659);
+assertNull(res[1255].exec("abcd:", 3660));
+assertNull(res[1255].exec("abcd:", 3661));
+assertToStringEquals("aexyc,c", res[1256].exec("aexycd"), 3662);
+assertNull(res[1257].exec("aaab", 3663));
+assertToStringEquals(":[,:[", res[1258].exec("a:[b]:"), 3664);
+assertToStringEquals("=[,=[", res[1259].exec("a=[b]="), 3665);
+assertToStringEquals(".[,.[", res[1260].exec("a.[b]."), 3666);
+assertNull(res[1260].exec("aaab", 3667));
+assertNull(res[1260].exec("aaab", 3668));
+assertNull(res[1260].exec("((abc(ade)ufh()()x", 3669));
+assertNull(res[1261].exec("*** Failers", 3670));
+assertNull(res[1261].exec("aaab", 3671));
+assertNull(res[1261].exec("a\nb\n", 3672));
+assertNull(res[1262].exec("a\nb\n", 3673));
+assertNull(res[1264].exec("a\nb", 3674));
+assertNull(res[1265].exec("a\nb", 3675));
+assertNull(res[1265].exec("*** Failers", 3676));
+assertNull(res[1265].exec("alphabetabcd", 3677));
+assertNull(res[1265].exec("endingwxyz", 3678));
+assertNull(res[1265].exec("*** Failers", 3679));
+assertNull(res[1265].exec("a rather long string that doesn't end with one of them", 3680));
+assertNull(res[1265].exec("word cat dog elephant mussel cow horse canary baboon snake shark otherword", 3681));
+assertNull(res[1265].exec("word cat dog elephant mussel cow horse canary baboon snake shark", 3682));
+assertNull(res[1265].exec("word cat dog elephant mussel cow horse canary baboon snake shark the quick brown fox and the lazy dog and several other words getting close to thirty by now I hope", 3683));
+assertNull(res[1265].exec("999foo", 3684));
+assertNull(res[1265].exec("123999foo ", 3685));
+assertNull(res[1265].exec("*** Failers", 3686));
+assertNull(res[1265].exec("123abcfoo", 3687));
+assertNull(res[1265].exec("999foo", 3688));
+assertNull(res[1265].exec("123999foo ", 3689));
+assertNull(res[1265].exec("*** Failers", 3690));
+assertNull(res[1265].exec("123abcfoo", 3691));
+assertNull(res[1265].exec("123abcfoo", 3692));
+assertNull(res[1265].exec("123456foo ", 3693));
+assertNull(res[1265].exec("*** Failers", 3694));
+assertNull(res[1265].exec("123999foo  ", 3695));
+assertNull(res[1265].exec("123abcfoo   ", 3696));
+assertNull(res[1265].exec("123456foo ", 3697));
+assertNull(res[1265].exec("*** Failers", 3698));
+assertNull(res[1265].exec("123999foo  ", 3699));
+assertToStringEquals("ZA,A,", res[1266].exec("ZABCDEFG"), 3700);
+assertToStringEquals("ZA,A,", res[1267].exec("ZABCDEFG"), 3701);
+assertToStringEquals("ZA,A,,", res[1268].exec("ZABCDEFG"), 3702);
+assertToStringEquals("ZA,A,,", res[1268].exec("ZABCDEFG"), 3703);
+assertToStringEquals("ZA,A,,", res[1268].exec("ZABCDEFG"), 3704);
+assertToStringEquals("a", res[1269].exec("abbab"), 3705);
+assertToStringEquals("", res[1269].exec("abcde"), 3706);
+assertToStringEquals("", res[1269].exec("-things"), 3707);
+assertToStringEquals("", res[1269].exec("0digit"), 3708);
+assertToStringEquals("", res[1269].exec("*** Failers"), 3709);
+assertToStringEquals("", res[1269].exec("bcdef    "), 3710);
+assertToStringEquals("a", res[1270].exec("abcde"), 3711);
+assertToStringEquals("-", res[1270].exec("-things"), 3712);
+assertToStringEquals("0", res[1270].exec("0digit"), 3713);
+assertNull(res[1270].exec("*** Failers", 3714));
+assertNull(res[1270].exec("bcdef    ", 3715));
+assertNull(res[1271].exec("> \x09\n\x0c\x0d\x0b<", 3716));
+assertNull(res[1271].exec(" ", 3717));
+assertNull(res[1272].exec("> \x09\n\x0c\x0d\x0b<", 3718));
+assertNull(res[1272].exec(" ", 3719));
+assertToStringEquals(" \x09\n\x0c\x0d\x0b", res[1273].exec("> \x09\n\x0c\x0d\x0b<"), 3720);
+assertToStringEquals(" ", res[1273].exec(" "), 3721);
+assertToStringEquals(" \x09\n\x0c\x0d\x0b", res[1274].exec("> \x09\n\x0c\x0d\x0b<"), 3722);
+assertToStringEquals(" ", res[1274].exec(" "), 3723);
+assertNull(res[1275].exec("ab", 3724));
+assertNull(res[1278].exec("abcabcabc", 3725));
+assertNull(res[1278].exec("abc(*+|abc ", 3726));
+assertNull(res[1279].exec("abc abcabc", 3727));
+assertNull(res[1279].exec("*** Failers", 3728));
+assertNull(res[1279].exec("abcabcabc  ", 3729));
+assertNull(res[1280].exec("abc#not comment\n    literal     ", 3730));
+assertNull(res[1281].exec("abc#not comment\n    literal     ", 3731));
+assertNull(res[1282].exec("abc#not comment\n    literal     ", 3732));
+assertNull(res[1283].exec("abc#not comment\n    literal     ", 3733));
+assertNull(res[1284].exec("abc\\$xyz", 3734));
+assertNull(res[1285].exec("abc$xyz", 3735));
+assertNull(res[1286].exec("abc", 3736));
+assertNull(res[1286].exec("*** Failers", 3737));
+assertNull(res[1286].exec("xyzabc  ", 3738));
+assertNull(res[1287].exec("abc1abc2xyzabc3", 3739));
+assertToStringEquals("abc1", res[1288].exec("abc1abc2xyzabc3 "), 3740);
+assertNull(res[1288].exec("XabcdY", 3741));
+assertNull(res[1288].exec("*** Failers ", 3742));
+assertNull(res[1288].exec("Xa b c d Y ", 3743));
+assertToStringEquals("abcY", res[1288].exec("XabcY"), 3744);
+assertNull(res[1288].exec("AxyzB ", 3745));
+assertNull(res[1288].exec("XabCY", 3746));
+assertNull(res[1288].exec("*** Failers", 3747));
+assertToStringEquals("abcY", res[1288].exec("XabcY  "), 3748);
+assertNull(res[1288].exec("abCE", 3749));
+assertNull(res[1288].exec("DE", 3750));
+assertNull(res[1288].exec("*** Failers", 3751));
+assertToStringEquals("abcE", res[1288].exec("abcE"), 3752);
+assertNull(res[1288].exec("abCe  ", 3753));
+assertNull(res[1288].exec("dE", 3754));
+assertNull(res[1288].exec("De    ", 3755));
+assertNull(res[1289].exec("z", 3756));
+assertNull(res[1289].exec("a", 3757));
+assertNull(res[1289].exec("-", 3758));
+assertNull(res[1289].exec("d", 3759));
+assertNull(res[1289].exec("] ", 3760));
+assertNull(res[1289].exec("*** Failers", 3761));
+assertNull(res[1289].exec("b     ", 3762));
+assertToStringEquals("z", res[1290].exec("z"), 3763);
+assertToStringEquals("C", res[1290].exec("C "), 3764);
+assertToStringEquals("M", res[1291].exec("M "), 3765);
+assertNull(res[1292].exec("", 3766));
+assertNull(res[1292].exec("REGular", 3767));
+assertNull(res[1292].exec("regulaer", 3768));
+assertNull(res[1292].exec("Regex  ", 3769));
+assertNull(res[1292].exec("regul\ufffdr ", 3770));
+assertNull(res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3771));
+assertNull(res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3772));
+assertNull(res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3773));
+assertNull(res[1292].exec("\ufffd\ufffd\ufffd\ufffd\ufffd", 3774));
+assertNull(res[1292].exec("\x84XAZXB", 3775));
+assertNull(res[1292].exec("123a", 3776));
+assertNull(res[1292].exec("ac", 3777));
+assertToStringEquals("b,", res[1292].exec("bbbbc"), 3778);
+assertToStringEquals("ab,a", res[1292].exec("abc"), 3779);
+assertNull(res[1292].exec("*** Failers", 3780));
+assertToStringEquals("b,", res[1292].exec("bca"), 3781);
+assertNull(res[1292].exec("", 3782));
+assertToStringEquals("ab,a", res[1292].exec("abc"), 3783);
+assertNull(res[1292].exec("*** Failers", 3784));
+assertToStringEquals("b,", res[1292].exec("bca"), 3785);
+assertToStringEquals("ab,a", res[1292].exec("abc"), 3786);
+assertNull(res[1292].exec("*** Failers", 3787));
+assertNull(res[1292].exec("def  ", 3788));
+assertNull(res[1292].exec("", 3789));
+assertToStringEquals("ab,a", res[1292].exec("abc"), 3790);
+assertNull(res[1292].exec("*** Failers", 3791));
+assertNull(res[1292].exec("def  ", 3792));
+assertNull(res[1292].exec("", 3793));
+assertToStringEquals("line\nbreak", res[1293].exec("this is a line\nbreak"), 3794);
+assertToStringEquals("line\nbreak", res[1293].exec("line one\nthis is a line\nbreak in the second line "), 3795);
+assertToStringEquals("line\nbreak", res[1294].exec("this is a line\nbreak"), 3796);
+assertNull(res[1294].exec("** Failers ", 3797));
+assertToStringEquals("line\nbreak", res[1294].exec("line one\nthis is a line\nbreak in the second line "), 3798);
+assertToStringEquals("line\nbreak", res[1295].exec("this is a line\nbreak"), 3799);
+assertNull(res[1295].exec("** Failers ", 3800));
+assertToStringEquals("line\nbreak", res[1295].exec("line one\nthis is a line\nbreak in the second line "), 3801);
+assertNull(res[1296].exec("123P", 3802));
+assertNull(res[1296].exec("a4PR", 3803));
+assertNull(res[1297].exec("123P", 3804));
+assertNull(res[1297].exec("4PR", 3805));
+assertToStringEquals("", res[1298].exec("a\nb\nc\n"), 3806);
+assertToStringEquals("", res[1298].exec(" "), 3807);
+assertToStringEquals("", res[1298].exec("A\nC\nC\n "), 3808);
+assertToStringEquals("", res[1298].exec("AB"), 3809);
+assertToStringEquals("", res[1298].exec("aB  "), 3810);
+assertToStringEquals("", res[1298].exec("AB"), 3811);
+assertToStringEquals("", res[1298].exec("aB  "), 3812);
+assertToStringEquals("", res[1298].exec("AB"), 3813);
+assertToStringEquals("", res[1298].exec("aB  "), 3814);
+assertToStringEquals("", res[1298].exec("AB"), 3815);
+assertToStringEquals("", res[1298].exec("aB  "), 3816);
+assertToStringEquals("Content-Type:xxxxxyyy ", res[1299].exec("Content-Type:xxxxxyyy "), 3817);
+assertToStringEquals("Content-Type:xxxxxyyyz", res[1300].exec("Content-Type:xxxxxyyyz"), 3818);
+assertToStringEquals("Content-Type:xxxyyy ", res[1301].exec("Content-Type:xxxyyy "), 3819);
+assertToStringEquals("Content-Type:xxxyyyz", res[1302].exec("Content-Type:xxxyyyz"), 3820);
+assertToStringEquals("abc", res[1303].exec("xyz\nabc"), 3821);
+assertToStringEquals("abc", res[1303].exec("xyz\nabc<lf>"), 3822);
+assertToStringEquals("abc", res[1303].exec("xyz\x0d\nabc<lf>"), 3823);
+assertToStringEquals("abc", res[1303].exec("xyz\x0dabc<cr>"), 3824);
+assertToStringEquals("abc", res[1303].exec("xyz\x0d\nabc<crlf>"), 3825);
+assertNull(res[1303].exec("** Failers ", 3826));
+assertToStringEquals("abc", res[1303].exec("xyz\nabc<cr>"), 3827);
+assertToStringEquals("abc", res[1303].exec("xyz\x0d\nabc<cr>"), 3828);
+assertToStringEquals("abc", res[1303].exec("xyz\nabc<crlf>"), 3829);
+assertToStringEquals("abc", res[1303].exec("xyz\x0dabc<crlf>"), 3830);
+assertToStringEquals("abc", res[1303].exec("xyz\x0dabc<lf>"), 3831);
+assertToStringEquals("abc", res[1304].exec("xyzabc"), 3832);
+assertToStringEquals("abc", res[1304].exec("xyzabc\n "), 3833);
+assertToStringEquals("abc", res[1304].exec("xyzabc\npqr "), 3834);
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0d<cr> "), 3835);
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0dpqr<cr> "), 3836);
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0d\n<crlf> "), 3837);
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0d\npqr<crlf> "), 3838);
+assertNull(res[1304].exec("** Failers", 3839));
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0d "), 3840);
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0dpqr "), 3841);
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0d\n "), 3842);
+assertToStringEquals("abc", res[1304].exec("xyzabc\x0d\npqr "), 3843);
+assertToStringEquals("abc", res[1305].exec("xyz\x0dabcdef"), 3844);
+assertToStringEquals("abc", res[1305].exec("xyz\nabcdef<lf>"), 3845);
+assertNull(res[1305].exec("** Failers  ", 3846));
+assertToStringEquals("abc", res[1305].exec("xyz\nabcdef"), 3847);
+assertNull(res[1305].exec("   ", 3848));
+assertToStringEquals("abc", res[1306].exec("xyz\nabcdef"), 3849);
+assertToStringEquals("abc", res[1306].exec("xyz\x0dabcdef<cr>"), 3850);
+assertNull(res[1306].exec("** Failers  ", 3851));
+assertToStringEquals("abc", res[1306].exec("xyz\x0dabcdef"), 3852);
+assertNull(res[1306].exec("   ", 3853));
+assertToStringEquals("abc", res[1307].exec("xyz\x0d\nabcdef"), 3854);
+assertToStringEquals("abc", res[1307].exec("xyz\x0dabcdef<cr>"), 3855);
+assertNull(res[1307].exec("** Failers  ", 3856));
+assertToStringEquals("abc", res[1307].exec("xyz\x0dabcdef"), 3857);
+assertToStringEquals("abc", res[1308].exec("abc\ndef"), 3858);
+assertToStringEquals("abc", res[1308].exec("abc\x0ddef"), 3859);
+assertToStringEquals("abc", res[1308].exec("abc\x0d\ndef"), 3860);
+assertToStringEquals("<cr>abc", res[1308].exec("<cr>abc\ndef"), 3861);
+assertToStringEquals("<cr>abc", res[1308].exec("<cr>abc\x0ddef"), 3862);
+assertToStringEquals("<cr>abc", res[1308].exec("<cr>abc\x0d\ndef"), 3863);
+assertToStringEquals("<crlf>abc", res[1308].exec("<crlf>abc\ndef"), 3864);
+assertToStringEquals("<crlf>abc", res[1308].exec("<crlf>abc\x0ddef"), 3865);
+assertToStringEquals("<crlf>abc", res[1308].exec("<crlf>abc\x0d\ndef"), 3866);
+assertNull(res[1309].exec("abc\ndef", 3867));
+assertNull(res[1309].exec("abc\x0ddef", 3868));
+assertNull(res[1309].exec("abc\x0d\ndef", 3869));
+assertToStringEquals("abc=xyz\\,", res[1310].exec("abc=xyz\\\npqr"), 3870);
+assertToStringEquals("aaaa,a,", res[1311].exec("aaaa"), 3871);
+assertToStringEquals("aaaa", res[1312].exec("aaaa"), 3872);
+assertToStringEquals("aaaa,a,", res[1313].exec("aaaa"), 3873);
+assertToStringEquals("aaaa", res[1314].exec("aaaa"), 3874);
+assertNull(res[1317].exec("a\x0db", 3875));
+assertNull(res[1317].exec("a\nb<cr> ", 3876));
+assertNull(res[1317].exec("** Failers", 3877));
+assertNull(res[1317].exec("a\nb", 3878));
+assertNull(res[1317].exec("a\nb<any>", 3879));
+assertNull(res[1317].exec("a\x0db<cr>   ", 3880));
+assertNull(res[1317].exec("a\x0db<any>   ", 3881));
+assertToStringEquals("abc1", res[1318].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 \x85abc7 JUNK"), 3882);
+assertToStringEquals("abc1", res[1319].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6\x85 abc9"), 3883);
+assertNull(res[1320].exec("a\nb", 3884));
+assertNull(res[1320].exec("a\x0db", 3885));
+assertNull(res[1320].exec("a\x0d\nb", 3886));
+assertNull(res[1320].exec("a\x0bb", 3887));
+assertNull(res[1320].exec("a\x0cb", 3888));
+assertNull(res[1320].exec("a\x85b   ", 3889));
+assertNull(res[1320].exec("** Failers", 3890));
+assertNull(res[1320].exec("a\n\x0db    ", 3891));
+assertToStringEquals("ab", res[1321].exec("ab"), 3892);
+assertNull(res[1321].exec("a\nb", 3893));
+assertNull(res[1321].exec("a\x0db", 3894));
+assertNull(res[1321].exec("a\x0d\nb", 3895));
+assertNull(res[1321].exec("a\x0bb", 3896));
+assertNull(res[1321].exec("a\x0cb", 3897));
+assertNull(res[1321].exec("a\x85b   ", 3898));
+assertNull(res[1321].exec("a\n\x0db    ", 3899));
+assertNull(res[1321].exec("a\n\x0d\x85\x0cb ", 3900));
+assertNull(res[1322].exec("a\nb", 3901));
+assertNull(res[1322].exec("a\x0db", 3902));
+assertNull(res[1322].exec("a\x0d\nb", 3903));
+assertNull(res[1322].exec("a\x0bb", 3904));
+assertNull(res[1322].exec("a\x0cb", 3905));
+assertNull(res[1322].exec("a\x85b   ", 3906));
+assertNull(res[1322].exec("a\n\x0db    ", 3907));
+assertNull(res[1322].exec("a\n\x0d\x85\x0cb ", 3908));
+assertNull(res[1322].exec("** Failers", 3909));
+assertNull(res[1322].exec("ab  ", 3910));
+assertNull(res[1323].exec("a\nb", 3911));
+assertNull(res[1323].exec("a\n\x0db", 3912));
+assertNull(res[1323].exec("a\n\x0d\x85b", 3913));
+assertNull(res[1323].exec("a\x0d\n\x0d\nb ", 3914));
+assertNull(res[1323].exec("a\x0d\n\x0d\n\x0d\nb ", 3915));
+assertNull(res[1323].exec("a\n\x0d\n\x0db", 3916));
+assertNull(res[1323].exec("a\n\n\x0d\nb ", 3917));
+assertNull(res[1323].exec("** Failers", 3918));
+assertNull(res[1323].exec("a\n\n\n\x0db", 3919));
+assertNull(res[1323].exec("a\x0d", 3920));
+assertToStringEquals("aRb", res[1324].exec("aRb"), 3921);
+assertNull(res[1324].exec("** Failers", 3922));
+assertNull(res[1324].exec("a\nb  ", 3923));
+assertToStringEquals("afoo", res[1325].exec("afoo"), 3924);
+assertNull(res[1325].exec("** Failers ", 3925));
+assertNull(res[1325].exec("\x0d\nfoo ", 3926));
+assertNull(res[1325].exec("\nfoo ", 3927));
+assertToStringEquals("afoo", res[1326].exec("afoo"), 3928);
+assertNull(res[1326].exec("\nfoo ", 3929));
+assertNull(res[1326].exec("** Failers ", 3930));
+assertNull(res[1326].exec("\x0d\nfoo ", 3931));
+assertToStringEquals("afoo", res[1327].exec("afoo"), 3932);
+assertNull(res[1327].exec("** Failers ", 3933));
+assertNull(res[1327].exec("\nfoo ", 3934));
+assertNull(res[1327].exec("\x0d\nfoo ", 3935));
+assertToStringEquals("afoo", res[1328].exec("afoo"), 3936);
+assertNull(res[1328].exec("\x0d\nfoo ", 3937));
+assertNull(res[1328].exec("\nfoo ", 3938));
+assertToStringEquals("", res[1329].exec("abc\x0d\x0dxyz"), 3939);
+assertToStringEquals("", res[1329].exec("abc\n\x0dxyz  "), 3940);
+assertNull(res[1329].exec("** Failers ", 3941));
+assertToStringEquals("", res[1329].exec("abc\x0d\nxyz"), 3942);
+assertToStringEquals("X", res[1330].exec("XABC"), 3943);
+assertNull(res[1330].exec("** Failers ", 3944));
+assertToStringEquals("X", res[1330].exec("XABCB"), 3945);
+assertNull(res[1330].exec("abc\x0d\n\x0d\n", 3946));
+assertNull(res[1330].exec("abc\x0d\n\x0d\n", 3947));
+assertNull(res[1330].exec("abc\x0d\n\x0d\n", 3948));
 assertThrows("var re = /(?|(abc)|(xyz))/;", 3949);
 assertThrows("var re = /(x)(?|(abc)|(xyz))(x)/;", 3950);
-assertEquals(null, res[1330].exec("xabcx", 3951));
-assertEquals(null, res[1330].exec("xxyzx ", 3952));
+assertNull(res[1330].exec("xabcx", 3951));
+assertNull(res[1330].exec("xxyzx ", 3952));
 assertThrows("var re = /(x)(?|(abc)(pqr)|(xyz))(x)/;", 3953);
-assertEquals(null, res[1330].exec("xabcpqrx", 3954));
-assertEquals(null, res[1330].exec("xxyzx ", 3955));
-assertEquals(null, res[1330].exec("abcabc", 3956));
-assertEquals(null, res[1330].exec("xyzabc ", 3957));
-assertEquals(null, res[1330].exec("** Failers ", 3958));
-assertEquals(null, res[1330].exec("xyzxyz ", 3959));
-assertEquals(null, res[1331].exec("X X\n", 3960));
-assertEquals(null, res[1331].exec("X\x09X\x0b", 3961));
-assertEquals(null, res[1331].exec("** Failers", 3962));
-assertEquals(null, res[1331].exec("\xa0 X\n   ", 3963));
-assertEquals(null, res[1332].exec("\x09 \xa0X\n\x0b\x0c\x0d\n", 3964));
-assertEquals(null, res[1332].exec("\x09 \xa0\n\x0b\x0c\x0d\n", 3965));
-assertEquals(null, res[1332].exec("\x09 \xa0\n\x0b\x0c", 3966));
-assertEquals(null, res[1332].exec("** Failers ", 3967));
-assertEquals(null, res[1332].exec("\x09 \xa0\n\x0b", 3968));
-assertEquals(null, res[1332].exec(" ", 3969));
-assertEquals(null, res[1333].exec("XY  ABCDE", 3970));
-assertEquals(null, res[1333].exec("XY  PQR ST ", 3971));
-assertEquals(null, res[1334].exec("XY  AB    PQRS", 3972));
-assertEquals(null, res[1335].exec(">XNNNYZ", 3973));
-assertEquals(null, res[1335].exec(">  X NYQZ", 3974));
-assertEquals(null, res[1335].exec("** Failers", 3975));
-assertEquals(null, res[1335].exec(">XYZ   ", 3976));
-assertEquals(null, res[1335].exec(">  X NY Z", 3977));
-assertEquals(null, res[1336].exec(">XY\nZ\nA\x0bNN\x0c", 3978));
-assertEquals(null, res[1336].exec(">\n\x0dX\nY\n\x0bZZZ\nAAA\x0bNNN\x0c", 3979));
-assertEquals(null, res[1337].exec("\x0d\nA", 3980));
-assertEquals("\nA", res[1338].exec("\x0d\nA "), 3981);
-assertEquals("\nA", res[1339].exec("\x0d\nA "), 3982);
-assertEquals("\nA,\n", res[1340].exec("\x0d\nA "), 3983);
-assertEquals(null, res[1341].exec("a\x0db", 3984));
-assertEquals(null, res[1341].exec("a\nb", 3985));
-assertEquals(null, res[1341].exec("a\x0d\nb", 3986));
-assertEquals(null, res[1341].exec("** Failers", 3987));
-assertEquals(null, res[1341].exec("a\x85b", 3988));
-assertEquals(null, res[1341].exec("a\x0bb     ", 3989));
-assertEquals(null, res[1342].exec("a\x0db", 3990));
-assertEquals(null, res[1342].exec("a\nb", 3991));
-assertEquals(null, res[1342].exec("a\x0d\nb", 3992));
-assertEquals(null, res[1342].exec("a\x85b", 3993));
-assertEquals(null, res[1342].exec("a\x0bb     ", 3994));
-assertEquals(null, res[1342].exec("** Failers ", 3995));
-assertEquals(null, res[1342].exec("a\x85b<bsr_anycrlf>", 3996));
-assertEquals(null, res[1342].exec("a\x0bb<bsr_anycrlf>", 3997));
-assertEquals(null, res[1343].exec("a\x0db", 3998));
-assertEquals(null, res[1343].exec("a\nb", 3999));
-assertEquals(null, res[1343].exec("a\x0d\nb", 4000));
-assertEquals(null, res[1343].exec("** Failers", 4001));
-assertEquals(null, res[1343].exec("a\x85b", 4002));
-assertEquals(null, res[1343].exec("a\x0bb     ", 4003));
-assertEquals(null, res[1344].exec("a\x0db", 4004));
-assertEquals(null, res[1344].exec("a\nb", 4005));
-assertEquals(null, res[1344].exec("a\x0d\nb", 4006));
-assertEquals(null, res[1344].exec("a\x85b", 4007));
-assertEquals(null, res[1344].exec("a\x0bb     ", 4008));
-assertEquals(null, res[1344].exec("** Failers ", 4009));
-assertEquals(null, res[1344].exec("a\x85b<bsr_anycrlf>", 4010));
-assertEquals(null, res[1344].exec("a\x0bb<bsr_anycrlf>", 4011));
-assertEquals(null, res[1345].exec("a\x0d\n\nb", 4012));
-assertEquals(null, res[1345].exec("a\n\x0d\x0db", 4013));
-assertEquals(null, res[1345].exec("a\x0d\n\x0d\n\x0d\n\x0d\nb", 4014));
-assertEquals(null, res[1345].exec("** Failers", 4015));
-assertEquals(null, res[1345].exec("a\x8585b", 4016));
-assertEquals(null, res[1345].exec("a\x0b\x00bb     ", 4017));
-assertEquals(null, res[1346].exec("a\x0d\x0db", 4018));
-assertEquals(null, res[1346].exec("a\n\n\nb", 4019));
-assertEquals(null, res[1346].exec("a\x0d\n\n\x0d\x0db", 4020));
-assertEquals(null, res[1346].exec("a\x8585b", 4021));
-assertEquals(null, res[1346].exec("a\x0b\x00bb     ", 4022));
-assertEquals(null, res[1346].exec("** Failers ", 4023));
-assertEquals(null, res[1346].exec("a\x0d\x0d\x0d\x0d\x0db ", 4024));
-assertEquals(null, res[1346].exec("a\x8585b<bsr_anycrlf>", 4025));
-assertEquals(null, res[1346].exec("a\x0b\x00bb<bsr_anycrlf>", 4026));
-assertEquals("abc", res[1347].exec("abc "), 4027);
-assertEquals(null, res[1348].exec("** Failers", 4028));
-assertEquals(null, res[1348].exec("ab", 4029));
-assertEquals(null, res[1349].exec("** Failers", 4030));
-assertEquals(null, res[1349].exec("ab ", 4031));
-assertEquals(null, res[1349].exec("** Failers", 4032));
-assertEquals(null, res[1349].exec("ab ", 4033));
-assertEquals("aXb", res[1350].exec("aXb"), 4034);
-assertEquals("a\nb", res[1350].exec("a\nb "), 4035);
-assertEquals(null, res[1350].exec("** Failers", 4036));
-assertEquals(null, res[1350].exec("ab  ", 4037));
-assertEquals("aXb", res[1351].exec("aXb"), 4038);
-assertEquals("a\nX\nXb", res[1351].exec("a\nX\nXb "), 4039);
-assertEquals(null, res[1351].exec("** Failers", 4040));
-assertEquals(null, res[1351].exec("ab  ", 4041));
-assertEquals(null, res[1352].exec("ab", 4042));
-assertEquals(null, res[1352].exec("ax{100}b  ", 4043));
-assertEquals(null, res[1352].exec("ax{100}x{100}b  ", 4044));
-assertEquals(null, res[1352].exec("ax{100}b  ", 4045));
-assertEquals(null, res[1352].exec("ax{100}x{100}b  ", 4046));
-assertEquals(null, res[1352].exec("*** Failers ", 4047));
-assertEquals(null, res[1352].exec("ab", 4048));
-assertEquals(null, res[1352].exec(" ", 4049));
-assertEquals("X", res[1353].exec("Xoanon"), 4050);
-assertEquals("X", res[1353].exec("+Xoanon"), 4051);
-assertEquals("X", res[1353].exec("x{300}Xoanon "), 4052);
-assertEquals(null, res[1353].exec("*** Failers ", 4053));
-assertEquals(null, res[1353].exec("YXoanon  ", 4054));
-assertEquals("X", res[1354].exec("YXoanon"), 4055);
-assertEquals(null, res[1354].exec("*** Failers", 4056));
-assertEquals(null, res[1354].exec("Xoanon", 4057));
-assertEquals(null, res[1354].exec("+Xoanon    ", 4058));
-assertEquals(null, res[1354].exec("x{300}Xoanon ", 4059));
-assertEquals("X", res[1355].exec("X+oanon"), 4060);
-assertEquals(null, res[1355].exec("ZXx{300}oanon ", 4061));
-assertEquals("X", res[1355].exec("FAX "), 4062);
-assertEquals(null, res[1355].exec("*** Failers ", 4063));
-assertEquals(null, res[1355].exec("Xoanon  ", 4064));
-assertEquals("X", res[1356].exec("Xoanon  "), 4065);
-assertEquals(null, res[1356].exec("*** Failers", 4066));
-assertEquals(null, res[1356].exec("X+oanon", 4067));
-assertEquals("X", res[1356].exec("ZXx{300}oanon "), 4068);
-assertEquals(null, res[1356].exec("FAX ", 4069));
-assertEquals("b", res[1357].exec("abcd"), 4070);
-assertEquals("x", res[1357].exec("ax{100}   "), 4071);
-assertEquals("b", res[1357].exec("ab99"), 4072);
-assertEquals("x", res[1357].exec("x{123}x{123}45"), 4073);
-assertEquals("x", res[1357].exec("x{400}x{401}x{402}6  "), 4074);
-assertEquals("*", res[1357].exec("*** Failers"), 4075);
-assertEquals("d", res[1357].exec("d99"), 4076);
-assertEquals("x", res[1357].exec("x{123}x{122}4   "), 4077);
-assertEquals("x", res[1357].exec("x{400}x{403}6  "), 4078);
-assertEquals("x", res[1357].exec("x{400}x{401}x{402}x{402}6  "), 4079);
-assertEquals(null, res[1358].exec("\ufffd]", 4080));
-assertEquals(null, res[1358].exec("\ufffd", 4081));
-assertEquals(null, res[1358].exec("\ufffd\ufffd\ufffd", 4082));
-assertEquals(null, res[1358].exec("\ufffd\ufffd\ufffd?", 4083));
-assertEquals("acb", res[1359].exec("acb"), 4084);
-assertEquals("ab", res[1359].exec("ab"), 4085);
-assertEquals(null, res[1359].exec("ax{100}b ", 4086));
-assertEquals(null, res[1359].exec("*** Failers", 4087));
-assertEquals(null, res[1359].exec("a\nb  ", 4088));
-assertEquals(null, res[1360].exec("ax{4000}xyb ", 4089));
-assertEquals(null, res[1360].exec("ax{4000}yb ", 4090));
-assertEquals(null, res[1360].exec("ax{4000}x{100}yb ", 4091));
-assertEquals(null, res[1360].exec("*** Failers", 4092));
-assertEquals(null, res[1360].exec("ax{4000}b ", 4093));
-assertEquals(null, res[1360].exec("ac\ncb ", 4094));
-assertEquals("a\xc0,,\xc0", res[1361].exec("a\xc0\x88b"), 4095);
-assertEquals("ax,,x", res[1362].exec("ax{100}b"), 4096);
-assertEquals("a\xc0\x88b,\xc0\x88,b", res[1363].exec("a\xc0\x88b"), 4097);
-assertEquals("ax{100}b,x{100},b", res[1364].exec("ax{100}b"), 4098);
-assertEquals("a\xc0\x92,\xc0,\x92", res[1365].exec("a\xc0\x92bcd"), 4099);
-assertEquals("ax{,x,{", res[1366].exec("ax{240}bcd"), 4100);
-assertEquals("a\xc0\x92,\xc0,\x92", res[1367].exec("a\xc0\x92bcd"), 4101);
-assertEquals("ax{,x,{", res[1368].exec("ax{240}bcd"), 4102);
-assertEquals("a\xc0,,\xc0", res[1369].exec("a\xc0\x92bcd"), 4103);
-assertEquals("ax,,x", res[1370].exec("ax{240}bcd"), 4104);
-assertEquals(null, res[1371].exec("ax{1234}xyb ", 4105));
-assertEquals(null, res[1371].exec("ax{1234}x{4321}yb ", 4106));
-assertEquals(null, res[1371].exec("ax{1234}x{4321}x{3412}b ", 4107));
-assertEquals(null, res[1371].exec("*** Failers", 4108));
-assertEquals(null, res[1371].exec("ax{1234}b ", 4109));
-assertEquals(null, res[1371].exec("ac\ncb ", 4110));
-assertEquals("ax{1234}xyb,x{1234}xy", res[1372].exec("ax{1234}xyb "), 4111);
-assertEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[1372].exec("ax{1234}x{4321}yb "), 4112);
-assertEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[1372].exec("ax{1234}x{4321}x{3412}b "), 4113);
-assertEquals("axxxxbcdefghijb,xxxxbcdefghij", res[1372].exec("axxxxbcdefghijb "), 4114);
-assertEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[1372].exec("ax{1234}x{4321}x{3412}x{3421}b "), 4115);
-assertEquals(null, res[1372].exec("*** Failers", 4116));
-assertEquals("ax{1234}b,x{1234}", res[1372].exec("ax{1234}b "), 4117);
-assertEquals("ax{1234}xyb,x{1234}xy", res[1373].exec("ax{1234}xyb "), 4118);
-assertEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[1373].exec("ax{1234}x{4321}yb "), 4119);
-assertEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[1373].exec("ax{1234}x{4321}x{3412}b "), 4120);
-assertEquals("axxxxb,xxxx", res[1373].exec("axxxxbcdefghijb "), 4121);
-assertEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[1373].exec("ax{1234}x{4321}x{3412}x{3421}b "), 4122);
-assertEquals(null, res[1373].exec("*** Failers", 4123));
-assertEquals("ax{1234}b,x{1234}", res[1373].exec("ax{1234}b "), 4124);
-assertEquals(null, res[1374].exec("ax{1234}xyb ", 4125));
-assertEquals(null, res[1374].exec("ax{1234}x{4321}yb ", 4126));
-assertEquals(null, res[1374].exec("ax{1234}x{4321}x{3412}b ", 4127));
-assertEquals("axxxxb,xxxx", res[1374].exec("axxxxbcdefghijb "), 4128);
-assertEquals(null, res[1374].exec("ax{1234}x{4321}x{3412}x{3421}b ", 4129));
-assertEquals("axbxxb,xbxx", res[1374].exec("axbxxbcdefghijb "), 4130);
-assertEquals("axxxxxb,xxxxx", res[1374].exec("axxxxxbcdefghijb "), 4131);
-assertEquals(null, res[1374].exec("*** Failers", 4132));
-assertEquals(null, res[1374].exec("ax{1234}b ", 4133));
-assertEquals(null, res[1374].exec("axxxxxxbcdefghijb ", 4134));
-assertEquals(null, res[1375].exec("ax{1234}xyb ", 4135));
-assertEquals(null, res[1375].exec("ax{1234}x{4321}yb ", 4136));
-assertEquals(null, res[1375].exec("ax{1234}x{4321}x{3412}b ", 4137));
-assertEquals("axxxxb,xxxx", res[1375].exec("axxxxbcdefghijb "), 4138);
-assertEquals(null, res[1375].exec("ax{1234}x{4321}x{3412}x{3421}b ", 4139));
-assertEquals("axbxxb,xbxx", res[1375].exec("axbxxbcdefghijb "), 4140);
-assertEquals("axxxxxb,xxxxx", res[1375].exec("axxxxxbcdefghijb "), 4141);
-assertEquals(null, res[1375].exec("*** Failers", 4142));
-assertEquals(null, res[1375].exec("ax{1234}b ", 4143));
-assertEquals(null, res[1375].exec("axxxxxxbcdefghijb ", 4144));
-assertEquals(null, res[1375].exec("*** Failers", 4145));
-assertEquals(null, res[1375].exec("x{100}", 4146));
-assertEquals(null, res[1375].exec("aXbcd", 4147));
-assertEquals(null, res[1375].exec("ax{100}bcd", 4148));
-assertEquals(null, res[1375].exec("ax{100000}bcd", 4149));
-assertEquals(null, res[1375].exec("x{100}x{100}x{100}b", 4150));
-assertEquals(null, res[1375].exec("*** Failers ", 4151));
-assertEquals(null, res[1375].exec("x{100}x{100}b", 4152));
-assertEquals(null, res[1375].exec("x{ab} ", 4153));
-assertEquals(null, res[1375].exec("\xc2\xab", 4154));
-assertEquals(null, res[1375].exec("*** Failers ", 4155));
-assertEquals(null, res[1375].exec("\x00{ab}", 4156));
-assertEquals(null, res[1375].exec("WXYZ", 4157));
-assertEquals(null, res[1375].exec("x{256}XYZ ", 4158));
-assertEquals(null, res[1375].exec("*** Failers", 4159));
-assertEquals(null, res[1375].exec("XYZ ", 4160));
-assertEquals("bcd", res[1376].exec("bcd"), 4161);
-assertEquals("00}", res[1376].exec("x{100}aYx{256}Z "), 4162);
-assertEquals("x{", res[1377].exec("x{100}bc"), 4163);
-assertEquals("x{100}bcA", res[1378].exec("x{100}bcAa"), 4164);
-assertEquals("x{", res[1379].exec("x{100}bca"), 4165);
-assertEquals("bcd", res[1380].exec("bcd"), 4166);
-assertEquals("00}", res[1380].exec("x{100}aYx{256}Z "), 4167);
-assertEquals("x{", res[1381].exec("x{100}bc"), 4168);
-assertEquals("x{100}bc", res[1382].exec("x{100}bcAa"), 4169);
-assertEquals("x{", res[1383].exec("x{100}bca"), 4170);
-assertEquals(null, res[1383].exec("abcd", 4171));
-assertEquals(null, res[1383].exec("abcd", 4172));
-assertEquals("x{", res[1383].exec("x{100}x{100} "), 4173);
-assertEquals("x{", res[1383].exec("x{100}x{100} "), 4174);
-assertEquals("x{", res[1383].exec("x{100}x{100}x{100}x{100} "), 4175);
-assertEquals(null, res[1383].exec("abce", 4176));
-assertEquals("x{", res[1383].exec("x{100}x{100}x{100}x{100} "), 4177);
-assertEquals(null, res[1383].exec("abcdx{100}x{100}x{100}x{100} ", 4178));
-assertEquals(null, res[1383].exec("abcdx{100}x{100}x{100}x{100} ", 4179));
-assertEquals(null, res[1383].exec("abcdx{100}x{100}x{100}x{100} ", 4180));
-assertEquals(null, res[1383].exec("abcdx{100}x{100}x{100}XX", 4181));
-assertEquals(null, res[1383].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 4182));
-assertEquals(null, res[1383].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 4183));
-assertEquals("Xy", res[1383].exec("Xyyyax{100}x{100}bXzzz"), 4184);
-assertEquals("X", res[1386].exec("1X2"), 4185);
-assertEquals("x", res[1386].exec("1x{100}2 "), 4186);
-assertEquals(">X", res[1387].exec("> >X Y"), 4187);
-assertEquals(">x", res[1387].exec("> >x{100} Y"), 4188);
-assertEquals("1", res[1388].exec("x{100}3"), 4189);
-assertEquals(" ", res[1389].exec("x{100} X"), 4190);
-assertEquals("abcd", res[1390].exec("12abcd34"), 4191);
-assertEquals("*** Failers", res[1390].exec("*** Failers"), 4192);
-assertEquals("  ", res[1390].exec("1234  "), 4193);
-assertEquals("abc", res[1391].exec("12abcd34"), 4194);
-assertEquals("ab", res[1391].exec("12ab34"), 4195);
-assertEquals("***", res[1391].exec("*** Failers  "), 4196);
-assertEquals(null, res[1391].exec("1234", 4197));
-assertEquals("  ", res[1391].exec("12a34  "), 4198);
-assertEquals("ab", res[1392].exec("12abcd34"), 4199);
-assertEquals("ab", res[1392].exec("12ab34"), 4200);
-assertEquals("**", res[1392].exec("*** Failers  "), 4201);
-assertEquals(null, res[1392].exec("1234", 4202));
-assertEquals("  ", res[1392].exec("12a34  "), 4203);
-assertEquals("12", res[1393].exec("12abcd34"), 4204);
-assertEquals(null, res[1393].exec("*** Failers", 4205));
-assertEquals("12", res[1394].exec("12abcd34"), 4206);
-assertEquals("123", res[1394].exec("1234abcd"), 4207);
-assertEquals(null, res[1394].exec("*** Failers  ", 4208));
-assertEquals(null, res[1394].exec("1.4 ", 4209));
-assertEquals("12", res[1395].exec("12abcd34"), 4210);
-assertEquals("12", res[1395].exec("1234abcd"), 4211);
-assertEquals(null, res[1395].exec("*** Failers  ", 4212));
-assertEquals(null, res[1395].exec("1.4 ", 4213));
-assertEquals("12abcd34", res[1396].exec("12abcd34"), 4214);
-assertEquals("***", res[1396].exec("*** Failers"), 4215);
-assertEquals(null, res[1396].exec("     ", 4216));
-assertEquals("12a", res[1397].exec("12abcd34"), 4217);
-assertEquals("123", res[1397].exec("1234abcd"), 4218);
-assertEquals("***", res[1397].exec("*** Failers"), 4219);
-assertEquals(null, res[1397].exec("       ", 4220));
-assertEquals("12", res[1398].exec("12abcd34"), 4221);
-assertEquals("12", res[1398].exec("1234abcd"), 4222);
-assertEquals("**", res[1398].exec("*** Failers"), 4223);
-assertEquals(null, res[1398].exec("       ", 4224));
-assertEquals(">      <", res[1399].exec("12>      <34"), 4225);
-assertEquals(null, res[1399].exec("*** Failers", 4226));
-assertEquals(">  <", res[1400].exec("ab>  <cd"), 4227);
-assertEquals(">   <", res[1400].exec("ab>   <ce"), 4228);
-assertEquals(null, res[1400].exec("*** Failers", 4229));
-assertEquals(null, res[1400].exec("ab>    <cd ", 4230));
-assertEquals(">  <", res[1401].exec("ab>  <cd"), 4231);
-assertEquals(">   <", res[1401].exec("ab>   <ce"), 4232);
-assertEquals(null, res[1401].exec("*** Failers", 4233));
-assertEquals(null, res[1401].exec("ab>    <cd ", 4234));
-assertEquals("12", res[1402].exec("12      34"), 4235);
-assertEquals("Failers", res[1402].exec("*** Failers"), 4236);
-assertEquals(null, res[1402].exec("+++=*! ", 4237));
-assertEquals("ab", res[1403].exec("ab  cd"), 4238);
-assertEquals("abc", res[1403].exec("abcd ce"), 4239);
-assertEquals("Fai", res[1403].exec("*** Failers"), 4240);
-assertEquals(null, res[1403].exec("a.b.c", 4241));
-assertEquals("ab", res[1404].exec("ab  cd"), 4242);
-assertEquals("ab", res[1404].exec("abcd ce"), 4243);
-assertEquals("Fa", res[1404].exec("*** Failers"), 4244);
-assertEquals(null, res[1404].exec("a.b.c", 4245));
-assertEquals("====", res[1405].exec("12====34"), 4246);
-assertEquals("*** ", res[1405].exec("*** Failers"), 4247);
-assertEquals(" ", res[1405].exec("abcd "), 4248);
-assertEquals("===", res[1406].exec("ab====cd"), 4249);
-assertEquals("==", res[1406].exec("ab==cd"), 4250);
-assertEquals("***", res[1406].exec("*** Failers"), 4251);
-assertEquals(null, res[1406].exec("a.b.c", 4252));
-assertEquals("==", res[1407].exec("ab====cd"), 4253);
-assertEquals("==", res[1407].exec("ab==cd"), 4254);
-assertEquals("**", res[1407].exec("*** Failers"), 4255);
-assertEquals(null, res[1407].exec("a.b.c", 4256));
-assertEquals(null, res[1407].exec("x{100}", 4257));
-assertEquals(null, res[1407].exec("Zx{100}", 4258));
-assertEquals(null, res[1407].exec("x{100}Z", 4259));
-assertEquals("**", res[1407].exec("*** Failers "), 4260);
-assertEquals(null, res[1407].exec("Zx{100}", 4261));
-assertEquals(null, res[1407].exec("x{100}", 4262));
-assertEquals(null, res[1407].exec("x{100}Z", 4263));
-assertEquals("**", res[1407].exec("*** Failers "), 4264);
-assertEquals(null, res[1407].exec("abcx{200}X", 4265));
-assertEquals(null, res[1407].exec("abcx{100}X ", 4266));
-assertEquals("**", res[1407].exec("*** Failers"), 4267);
-assertEquals("  ", res[1407].exec("X  "), 4268);
-assertEquals(null, res[1407].exec("abcx{200}X", 4269));
-assertEquals(null, res[1407].exec("abcx{100}X ", 4270));
-assertEquals(null, res[1407].exec("abQX ", 4271));
-assertEquals("**", res[1407].exec("*** Failers"), 4272);
-assertEquals("  ", res[1407].exec("X  "), 4273);
-assertEquals(null, res[1407].exec("abcx{100}x{200}x{100}X", 4274));
-assertEquals("**", res[1407].exec("*** Failers"), 4275);
-assertEquals(null, res[1407].exec("abcx{200}X", 4276));
-assertEquals("  ", res[1407].exec("X  "), 4277);
-assertEquals(null, res[1407].exec("AX", 4278));
-assertEquals(null, res[1407].exec("x{150}X", 4279));
-assertEquals(null, res[1407].exec("x{500}X ", 4280));
-assertEquals("**", res[1407].exec("*** Failers"), 4281);
-assertEquals(null, res[1407].exec("x{100}X", 4282));
-assertEquals("  ", res[1407].exec("x{200}X   "), 4283);
-assertEquals(null, res[1407].exec("AX", 4284));
-assertEquals(null, res[1407].exec("x{150}X", 4285));
-assertEquals(null, res[1407].exec("x{500}X ", 4286));
-assertEquals("**", res[1407].exec("*** Failers"), 4287);
-assertEquals(null, res[1407].exec("x{100}X", 4288));
-assertEquals("  ", res[1407].exec("x{200}X   "), 4289);
-assertEquals(null, res[1407].exec("QX ", 4290));
-assertEquals(null, res[1407].exec("AX", 4291));
-assertEquals(null, res[1407].exec("x{500}X ", 4292));
-assertEquals("**", res[1407].exec("*** Failers"), 4293);
-assertEquals(null, res[1407].exec("x{100}X", 4294));
-assertEquals(null, res[1407].exec("x{150}X", 4295));
-assertEquals("  ", res[1407].exec("x{200}X   "), 4296);
-assertEquals(null, res[1407].exec("z", 4297));
-assertEquals(null, res[1407].exec("Z ", 4298));
-assertEquals(null, res[1407].exec("x{100}", 4299));
-assertEquals("**", res[1407].exec("*** Failers"), 4300);
-assertEquals(null, res[1407].exec("x{102}", 4301));
-assertEquals("  ", res[1407].exec("y    "), 4302);
-assertEquals("\xff", res[1408].exec(">\xff<"), 4303);
-assertEquals(null, res[1409].exec(">x{ff}<", 4304));
-assertEquals("X", res[1410].exec("XYZ"), 4305);
-assertEquals("X", res[1411].exec("XYZ"), 4306);
-assertEquals("x", res[1411].exec("x{123} "), 4307);
-assertEquals(",", res[1416].exec("catac"), 4308);
-assertEquals(",", res[1416].exec("ax{256}a "), 4309);
-assertEquals(",", res[1416].exec("x{85}"), 4310);
-assertEquals("abc1", res[1417].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 x{0085}abc7 x{2028}abc8 x{2029}abc9 JUNK"), 4311);
-assertEquals("abc1", res[1418].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6x{0085} abc7x{2028} abc8x{2029} abc9"), 4312);
-assertEquals(null, res[1419].exec("a\nb", 4313));
-assertEquals(null, res[1419].exec("a\x0db", 4314));
-assertEquals(null, res[1419].exec("a\x0d\nb", 4315));
-assertEquals(null, res[1419].exec("a\x0bb", 4316));
-assertEquals(null, res[1419].exec("a\x0cb", 4317));
-assertEquals(null, res[1419].exec("ax{85}b   ", 4318));
-assertEquals(null, res[1419].exec("ax{2028}b ", 4319));
-assertEquals(null, res[1419].exec("ax{2029}b ", 4320));
-assertEquals(null, res[1419].exec("** Failers", 4321));
-assertEquals(null, res[1419].exec("a\n\x0db    ", 4322));
-assertEquals("ab", res[1420].exec("ab"), 4323);
-assertEquals(null, res[1420].exec("a\nb", 4324));
-assertEquals(null, res[1420].exec("a\x0db", 4325));
-assertEquals(null, res[1420].exec("a\x0d\nb", 4326));
-assertEquals(null, res[1420].exec("a\x0bb", 4327));
-assertEquals(null, res[1420].exec("a\x0cx{2028}x{2029}b", 4328));
-assertEquals(null, res[1420].exec("ax{85}b   ", 4329));
-assertEquals(null, res[1420].exec("a\n\x0db    ", 4330));
-assertEquals(null, res[1420].exec("a\n\x0dx{85}\x0cb ", 4331));
-assertEquals(null, res[1421].exec("a\nb", 4332));
-assertEquals(null, res[1421].exec("a\x0db", 4333));
-assertEquals(null, res[1421].exec("a\x0d\nb", 4334));
-assertEquals(null, res[1421].exec("a\x0bb", 4335));
-assertEquals(null, res[1421].exec("a\x0cx{2028}x{2029}b", 4336));
-assertEquals(null, res[1421].exec("ax{85}b   ", 4337));
-assertEquals(null, res[1421].exec("a\n\x0db    ", 4338));
-assertEquals(null, res[1421].exec("a\n\x0dx{85}\x0cb ", 4339));
-assertEquals(null, res[1421].exec("** Failers", 4340));
-assertEquals(null, res[1421].exec("ab  ", 4341));
-assertEquals(null, res[1422].exec("a\nb", 4342));
-assertEquals(null, res[1422].exec("a\n\x0db", 4343));
-assertEquals(null, res[1422].exec("a\n\x0dx{85}b", 4344));
-assertEquals(null, res[1422].exec("a\x0d\n\x0d\nb ", 4345));
-assertEquals(null, res[1422].exec("a\x0d\n\x0d\n\x0d\nb ", 4346));
-assertEquals(null, res[1422].exec("a\n\x0d\n\x0db", 4347));
-assertEquals(null, res[1422].exec("a\n\n\x0d\nb ", 4348));
-assertEquals(null, res[1422].exec("** Failers", 4349));
-assertEquals(null, res[1422].exec("a\n\n\n\x0db", 4350));
-assertEquals(null, res[1422].exec("a\x0d", 4351));
-assertEquals(null, res[1423].exec("\x09 x{a0}X\n\x0b\x0c\x0d\n", 4352));
-assertEquals(null, res[1424].exec(" x{a0}X\n\x0b\x0c\x0d\n", 4353));
-assertEquals(null, res[1425].exec(">\x09 x{a0}X\n\n\n<", 4354));
-assertEquals(null, res[1426].exec(">\x09 x{a0}X\n\n\n<", 4355));
-assertEquals(null, res[1427].exec("X X\n", 4356));
-assertEquals(null, res[1427].exec("X\x09X\x0b", 4357));
-assertEquals(null, res[1427].exec("** Failers", 4358));
-assertEquals(null, res[1427].exec("x{a0} X\n   ", 4359));
-assertEquals(null, res[1428].exec("\x09 x{a0}X\n\x0b\x0c\x0d\n", 4360));
-assertEquals(null, res[1428].exec("\x09 x{a0}\n\x0b\x0c\x0d\n", 4361));
-assertEquals(null, res[1428].exec("\x09 x{a0}\n\x0b\x0c", 4362));
-assertEquals(null, res[1428].exec("** Failers ", 4363));
-assertEquals(null, res[1428].exec("\x09 x{a0}\n\x0b", 4364));
-assertEquals(null, res[1428].exec(" ", 4365));
-assertEquals(null, res[1429].exec("x{3001}x{3000}x{2030}x{2028}", 4366));
-assertEquals(null, res[1429].exec("Xx{180e}Xx{85}", 4367));
-assertEquals(null, res[1429].exec("** Failers", 4368));
-assertEquals(null, res[1429].exec("x{2009} X\n   ", 4369));
-assertEquals(null, res[1430].exec("x{1680}x{180e}x{2007}Xx{2028}x{2029}\x0c\x0d\n", 4370));
-assertEquals(null, res[1430].exec("\x09x{205f}x{a0}\nx{2029}\x0cx{2028}\n", 4371));
-assertEquals(null, res[1430].exec("\x09 x{202f}\n\x0b\x0c", 4372));
-assertEquals(null, res[1430].exec("** Failers ", 4373));
-assertEquals(null, res[1430].exec("\x09x{200a}x{a0}x{2028}\x0b", 4374));
-assertEquals(null, res[1430].exec(" ", 4375));
-assertEquals(null, res[1431].exec("a\x0db", 4376));
-assertEquals(null, res[1431].exec("a\nb", 4377));
-assertEquals(null, res[1431].exec("a\x0d\nb", 4378));
-assertEquals(null, res[1431].exec("** Failers", 4379));
-assertEquals(null, res[1431].exec("ax{85}b", 4380));
-assertEquals(null, res[1431].exec("a\x0bb     ", 4381));
-assertEquals(null, res[1432].exec("a\x0db", 4382));
-assertEquals(null, res[1432].exec("a\nb", 4383));
-assertEquals(null, res[1432].exec("a\x0d\nb", 4384));
-assertEquals(null, res[1432].exec("ax{85}b", 4385));
-assertEquals(null, res[1432].exec("a\x0bb     ", 4386));
-assertEquals(null, res[1432].exec("** Failers ", 4387));
-assertEquals(null, res[1432].exec("ax{85}b<bsr_anycrlf>", 4388));
-assertEquals(null, res[1432].exec("a\x0bb<bsr_anycrlf>", 4389));
-assertEquals(null, res[1433].exec("a\x0db", 4390));
-assertEquals(null, res[1433].exec("a\nb", 4391));
-assertEquals(null, res[1433].exec("a\x0d\nb", 4392));
-assertEquals(null, res[1433].exec("** Failers", 4393));
-assertEquals(null, res[1433].exec("ax{85}b", 4394));
-assertEquals(null, res[1433].exec("a\x0bb     ", 4395));
-assertEquals(null, res[1434].exec("a\x0db", 4396));
-assertEquals(null, res[1434].exec("a\nb", 4397));
-assertEquals(null, res[1434].exec("a\x0d\nb", 4398));
-assertEquals(null, res[1434].exec("ax{85}b", 4399));
-assertEquals(null, res[1434].exec("a\x0bb     ", 4400));
-assertEquals(null, res[1434].exec("** Failers ", 4401));
-assertEquals(null, res[1434].exec("ax{85}b<bsr_anycrlf>", 4402));
-assertEquals(null, res[1434].exec("a\x0bb<bsr_anycrlf>", 4403));
-assertEquals("X", res[1435].exec("Ax{1ec5}ABCXYZ"), 4404);
-assertEquals(null, res[1437].exec("AB", 4405));
-assertEquals(null, res[1437].exec("*** Failers", 4406));
-assertEquals(null, res[1437].exec("A0", 4407));
-assertEquals(null, res[1437].exec("00   ", 4408));
-assertEquals(null, res[1438].exec("AB", 4409));
-assertEquals(null, res[1438].exec("Ax{300}BC ", 4410));
-assertEquals(null, res[1438].exec("Ax{300}x{301}x{302}BC ", 4411));
-assertEquals(null, res[1438].exec("*** Failers", 4412));
-assertEquals(null, res[1438].exec("x{300}  ", 4413));
-assertEquals(null, res[1439].exec("ABC", 4414));
-assertEquals(null, res[1439].exec("Ax{300}Bx{300}x{301}C ", 4415));
-assertEquals(null, res[1439].exec("Ax{300}x{301}x{302}BC ", 4416));
-assertEquals(null, res[1439].exec("*** Failers", 4417));
-assertEquals(null, res[1439].exec("x{300}  ", 4418));
-assertEquals(null, res[1440].exec("abcd", 4419));
-assertEquals(null, res[1440].exec("a ", 4420));
-assertEquals(null, res[1440].exec("*** Failers ", 4421));
-assertEquals(null, res[1441].exec("1234", 4422));
-assertEquals(null, res[1441].exec("= ", 4423));
-assertEquals(null, res[1441].exec("*** Failers ", 4424));
-assertEquals(null, res[1441].exec("abcd ", 4425));
-assertEquals(null, res[1442].exec("abcdAx{300}x{301}x{302}", 4426));
-assertEquals(null, res[1442].exec("Ax{300}x{301}x{302}", 4427));
-assertEquals(null, res[1442].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}", 4428));
-assertEquals(null, res[1442].exec("a ", 4429));
-assertEquals(null, res[1442].exec("*** Failers ", 4430));
-assertEquals(null, res[1442].exec("x{300}x{301}x{302}", 4431));
-assertEquals("abc", res[1443].exec("abc"), 4432);
-assertEquals("abc", res[1443].exec("Ax{300}abc"), 4433);
-assertEquals("abc", res[1443].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz"), 4434);
-assertEquals("abc", res[1443].exec("x{300}abc  "), 4435);
-assertEquals(null, res[1443].exec("*** Failers", 4436));
-assertEquals("abc", res[1444].exec("abc"), 4437);
-assertEquals(null, res[1444].exec("Ax{300}abc", 4438));
-assertEquals(null, res[1444].exec("*** Failers", 4439));
-assertEquals(null, res[1444].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz", 4440));
-assertEquals(null, res[1444].exec("x{300}abc  ", 4441));
-assertEquals("abc", res[1445].exec("abc"), 4442);
-assertEquals("abc", res[1445].exec("Ax{300}abc"), 4443);
-assertEquals("abc", res[1445].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz"), 4444);
-assertEquals("abc", res[1445].exec("x{300}abc  "), 4445);
-assertEquals(null, res[1445].exec("*** Failers", 4446));
-assertEquals("abc", res[1446].exec("abc"), 4447);
-assertEquals(null, res[1446].exec("Ax{300}abc", 4448));
-assertEquals(null, res[1446].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz", 4449));
-assertEquals(null, res[1446].exec("*** Failers", 4450));
-assertEquals(null, res[1446].exec("x{300}abc  ", 4451));
-assertEquals(null, res[1447].exec("A=b", 4452));
-assertEquals(null, res[1447].exec("=c ", 4453));
-assertEquals(null, res[1447].exec("*** Failers", 4454));
-assertEquals(null, res[1447].exec("1=2 ", 4455));
-assertEquals(null, res[1447].exec("AAAA=b  ", 4456));
-assertEquals(null, res[1448].exec("AAAA=b", 4457));
-assertEquals(null, res[1448].exec("=c ", 4458));
-assertEquals(null, res[1448].exec("*** Failers", 4459));
-assertEquals(null, res[1448].exec("1=2  ", 4460));
-assertEquals(null, res[1449].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}X", 4461));
-assertEquals(null, res[1449].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}Ax{300}x{301}x{302}X ", 4462));
-assertEquals(null, res[1449].exec("*** Failers", 4463));
-assertEquals(null, res[1449].exec("X", 4464));
-assertEquals(null, res[1449].exec("Ax{300}x{301}x{302}X", 4465));
-assertEquals(null, res[1449].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}Ax{300}x{301}x{302}Ax{300}x{301}x{302}X", 4466));
-assertEquals(null, res[1450].exec("x{c0}x{30f}x{660}x{66c}x{f01}x{1680}<", 4467));
-assertEquals(null, res[1450].exec("\npx{300}9!$ < ", 4468));
-assertEquals(null, res[1450].exec("** Failers ", 4469));
-assertEquals(null, res[1450].exec("apx{300}9!$ < ", 4470));
-assertEquals(null, res[1451].exec("X", 4471));
-assertEquals(null, res[1451].exec("** Failers ", 4472));
-assertEquals(null, res[1451].exec("", 4473));
-assertEquals(null, res[1452].exec("9", 4474));
-assertEquals(null, res[1452].exec("** Failers ", 4475));
-assertEquals(null, res[1452].exec("x{c0}", 4476));
-assertEquals(null, res[1453].exec("X", 4477));
-assertEquals(null, res[1453].exec("** Failers ", 4478));
-assertEquals(null, res[1453].exec("x{30f}", 4479));
-assertEquals(null, res[1454].exec("X", 4480));
-assertEquals(null, res[1454].exec("** Failers ", 4481));
-assertEquals(null, res[1454].exec("x{660}", 4482));
-assertEquals(null, res[1455].exec("X", 4483));
-assertEquals(null, res[1455].exec("** Failers ", 4484));
-assertEquals(null, res[1455].exec("x{66c}", 4485));
-assertEquals(null, res[1456].exec("X", 4486));
-assertEquals(null, res[1456].exec("** Failers ", 4487));
-assertEquals(null, res[1456].exec("x{f01}", 4488));
-assertEquals(null, res[1457].exec("X", 4489));
-assertEquals(null, res[1457].exec("** Failers ", 4490));
-assertEquals(null, res[1457].exec("x{1680}", 4491));
-assertEquals(null, res[1458].exec("x{017}", 4492));
-assertEquals(null, res[1458].exec("x{09f} ", 4493));
-assertEquals(null, res[1458].exec("** Failers", 4494));
-assertEquals(null, res[1458].exec("x{0600} ", 4495));
-assertEquals(null, res[1459].exec("x{601}", 4496));
-assertEquals(null, res[1459].exec("** Failers", 4497));
-assertEquals(null, res[1459].exec("x{09f} ", 4498));
-assertEquals(null, res[1460].exec("** Failers", 4499));
-assertEquals(null, res[1460].exec("x{09f} ", 4500));
-assertEquals(null, res[1461].exec("x{f8ff}", 4501));
-assertEquals(null, res[1461].exec("** Failers", 4502));
-assertEquals(null, res[1461].exec("x{09f} ", 4503));
-assertEquals(null, res[1462].exec("?x{dfff}", 4504));
-assertEquals(null, res[1462].exec("** Failers", 4505));
-assertEquals(null, res[1462].exec("x{09f} ", 4506));
-assertEquals(null, res[1463].exec("a", 4507));
-assertEquals(null, res[1463].exec("** Failers ", 4508));
-assertEquals(null, res[1463].exec("Z", 4509));
-assertEquals(null, res[1463].exec("x{e000}  ", 4510));
-assertEquals(null, res[1464].exec("x{2b0}", 4511));
-assertEquals(null, res[1464].exec("** Failers", 4512));
-assertEquals(null, res[1464].exec("a ", 4513));
-assertEquals(null, res[1465].exec("x{1bb}", 4514));
-assertEquals(null, res[1465].exec("** Failers", 4515));
-assertEquals(null, res[1465].exec("a ", 4516));
-assertEquals(null, res[1465].exec("x{2b0}", 4517));
-assertEquals(null, res[1466].exec("x{1c5}", 4518));
-assertEquals(null, res[1466].exec("** Failers", 4519));
-assertEquals(null, res[1466].exec("a ", 4520));
-assertEquals(null, res[1466].exec("x{2b0}", 4521));
-assertEquals(null, res[1467].exec("A", 4522));
-assertEquals(null, res[1467].exec("** Failers", 4523));
-assertEquals(null, res[1467].exec("x{2b0}", 4524));
-assertEquals(null, res[1468].exec("x{903}", 4525));
-assertEquals(null, res[1468].exec("** Failers", 4526));
-assertEquals(null, res[1468].exec("X", 4527));
-assertEquals(null, res[1468].exec("x{300}", 4528));
-assertEquals(null, res[1468].exec("   ", 4529));
-assertEquals(null, res[1469].exec("x{488}", 4530));
-assertEquals(null, res[1469].exec("** Failers", 4531));
-assertEquals(null, res[1469].exec("X", 4532));
-assertEquals(null, res[1469].exec("x{903}", 4533));
-assertEquals(null, res[1469].exec("x{300}", 4534));
-assertEquals(null, res[1470].exec("x{300}", 4535));
-assertEquals(null, res[1470].exec("** Failers", 4536));
-assertEquals(null, res[1470].exec("X", 4537));
-assertEquals(null, res[1470].exec("x{903}", 4538));
-assertEquals(null, res[1470].exec("0123456789x{660}x{661}x{662}x{663}x{664}x{665}x{666}x{667}x{668}x{669}x{66a}", 4539));
-assertEquals(null, res[1470].exec("x{6f0}x{6f1}x{6f2}x{6f3}x{6f4}x{6f5}x{6f6}x{6f7}x{6f8}x{6f9}x{6fa}", 4540));
-assertEquals(null, res[1470].exec("x{966}x{967}x{968}x{969}x{96a}x{96b}x{96c}x{96d}x{96e}x{96f}x{970}", 4541));
-assertEquals(null, res[1470].exec("** Failers", 4542));
-assertEquals(null, res[1470].exec("X", 4543));
-assertEquals(null, res[1471].exec("x{16ee}", 4544));
-assertEquals(null, res[1471].exec("** Failers", 4545));
-assertEquals(null, res[1471].exec("X", 4546));
-assertEquals(null, res[1471].exec("x{966}", 4547));
-assertEquals(null, res[1472].exec("x{b2}", 4548));
-assertEquals(null, res[1472].exec("x{b3}", 4549));
-assertEquals(null, res[1472].exec("** Failers", 4550));
-assertEquals(null, res[1472].exec("X", 4551));
-assertEquals(null, res[1472].exec("x{16ee}", 4552));
-assertEquals(null, res[1473].exec("_", 4553));
-assertEquals(null, res[1473].exec("x{203f}", 4554));
-assertEquals(null, res[1473].exec("** Failers", 4555));
-assertEquals(null, res[1473].exec("X", 4556));
-assertEquals(null, res[1473].exec("-", 4557));
-assertEquals(null, res[1473].exec("x{58a}", 4558));
-assertEquals(null, res[1474].exec("-", 4559));
-assertEquals(null, res[1474].exec("x{58a}", 4560));
-assertEquals(null, res[1474].exec("** Failers", 4561));
-assertEquals(null, res[1474].exec("X", 4562));
-assertEquals(null, res[1474].exec("x{203f}", 4563));
-assertEquals(null, res[1475].exec(")", 4564));
-assertEquals(null, res[1475].exec("]", 4565));
-assertEquals(null, res[1475].exec("}", 4566));
-assertEquals(null, res[1475].exec("x{f3b}", 4567));
-assertEquals(null, res[1475].exec("** Failers", 4568));
-assertEquals(null, res[1475].exec("X", 4569));
-assertEquals(null, res[1475].exec("x{203f}", 4570));
-assertEquals(null, res[1475].exec("(", 4571));
-assertEquals(null, res[1475].exec("[", 4572));
-assertEquals(null, res[1475].exec("{", 4573));
-assertEquals(null, res[1475].exec("x{f3c}", 4574));
-assertEquals(null, res[1476].exec("x{bb}", 4575));
-assertEquals(null, res[1476].exec("x{2019}", 4576));
-assertEquals(null, res[1476].exec("** Failers", 4577));
-assertEquals(null, res[1476].exec("X", 4578));
-assertEquals(null, res[1476].exec("x{203f}", 4579));
-assertEquals(null, res[1477].exec("x{ab}", 4580));
-assertEquals(null, res[1477].exec("x{2018}", 4581));
-assertEquals(null, res[1477].exec("** Failers", 4582));
-assertEquals(null, res[1477].exec("X", 4583));
-assertEquals(null, res[1477].exec("x{203f}", 4584));
-assertEquals(null, res[1478].exec("!", 4585));
-assertEquals(null, res[1478].exec("x{37e}", 4586));
-assertEquals(null, res[1478].exec("** Failers", 4587));
-assertEquals(null, res[1478].exec("X", 4588));
-assertEquals(null, res[1478].exec("x{203f}", 4589));
-assertEquals(null, res[1479].exec("(", 4590));
-assertEquals(null, res[1479].exec("[", 4591));
-assertEquals(null, res[1479].exec("{", 4592));
-assertEquals(null, res[1479].exec("x{f3c}", 4593));
-assertEquals(null, res[1479].exec("** Failers", 4594));
-assertEquals(null, res[1479].exec("X", 4595));
-assertEquals(null, res[1479].exec(")", 4596));
-assertEquals(null, res[1479].exec("]", 4597));
-assertEquals(null, res[1479].exec("}", 4598));
-assertEquals(null, res[1479].exec("x{f3b}", 4599));
-assertEquals(null, res[1479].exec("$x{a2}x{a3}x{a4}x{a5}x{a6}", 4600));
-assertEquals(null, res[1479].exec("x{9f2}", 4601));
-assertEquals(null, res[1479].exec("** Failers", 4602));
-assertEquals(null, res[1479].exec("X", 4603));
-assertEquals(null, res[1479].exec("x{2c2}", 4604));
-assertEquals(null, res[1480].exec("x{2c2}", 4605));
-assertEquals(null, res[1480].exec("** Failers", 4606));
-assertEquals(null, res[1480].exec("X", 4607));
-assertEquals(null, res[1480].exec("x{9f2}", 4608));
-assertEquals(null, res[1480].exec("+<|~x{ac}x{2044}", 4609));
-assertEquals(null, res[1480].exec("** Failers", 4610));
-assertEquals(null, res[1480].exec("X", 4611));
-assertEquals(null, res[1480].exec("x{9f2}", 4612));
-assertEquals(null, res[1481].exec("x{a6}", 4613));
-assertEquals(null, res[1481].exec("x{482} ", 4614));
-assertEquals(null, res[1481].exec("** Failers", 4615));
-assertEquals(null, res[1481].exec("X", 4616));
-assertEquals(null, res[1481].exec("x{9f2}", 4617));
-assertEquals(null, res[1482].exec("x{2028}", 4618));
-assertEquals(null, res[1482].exec("** Failers", 4619));
-assertEquals(null, res[1482].exec("X", 4620));
-assertEquals(null, res[1482].exec("x{2029}", 4621));
-assertEquals(null, res[1483].exec("x{2029}", 4622));
-assertEquals(null, res[1483].exec("** Failers", 4623));
-assertEquals(null, res[1483].exec("X", 4624));
-assertEquals(null, res[1483].exec("x{2028}", 4625));
-assertEquals(null, res[1484].exec("\\ \\", 4626));
-assertEquals(null, res[1484].exec("x{a0}", 4627));
-assertEquals(null, res[1484].exec("x{1680}", 4628));
-assertEquals(null, res[1484].exec("x{180e}", 4629));
-assertEquals(null, res[1484].exec("x{2000}", 4630));
-assertEquals(null, res[1484].exec("x{2001}     ", 4631));
-assertEquals(null, res[1484].exec("** Failers", 4632));
-assertEquals(null, res[1484].exec("x{2028}", 4633));
-assertEquals(null, res[1484].exec("x{200d} ", 4634));
-assertEquals(null, res[1484].exec("  x{660}x{661}x{662}ABC", 4635));
-assertEquals(null, res[1484].exec("  x{660}x{661}x{662}ABC", 4636));
-assertEquals(null, res[1485].exec("  x{660}x{661}x{662}ABC", 4637));
-assertEquals(null, res[1486].exec("  x{660}x{661}x{662}ABC", 4638));
-assertEquals(null, res[1487].exec("  x{660}x{661}x{662}ABC", 4639));
-assertEquals(null, res[1488].exec("  x{660}x{661}x{662}ABC", 4640));
-assertEquals(null, res[1489].exec("  x{660}x{661}x{662}ABC", 4641));
-assertEquals(null, res[1490].exec("  x{660}x{661}x{662}ABC", 4642));
-assertEquals(null, res[1491].exec("  x{660}x{661}x{662}ABC", 4643));
-assertEquals(null, res[1492].exec("  x{660}x{661}x{662}ABC", 4644));
-assertEquals(null, res[1493].exec("  x{660}x{661}x{662}ABC", 4645));
-assertEquals(null, res[1493].exec("  x{660}x{661}x{662}ABC", 4646));
-assertEquals(null, res[1493].exec("  x{660}x{661}x{662}ABC", 4647));
-assertEquals(null, res[1493].exec("  ** Failers", 4648));
-assertEquals(null, res[1493].exec("  x{660}x{661}x{662}ABC", 4649));
-assertEquals(null, res[1494].exec("A", 4650));
-assertEquals(null, res[1494].exec("ax{10a0}B ", 4651));
-assertEquals(null, res[1494].exec("** Failers ", 4652));
-assertEquals(null, res[1494].exec("a", 4653));
-assertEquals(null, res[1494].exec("x{1d00}  ", 4654));
-assertEquals(null, res[1495].exec("1234", 4655));
-assertEquals(null, res[1495].exec("** Failers", 4656));
-assertEquals(null, res[1495].exec("ABC ", 4657));
-assertEquals(null, res[1496].exec("1234", 4658));
-assertEquals(null, res[1496].exec("** Failers", 4659));
-assertEquals(null, res[1496].exec("ABC ", 4660));
-assertEquals(null, res[1496].exec("A2XYZ", 4661));
-assertEquals(null, res[1496].exec("123A5XYZPQR", 4662));
-assertEquals(null, res[1496].exec("ABAx{660}XYZpqr", 4663));
-assertEquals(null, res[1496].exec("** Failers", 4664));
-assertEquals(null, res[1496].exec("AXYZ", 4665));
-assertEquals(null, res[1496].exec("XYZ     ", 4666));
-assertEquals(null, res[1496].exec("1XYZ", 4667));
-assertEquals(null, res[1496].exec("AB=XYZ.. ", 4668));
-assertEquals(null, res[1496].exec("XYZ ", 4669));
-assertEquals(null, res[1496].exec("** Failers", 4670));
-assertEquals(null, res[1496].exec("WXYZ ", 4671));
-assertEquals(null, res[1497].exec("1234", 4672));
-assertEquals(null, res[1497].exec("1234", 4673));
-assertEquals(null, res[1497].exec("12-34", 4674));
-assertEquals("{", res[1497].exec("12+x{661}-34  "), 4675);
-assertEquals(null, res[1497].exec("** Failers", 4676));
-assertEquals("d", res[1497].exec("abcd  "), 4677);
-assertEquals("d", res[1498].exec("abcd"), 4678);
-assertEquals(null, res[1498].exec("** Failers", 4679));
-assertEquals(null, res[1498].exec("1234", 4680));
-assertEquals(null, res[1499].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4681));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1499].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4682);
-assertEquals(" ", res[1499].exec(" "), 4683);
-assertEquals(null, res[1499].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4684));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1499].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4685);
-assertEquals(null, res[1500].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4686));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1500].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4687);
-assertEquals(null, res[1501].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4688));
-assertEquals(null, res[1501].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 4689));
-assertEquals(null, res[1502].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4690));
-assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1502].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4691);
-assertEquals(null, res[1503].exec("a", 4692));
-assertEquals(null, res[1503].exec("A ", 4693));
-assertEquals(null, res[1504].exec("a", 4694));
-assertEquals(null, res[1504].exec("A ", 4695));
-assertEquals(null, res[1505].exec("A", 4696));
-assertEquals(null, res[1505].exec("aZ", 4697));
-assertEquals(null, res[1505].exec("** Failers", 4698));
-assertEquals(null, res[1505].exec("abc   ", 4699));
-assertEquals(null, res[1506].exec("A", 4700));
-assertEquals(null, res[1506].exec("aZ", 4701));
-assertEquals(null, res[1506].exec("** Failers", 4702));
-assertEquals(null, res[1506].exec("abc   ", 4703));
-assertEquals(null, res[1507].exec("a", 4704));
-assertEquals(null, res[1507].exec("Az", 4705));
-assertEquals(null, res[1507].exec("** Failers", 4706));
-assertEquals(null, res[1507].exec("ABC   ", 4707));
-assertEquals(null, res[1508].exec("a", 4708));
-assertEquals(null, res[1508].exec("Az", 4709));
-assertEquals(null, res[1508].exec("** Failers", 4710));
-assertEquals(null, res[1508].exec("ABC   ", 4711));
-assertEquals(null, res[1508].exec("x{c0}", 4712));
-assertEquals(null, res[1508].exec("x{e0} ", 4713));
-assertEquals(null, res[1508].exec("x{c0}", 4714));
-assertEquals(null, res[1508].exec("x{e0} ", 4715));
-assertEquals(null, res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 4716));
-assertEquals(null, res[1508].exec("** Failers", 4717));
-assertEquals(null, res[1508].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 4718));
-assertEquals(null, res[1508].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 4719));
-assertEquals(null, res[1508].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 4720));
-assertEquals(null, res[1508].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 4721));
-assertEquals(null, res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 4722));
-assertEquals(null, res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 4723));
-assertEquals(null, res[1508].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 4724));
-assertEquals(null, res[1508].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 4725));
-assertEquals(null, res[1508].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 4726));
-assertEquals(null, res[1508].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 4727));
-assertEquals(null, res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 4728));
-assertEquals(null, res[1508].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}", 4729));
-assertEquals(null, res[1508].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 4730));
-assertEquals(null, res[1508].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 4731));
-assertEquals(null, res[1508].exec("x{391}", 4732));
-assertEquals(null, res[1508].exec("x{ff3a}", 4733));
-assertEquals(null, res[1508].exec("x{3b1}", 4734));
-assertEquals(null, res[1508].exec("x{ff5a}   ", 4735));
-assertEquals(null, res[1508].exec("x{c0}", 4736));
-assertEquals(null, res[1508].exec("x{e0} ", 4737));
-assertEquals(null, res[1508].exec("x{104}", 4738));
-assertEquals(null, res[1508].exec("x{105}", 4739));
-assertEquals(null, res[1508].exec("x{109}  ", 4740));
-assertEquals(null, res[1508].exec("** Failers", 4741));
-assertEquals(null, res[1508].exec("x{100}", 4742));
-assertEquals(null, res[1508].exec("x{10a} ", 4743));
-assertEquals(null, res[1508].exec("Z", 4744));
-assertEquals(null, res[1508].exec("z", 4745));
-assertEquals(null, res[1508].exec("x{39c}", 4746));
-assertEquals(null, res[1508].exec("x{178}", 4747));
-assertEquals(null, res[1508].exec("|", 4748));
-assertEquals(null, res[1508].exec("x{80}", 4749));
-assertEquals(null, res[1508].exec("x{ff}", 4750));
-assertEquals(null, res[1508].exec("x{100}", 4751));
-assertEquals(null, res[1508].exec("x{101} ", 4752));
-assertEquals(null, res[1508].exec("** Failers", 4753));
-assertEquals(null, res[1508].exec("x{102}", 4754));
-assertEquals(null, res[1508].exec("Y", 4755));
-assertEquals(null, res[1508].exec("y           ", 4756));
-assertEquals(null, res[1509].exec("A", 4757));
-assertEquals(null, res[1509].exec("Ax{300}BC ", 4758));
-assertEquals(null, res[1509].exec("Ax{300}x{301}x{302}BC ", 4759));
-assertEquals(null, res[1509].exec("*** Failers", 4760));
-assertEquals(null, res[1509].exec("x{300}  ", 4761));
-assertEquals("X", res[1510].exec("X123"), 4762);
-assertEquals(null, res[1510].exec("*** Failers", 4763));
-assertEquals(null, res[1510].exec("AXYZ", 4764));
-assertEquals(null, res[1511].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 4765));
-assertEquals(null, res[1511].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 4766));
-assertEquals(null, res[1512].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 4767));
-assertEquals(null, res[1512].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 4768));
-assertEquals("A,,A", res[1513].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 4769);
-assertEquals("A,,A", res[1513].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 4770);
-assertEquals("A,,A", res[1514].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 4771);
-assertEquals("A,,A", res[1514].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 4772);
-assertEquals(null, res[1515].exec("*** Failers", 4773));
-assertEquals(null, res[1515].exec("Ax{300}x{301}x{302}", 4774));
-assertEquals(null, res[1516].exec("Ax{300}x{301}Bx{300}X", 4775));
-assertEquals(null, res[1516].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 4776));
-assertEquals(null, res[1516].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 4777));
-assertEquals(null, res[1516].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 4778));
-assertEquals(null, res[1517].exec("Ax{300}x{301}Bx{300}X", 4779));
-assertEquals(null, res[1517].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 4780));
-assertEquals(null, res[1517].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 4781));
-assertEquals(null, res[1517].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 4782));
-assertEquals(null, res[1518].exec("12X", 4783));
-assertEquals(null, res[1518].exec("123X", 4784));
-assertEquals(null, res[1518].exec("*** Failers", 4785));
-assertEquals(null, res[1518].exec("X", 4786));
-assertEquals(null, res[1518].exec("1X", 4787));
-assertEquals(null, res[1518].exec("1234X     ", 4788));
-assertEquals(null, res[1518].exec("x{100}   ", 4789));
-assertEquals(null, res[1518].exec("x{101} ", 4790));
-assertEquals(null, res[1518].exec("x{2e81}x{3007}x{2f804}x{31a0}", 4791));
-assertEquals(null, res[1518].exec("** Failers", 4792));
-assertEquals(null, res[1518].exec("x{2e7f}  ", 4793));
-assertEquals(null, res[1518].exec("x{3105}", 4794));
-assertEquals(null, res[1518].exec("** Failers", 4795));
-assertEquals(null, res[1518].exec("x{30ff}  ", 4796));
-assertEquals(null, res[1519].exec("x{06e9}", 4797));
-assertEquals(null, res[1519].exec("x{060b}", 4798));
-assertEquals(null, res[1519].exec("** Failers", 4799));
-assertEquals(null, res[1519].exec("Xx{06e9}   ", 4800));
-assertEquals(null, res[1520].exec("x{2f800}", 4801));
-assertEquals(null, res[1520].exec("** Failers", 4802));
-assertEquals(null, res[1520].exec("x{a014}", 4803));
-assertEquals(null, res[1520].exec("x{a4c6}   ", 4804));
-assertEquals(null, res[1521].exec("AXYZ", 4805));
-assertEquals(null, res[1521].exec("x{1234}XYZ ", 4806));
-assertEquals(null, res[1521].exec("** Failers", 4807));
-assertEquals(null, res[1521].exec("X  ", 4808));
-assertEquals(null, res[1522].exec("** Failers", 4809));
-assertEquals(null, res[1522].exec("AX", 4810));
-assertEquals(null, res[1523].exec("XYZ", 4811));
-assertEquals(null, res[1523].exec("AXYZ", 4812));
-assertEquals(null, res[1523].exec("x{1234}XYZ ", 4813));
-assertEquals(null, res[1523].exec("** Failers", 4814));
-assertEquals(null, res[1523].exec("ABXYZ   ", 4815));
-assertEquals(null, res[1524].exec("XYZ", 4816));
-assertEquals(null, res[1524].exec("** Failers", 4817));
-assertEquals(null, res[1524].exec("AXYZ", 4818));
-assertEquals(null, res[1524].exec("x{1234}XYZ ", 4819));
-assertEquals(null, res[1524].exec("ABXYZ   ", 4820));
-assertEquals(null, res[1524].exec("AXYZ", 4821));
-assertEquals(null, res[1524].exec("x{1234}XYZ", 4822));
-assertEquals(null, res[1524].exec("Ax{1234}XYZ", 4823));
-assertEquals(null, res[1524].exec("** Failers", 4824));
-assertEquals(null, res[1524].exec("XYZ", 4825));
-assertEquals(null, res[1524].exec("** Failers", 4826));
-assertEquals(null, res[1524].exec("AXYZ", 4827));
-assertEquals(null, res[1524].exec("x{1234}XYZ", 4828));
-assertEquals(null, res[1524].exec("Ax{1234}XYZ", 4829));
-assertEquals(null, res[1524].exec("XYZ", 4830));
-assertEquals(null, res[1525].exec("XYZ", 4831));
-assertEquals(null, res[1525].exec("AXYZ", 4832));
-assertEquals(null, res[1525].exec("x{1234}XYZ", 4833));
-assertEquals(null, res[1525].exec("Ax{1234}XYZ", 4834));
-assertEquals(null, res[1525].exec("** Failers", 4835));
-assertEquals(null, res[1526].exec("XYZ", 4836));
-assertEquals(null, res[1526].exec("** Failers", 4837));
-assertEquals(null, res[1526].exec("AXYZ", 4838));
-assertEquals(null, res[1526].exec("x{1234}XYZ", 4839));
-assertEquals(null, res[1526].exec("Ax{1234}XYZ", 4840));
-assertEquals("AX", res[1527].exec("AXYZ"), 4841);
-assertEquals(null, res[1527].exec("x{1234}XYZ ", 4842));
-assertEquals(null, res[1527].exec("** Failers", 4843));
-assertEquals(null, res[1527].exec("X  ", 4844));
-assertEquals(null, res[1528].exec("** Failers", 4845));
-assertEquals("AX", res[1528].exec("AX"), 4846);
-assertEquals("X", res[1529].exec("XYZ"), 4847);
-assertEquals("AX", res[1529].exec("AXYZ"), 4848);
-assertEquals(null, res[1529].exec("x{1234}XYZ ", 4849));
-assertEquals(null, res[1529].exec("** Failers", 4850));
-assertEquals(null, res[1529].exec("ABXYZ   ", 4851));
-assertEquals("X", res[1530].exec("XYZ"), 4852);
-assertEquals(null, res[1530].exec("** Failers", 4853));
-assertEquals("AX", res[1530].exec("AXYZ"), 4854);
-assertEquals(null, res[1530].exec("x{1234}XYZ ", 4855));
-assertEquals(null, res[1530].exec("ABXYZ   ", 4856));
-assertEquals("AX", res[1531].exec("AXYZ"), 4857);
-assertEquals(null, res[1531].exec("x{1234}XYZ", 4858));
-assertEquals(null, res[1531].exec("Ax{1234}XYZ", 4859));
-assertEquals(null, res[1531].exec("** Failers", 4860));
-assertEquals(null, res[1531].exec("XYZ", 4861));
-assertEquals(null, res[1532].exec("** Failers", 4862));
-assertEquals("AX", res[1532].exec("AXYZ"), 4863);
-assertEquals(null, res[1532].exec("x{1234}XYZ", 4864));
-assertEquals(null, res[1532].exec("Ax{1234}XYZ", 4865));
-assertEquals(null, res[1532].exec("XYZ", 4866));
-assertEquals("X", res[1533].exec("XYZ"), 4867);
-assertEquals("AX", res[1533].exec("AXYZ"), 4868);
-assertEquals(null, res[1533].exec("x{1234}XYZ", 4869));
-assertEquals(null, res[1533].exec("Ax{1234}XYZ", 4870));
-assertEquals(null, res[1533].exec("** Failers", 4871));
-assertEquals("X", res[1534].exec("XYZ"), 4872);
-assertEquals(null, res[1534].exec("** Failers", 4873));
-assertEquals("AX", res[1534].exec("AXYZ"), 4874);
-assertEquals(null, res[1534].exec("x{1234}XYZ", 4875));
-assertEquals(null, res[1534].exec("Ax{1234}XYZ", 4876));
-assertEquals(null, res[1535].exec("abcdefgh", 4877));
-assertEquals(null, res[1535].exec("x{1234}\n\x0dx{3456}xyz ", 4878));
-assertEquals(null, res[1536].exec("abcdefgh", 4879));
-assertEquals(null, res[1536].exec("x{1234}\n\x0dx{3456}xyz ", 4880));
-assertEquals(null, res[1537].exec("** Failers", 4881));
-assertEquals(null, res[1537].exec("abcdefgh", 4882));
-assertEquals(null, res[1537].exec("x{1234}\n\x0dx{3456}xyz ", 4883));
-assertEquals(null, res[1538].exec(" AXY", 4884));
-assertEquals(null, res[1538].exec(" aXY", 4885));
-assertEquals(null, res[1538].exec(" x{1c5}XY", 4886));
-assertEquals(null, res[1538].exec(" ** Failers", 4887));
-assertEquals(null, res[1538].exec(" x{1bb}XY", 4888));
-assertEquals(null, res[1538].exec(" x{2b0}XY", 4889));
-assertEquals(null, res[1538].exec(" !XY      ", 4890));
-assertEquals(null, res[1539].exec(" AXY", 4891));
-assertEquals(null, res[1539].exec(" aXY", 4892));
-assertEquals(null, res[1539].exec(" x{1c5}XY", 4893));
-assertEquals(null, res[1539].exec(" ** Failers", 4894));
-assertEquals(null, res[1539].exec(" x{1bb}XY", 4895));
-assertEquals(null, res[1539].exec(" x{2b0}XY", 4896));
-assertEquals(null, res[1539].exec(" !XY      ", 4897));
-assertEquals(null, res[1539].exec(" AXY", 4898));
-assertEquals(null, res[1539].exec(" aXY", 4899));
-assertEquals(null, res[1539].exec(" AbcdeXyz ", 4900));
-assertEquals(null, res[1539].exec(" x{1c5}AbXY", 4901));
-assertEquals(null, res[1539].exec(" abcDEXypqreXlmn ", 4902));
-assertEquals(null, res[1539].exec(" ** Failers", 4903));
-assertEquals(null, res[1539].exec(" x{1bb}XY", 4904));
-assertEquals(null, res[1539].exec(" x{2b0}XY", 4905));
-assertEquals(null, res[1539].exec(" !XY      ", 4906));
-assertEquals(null, res[1540].exec(" AXY", 4907));
-assertEquals(null, res[1540].exec(" aXY", 4908));
-assertEquals(null, res[1540].exec(" AbcdeXyz ", 4909));
-assertEquals(null, res[1540].exec(" x{1c5}AbXY", 4910));
-assertEquals(null, res[1540].exec(" abcDEXypqreXlmn ", 4911));
-assertEquals(null, res[1540].exec(" ** Failers", 4912));
-assertEquals(null, res[1540].exec(" x{1bb}XY", 4913));
-assertEquals(null, res[1540].exec(" x{2b0}XY", 4914));
-assertEquals(null, res[1540].exec(" !XY      ", 4915));
-assertEquals(null, res[1540].exec(" AXY", 4916));
-assertEquals(null, res[1540].exec(" aXY", 4917));
-assertEquals(null, res[1540].exec(" AbcdeXyz ", 4918));
-assertEquals(null, res[1540].exec(" x{1c5}AbXY", 4919));
-assertEquals(null, res[1540].exec(" abcDEXypqreXlmn ", 4920));
-assertEquals(null, res[1540].exec(" ** Failers", 4921));
-assertEquals(null, res[1540].exec(" x{1bb}XY", 4922));
-assertEquals(null, res[1540].exec(" x{2b0}XY", 4923));
-assertEquals(null, res[1540].exec(" !XY      ", 4924));
-assertEquals(null, res[1541].exec(" AXY", 4925));
-assertEquals(null, res[1541].exec(" aXY", 4926));
-assertEquals(null, res[1541].exec(" AbcdeXyz ", 4927));
-assertEquals(null, res[1541].exec(" x{1c5}AbXY", 4928));
-assertEquals(null, res[1541].exec(" abcDEXypqreXlmn ", 4929));
-assertEquals(null, res[1541].exec(" ** Failers", 4930));
-assertEquals(null, res[1541].exec(" x{1bb}XY", 4931));
-assertEquals(null, res[1541].exec(" x{2b0}XY", 4932));
-assertEquals(null, res[1541].exec(" !XY      ", 4933));
-assertEquals(null, res[1542].exec(" !XY", 4934));
-assertEquals(null, res[1542].exec(" x{1bb}XY", 4935));
-assertEquals(null, res[1542].exec(" x{2b0}XY", 4936));
-assertEquals(null, res[1542].exec(" ** Failers", 4937));
-assertEquals(null, res[1542].exec(" x{1c5}XY", 4938));
-assertEquals(null, res[1542].exec(" AXY      ", 4939));
-assertEquals(null, res[1543].exec(" !XY", 4940));
-assertEquals(null, res[1543].exec(" x{1bb}XY", 4941));
-assertEquals(null, res[1543].exec(" x{2b0}XY", 4942));
-assertEquals(null, res[1543].exec(" ** Failers", 4943));
-assertEquals(null, res[1543].exec(" x{1c5}XY", 4944));
-assertEquals(null, res[1543].exec(" AXY      ", 4945));
-assertEquals(null, res[1543].exec("x{c0}x{e0}x{116}x{117}", 4946));
-assertEquals(null, res[1543].exec("x{c0}x{e0}x{116}x{117}", 4947));
-assertEquals(null, res[1545].exec("123abcdefg", 4948));
-assertEquals(null, res[1545].exec("123abc\xc4\xc5zz", 4949));
-assertEquals(null, res[1546].exec("x{102A4}x{AA52}x{A91D}x{1C46}x{10283}x{1092E}x{1C6B}x{A93B}x{A8BF}x{1BA0}x{A50A}====", 4950));
-assertEquals(null, res[1546].exec("x{a77d}x{1d79}", 4951));
-assertEquals(null, res[1546].exec("x{1d79}x{a77d} ", 4952));
-assertEquals(null, res[1546].exec("x{a77d}x{1d79}", 4953));
-assertEquals(null, res[1546].exec("** Failers ", 4954));
-assertEquals(null, res[1546].exec("x{1d79}x{a77d} ", 4955));
+assertNull(res[1330].exec("xabcpqrx", 3954));
+assertNull(res[1330].exec("xxyzx ", 3955));
+assertNull(res[1330].exec("abcabc", 3956));
+assertNull(res[1330].exec("xyzabc ", 3957));
+assertNull(res[1330].exec("** Failers ", 3958));
+assertNull(res[1330].exec("xyzxyz ", 3959));
+assertNull(res[1331].exec("X X\n", 3960));
+assertNull(res[1331].exec("X\x09X\x0b", 3961));
+assertNull(res[1331].exec("** Failers", 3962));
+assertNull(res[1331].exec("\xa0 X\n   ", 3963));
+assertNull(res[1332].exec("\x09 \xa0X\n\x0b\x0c\x0d\n", 3964));
+assertNull(res[1332].exec("\x09 \xa0\n\x0b\x0c\x0d\n", 3965));
+assertNull(res[1332].exec("\x09 \xa0\n\x0b\x0c", 3966));
+assertNull(res[1332].exec("** Failers ", 3967));
+assertNull(res[1332].exec("\x09 \xa0\n\x0b", 3968));
+assertNull(res[1332].exec(" ", 3969));
+assertNull(res[1333].exec("XY  ABCDE", 3970));
+assertNull(res[1333].exec("XY  PQR ST ", 3971));
+assertNull(res[1334].exec("XY  AB    PQRS", 3972));
+assertNull(res[1335].exec(">XNNNYZ", 3973));
+assertNull(res[1335].exec(">  X NYQZ", 3974));
+assertNull(res[1335].exec("** Failers", 3975));
+assertNull(res[1335].exec(">XYZ   ", 3976));
+assertNull(res[1335].exec(">  X NY Z", 3977));
+assertNull(res[1336].exec(">XY\nZ\nA\x0bNN\x0c", 3978));
+assertNull(res[1336].exec(">\n\x0dX\nY\n\x0bZZZ\nAAA\x0bNNN\x0c", 3979));
+assertNull(res[1337].exec("\x0d\nA", 3980));
+assertToStringEquals("\nA", res[1338].exec("\x0d\nA "), 3981);
+assertToStringEquals("\nA", res[1339].exec("\x0d\nA "), 3982);
+assertToStringEquals("\nA,\n", res[1340].exec("\x0d\nA "), 3983);
+assertNull(res[1341].exec("a\x0db", 3984));
+assertNull(res[1341].exec("a\nb", 3985));
+assertNull(res[1341].exec("a\x0d\nb", 3986));
+assertNull(res[1341].exec("** Failers", 3987));
+assertNull(res[1341].exec("a\x85b", 3988));
+assertNull(res[1341].exec("a\x0bb     ", 3989));
+assertNull(res[1342].exec("a\x0db", 3990));
+assertNull(res[1342].exec("a\nb", 3991));
+assertNull(res[1342].exec("a\x0d\nb", 3992));
+assertNull(res[1342].exec("a\x85b", 3993));
+assertNull(res[1342].exec("a\x0bb     ", 3994));
+assertNull(res[1342].exec("** Failers ", 3995));
+assertNull(res[1342].exec("a\x85b<bsr_anycrlf>", 3996));
+assertNull(res[1342].exec("a\x0bb<bsr_anycrlf>", 3997));
+assertNull(res[1343].exec("a\x0db", 3998));
+assertNull(res[1343].exec("a\nb", 3999));
+assertNull(res[1343].exec("a\x0d\nb", 4000));
+assertNull(res[1343].exec("** Failers", 4001));
+assertNull(res[1343].exec("a\x85b", 4002));
+assertNull(res[1343].exec("a\x0bb     ", 4003));
+assertNull(res[1344].exec("a\x0db", 4004));
+assertNull(res[1344].exec("a\nb", 4005));
+assertNull(res[1344].exec("a\x0d\nb", 4006));
+assertNull(res[1344].exec("a\x85b", 4007));
+assertNull(res[1344].exec("a\x0bb     ", 4008));
+assertNull(res[1344].exec("** Failers ", 4009));
+assertNull(res[1344].exec("a\x85b<bsr_anycrlf>", 4010));
+assertNull(res[1344].exec("a\x0bb<bsr_anycrlf>", 4011));
+assertNull(res[1345].exec("a\x0d\n\nb", 4012));
+assertNull(res[1345].exec("a\n\x0d\x0db", 4013));
+assertNull(res[1345].exec("a\x0d\n\x0d\n\x0d\n\x0d\nb", 4014));
+assertNull(res[1345].exec("** Failers", 4015));
+assertNull(res[1345].exec("a\x8585b", 4016));
+assertNull(res[1345].exec("a\x0b\x00bb     ", 4017));
+assertNull(res[1346].exec("a\x0d\x0db", 4018));
+assertNull(res[1346].exec("a\n\n\nb", 4019));
+assertNull(res[1346].exec("a\x0d\n\n\x0d\x0db", 4020));
+assertNull(res[1346].exec("a\x8585b", 4021));
+assertNull(res[1346].exec("a\x0b\x00bb     ", 4022));
+assertNull(res[1346].exec("** Failers ", 4023));
+assertNull(res[1346].exec("a\x0d\x0d\x0d\x0d\x0db ", 4024));
+assertNull(res[1346].exec("a\x8585b<bsr_anycrlf>", 4025));
+assertNull(res[1346].exec("a\x0b\x00bb<bsr_anycrlf>", 4026));
+assertToStringEquals("abc", res[1347].exec("abc "), 4027);
+assertNull(res[1348].exec("** Failers", 4028));
+assertNull(res[1348].exec("ab", 4029));
+assertNull(res[1349].exec("** Failers", 4030));
+assertNull(res[1349].exec("ab ", 4031));
+assertNull(res[1349].exec("** Failers", 4032));
+assertNull(res[1349].exec("ab ", 4033));
+assertToStringEquals("aXb", res[1350].exec("aXb"), 4034);
+assertToStringEquals("a\nb", res[1350].exec("a\nb "), 4035);
+assertNull(res[1350].exec("** Failers", 4036));
+assertNull(res[1350].exec("ab  ", 4037));
+assertToStringEquals("aXb", res[1351].exec("aXb"), 4038);
+assertToStringEquals("a\nX\nXb", res[1351].exec("a\nX\nXb "), 4039);
+assertNull(res[1351].exec("** Failers", 4040));
+assertNull(res[1351].exec("ab  ", 4041));
+assertNull(res[1352].exec("ab", 4042));
+assertNull(res[1352].exec("ax{100}b  ", 4043));
+assertNull(res[1352].exec("ax{100}x{100}b  ", 4044));
+assertNull(res[1352].exec("ax{100}b  ", 4045));
+assertNull(res[1352].exec("ax{100}x{100}b  ", 4046));
+assertNull(res[1352].exec("*** Failers ", 4047));
+assertNull(res[1352].exec("ab", 4048));
+assertNull(res[1352].exec(" ", 4049));
+assertToStringEquals("X", res[1353].exec("Xoanon"), 4050);
+assertToStringEquals("X", res[1353].exec("+Xoanon"), 4051);
+assertToStringEquals("X", res[1353].exec("x{300}Xoanon "), 4052);
+assertNull(res[1353].exec("*** Failers ", 4053));
+assertNull(res[1353].exec("YXoanon  ", 4054));
+assertToStringEquals("X", res[1354].exec("YXoanon"), 4055);
+assertNull(res[1354].exec("*** Failers", 4056));
+assertNull(res[1354].exec("Xoanon", 4057));
+assertNull(res[1354].exec("+Xoanon    ", 4058));
+assertNull(res[1354].exec("x{300}Xoanon ", 4059));
+assertToStringEquals("X", res[1355].exec("X+oanon"), 4060);
+assertNull(res[1355].exec("ZXx{300}oanon ", 4061));
+assertToStringEquals("X", res[1355].exec("FAX "), 4062);
+assertNull(res[1355].exec("*** Failers ", 4063));
+assertNull(res[1355].exec("Xoanon  ", 4064));
+assertToStringEquals("X", res[1356].exec("Xoanon  "), 4065);
+assertNull(res[1356].exec("*** Failers", 4066));
+assertNull(res[1356].exec("X+oanon", 4067));
+assertToStringEquals("X", res[1356].exec("ZXx{300}oanon "), 4068);
+assertNull(res[1356].exec("FAX ", 4069));
+assertToStringEquals("b", res[1357].exec("abcd"), 4070);
+assertToStringEquals("x", res[1357].exec("ax{100}   "), 4071);
+assertToStringEquals("b", res[1357].exec("ab99"), 4072);
+assertToStringEquals("x", res[1357].exec("x{123}x{123}45"), 4073);
+assertToStringEquals("x", res[1357].exec("x{400}x{401}x{402}6  "), 4074);
+assertToStringEquals("*", res[1357].exec("*** Failers"), 4075);
+assertToStringEquals("d", res[1357].exec("d99"), 4076);
+assertToStringEquals("x", res[1357].exec("x{123}x{122}4   "), 4077);
+assertToStringEquals("x", res[1357].exec("x{400}x{403}6  "), 4078);
+assertToStringEquals("x", res[1357].exec("x{400}x{401}x{402}x{402}6  "), 4079);
+assertNull(res[1358].exec("\ufffd]", 4080));
+assertNull(res[1358].exec("\ufffd", 4081));
+assertNull(res[1358].exec("\ufffd\ufffd\ufffd", 4082));
+assertNull(res[1358].exec("\ufffd\ufffd\ufffd?", 4083));
+assertToStringEquals("acb", res[1359].exec("acb"), 4084);
+assertToStringEquals("ab", res[1359].exec("ab"), 4085);
+assertNull(res[1359].exec("ax{100}b ", 4086));
+assertNull(res[1359].exec("*** Failers", 4087));
+assertNull(res[1359].exec("a\nb  ", 4088));
+assertNull(res[1360].exec("ax{4000}xyb ", 4089));
+assertNull(res[1360].exec("ax{4000}yb ", 4090));
+assertNull(res[1360].exec("ax{4000}x{100}yb ", 4091));
+assertNull(res[1360].exec("*** Failers", 4092));
+assertNull(res[1360].exec("ax{4000}b ", 4093));
+assertNull(res[1360].exec("ac\ncb ", 4094));
+assertToStringEquals("a\xc0,,\xc0", res[1361].exec("a\xc0\x88b"), 4095);
+assertToStringEquals("ax,,x", res[1362].exec("ax{100}b"), 4096);
+assertToStringEquals("a\xc0\x88b,\xc0\x88,b", res[1363].exec("a\xc0\x88b"), 4097);
+assertToStringEquals("ax{100}b,x{100},b", res[1364].exec("ax{100}b"), 4098);
+assertToStringEquals("a\xc0\x92,\xc0,\x92", res[1365].exec("a\xc0\x92bcd"), 4099);
+assertToStringEquals("ax{,x,{", res[1366].exec("ax{240}bcd"), 4100);
+assertToStringEquals("a\xc0\x92,\xc0,\x92", res[1367].exec("a\xc0\x92bcd"), 4101);
+assertToStringEquals("ax{,x,{", res[1368].exec("ax{240}bcd"), 4102);
+assertToStringEquals("a\xc0,,\xc0", res[1369].exec("a\xc0\x92bcd"), 4103);
+assertToStringEquals("ax,,x", res[1370].exec("ax{240}bcd"), 4104);
+assertNull(res[1371].exec("ax{1234}xyb ", 4105));
+assertNull(res[1371].exec("ax{1234}x{4321}yb ", 4106));
+assertNull(res[1371].exec("ax{1234}x{4321}x{3412}b ", 4107));
+assertNull(res[1371].exec("*** Failers", 4108));
+assertNull(res[1371].exec("ax{1234}b ", 4109));
+assertNull(res[1371].exec("ac\ncb ", 4110));
+assertToStringEquals("ax{1234}xyb,x{1234}xy", res[1372].exec("ax{1234}xyb "), 4111);
+assertToStringEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[1372].exec("ax{1234}x{4321}yb "), 4112);
+assertToStringEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[1372].exec("ax{1234}x{4321}x{3412}b "), 4113);
+assertToStringEquals("axxxxbcdefghijb,xxxxbcdefghij", res[1372].exec("axxxxbcdefghijb "), 4114);
+assertToStringEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[1372].exec("ax{1234}x{4321}x{3412}x{3421}b "), 4115);
+assertNull(res[1372].exec("*** Failers", 4116));
+assertToStringEquals("ax{1234}b,x{1234}", res[1372].exec("ax{1234}b "), 4117);
+assertToStringEquals("ax{1234}xyb,x{1234}xy", res[1373].exec("ax{1234}xyb "), 4118);
+assertToStringEquals("ax{1234}x{4321}yb,x{1234}x{4321}y", res[1373].exec("ax{1234}x{4321}yb "), 4119);
+assertToStringEquals("ax{1234}x{4321}x{3412}b,x{1234}x{4321}x{3412}", res[1373].exec("ax{1234}x{4321}x{3412}b "), 4120);
+assertToStringEquals("axxxxb,xxxx", res[1373].exec("axxxxbcdefghijb "), 4121);
+assertToStringEquals("ax{1234}x{4321}x{3412}x{3421}b,x{1234}x{4321}x{3412}x{3421}", res[1373].exec("ax{1234}x{4321}x{3412}x{3421}b "), 4122);
+assertNull(res[1373].exec("*** Failers", 4123));
+assertToStringEquals("ax{1234}b,x{1234}", res[1373].exec("ax{1234}b "), 4124);
+assertNull(res[1374].exec("ax{1234}xyb ", 4125));
+assertNull(res[1374].exec("ax{1234}x{4321}yb ", 4126));
+assertNull(res[1374].exec("ax{1234}x{4321}x{3412}b ", 4127));
+assertToStringEquals("axxxxb,xxxx", res[1374].exec("axxxxbcdefghijb "), 4128);
+assertNull(res[1374].exec("ax{1234}x{4321}x{3412}x{3421}b ", 4129));
+assertToStringEquals("axbxxb,xbxx", res[1374].exec("axbxxbcdefghijb "), 4130);
+assertToStringEquals("axxxxxb,xxxxx", res[1374].exec("axxxxxbcdefghijb "), 4131);
+assertNull(res[1374].exec("*** Failers", 4132));
+assertNull(res[1374].exec("ax{1234}b ", 4133));
+assertNull(res[1374].exec("axxxxxxbcdefghijb ", 4134));
+assertNull(res[1375].exec("ax{1234}xyb ", 4135));
+assertNull(res[1375].exec("ax{1234}x{4321}yb ", 4136));
+assertNull(res[1375].exec("ax{1234}x{4321}x{3412}b ", 4137));
+assertToStringEquals("axxxxb,xxxx", res[1375].exec("axxxxbcdefghijb "), 4138);
+assertNull(res[1375].exec("ax{1234}x{4321}x{3412}x{3421}b ", 4139));
+assertToStringEquals("axbxxb,xbxx", res[1375].exec("axbxxbcdefghijb "), 4140);
+assertToStringEquals("axxxxxb,xxxxx", res[1375].exec("axxxxxbcdefghijb "), 4141);
+assertNull(res[1375].exec("*** Failers", 4142));
+assertNull(res[1375].exec("ax{1234}b ", 4143));
+assertNull(res[1375].exec("axxxxxxbcdefghijb ", 4144));
+assertNull(res[1375].exec("*** Failers", 4145));
+assertNull(res[1375].exec("x{100}", 4146));
+assertNull(res[1375].exec("aXbcd", 4147));
+assertNull(res[1375].exec("ax{100}bcd", 4148));
+assertNull(res[1375].exec("ax{100000}bcd", 4149));
+assertNull(res[1375].exec("x{100}x{100}x{100}b", 4150));
+assertNull(res[1375].exec("*** Failers ", 4151));
+assertNull(res[1375].exec("x{100}x{100}b", 4152));
+assertNull(res[1375].exec("x{ab} ", 4153));
+assertNull(res[1375].exec("\xc2\xab", 4154));
+assertNull(res[1375].exec("*** Failers ", 4155));
+assertNull(res[1375].exec("\x00{ab}", 4156));
+assertNull(res[1375].exec("WXYZ", 4157));
+assertNull(res[1375].exec("x{256}XYZ ", 4158));
+assertNull(res[1375].exec("*** Failers", 4159));
+assertNull(res[1375].exec("XYZ ", 4160));
+assertToStringEquals("bcd", res[1376].exec("bcd"), 4161);
+assertToStringEquals("00}", res[1376].exec("x{100}aYx{256}Z "), 4162);
+assertToStringEquals("x{", res[1377].exec("x{100}bc"), 4163);
+assertToStringEquals("x{100}bcA", res[1378].exec("x{100}bcAa"), 4164);
+assertToStringEquals("x{", res[1379].exec("x{100}bca"), 4165);
+assertToStringEquals("bcd", res[1380].exec("bcd"), 4166);
+assertToStringEquals("00}", res[1380].exec("x{100}aYx{256}Z "), 4167);
+assertToStringEquals("x{", res[1381].exec("x{100}bc"), 4168);
+assertToStringEquals("x{100}bc", res[1382].exec("x{100}bcAa"), 4169);
+assertToStringEquals("x{", res[1383].exec("x{100}bca"), 4170);
+assertNull(res[1383].exec("abcd", 4171));
+assertNull(res[1383].exec("abcd", 4172));
+assertToStringEquals("x{", res[1383].exec("x{100}x{100} "), 4173);
+assertToStringEquals("x{", res[1383].exec("x{100}x{100} "), 4174);
+assertToStringEquals("x{", res[1383].exec("x{100}x{100}x{100}x{100} "), 4175);
+assertNull(res[1383].exec("abce", 4176));
+assertToStringEquals("x{", res[1383].exec("x{100}x{100}x{100}x{100} "), 4177);
+assertNull(res[1383].exec("abcdx{100}x{100}x{100}x{100} ", 4178));
+assertNull(res[1383].exec("abcdx{100}x{100}x{100}x{100} ", 4179));
+assertNull(res[1383].exec("abcdx{100}x{100}x{100}x{100} ", 4180));
+assertNull(res[1383].exec("abcdx{100}x{100}x{100}XX", 4181));
+assertNull(res[1383].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 4182));
+assertNull(res[1383].exec("abcdx{100}x{100}x{100}x{100}x{100}x{100}x{100}XX", 4183));
+assertToStringEquals("Xy", res[1383].exec("Xyyyax{100}x{100}bXzzz"), 4184);
+assertToStringEquals("X", res[1386].exec("1X2"), 4185);
+assertToStringEquals("x", res[1386].exec("1x{100}2 "), 4186);
+assertToStringEquals(">X", res[1387].exec("> >X Y"), 4187);
+assertToStringEquals(">x", res[1387].exec("> >x{100} Y"), 4188);
+assertToStringEquals("1", res[1388].exec("x{100}3"), 4189);
+assertToStringEquals(" ", res[1389].exec("x{100} X"), 4190);
+assertToStringEquals("abcd", res[1390].exec("12abcd34"), 4191);
+assertToStringEquals("*** Failers", res[1390].exec("*** Failers"), 4192);
+assertToStringEquals("  ", res[1390].exec("1234  "), 4193);
+assertToStringEquals("abc", res[1391].exec("12abcd34"), 4194);
+assertToStringEquals("ab", res[1391].exec("12ab34"), 4195);
+assertToStringEquals("***", res[1391].exec("*** Failers  "), 4196);
+assertNull(res[1391].exec("1234", 4197));
+assertToStringEquals("  ", res[1391].exec("12a34  "), 4198);
+assertToStringEquals("ab", res[1392].exec("12abcd34"), 4199);
+assertToStringEquals("ab", res[1392].exec("12ab34"), 4200);
+assertToStringEquals("**", res[1392].exec("*** Failers  "), 4201);
+assertNull(res[1392].exec("1234", 4202));
+assertToStringEquals("  ", res[1392].exec("12a34  "), 4203);
+assertToStringEquals("12", res[1393].exec("12abcd34"), 4204);
+assertNull(res[1393].exec("*** Failers", 4205));
+assertToStringEquals("12", res[1394].exec("12abcd34"), 4206);
+assertToStringEquals("123", res[1394].exec("1234abcd"), 4207);
+assertNull(res[1394].exec("*** Failers  ", 4208));
+assertNull(res[1394].exec("1.4 ", 4209));
+assertToStringEquals("12", res[1395].exec("12abcd34"), 4210);
+assertToStringEquals("12", res[1395].exec("1234abcd"), 4211);
+assertNull(res[1395].exec("*** Failers  ", 4212));
+assertNull(res[1395].exec("1.4 ", 4213));
+assertToStringEquals("12abcd34", res[1396].exec("12abcd34"), 4214);
+assertToStringEquals("***", res[1396].exec("*** Failers"), 4215);
+assertNull(res[1396].exec("     ", 4216));
+assertToStringEquals("12a", res[1397].exec("12abcd34"), 4217);
+assertToStringEquals("123", res[1397].exec("1234abcd"), 4218);
+assertToStringEquals("***", res[1397].exec("*** Failers"), 4219);
+assertNull(res[1397].exec("       ", 4220));
+assertToStringEquals("12", res[1398].exec("12abcd34"), 4221);
+assertToStringEquals("12", res[1398].exec("1234abcd"), 4222);
+assertToStringEquals("**", res[1398].exec("*** Failers"), 4223);
+assertNull(res[1398].exec("       ", 4224));
+assertToStringEquals(">      <", res[1399].exec("12>      <34"), 4225);
+assertNull(res[1399].exec("*** Failers", 4226));
+assertToStringEquals(">  <", res[1400].exec("ab>  <cd"), 4227);
+assertToStringEquals(">   <", res[1400].exec("ab>   <ce"), 4228);
+assertNull(res[1400].exec("*** Failers", 4229));
+assertNull(res[1400].exec("ab>    <cd ", 4230));
+assertToStringEquals(">  <", res[1401].exec("ab>  <cd"), 4231);
+assertToStringEquals(">   <", res[1401].exec("ab>   <ce"), 4232);
+assertNull(res[1401].exec("*** Failers", 4233));
+assertNull(res[1401].exec("ab>    <cd ", 4234));
+assertToStringEquals("12", res[1402].exec("12      34"), 4235);
+assertToStringEquals("Failers", res[1402].exec("*** Failers"), 4236);
+assertNull(res[1402].exec("+++=*! ", 4237));
+assertToStringEquals("ab", res[1403].exec("ab  cd"), 4238);
+assertToStringEquals("abc", res[1403].exec("abcd ce"), 4239);
+assertToStringEquals("Fai", res[1403].exec("*** Failers"), 4240);
+assertNull(res[1403].exec("a.b.c", 4241));
+assertToStringEquals("ab", res[1404].exec("ab  cd"), 4242);
+assertToStringEquals("ab", res[1404].exec("abcd ce"), 4243);
+assertToStringEquals("Fa", res[1404].exec("*** Failers"), 4244);
+assertNull(res[1404].exec("a.b.c", 4245));
+assertToStringEquals("====", res[1405].exec("12====34"), 4246);
+assertToStringEquals("*** ", res[1405].exec("*** Failers"), 4247);
+assertToStringEquals(" ", res[1405].exec("abcd "), 4248);
+assertToStringEquals("===", res[1406].exec("ab====cd"), 4249);
+assertToStringEquals("==", res[1406].exec("ab==cd"), 4250);
+assertToStringEquals("***", res[1406].exec("*** Failers"), 4251);
+assertNull(res[1406].exec("a.b.c", 4252));
+assertToStringEquals("==", res[1407].exec("ab====cd"), 4253);
+assertToStringEquals("==", res[1407].exec("ab==cd"), 4254);
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4255);
+assertNull(res[1407].exec("a.b.c", 4256));
+assertNull(res[1407].exec("x{100}", 4257));
+assertNull(res[1407].exec("Zx{100}", 4258));
+assertNull(res[1407].exec("x{100}Z", 4259));
+assertToStringEquals("**", res[1407].exec("*** Failers "), 4260);
+assertNull(res[1407].exec("Zx{100}", 4261));
+assertNull(res[1407].exec("x{100}", 4262));
+assertNull(res[1407].exec("x{100}Z", 4263));
+assertToStringEquals("**", res[1407].exec("*** Failers "), 4264);
+assertNull(res[1407].exec("abcx{200}X", 4265));
+assertNull(res[1407].exec("abcx{100}X ", 4266));
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4267);
+assertToStringEquals("  ", res[1407].exec("X  "), 4268);
+assertNull(res[1407].exec("abcx{200}X", 4269));
+assertNull(res[1407].exec("abcx{100}X ", 4270));
+assertNull(res[1407].exec("abQX ", 4271));
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4272);
+assertToStringEquals("  ", res[1407].exec("X  "), 4273);
+assertNull(res[1407].exec("abcx{100}x{200}x{100}X", 4274));
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4275);
+assertNull(res[1407].exec("abcx{200}X", 4276));
+assertToStringEquals("  ", res[1407].exec("X  "), 4277);
+assertNull(res[1407].exec("AX", 4278));
+assertNull(res[1407].exec("x{150}X", 4279));
+assertNull(res[1407].exec("x{500}X ", 4280));
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4281);
+assertNull(res[1407].exec("x{100}X", 4282));
+assertToStringEquals("  ", res[1407].exec("x{200}X   "), 4283);
+assertNull(res[1407].exec("AX", 4284));
+assertNull(res[1407].exec("x{150}X", 4285));
+assertNull(res[1407].exec("x{500}X ", 4286));
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4287);
+assertNull(res[1407].exec("x{100}X", 4288));
+assertToStringEquals("  ", res[1407].exec("x{200}X   "), 4289);
+assertNull(res[1407].exec("QX ", 4290));
+assertNull(res[1407].exec("AX", 4291));
+assertNull(res[1407].exec("x{500}X ", 4292));
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4293);
+assertNull(res[1407].exec("x{100}X", 4294));
+assertNull(res[1407].exec("x{150}X", 4295));
+assertToStringEquals("  ", res[1407].exec("x{200}X   "), 4296);
+assertNull(res[1407].exec("z", 4297));
+assertNull(res[1407].exec("Z ", 4298));
+assertNull(res[1407].exec("x{100}", 4299));
+assertToStringEquals("**", res[1407].exec("*** Failers"), 4300);
+assertNull(res[1407].exec("x{102}", 4301));
+assertToStringEquals("  ", res[1407].exec("y    "), 4302);
+assertToStringEquals("\xff", res[1408].exec(">\xff<"), 4303);
+assertNull(res[1409].exec(">x{ff}<", 4304));
+assertToStringEquals("X", res[1410].exec("XYZ"), 4305);
+assertToStringEquals("X", res[1411].exec("XYZ"), 4306);
+assertToStringEquals("x", res[1411].exec("x{123} "), 4307);
+assertToStringEquals(",", res[1416].exec("catac"), 4308);
+assertToStringEquals(",", res[1416].exec("ax{256}a "), 4309);
+assertToStringEquals(",", res[1416].exec("x{85}"), 4310);
+assertToStringEquals("abc1", res[1417].exec("abc1 \nabc2 \x0babc3xx \x0cabc4 \x0dabc5xx \x0d\nabc6 x{0085}abc7 x{2028}abc8 x{2029}abc9 JUNK"), 4311);
+assertToStringEquals("abc1", res[1418].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d abc5\x0d\n abc6x{0085} abc7x{2028} abc8x{2029} abc9"), 4312);
+assertNull(res[1419].exec("a\nb", 4313));
+assertNull(res[1419].exec("a\x0db", 4314));
+assertNull(res[1419].exec("a\x0d\nb", 4315));
+assertNull(res[1419].exec("a\x0bb", 4316));
+assertNull(res[1419].exec("a\x0cb", 4317));
+assertNull(res[1419].exec("ax{85}b   ", 4318));
+assertNull(res[1419].exec("ax{2028}b ", 4319));
+assertNull(res[1419].exec("ax{2029}b ", 4320));
+assertNull(res[1419].exec("** Failers", 4321));
+assertNull(res[1419].exec("a\n\x0db    ", 4322));
+assertToStringEquals("ab", res[1420].exec("ab"), 4323);
+assertNull(res[1420].exec("a\nb", 4324));
+assertNull(res[1420].exec("a\x0db", 4325));
+assertNull(res[1420].exec("a\x0d\nb", 4326));
+assertNull(res[1420].exec("a\x0bb", 4327));
+assertNull(res[1420].exec("a\x0cx{2028}x{2029}b", 4328));
+assertNull(res[1420].exec("ax{85}b   ", 4329));
+assertNull(res[1420].exec("a\n\x0db    ", 4330));
+assertNull(res[1420].exec("a\n\x0dx{85}\x0cb ", 4331));
+assertNull(res[1421].exec("a\nb", 4332));
+assertNull(res[1421].exec("a\x0db", 4333));
+assertNull(res[1421].exec("a\x0d\nb", 4334));
+assertNull(res[1421].exec("a\x0bb", 4335));
+assertNull(res[1421].exec("a\x0cx{2028}x{2029}b", 4336));
+assertNull(res[1421].exec("ax{85}b   ", 4337));
+assertNull(res[1421].exec("a\n\x0db    ", 4338));
+assertNull(res[1421].exec("a\n\x0dx{85}\x0cb ", 4339));
+assertNull(res[1421].exec("** Failers", 4340));
+assertNull(res[1421].exec("ab  ", 4341));
+assertNull(res[1422].exec("a\nb", 4342));
+assertNull(res[1422].exec("a\n\x0db", 4343));
+assertNull(res[1422].exec("a\n\x0dx{85}b", 4344));
+assertNull(res[1422].exec("a\x0d\n\x0d\nb ", 4345));
+assertNull(res[1422].exec("a\x0d\n\x0d\n\x0d\nb ", 4346));
+assertNull(res[1422].exec("a\n\x0d\n\x0db", 4347));
+assertNull(res[1422].exec("a\n\n\x0d\nb ", 4348));
+assertNull(res[1422].exec("** Failers", 4349));
+assertNull(res[1422].exec("a\n\n\n\x0db", 4350));
+assertNull(res[1422].exec("a\x0d", 4351));
+assertNull(res[1423].exec("\x09 x{a0}X\n\x0b\x0c\x0d\n", 4352));
+assertNull(res[1424].exec(" x{a0}X\n\x0b\x0c\x0d\n", 4353));
+assertNull(res[1425].exec(">\x09 x{a0}X\n\n\n<", 4354));
+assertNull(res[1426].exec(">\x09 x{a0}X\n\n\n<", 4355));
+assertNull(res[1427].exec("X X\n", 4356));
+assertNull(res[1427].exec("X\x09X\x0b", 4357));
+assertNull(res[1427].exec("** Failers", 4358));
+assertNull(res[1427].exec("x{a0} X\n   ", 4359));
+assertNull(res[1428].exec("\x09 x{a0}X\n\x0b\x0c\x0d\n", 4360));
+assertNull(res[1428].exec("\x09 x{a0}\n\x0b\x0c\x0d\n", 4361));
+assertNull(res[1428].exec("\x09 x{a0}\n\x0b\x0c", 4362));
+assertNull(res[1428].exec("** Failers ", 4363));
+assertNull(res[1428].exec("\x09 x{a0}\n\x0b", 4364));
+assertNull(res[1428].exec(" ", 4365));
+assertNull(res[1429].exec("x{3001}x{3000}x{2030}x{2028}", 4366));
+assertNull(res[1429].exec("Xx{180e}Xx{85}", 4367));
+assertNull(res[1429].exec("** Failers", 4368));
+assertNull(res[1429].exec("x{2009} X\n   ", 4369));
+assertNull(res[1430].exec("x{1680}x{180e}x{2007}Xx{2028}x{2029}\x0c\x0d\n", 4370));
+assertNull(res[1430].exec("\x09x{205f}x{a0}\nx{2029}\x0cx{2028}\n", 4371));
+assertNull(res[1430].exec("\x09 x{202f}\n\x0b\x0c", 4372));
+assertNull(res[1430].exec("** Failers ", 4373));
+assertNull(res[1430].exec("\x09x{200a}x{a0}x{2028}\x0b", 4374));
+assertNull(res[1430].exec(" ", 4375));
+assertNull(res[1431].exec("a\x0db", 4376));
+assertNull(res[1431].exec("a\nb", 4377));
+assertNull(res[1431].exec("a\x0d\nb", 4378));
+assertNull(res[1431].exec("** Failers", 4379));
+assertNull(res[1431].exec("ax{85}b", 4380));
+assertNull(res[1431].exec("a\x0bb     ", 4381));
+assertNull(res[1432].exec("a\x0db", 4382));
+assertNull(res[1432].exec("a\nb", 4383));
+assertNull(res[1432].exec("a\x0d\nb", 4384));
+assertNull(res[1432].exec("ax{85}b", 4385));
+assertNull(res[1432].exec("a\x0bb     ", 4386));
+assertNull(res[1432].exec("** Failers ", 4387));
+assertNull(res[1432].exec("ax{85}b<bsr_anycrlf>", 4388));
+assertNull(res[1432].exec("a\x0bb<bsr_anycrlf>", 4389));
+assertNull(res[1433].exec("a\x0db", 4390));
+assertNull(res[1433].exec("a\nb", 4391));
+assertNull(res[1433].exec("a\x0d\nb", 4392));
+assertNull(res[1433].exec("** Failers", 4393));
+assertNull(res[1433].exec("ax{85}b", 4394));
+assertNull(res[1433].exec("a\x0bb     ", 4395));
+assertNull(res[1434].exec("a\x0db", 4396));
+assertNull(res[1434].exec("a\nb", 4397));
+assertNull(res[1434].exec("a\x0d\nb", 4398));
+assertNull(res[1434].exec("ax{85}b", 4399));
+assertNull(res[1434].exec("a\x0bb     ", 4400));
+assertNull(res[1434].exec("** Failers ", 4401));
+assertNull(res[1434].exec("ax{85}b<bsr_anycrlf>", 4402));
+assertNull(res[1434].exec("a\x0bb<bsr_anycrlf>", 4403));
+assertToStringEquals("X", res[1435].exec("Ax{1ec5}ABCXYZ"), 4404);
+assertNull(res[1437].exec("AB", 4405));
+assertNull(res[1437].exec("*** Failers", 4406));
+assertNull(res[1437].exec("A0", 4407));
+assertNull(res[1437].exec("00   ", 4408));
+assertNull(res[1438].exec("AB", 4409));
+assertNull(res[1438].exec("Ax{300}BC ", 4410));
+assertNull(res[1438].exec("Ax{300}x{301}x{302}BC ", 4411));
+assertNull(res[1438].exec("*** Failers", 4412));
+assertNull(res[1438].exec("x{300}  ", 4413));
+assertNull(res[1439].exec("ABC", 4414));
+assertNull(res[1439].exec("Ax{300}Bx{300}x{301}C ", 4415));
+assertNull(res[1439].exec("Ax{300}x{301}x{302}BC ", 4416));
+assertNull(res[1439].exec("*** Failers", 4417));
+assertNull(res[1439].exec("x{300}  ", 4418));
+assertNull(res[1440].exec("abcd", 4419));
+assertNull(res[1440].exec("a ", 4420));
+assertNull(res[1440].exec("*** Failers ", 4421));
+assertNull(res[1441].exec("1234", 4422));
+assertNull(res[1441].exec("= ", 4423));
+assertNull(res[1441].exec("*** Failers ", 4424));
+assertNull(res[1441].exec("abcd ", 4425));
+assertNull(res[1442].exec("abcdAx{300}x{301}x{302}", 4426));
+assertNull(res[1442].exec("Ax{300}x{301}x{302}", 4427));
+assertNull(res[1442].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}", 4428));
+assertNull(res[1442].exec("a ", 4429));
+assertNull(res[1442].exec("*** Failers ", 4430));
+assertNull(res[1442].exec("x{300}x{301}x{302}", 4431));
+assertToStringEquals("abc", res[1443].exec("abc"), 4432);
+assertToStringEquals("abc", res[1443].exec("Ax{300}abc"), 4433);
+assertToStringEquals("abc", res[1443].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz"), 4434);
+assertToStringEquals("abc", res[1443].exec("x{300}abc  "), 4435);
+assertNull(res[1443].exec("*** Failers", 4436));
+assertToStringEquals("abc", res[1444].exec("abc"), 4437);
+assertNull(res[1444].exec("Ax{300}abc", 4438));
+assertNull(res[1444].exec("*** Failers", 4439));
+assertNull(res[1444].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz", 4440));
+assertNull(res[1444].exec("x{300}abc  ", 4441));
+assertToStringEquals("abc", res[1445].exec("abc"), 4442);
+assertToStringEquals("abc", res[1445].exec("Ax{300}abc"), 4443);
+assertToStringEquals("abc", res[1445].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz"), 4444);
+assertToStringEquals("abc", res[1445].exec("x{300}abc  "), 4445);
+assertNull(res[1445].exec("*** Failers", 4446));
+assertToStringEquals("abc", res[1446].exec("abc"), 4447);
+assertNull(res[1446].exec("Ax{300}abc", 4448));
+assertNull(res[1446].exec("Ax{300}x{301}x{302}Ax{300}Ax{300}Ax{300}abcxyz", 4449));
+assertNull(res[1446].exec("*** Failers", 4450));
+assertNull(res[1446].exec("x{300}abc  ", 4451));
+assertNull(res[1447].exec("A=b", 4452));
+assertNull(res[1447].exec("=c ", 4453));
+assertNull(res[1447].exec("*** Failers", 4454));
+assertNull(res[1447].exec("1=2 ", 4455));
+assertNull(res[1447].exec("AAAA=b  ", 4456));
+assertNull(res[1448].exec("AAAA=b", 4457));
+assertNull(res[1448].exec("=c ", 4458));
+assertNull(res[1448].exec("*** Failers", 4459));
+assertNull(res[1448].exec("1=2  ", 4460));
+assertNull(res[1449].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}X", 4461));
+assertNull(res[1449].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}Ax{300}x{301}x{302}X ", 4462));
+assertNull(res[1449].exec("*** Failers", 4463));
+assertNull(res[1449].exec("X", 4464));
+assertNull(res[1449].exec("Ax{300}x{301}x{302}X", 4465));
+assertNull(res[1449].exec("Ax{300}x{301}x{302}Ax{300}x{301}x{302}Ax{300}x{301}x{302}Ax{300}x{301}x{302}X", 4466));
+assertNull(res[1450].exec("x{c0}x{30f}x{660}x{66c}x{f01}x{1680}<", 4467));
+assertNull(res[1450].exec("\npx{300}9!$ < ", 4468));
+assertNull(res[1450].exec("** Failers ", 4469));
+assertNull(res[1450].exec("apx{300}9!$ < ", 4470));
+assertNull(res[1451].exec("X", 4471));
+assertNull(res[1451].exec("** Failers ", 4472));
+assertNull(res[1451].exec("", 4473));
+assertNull(res[1452].exec("9", 4474));
+assertNull(res[1452].exec("** Failers ", 4475));
+assertNull(res[1452].exec("x{c0}", 4476));
+assertNull(res[1453].exec("X", 4477));
+assertNull(res[1453].exec("** Failers ", 4478));
+assertNull(res[1453].exec("x{30f}", 4479));
+assertNull(res[1454].exec("X", 4480));
+assertNull(res[1454].exec("** Failers ", 4481));
+assertNull(res[1454].exec("x{660}", 4482));
+assertNull(res[1455].exec("X", 4483));
+assertNull(res[1455].exec("** Failers ", 4484));
+assertNull(res[1455].exec("x{66c}", 4485));
+assertNull(res[1456].exec("X", 4486));
+assertNull(res[1456].exec("** Failers ", 4487));
+assertNull(res[1456].exec("x{f01}", 4488));
+assertNull(res[1457].exec("X", 4489));
+assertNull(res[1457].exec("** Failers ", 4490));
+assertNull(res[1457].exec("x{1680}", 4491));
+assertNull(res[1458].exec("x{017}", 4492));
+assertNull(res[1458].exec("x{09f} ", 4493));
+assertNull(res[1458].exec("** Failers", 4494));
+assertNull(res[1458].exec("x{0600} ", 4495));
+assertNull(res[1459].exec("x{601}", 4496));
+assertNull(res[1459].exec("** Failers", 4497));
+assertNull(res[1459].exec("x{09f} ", 4498));
+assertNull(res[1460].exec("** Failers", 4499));
+assertNull(res[1460].exec("x{09f} ", 4500));
+assertNull(res[1461].exec("x{f8ff}", 4501));
+assertNull(res[1461].exec("** Failers", 4502));
+assertNull(res[1461].exec("x{09f} ", 4503));
+assertNull(res[1462].exec("?x{dfff}", 4504));
+assertNull(res[1462].exec("** Failers", 4505));
+assertNull(res[1462].exec("x{09f} ", 4506));
+assertNull(res[1463].exec("a", 4507));
+assertNull(res[1463].exec("** Failers ", 4508));
+assertNull(res[1463].exec("Z", 4509));
+assertNull(res[1463].exec("x{e000}  ", 4510));
+assertNull(res[1464].exec("x{2b0}", 4511));
+assertNull(res[1464].exec("** Failers", 4512));
+assertNull(res[1464].exec("a ", 4513));
+assertNull(res[1465].exec("x{1bb}", 4514));
+assertNull(res[1465].exec("** Failers", 4515));
+assertNull(res[1465].exec("a ", 4516));
+assertNull(res[1465].exec("x{2b0}", 4517));
+assertNull(res[1466].exec("x{1c5}", 4518));
+assertNull(res[1466].exec("** Failers", 4519));
+assertNull(res[1466].exec("a ", 4520));
+assertNull(res[1466].exec("x{2b0}", 4521));
+assertNull(res[1467].exec("A", 4522));
+assertNull(res[1467].exec("** Failers", 4523));
+assertNull(res[1467].exec("x{2b0}", 4524));
+assertNull(res[1468].exec("x{903}", 4525));
+assertNull(res[1468].exec("** Failers", 4526));
+assertNull(res[1468].exec("X", 4527));
+assertNull(res[1468].exec("x{300}", 4528));
+assertNull(res[1468].exec("   ", 4529));
+assertNull(res[1469].exec("x{488}", 4530));
+assertNull(res[1469].exec("** Failers", 4531));
+assertNull(res[1469].exec("X", 4532));
+assertNull(res[1469].exec("x{903}", 4533));
+assertNull(res[1469].exec("x{300}", 4534));
+assertNull(res[1470].exec("x{300}", 4535));
+assertNull(res[1470].exec("** Failers", 4536));
+assertNull(res[1470].exec("X", 4537));
+assertNull(res[1470].exec("x{903}", 4538));
+assertNull(res[1470].exec("0123456789x{660}x{661}x{662}x{663}x{664}x{665}x{666}x{667}x{668}x{669}x{66a}", 4539));
+assertNull(res[1470].exec("x{6f0}x{6f1}x{6f2}x{6f3}x{6f4}x{6f5}x{6f6}x{6f7}x{6f8}x{6f9}x{6fa}", 4540));
+assertNull(res[1470].exec("x{966}x{967}x{968}x{969}x{96a}x{96b}x{96c}x{96d}x{96e}x{96f}x{970}", 4541));
+assertNull(res[1470].exec("** Failers", 4542));
+assertNull(res[1470].exec("X", 4543));
+assertNull(res[1471].exec("x{16ee}", 4544));
+assertNull(res[1471].exec("** Failers", 4545));
+assertNull(res[1471].exec("X", 4546));
+assertNull(res[1471].exec("x{966}", 4547));
+assertNull(res[1472].exec("x{b2}", 4548));
+assertNull(res[1472].exec("x{b3}", 4549));
+assertNull(res[1472].exec("** Failers", 4550));
+assertNull(res[1472].exec("X", 4551));
+assertNull(res[1472].exec("x{16ee}", 4552));
+assertNull(res[1473].exec("_", 4553));
+assertNull(res[1473].exec("x{203f}", 4554));
+assertNull(res[1473].exec("** Failers", 4555));
+assertNull(res[1473].exec("X", 4556));
+assertNull(res[1473].exec("-", 4557));
+assertNull(res[1473].exec("x{58a}", 4558));
+assertNull(res[1474].exec("-", 4559));
+assertNull(res[1474].exec("x{58a}", 4560));
+assertNull(res[1474].exec("** Failers", 4561));
+assertNull(res[1474].exec("X", 4562));
+assertNull(res[1474].exec("x{203f}", 4563));
+assertNull(res[1475].exec(")", 4564));
+assertNull(res[1475].exec("]", 4565));
+assertNull(res[1475].exec("}", 4566));
+assertNull(res[1475].exec("x{f3b}", 4567));
+assertNull(res[1475].exec("** Failers", 4568));
+assertNull(res[1475].exec("X", 4569));
+assertNull(res[1475].exec("x{203f}", 4570));
+assertNull(res[1475].exec("(", 4571));
+assertNull(res[1475].exec("[", 4572));
+assertNull(res[1475].exec("{", 4573));
+assertNull(res[1475].exec("x{f3c}", 4574));
+assertNull(res[1476].exec("x{bb}", 4575));
+assertNull(res[1476].exec("x{2019}", 4576));
+assertNull(res[1476].exec("** Failers", 4577));
+assertNull(res[1476].exec("X", 4578));
+assertNull(res[1476].exec("x{203f}", 4579));
+assertNull(res[1477].exec("x{ab}", 4580));
+assertNull(res[1477].exec("x{2018}", 4581));
+assertNull(res[1477].exec("** Failers", 4582));
+assertNull(res[1477].exec("X", 4583));
+assertNull(res[1477].exec("x{203f}", 4584));
+assertNull(res[1478].exec("!", 4585));
+assertNull(res[1478].exec("x{37e}", 4586));
+assertNull(res[1478].exec("** Failers", 4587));
+assertNull(res[1478].exec("X", 4588));
+assertNull(res[1478].exec("x{203f}", 4589));
+assertNull(res[1479].exec("(", 4590));
+assertNull(res[1479].exec("[", 4591));
+assertNull(res[1479].exec("{", 4592));
+assertNull(res[1479].exec("x{f3c}", 4593));
+assertNull(res[1479].exec("** Failers", 4594));
+assertNull(res[1479].exec("X", 4595));
+assertNull(res[1479].exec(")", 4596));
+assertNull(res[1479].exec("]", 4597));
+assertNull(res[1479].exec("}", 4598));
+assertNull(res[1479].exec("x{f3b}", 4599));
+assertNull(res[1479].exec("$x{a2}x{a3}x{a4}x{a5}x{a6}", 4600));
+assertNull(res[1479].exec("x{9f2}", 4601));
+assertNull(res[1479].exec("** Failers", 4602));
+assertNull(res[1479].exec("X", 4603));
+assertNull(res[1479].exec("x{2c2}", 4604));
+assertNull(res[1480].exec("x{2c2}", 4605));
+assertNull(res[1480].exec("** Failers", 4606));
+assertNull(res[1480].exec("X", 4607));
+assertNull(res[1480].exec("x{9f2}", 4608));
+assertNull(res[1480].exec("+<|~x{ac}x{2044}", 4609));
+assertNull(res[1480].exec("** Failers", 4610));
+assertNull(res[1480].exec("X", 4611));
+assertNull(res[1480].exec("x{9f2}", 4612));
+assertNull(res[1481].exec("x{a6}", 4613));
+assertNull(res[1481].exec("x{482} ", 4614));
+assertNull(res[1481].exec("** Failers", 4615));
+assertNull(res[1481].exec("X", 4616));
+assertNull(res[1481].exec("x{9f2}", 4617));
+assertNull(res[1482].exec("x{2028}", 4618));
+assertNull(res[1482].exec("** Failers", 4619));
+assertNull(res[1482].exec("X", 4620));
+assertNull(res[1482].exec("x{2029}", 4621));
+assertNull(res[1483].exec("x{2029}", 4622));
+assertNull(res[1483].exec("** Failers", 4623));
+assertNull(res[1483].exec("X", 4624));
+assertNull(res[1483].exec("x{2028}", 4625));
+assertNull(res[1484].exec("\\ \\", 4626));
+assertNull(res[1484].exec("x{a0}", 4627));
+assertNull(res[1484].exec("x{1680}", 4628));
+assertNull(res[1484].exec("x{180e}", 4629));
+assertNull(res[1484].exec("x{2000}", 4630));
+assertNull(res[1484].exec("x{2001}     ", 4631));
+assertNull(res[1484].exec("** Failers", 4632));
+assertNull(res[1484].exec("x{2028}", 4633));
+assertNull(res[1484].exec("x{200d} ", 4634));
+assertNull(res[1484].exec("  x{660}x{661}x{662}ABC", 4635));
+assertNull(res[1484].exec("  x{660}x{661}x{662}ABC", 4636));
+assertNull(res[1485].exec("  x{660}x{661}x{662}ABC", 4637));
+assertNull(res[1486].exec("  x{660}x{661}x{662}ABC", 4638));
+assertNull(res[1487].exec("  x{660}x{661}x{662}ABC", 4639));
+assertNull(res[1488].exec("  x{660}x{661}x{662}ABC", 4640));
+assertNull(res[1489].exec("  x{660}x{661}x{662}ABC", 4641));
+assertNull(res[1490].exec("  x{660}x{661}x{662}ABC", 4642));
+assertNull(res[1491].exec("  x{660}x{661}x{662}ABC", 4643));
+assertNull(res[1492].exec("  x{660}x{661}x{662}ABC", 4644));
+assertNull(res[1493].exec("  x{660}x{661}x{662}ABC", 4645));
+assertNull(res[1493].exec("  x{660}x{661}x{662}ABC", 4646));
+assertNull(res[1493].exec("  x{660}x{661}x{662}ABC", 4647));
+assertNull(res[1493].exec("  ** Failers", 4648));
+assertNull(res[1493].exec("  x{660}x{661}x{662}ABC", 4649));
+assertNull(res[1494].exec("A", 4650));
+assertNull(res[1494].exec("ax{10a0}B ", 4651));
+assertNull(res[1494].exec("** Failers ", 4652));
+assertNull(res[1494].exec("a", 4653));
+assertNull(res[1494].exec("x{1d00}  ", 4654));
+assertNull(res[1495].exec("1234", 4655));
+assertNull(res[1495].exec("** Failers", 4656));
+assertNull(res[1495].exec("ABC ", 4657));
+assertNull(res[1496].exec("1234", 4658));
+assertNull(res[1496].exec("** Failers", 4659));
+assertNull(res[1496].exec("ABC ", 4660));
+assertNull(res[1496].exec("A2XYZ", 4661));
+assertNull(res[1496].exec("123A5XYZPQR", 4662));
+assertNull(res[1496].exec("ABAx{660}XYZpqr", 4663));
+assertNull(res[1496].exec("** Failers", 4664));
+assertNull(res[1496].exec("AXYZ", 4665));
+assertNull(res[1496].exec("XYZ     ", 4666));
+assertNull(res[1496].exec("1XYZ", 4667));
+assertNull(res[1496].exec("AB=XYZ.. ", 4668));
+assertNull(res[1496].exec("XYZ ", 4669));
+assertNull(res[1496].exec("** Failers", 4670));
+assertNull(res[1496].exec("WXYZ ", 4671));
+assertNull(res[1497].exec("1234", 4672));
+assertNull(res[1497].exec("1234", 4673));
+assertNull(res[1497].exec("12-34", 4674));
+assertToStringEquals("{", res[1497].exec("12+x{661}-34  "), 4675);
+assertNull(res[1497].exec("** Failers", 4676));
+assertToStringEquals("d", res[1497].exec("abcd  "), 4677);
+assertToStringEquals("d", res[1498].exec("abcd"), 4678);
+assertNull(res[1498].exec("** Failers", 4679));
+assertNull(res[1498].exec("1234", 4680));
+assertNull(res[1499].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4681));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1499].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4682);
+assertToStringEquals(" ", res[1499].exec(" "), 4683);
+assertNull(res[1499].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4684));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1499].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4685);
+assertNull(res[1500].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4686));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1500].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4687);
+assertNull(res[1501].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4688));
+assertNull(res[1501].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 4689));
+assertNull(res[1502].exec("11111111111111111111111111111111111111111111111111111111111111111111111", 4690));
+assertToStringEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", res[1502].exec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 4691);
+assertNull(res[1503].exec("a", 4692));
+assertNull(res[1503].exec("A ", 4693));
+assertNull(res[1504].exec("a", 4694));
+assertNull(res[1504].exec("A ", 4695));
+assertNull(res[1505].exec("A", 4696));
+assertNull(res[1505].exec("aZ", 4697));
+assertNull(res[1505].exec("** Failers", 4698));
+assertNull(res[1505].exec("abc   ", 4699));
+assertNull(res[1506].exec("A", 4700));
+assertNull(res[1506].exec("aZ", 4701));
+assertNull(res[1506].exec("** Failers", 4702));
+assertNull(res[1506].exec("abc   ", 4703));
+assertNull(res[1507].exec("a", 4704));
+assertNull(res[1507].exec("Az", 4705));
+assertNull(res[1507].exec("** Failers", 4706));
+assertNull(res[1507].exec("ABC   ", 4707));
+assertNull(res[1508].exec("a", 4708));
+assertNull(res[1508].exec("Az", 4709));
+assertNull(res[1508].exec("** Failers", 4710));
+assertNull(res[1508].exec("ABC   ", 4711));
+assertNull(res[1508].exec("x{c0}", 4712));
+assertNull(res[1508].exec("x{e0} ", 4713));
+assertNull(res[1508].exec("x{c0}", 4714));
+assertNull(res[1508].exec("x{e0} ", 4715));
+assertNull(res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 4716));
+assertNull(res[1508].exec("** Failers", 4717));
+assertNull(res[1508].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 4718));
+assertNull(res[1508].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 4719));
+assertNull(res[1508].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 4720));
+assertNull(res[1508].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 4721));
+assertNull(res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 4722));
+assertNull(res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb0}", 4723));
+assertNull(res[1508].exec("ax{391}x{10427}x{ff3a}x{1fb0}   ", 4724));
+assertNull(res[1508].exec("Ax{3b1}x{10427}x{ff3a}x{1fb0}", 4725));
+assertNull(res[1508].exec("Ax{391}x{1044F}x{ff3a}x{1fb0}", 4726));
+assertNull(res[1508].exec("Ax{391}x{10427}x{ff5a}x{1fb0}", 4727));
+assertNull(res[1508].exec("Ax{391}x{10427}x{ff3a}x{1fb8}", 4728));
+assertNull(res[1508].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}", 4729));
+assertNull(res[1508].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 4730));
+assertNull(res[1508].exec("x{391}x{3b1}x{3b1}x{3b1}x{391}X", 4731));
+assertNull(res[1508].exec("x{391}", 4732));
+assertNull(res[1508].exec("x{ff3a}", 4733));
+assertNull(res[1508].exec("x{3b1}", 4734));
+assertNull(res[1508].exec("x{ff5a}   ", 4735));
+assertNull(res[1508].exec("x{c0}", 4736));
+assertNull(res[1508].exec("x{e0} ", 4737));
+assertNull(res[1508].exec("x{104}", 4738));
+assertNull(res[1508].exec("x{105}", 4739));
+assertNull(res[1508].exec("x{109}  ", 4740));
+assertNull(res[1508].exec("** Failers", 4741));
+assertNull(res[1508].exec("x{100}", 4742));
+assertNull(res[1508].exec("x{10a} ", 4743));
+assertNull(res[1508].exec("Z", 4744));
+assertNull(res[1508].exec("z", 4745));
+assertNull(res[1508].exec("x{39c}", 4746));
+assertNull(res[1508].exec("x{178}", 4747));
+assertNull(res[1508].exec("|", 4748));
+assertNull(res[1508].exec("x{80}", 4749));
+assertNull(res[1508].exec("x{ff}", 4750));
+assertNull(res[1508].exec("x{100}", 4751));
+assertNull(res[1508].exec("x{101} ", 4752));
+assertNull(res[1508].exec("** Failers", 4753));
+assertNull(res[1508].exec("x{102}", 4754));
+assertNull(res[1508].exec("Y", 4755));
+assertNull(res[1508].exec("y           ", 4756));
+assertNull(res[1509].exec("A", 4757));
+assertNull(res[1509].exec("Ax{300}BC ", 4758));
+assertNull(res[1509].exec("Ax{300}x{301}x{302}BC ", 4759));
+assertNull(res[1509].exec("*** Failers", 4760));
+assertNull(res[1509].exec("x{300}  ", 4761));
+assertToStringEquals("X", res[1510].exec("X123"), 4762);
+assertNull(res[1510].exec("*** Failers", 4763));
+assertNull(res[1510].exec("AXYZ", 4764));
+assertNull(res[1511].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 4765));
+assertNull(res[1511].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 4766));
+assertNull(res[1512].exec("Ax{300}x{301}x{302}BCAx{300}x{301} ", 4767));
+assertNull(res[1512].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C ", 4768));
+assertToStringEquals("A,,A", res[1513].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 4769);
+assertToStringEquals("A,,A", res[1513].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 4770);
+assertToStringEquals("A,,A", res[1514].exec("Ax{300}x{301}x{302}BCAx{300}x{301} "), 4771);
+assertToStringEquals("A,,A", res[1514].exec("Ax{300}x{301}x{302}BCAx{300}x{301}C "), 4772);
+assertNull(res[1515].exec("*** Failers", 4773));
+assertNull(res[1515].exec("Ax{300}x{301}x{302}", 4774));
+assertNull(res[1516].exec("Ax{300}x{301}Bx{300}X", 4775));
+assertNull(res[1516].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 4776));
+assertNull(res[1516].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 4777));
+assertNull(res[1516].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 4778));
+assertNull(res[1517].exec("Ax{300}x{301}Bx{300}X", 4779));
+assertNull(res[1517].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}", 4780));
+assertNull(res[1517].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}X", 4781));
+assertNull(res[1517].exec("Ax{300}x{301}Bx{300}Cx{300}x{301}DAx{300}X", 4782));
+assertNull(res[1518].exec("12X", 4783));
+assertNull(res[1518].exec("123X", 4784));
+assertNull(res[1518].exec("*** Failers", 4785));
+assertNull(res[1518].exec("X", 4786));
+assertNull(res[1518].exec("1X", 4787));
+assertNull(res[1518].exec("1234X     ", 4788));
+assertNull(res[1518].exec("x{100}   ", 4789));
+assertNull(res[1518].exec("x{101} ", 4790));
+assertNull(res[1518].exec("x{2e81}x{3007}x{2f804}x{31a0}", 4791));
+assertNull(res[1518].exec("** Failers", 4792));
+assertNull(res[1518].exec("x{2e7f}  ", 4793));
+assertNull(res[1518].exec("x{3105}", 4794));
+assertNull(res[1518].exec("** Failers", 4795));
+assertNull(res[1518].exec("x{30ff}  ", 4796));
+assertNull(res[1519].exec("x{06e9}", 4797));
+assertNull(res[1519].exec("x{060b}", 4798));
+assertNull(res[1519].exec("** Failers", 4799));
+assertNull(res[1519].exec("Xx{06e9}   ", 4800));
+assertNull(res[1520].exec("x{2f800}", 4801));
+assertNull(res[1520].exec("** Failers", 4802));
+assertNull(res[1520].exec("x{a014}", 4803));
+assertNull(res[1520].exec("x{a4c6}   ", 4804));
+assertNull(res[1521].exec("AXYZ", 4805));
+assertNull(res[1521].exec("x{1234}XYZ ", 4806));
+assertNull(res[1521].exec("** Failers", 4807));
+assertNull(res[1521].exec("X  ", 4808));
+assertNull(res[1522].exec("** Failers", 4809));
+assertNull(res[1522].exec("AX", 4810));
+assertNull(res[1523].exec("XYZ", 4811));
+assertNull(res[1523].exec("AXYZ", 4812));
+assertNull(res[1523].exec("x{1234}XYZ ", 4813));
+assertNull(res[1523].exec("** Failers", 4814));
+assertNull(res[1523].exec("ABXYZ   ", 4815));
+assertNull(res[1524].exec("XYZ", 4816));
+assertNull(res[1524].exec("** Failers", 4817));
+assertNull(res[1524].exec("AXYZ", 4818));
+assertNull(res[1524].exec("x{1234}XYZ ", 4819));
+assertNull(res[1524].exec("ABXYZ   ", 4820));
+assertNull(res[1524].exec("AXYZ", 4821));
+assertNull(res[1524].exec("x{1234}XYZ", 4822));
+assertNull(res[1524].exec("Ax{1234}XYZ", 4823));
+assertNull(res[1524].exec("** Failers", 4824));
+assertNull(res[1524].exec("XYZ", 4825));
+assertNull(res[1524].exec("** Failers", 4826));
+assertNull(res[1524].exec("AXYZ", 4827));
+assertNull(res[1524].exec("x{1234}XYZ", 4828));
+assertNull(res[1524].exec("Ax{1234}XYZ", 4829));
+assertNull(res[1524].exec("XYZ", 4830));
+assertNull(res[1525].exec("XYZ", 4831));
+assertNull(res[1525].exec("AXYZ", 4832));
+assertNull(res[1525].exec("x{1234}XYZ", 4833));
+assertNull(res[1525].exec("Ax{1234}XYZ", 4834));
+assertNull(res[1525].exec("** Failers", 4835));
+assertNull(res[1526].exec("XYZ", 4836));
+assertNull(res[1526].exec("** Failers", 4837));
+assertNull(res[1526].exec("AXYZ", 4838));
+assertNull(res[1526].exec("x{1234}XYZ", 4839));
+assertNull(res[1526].exec("Ax{1234}XYZ", 4840));
+assertToStringEquals("AX", res[1527].exec("AXYZ"), 4841);
+assertNull(res[1527].exec("x{1234}XYZ ", 4842));
+assertNull(res[1527].exec("** Failers", 4843));
+assertNull(res[1527].exec("X  ", 4844));
+assertNull(res[1528].exec("** Failers", 4845));
+assertToStringEquals("AX", res[1528].exec("AX"), 4846);
+assertToStringEquals("X", res[1529].exec("XYZ"), 4847);
+assertToStringEquals("AX", res[1529].exec("AXYZ"), 4848);
+assertNull(res[1529].exec("x{1234}XYZ ", 4849));
+assertNull(res[1529].exec("** Failers", 4850));
+assertNull(res[1529].exec("ABXYZ   ", 4851));
+assertToStringEquals("X", res[1530].exec("XYZ"), 4852);
+assertNull(res[1530].exec("** Failers", 4853));
+assertToStringEquals("AX", res[1530].exec("AXYZ"), 4854);
+assertNull(res[1530].exec("x{1234}XYZ ", 4855));
+assertNull(res[1530].exec("ABXYZ   ", 4856));
+assertToStringEquals("AX", res[1531].exec("AXYZ"), 4857);
+assertNull(res[1531].exec("x{1234}XYZ", 4858));
+assertNull(res[1531].exec("Ax{1234}XYZ", 4859));
+assertNull(res[1531].exec("** Failers", 4860));
+assertNull(res[1531].exec("XYZ", 4861));
+assertNull(res[1532].exec("** Failers", 4862));
+assertToStringEquals("AX", res[1532].exec("AXYZ"), 4863);
+assertNull(res[1532].exec("x{1234}XYZ", 4864));
+assertNull(res[1532].exec("Ax{1234}XYZ", 4865));
+assertNull(res[1532].exec("XYZ", 4866));
+assertToStringEquals("X", res[1533].exec("XYZ"), 4867);
+assertToStringEquals("AX", res[1533].exec("AXYZ"), 4868);
+assertNull(res[1533].exec("x{1234}XYZ", 4869));
+assertNull(res[1533].exec("Ax{1234}XYZ", 4870));
+assertNull(res[1533].exec("** Failers", 4871));
+assertToStringEquals("X", res[1534].exec("XYZ"), 4872);
+assertNull(res[1534].exec("** Failers", 4873));
+assertToStringEquals("AX", res[1534].exec("AXYZ"), 4874);
+assertNull(res[1534].exec("x{1234}XYZ", 4875));
+assertNull(res[1534].exec("Ax{1234}XYZ", 4876));
+assertNull(res[1535].exec("abcdefgh", 4877));
+assertNull(res[1535].exec("x{1234}\n\x0dx{3456}xyz ", 4878));
+assertNull(res[1536].exec("abcdefgh", 4879));
+assertNull(res[1536].exec("x{1234}\n\x0dx{3456}xyz ", 4880));
+assertNull(res[1537].exec("** Failers", 4881));
+assertNull(res[1537].exec("abcdefgh", 4882));
+assertNull(res[1537].exec("x{1234}\n\x0dx{3456}xyz ", 4883));
+assertNull(res[1538].exec(" AXY", 4884));
+assertNull(res[1538].exec(" aXY", 4885));
+assertNull(res[1538].exec(" x{1c5}XY", 4886));
+assertNull(res[1538].exec(" ** Failers", 4887));
+assertNull(res[1538].exec(" x{1bb}XY", 4888));
+assertNull(res[1538].exec(" x{2b0}XY", 4889));
+assertNull(res[1538].exec(" !XY      ", 4890));
+assertNull(res[1539].exec(" AXY", 4891));
+assertNull(res[1539].exec(" aXY", 4892));
+assertNull(res[1539].exec(" x{1c5}XY", 4893));
+assertNull(res[1539].exec(" ** Failers", 4894));
+assertNull(res[1539].exec(" x{1bb}XY", 4895));
+assertNull(res[1539].exec(" x{2b0}XY", 4896));
+assertNull(res[1539].exec(" !XY      ", 4897));
+assertNull(res[1539].exec(" AXY", 4898));
+assertNull(res[1539].exec(" aXY", 4899));
+assertNull(res[1539].exec(" AbcdeXyz ", 4900));
+assertNull(res[1539].exec(" x{1c5}AbXY", 4901));
+assertNull(res[1539].exec(" abcDEXypqreXlmn ", 4902));
+assertNull(res[1539].exec(" ** Failers", 4903));
+assertNull(res[1539].exec(" x{1bb}XY", 4904));
+assertNull(res[1539].exec(" x{2b0}XY", 4905));
+assertNull(res[1539].exec(" !XY      ", 4906));
+assertNull(res[1540].exec(" AXY", 4907));
+assertNull(res[1540].exec(" aXY", 4908));
+assertNull(res[1540].exec(" AbcdeXyz ", 4909));
+assertNull(res[1540].exec(" x{1c5}AbXY", 4910));
+assertNull(res[1540].exec(" abcDEXypqreXlmn ", 4911));
+assertNull(res[1540].exec(" ** Failers", 4912));
+assertNull(res[1540].exec(" x{1bb}XY", 4913));
+assertNull(res[1540].exec(" x{2b0}XY", 4914));
+assertNull(res[1540].exec(" !XY      ", 4915));
+assertNull(res[1540].exec(" AXY", 4916));
+assertNull(res[1540].exec(" aXY", 4917));
+assertNull(res[1540].exec(" AbcdeXyz ", 4918));
+assertNull(res[1540].exec(" x{1c5}AbXY", 4919));
+assertNull(res[1540].exec(" abcDEXypqreXlmn ", 4920));
+assertNull(res[1540].exec(" ** Failers", 4921));
+assertNull(res[1540].exec(" x{1bb}XY", 4922));
+assertNull(res[1540].exec(" x{2b0}XY", 4923));
+assertNull(res[1540].exec(" !XY      ", 4924));
+assertNull(res[1541].exec(" AXY", 4925));
+assertNull(res[1541].exec(" aXY", 4926));
+assertNull(res[1541].exec(" AbcdeXyz ", 4927));
+assertNull(res[1541].exec(" x{1c5}AbXY", 4928));
+assertNull(res[1541].exec(" abcDEXypqreXlmn ", 4929));
+assertNull(res[1541].exec(" ** Failers", 4930));
+assertNull(res[1541].exec(" x{1bb}XY", 4931));
+assertNull(res[1541].exec(" x{2b0}XY", 4932));
+assertNull(res[1541].exec(" !XY      ", 4933));
+assertNull(res[1542].exec(" !XY", 4934));
+assertNull(res[1542].exec(" x{1bb}XY", 4935));
+assertNull(res[1542].exec(" x{2b0}XY", 4936));
+assertNull(res[1542].exec(" ** Failers", 4937));
+assertNull(res[1542].exec(" x{1c5}XY", 4938));
+assertNull(res[1542].exec(" AXY      ", 4939));
+assertNull(res[1543].exec(" !XY", 4940));
+assertNull(res[1543].exec(" x{1bb}XY", 4941));
+assertNull(res[1543].exec(" x{2b0}XY", 4942));
+assertNull(res[1543].exec(" ** Failers", 4943));
+assertNull(res[1543].exec(" x{1c5}XY", 4944));
+assertNull(res[1543].exec(" AXY      ", 4945));
+assertNull(res[1543].exec("x{c0}x{e0}x{116}x{117}", 4946));
+assertNull(res[1543].exec("x{c0}x{e0}x{116}x{117}", 4947));
+assertNull(res[1545].exec("123abcdefg", 4948));
+assertNull(res[1545].exec("123abc\xc4\xc5zz", 4949));
+assertNull(res[1546].exec("x{102A4}x{AA52}x{A91D}x{1C46}x{10283}x{1092E}x{1C6B}x{A93B}x{A8BF}x{1BA0}x{A50A}====", 4950));
+assertNull(res[1546].exec("x{a77d}x{1d79}", 4951));
+assertNull(res[1546].exec("x{1d79}x{a77d} ", 4952));
+assertNull(res[1546].exec("x{a77d}x{1d79}", 4953));
+assertNull(res[1546].exec("** Failers ", 4954));
+assertNull(res[1546].exec("x{1d79}x{a77d} ", 4955));
 assertThrows("var re = //;", 4956);
diff --git a/test/mjsunit/this-property-assignment.js b/test/mjsunit/this-property-assignment.js
index c681999..54c6537 100644
--- a/test/mjsunit/this-property-assignment.js
+++ b/test/mjsunit/this-property-assignment.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Tests the handling of multiple assignments to the same property in a 
+// Tests the handling of multiple assignments to the same property in a
 // constructor that only has simple this property assignments.
 
 function Node() {
diff --git a/test/mjsunit/tools/codemap.js b/test/mjsunit/tools/codemap.js
index 81fb810..33d7e4e 100644
--- a/test/mjsunit/tools/codemap.js
+++ b/test/mjsunit/tools/codemap.js
@@ -157,6 +157,7 @@
   codeMap.addStaticCode(0x15500, newCodeEntry(0x5000, 'lib2'));
   codeMap.addStaticCode(0x155500, newCodeEntry(0x10000, 'lib3'));
   var allStatics = codeMap.getAllStaticEntries();
+  allStatics = allStatics.map(String);
   allStatics.sort();
   assertEquals(['lib1: 3000', 'lib2: 5000', 'lib3: 10000'], allStatics);
 })();
@@ -168,13 +169,15 @@
   codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
   codeMap.addCode(0x1900, newCodeEntry(0x50, 'code3'));
   var allDynamics = codeMap.getAllDynamicEntries();
+  allDynamics = allDynamics.map(String);
   allDynamics.sort();
   assertEquals(['code1: 200', 'code2: 100', 'code3: 50'], allDynamics);
   codeMap.deleteCode(0x1700);
   var allDynamics2 = codeMap.getAllDynamicEntries();
+  allDynamics2 = allDynamics2.map(String);
   allDynamics2.sort();
   assertEquals(['code1: 200', 'code3: 50'], allDynamics2);
   codeMap.deleteCode(0x1500);
   var allDynamics3 = codeMap.getAllDynamicEntries();
-  assertEquals(['code3: 50'], allDynamics3);
+  assertEquals(['code3: 50'], allDynamics3.map(String));
 })();
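The .map(String) calls added above suggest that getAllStaticEntries() and getAllDynamicEntries() now return code-entry objects rather than preformatted strings, so the test normalizes them to the 'name: size' form before sorting and comparing. A minimal stand-alone sketch of that normalization, using a hypothetical FakeEntry type that is not part of the CodeMap API:

  // Stand-in for a code map entry; only toString() matters here.
  function FakeEntry(name, size) {
    this.name = name;
    this.size = size;
  }
  FakeEntry.prototype.toString = function() {
    return this.name + ': ' + this.size;
  };

  var entries = [new FakeEntry('lib2', 5000), new FakeEntry('lib1', 3000)];
  var names = entries.map(String);  // ['lib2: 5000', 'lib1: 3000']
  names.sort();                     // now ['lib1: 3000', 'lib2: 5000']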
diff --git a/test/mjsunit/tools/profile_view.js b/test/mjsunit/tools/profile_view.js
index 7f60119..d62205b 100644
--- a/test/mjsunit/tools/profile_view.js
+++ b/test/mjsunit/tools/profile_view.js
@@ -26,7 +26,8 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Load source code files from <project root>/tools.
-// Files: tools/consarray.js tools/profile.js tools/profile_view.js
+// Files: tools/codemap.js tools/consarray.js tools/profile.js
+// Files: tools/profile_view.js
 
 
 function createNode(name, time, opt_parent) {
diff --git a/test/mjsunit/tools/splaytree.js b/test/mjsunit/tools/splaytree.js
index 5e18796..d582dc9 100644
--- a/test/mjsunit/tools/splaytree.js
+++ b/test/mjsunit/tools/splaytree.js
@@ -81,13 +81,13 @@
 (function testSplay() {
   var tree = new SplayTree();
   tree.root_ = createSampleTree();
-  assertArrayEquals(['50', '30', '60', '10', '40', '90', '20', '70', '100', '15', '80'],
+  assertArrayEquals([50, 30, 60, 10, 40, 90, 20, 70, 100, 15, 80],
                     tree.exportValues());
   tree.splay_(50);
-  assertArrayEquals(['50', '30', '60', '10', '40', '90', '20', '70', '100', '15', '80'],
+  assertArrayEquals([50, 30, 60, 10, 40, 90, 20, 70, 100, 15, 80],
                     tree.exportValues());
   tree.splay_(80);
-  assertArrayEquals(['80', '60', '90', '50', '70', '100', '30', '10', '40', '20', '15'],
+  assertArrayEquals([80, 60, 90, 50, 70, 100, 30, 10, 40, 20, 15],
                     tree.exportValues());
 })();
 
diff --git a/test/mjsunit/try.js b/test/mjsunit/try.js
index 794860a..86afdf7 100644
--- a/test/mjsunit/try.js
+++ b/test/mjsunit/try.js
@@ -250,7 +250,7 @@
     } catch (o) {
       x--;
     }
-  } 
+  }
   return x;
 }
 
@@ -274,7 +274,7 @@
       x--;
     }
     x--; // should not happen
-  } 
+  }
   return x;
 }
 
diff --git a/test/mjsunit/typeof.js b/test/mjsunit/typeof.js
index 39dec72..8aa0ab1 100644
--- a/test/mjsunit/typeof.js
+++ b/test/mjsunit/typeof.js
@@ -25,14 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// The type of a regular expression should be 'function', including in
+// The type of a regular expression should be 'object', including in
 // the context of string equality comparisons.
 
 var r = new RegExp;
-assertEquals('function', typeof r);
-assertTrue(typeof r == 'function');
+assertEquals('object', typeof r);
+assertTrue(typeof r == 'object');
+assertFalse(typeof r == 'function');
 
 function test(x, y) { return x == y; }
-assertFalse(test('object', typeof r));
-
-assertFalse(typeof r == 'object');
+assertTrue(test('object', typeof r));
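For reference, the rewritten expectations above encode the rule that a regular expression is an ordinary object regardless of how it is created. A short sketch of the same behaviour, assuming the mjsunit assert helpers used throughout these tests are loaded:

  var patterns = [new RegExp('a+'), /b*/, RegExp('c?')];
  for (var i = 0; i < patterns.length; i++) {
    // Both literal and constructed regular expressions report 'object'.
    assertEquals('object', typeof patterns[i]);
    assertFalse(typeof patterns[i] == 'function');
  }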
diff --git a/test/mjsunit/unbox-double-arrays.js b/test/mjsunit/unbox-double-arrays.js
new file mode 100644
index 0000000..feecaec
--- /dev/null
+++ b/test/mjsunit/unbox-double-arrays.js
@@ -0,0 +1,527 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test dictionary -> double elements -> dictionary elements round trip
+
+// Flags: --allow-natives-syntax --unbox-double-arrays --expose-gc
+var large_array_size = 100000;
+var approx_dict_to_elements_threshold = 70000;
+
+var name = 0;
+
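+// Note: every 50th value produced below is i + 0.5, so the generated test
+// data always contains non-integral doubles (presumably to keep the arrays
+// from being representable as pure smis).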
+function expected_array_value(i) {
+  if ((i % 50) != 0) {
+    return i;
+  } else {
+    return i + 0.5;
+  }
+}
+
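+// The single write near the end of the 100000-element array presumably leaves
+// it in sparse (dictionary) mode; densely filling the first ~70000 slots
+// (approx_dict_to_elements_threshold) then crosses the heuristic that converts
+// it back to a flat backing store, and the assert at the end checks that the
+// result is fast double elements.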
+function force_to_fast_double_array(a) {
+  a[large_array_size - 2] = 1;
+  for (var i = 0; i < approx_dict_to_elements_threshold; ++i) {
+    a[i] = expected_array_value(i);
+  }
+  assertTrue(%HasFastDoubleElements(a));
+}
+
+function make_object_like_array(size) {
+  var obj = new Object();
+  obj.length = size;
+  return obj;
+}
+
+function testOneArrayType(allocator) {
+  var large_array = new allocator(large_array_size);
+  force_to_fast_double_array(large_array);
+  var six = 6;
+
+  for (var i = 0; i < approx_dict_to_elements_threshold; i += 501) {
+    assertEquals(expected_array_value(i), large_array[i]);
+  }
+
+  // This function has a constant and won't get inlined.
+  function computed_6() {
+    return six;
+  }
+
+  // Multiple versions of the test function make sure that IC/Crankshaft state
+  // doesn't get reused.
+  function test_various_loads(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
+    assertEquals(undefined, a[large_array_size-1]);
+    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_loads2(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
+    assertEquals(undefined, a[large_array_size-1]);
+    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_loads3(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
+    assertEquals(undefined, a[large_array_size-1]);
+    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_loads4(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
+    assertEquals(undefined, a[large_array_size-1]);
+    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_loads5(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
+    assertEquals(undefined, a[large_array_size-1]);
+    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_loads6(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
+    assertEquals(undefined, a[large_array_size-1]);
+    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_stores(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    a[5] = value_5;
+    a[computed_6()] = value_6;
+    a[7] = value_7;
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  // Test double and integer values
+  test_various_loads(large_array,
+                     expected_array_value(5),
+                     expected_array_value(6),
+                     expected_array_value(7));
+  test_various_loads(large_array,
+                     expected_array_value(5),
+                     expected_array_value(6),
+                     expected_array_value(7));
+  test_various_loads(large_array,
+                     expected_array_value(5),
+                     expected_array_value(6),
+                     expected_array_value(7));
+  %OptimizeFunctionOnNextCall(test_various_loads);
+  test_various_loads(large_array,
+                     expected_array_value(5),
+                     expected_array_value(6),
+                     expected_array_value(7));
+
+  // Test NaN values
+  test_various_stores(large_array, NaN, -NaN, expected_array_value(7));
+
+  test_various_loads2(large_array,
+                      NaN,
+                      -NaN,
+                      expected_array_value(7));
+  test_various_loads2(large_array,
+                      NaN,
+                      -NaN,
+                      expected_array_value(7));
+  test_various_loads2(large_array,
+                      NaN,
+                      -NaN,
+                      expected_array_value(7));
+  %OptimizeFunctionOnNextCall(test_various_loads2);
+  test_various_loads2(large_array,
+                      NaN,
+                      -NaN,
+                      expected_array_value(7));
+
+  // Test Infinity values
+  test_various_stores(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+
+  test_various_loads3(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+  test_various_loads3(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+  test_various_loads3(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+  %OptimizeFunctionOnNextCall(test_various_loads3);
+  test_various_loads3(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+
+  // Test the hole for the default runtime implementation.
+  delete large_array[5];
+  delete large_array[6];
+  test_various_loads4(large_array,
+                      undefined,
+                      undefined,
+                      expected_array_value(7));
+
+  // Test the keyed load IC implementation when the value is the hole.
+  test_various_stores(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+  test_various_loads5(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+  test_various_loads5(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+  delete large_array[5];
+  delete large_array[6];
+  test_various_loads5(large_array,
+                      undefined,
+                      undefined,
+                      expected_array_value(7));
+  test_various_loads5(large_array,
+                      undefined,
+                      undefined,
+                      expected_array_value(7));
+
+  // Make sure Crankshaft code handles the hole correctly (bailout)
+  test_various_stores(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+  test_various_loads6(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+  test_various_loads6(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+  %OptimizeFunctionOnNextCall(test_various_loads6);
+  test_various_loads6(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  delete large_array[5];
+  delete large_array[6];
+  test_various_loads6(large_array,
+                      undefined,
+                      undefined,
+                      expected_array_value(7));
+
+  // Test stores for non-NaN.
+  %OptimizeFunctionOnNextCall(test_various_stores);
+  test_various_stores(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  test_various_stores(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  test_various_loads6(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  // Test NaN behavior for stores.
+  test_various_stores(large_array,
+                      NaN,
+                      -NaN,
+                      expected_array_value(7));
+
+  test_various_stores(large_array,
+                      NaN,
+                      -NaN,
+                      expected_array_value(7));
+
+  test_various_loads6(large_array,
+                      NaN,
+                      -NaN,
+                      expected_array_value(7));
+
+  // Test Infinity behavior for stores.
+  test_various_stores(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+
+  test_various_stores(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+
+  test_various_loads6(large_array,
+                      Infinity,
+                      -Infinity,
+                      expected_array_value(7));
+
+  assertTrue(%GetOptimizationStatus(test_various_stores) != 2);
+
+  // Make sure that we haven't converted from fast double.
+  assertTrue(%HasFastDoubleElements(large_array));
+}
+
+testOneArrayType(make_object_like_array);
+testOneArrayType(Array);
+
+var large_array = new Array(large_array_size);
+force_to_fast_double_array(large_array);
+assertTrue(%HasFastDoubleElements(large_array));
+
+// Cause the array to grow beyond its JSArray length. This will double the
+// size of the capacity and force the array into the "slow" dictionary case.
+large_array[5] = Infinity;
+large_array[large_array_size+10001] = 50;
+assertTrue(%HasDictionaryElements(large_array));
+assertEquals(50, large_array[large_array_size+10001]);
+assertEquals(large_array_size+10002, large_array.length);
+assertEquals(Infinity, large_array[5]);
+assertEquals(undefined, large_array[large_array_size-1]);
+assertEquals(undefined, large_array[-1]);
+assertEquals(large_array_size+10002, large_array.length);
+
+// Test dictionary -> double elements -> fast elements.
+var large_array2 = new Array(large_array_size);
+force_to_fast_double_array(large_array2);
+delete large_array2[5];
+
+// Convert back to fast elements and make sure the contents of the array are
+// unchanged.
+large_array2[25] = new Object();
+assertTrue(%HasFastElements(large_array2));
+for (var i = 0; i < approx_dict_to_elements_threshold; i += 500) {
+  if (i != 25 && i != 5) {
+    assertEquals(expected_array_value(i), large_array2[i]);
+  }
+}
+assertEquals(undefined, large_array2[5]);
+assertEquals(undefined, large_array2[large_array_size-1]);
+assertEquals(undefined, large_array2[-1]);
+assertEquals(large_array_size, large_array2.length);
+
+// Make sure it's possible to change the array's length and that the array is
+// still intact after the resize.
+var large_array3 = new Array(large_array_size);
+force_to_fast_double_array(large_array3);
+large_array3.length = 60000;
+assertEquals(60000, large_array3.length);
+assertEquals(undefined, large_array3[60000]);
+assertTrue(%HasFastDoubleElements(large_array3));
+assertEquals(expected_array_value(5), large_array3[5]);
+assertEquals(expected_array_value(6), large_array3[6]);
+assertEquals(expected_array_value(7), large_array3[7]);
+assertEquals(expected_array_value(large_array3.length-1),
+             large_array3[large_array3.length-1]);
+assertEquals(undefined, large_array3[large_array_size-1]);
+assertEquals(undefined, large_array3[-1]);
+gc();
+
+for (var i = 0; i < large_array3.length; i += 501) {
+  assertEquals(expected_array_value(i), large_array3[i]);
+}
+
+large_array3.length = 25;
+assertEquals(25, large_array3.length);
+assertTrue(%HasFastDoubleElements(large_array3));
+assertEquals(undefined, large_array3[25]);
+assertEquals(expected_array_value(5), large_array3[5]);
+assertEquals(expected_array_value(6), large_array3[6]);
+assertEquals(expected_array_value(7), large_array3[7]);
+assertEquals(expected_array_value(large_array3.length-1),
+             large_array3[large_array3.length-1]);
+assertEquals(undefined, large_array3[large_array_size-1]);
+assertEquals(undefined, large_array3[-1]);
+gc();
+
+for (var i = 0; i < large_array3.length; ++i) {
+  assertEquals(expected_array_value(i), large_array3[i]);
+}
+
+large_array3.length = 100;
+assertEquals(100, large_array3.length);
+large_array3[95] = 95;
+assertTrue(%HasFastDoubleElements(large_array3));
+assertEquals(undefined, large_array3[100]);
+assertEquals(95, large_array3[95]);
+assertEquals(expected_array_value(5), large_array3[5]);
+assertEquals(expected_array_value(6), large_array3[6]);
+assertEquals(expected_array_value(7), large_array3[7]);
+assertEquals(undefined, large_array3[large_array3.length-1]);
+assertEquals(undefined, large_array3[large_array_size-1]);
+assertEquals(undefined, large_array3[-1]);
+gc();
+
+// Test apply on arrays backed by double elements.
+function called_by_apply(arg0, arg1, arg2, arg3, arg4, arg5, arg6) {
+  assertEquals(expected_array_value(0), arg0);
+  assertEquals(NaN, arg1);
+  assertEquals(-NaN, arg2);
+  assertEquals(Infinity, arg3);
+  assertEquals(-Infinity, arg4);
+  assertEquals(expected_array_value(5), arg5);
+}
+
+large_array3[1] = NaN;
+large_array3[2] = -NaN;
+large_array3[3] = Infinity;
+large_array3[4] = -Infinity;
+
+function call_apply() {
+  assertTrue(%HasFastDoubleElements(large_array3));
+  called_by_apply.apply({}, large_array3);
+}
+
+call_apply();
+call_apply();
+call_apply();
+%OptimizeFunctionOnNextCall(call_apply);
+call_apply();
+call_apply();
+call_apply();
+
+function test_for_in() {
+  // Due to the previous tests, keys 0 through 24 and 95 should be present.
+  var next_expected = 0;
+  assertTrue(%HasFastDoubleElements(large_array3));
+  for (var x in large_array3) {
+    assertTrue(next_expected++ == x);
+    if (next_expected == 25) {
+      next_expected = 95;
+    }
+  }
+  assertTrue(next_expected == 96);
+}
+
+test_for_in();
+test_for_in();
+test_for_in();
+%OptimizeFunctionOnNextCall(test_for_in);
+test_for_in();
+test_for_in();
+test_for_in();
+
+function test_get_property_names() {
+  var names = %GetPropertyNames(large_array3);
+  var property_name_count = 0;
+  for (var x in names) { property_name_count++; }
+  assertEquals(26, property_name_count);
+}
+
+test_get_property_names();
+test_get_property_names();
+test_get_property_names();
+
+// Test element getters.
+assertEquals(expected_array_value(10), large_array3[10]);
+assertEquals(expected_array_value(-NaN), large_array3[2]);
+large_array3.__defineGetter__("2", function() {
+  return expected_array_value(10);
+});
+
+function test_getter() {
+  assertEquals(expected_array_value(10), large_array3[10]);
+  assertEquals(expected_array_value(10), large_array3[2]);
+}
+
+test_getter();
+test_getter();
+test_getter();
+%OptimizeFunctionOnNextCall(test_getter);
+test_getter();
+test_getter();
+test_getter();
+
+// Test element setters.
+large_array4 = new Array(large_array_size);
+force_to_fast_double_array(large_array4);
+
+var setter_called = false;
+
+assertEquals(expected_array_value(10), large_array4[10]);
+assertEquals(expected_array_value(2), large_array4[2]);
+large_array4.__defineSetter__("10", function(value){
+    setter_called = true;
+  });
+
+function test_setter() {
+  setter_called = false;
+  large_array4[10] = 119;
+  assertTrue(setter_called);
+  assertEquals(undefined, large_array4[10]);
+  assertEquals(expected_array_value(2), large_array4[2]);
+}
+
+test_setter();
+test_setter();
+test_setter();
+%OptimizeFunctionOnNextCall(test_setter);
+test_setter();
+test_setter();
+test_setter();
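
The getter/setter cases above reduce to one behaviour: once an accessor is defined for an index, reads and writes of that index go through the accessor instead of the double-element backing store, which is why test_setter sees undefined after the intercepted write. A minimal standalone sketch of the same behaviour, with console.assert standing in for the mjsunit assertions and the V8-internal %HasFastDoubleElements checks omitted:

  // Illustrative sketch only; variable names are not part of the test suite.
  var a = [0.5, 1.5, 2.5];

  // A getter on index 1 shadows the stored double; reads now come from it.
  a.__defineGetter__("1", function () { return 42; });
  console.assert(a[1] === 42);

  // A setter on index 2 intercepts writes, and since no getter was defined,
  // a later read of that index yields undefined.
  var setterCalled = false;
  a.__defineSetter__("2", function (v) { setterCalled = true; });
  a[2] = 119;
  console.assert(setterCalled);
  console.assert(a[2] === undefined);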
diff --git a/test/mjsunit/unicode-test.js b/test/mjsunit/unicode-test.js
index 59a684e..66a029a 100644
--- a/test/mjsunit/unicode-test.js
+++ b/test/mjsunit/unicode-test.js
@@ -807,7 +807,7 @@
 "    * Васильев Л.С. Древний Китай: в 3 т. Т. 3. Период Чжаньго (V–III вв. до н.э.). М.: Восточная литература, 2006. ISBN 502018103X\n" +
 "    * Непомнин О.Е. История Китая: Эпоха Цин. XVII – начало XX века. М.: Восточная литература, 2005. ISBN 5020184004\n";
 
-var devanagari = 
+var devanagari =
 "भारत\n" +
 "विकिपीडिया, एक मुक्त ज्ञानकोष से\n" +
 "Jump to: navigation, search\n" +
@@ -1417,7 +1417,7 @@
 "There are many words of French origin in English, such as competition, art, table, publicity, police, role, routine, machine, force, and many others that have been and are being anglicised; they are now pronounced according to English rules of phonology, rather than French. A large portion of English vocabulary is of French or Oïl language origin, most derived from, or transmitted via, the Anglo-Norman spoken by the upper classes in England for several hundred years after the Norman Conquest.\n";
 
 
-var greek = 
+var greek =
 "Ελλάδα\n" +
 "Από τη Βικιπαίδεια, την ελεύθερη εγκυκλοπαίδεια\n" +
 "Ελληνική Δημοκρατία\n" +
diff --git a/test/mjsunit/value-wrapper.js b/test/mjsunit/value-wrapper.js
index 88330b4..76e200f 100644
--- a/test/mjsunit/value-wrapper.js
+++ b/test/mjsunit/value-wrapper.js
@@ -39,7 +39,7 @@
     assertEquals('object', (42).TypeOfThis());
     assertEquals('object', (3.14).TypeOfThis());
   }
-  
+
   for (var i = 0; i < 10; i++) {
     assertEquals('object', 'xxx'['TypeOfThis']());
     assertEquals('object', true['TypeOfThis']());
@@ -47,11 +47,11 @@
     assertEquals('object', (42)['TypeOfThis']());
     assertEquals('object', (3.14)['TypeOfThis']());
   }
-  
+
   function CallTypeOfThis(obj) {
     assertEquals('object', obj.TypeOfThis());
   }
-  
+
   for (var i = 0; i < 10; i++) {
     CallTypeOfThis('xxx');
     CallTypeOfThis(true);
@@ -59,7 +59,7 @@
     CallTypeOfThis(42);
     CallTypeOfThis(3.14);
   }
-  
+
   function TestWithWith(obj) {
     with (obj) {
       for (var i = 0; i < 10; i++) {
@@ -67,13 +67,13 @@
       }
     }
   }
-  
+
   TestWithWith('xxx');
   TestWithWith(true);
   TestWithWith(false);
   TestWithWith(42);
   TestWithWith(3.14);
-  
+
   for (var i = 0; i < 10; i++) {
     assertEquals('object', true[7]());
     assertEquals('object', false[7]());
@@ -100,7 +100,7 @@
 
 function TypeOfThis() { return typeof this; }
 
-// Test with normal setup of prototype. 
+// Test with normal setup of prototype.
 String.prototype.TypeOfThis = TypeOfThis;
 Boolean.prototype.TypeOfThis = TypeOfThis;
 Number.prototype.TypeOfThis = TypeOfThis;
diff --git a/test/mjsunit/with-leave.js b/test/mjsunit/with-leave.js
index ded62ca..7369faa 100644
--- a/test/mjsunit/with-leave.js
+++ b/test/mjsunit/with-leave.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -59,3 +59,162 @@
 }
 assertTrue(caught);
 
+
+// We want to test the context chain shape.  In each of the test cases
+// below, the outer with is to force a runtime lookup of the identifier 'x'
+// to actually verify that the inner context has been discarded.  A static
+// lookup of 'x' might accidentally succeed.
+with ({x: 'outer'}) {
+  label: {
+    with ({x: 'inner'}) {
+      break label;
+    }
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  label: {
+    with ({x: 'middle'}) {
+      with ({x: 'inner'}) {
+        break label;
+      }
+    }
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  for (var i = 0; i < 10; ++i) {
+    with ({x: 'inner' + i}) {
+      continue;
+    }
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  label: for (var i = 0; i < 10; ++i) {
+    with ({x: 'middle' + i}) {
+      for (var j = 0; j < 10; ++j) {
+        with ({x: 'inner' + j}) {
+          continue label;
+        }
+      }
+    }
+  }
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  try {
+    with ({x: 'inner'}) {
+      throw 0;
+    }
+  } catch (e) {
+    assertEquals('outer', x);
+  }
+}
+
+
+with ({x: 'outer'}) {
+  try {
+    with ({x: 'middle'}) {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    }
+  } catch (e) {
+    assertEquals('outer', x);
+  }
+}
+
+
+try {
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } finally {
+      assertEquals('outer', x);
+    }
+  }
+} catch (e) {
+  if (e instanceof MjsUnitAssertionError) throw e;
+}
+
+
+try {
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'middle'}) {
+        with ({x: 'inner'}) {
+          throw 0;
+        }
+      }
+    } finally {
+      assertEquals('outer', x);
+    }
+  }
+} catch (e) {
+  if (e instanceof MjsUnitAssertionError) throw e;
+}
+
+
+// Verify that the context is correctly set in the stack frame after exiting
+// from with.
+function f() {}
+
+with ({x: 'outer'}) {
+  label: {
+    with ({x: 'inner'}) {
+      break label;
+    }
+  }
+  f();  // The context could be restored from the stack after the call.
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  for (var i = 0; i < 10; ++i) {
+    with ({x: 'inner' + i}) {
+      continue;
+    }
+  }
+  f();
+  assertEquals('outer', x);
+}
+
+
+with ({x: 'outer'}) {
+  try {
+    with ({x: 'inner'}) {
+      throw 0;
+    }
+  } catch (e) {
+    f();
+    assertEquals('outer', x);
+  }
+}
+
+
+try {
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } finally {
+      f();
+      assertEquals('outer', x);
+    }
+  }
+} catch (e) {
+  if (e instanceof MjsUnitAssertionError) throw e;
+}
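
All of the new cases follow the pattern described in the comment at the top of the additions: leave a with scope abruptly (break, continue, throw, or via finally) and then force a dynamic lookup of x to prove the inner with-context was unwound. A minimal self-contained sketch of that pattern, with console.assert standing in for assertEquals:

  with ({x: 'outer'}) {
    label: {
      with ({x: 'inner'}) {
        break label;  // abruptly leaves the inner with-context
      }
    }
    // If the inner context were not discarded, this lookup would see 'inner'.
    console.assert(x === 'outer');
  }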
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index b9528bd..3a27130 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -201,11 +201,19 @@
 ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug
 
 
+# Test that assumes specific execution time, flaky in debug mode.
+js1_5/Array/regress-101964: PASS || FAIL if $mode == debug
+
+
 ##################### INCOMPATIBLE TESTS #####################
 
 # This section is for tests that fail in both V8 and JSC.  Thus they
 # have been determined to be incompatible between Mozilla and V8/JSC.
 
+# Fails because it calls builtins as functions and does not expect the
+# builtin to have undefined as the receiver.
+ecma/String/15.5.4.6-2: FAIL_OK
+
 # Fail because of toLowerCase and toUpperCase conversion.
 ecma/String/15.5.4.11-2: FAIL_OK
 ecma/String/15.5.4.11-5: FAIL_OK
@@ -241,10 +249,9 @@
 # toExponential argument restricted to range 0..20 in JSC/V8
 ecma_3/Number/15.7.4.6-1: FAIL_OK
 
-#:=== RegExp:=== 
-# To be compatible with JSC we silently ignore flags that do not make
-# sense.  These tests expects us to throw exceptions.  
-ecma_3/RegExp/regress-57631: FAIL_OK
+#:=== RegExp:===
+# We don't match the syntax error message of Mozilla for invalid
+# RegExp flags.
 ecma_3/RegExp/15.10.4.1-6: FAIL_OK
 
 # PCRE doesn't allow subpattern nesting deeper than 200, this tests
@@ -325,11 +332,6 @@
 js1_5/Regress/regress-320119: FAIL_OK
 
 
-# We do not recognize a multiline comment as newline character.
-# We are compatible with JSC.
-ecma_3/LexicalConventions/7.4-01: FAIL_OK
-
-
 # No support for toSource().
 js1_5/Regress/regress-248444: FAIL_OK
 js1_5/Regress/regress-313967-01: FAIL_OK
@@ -432,6 +434,14 @@
 js1_2/regexp/string_split: FAIL_OK
 
 
+# RegExps are not callable.
+js1_2/regexp/simple_form: FAIL_OK
+js1_2/regexp/regress-6359: FAIL_OK
+js1_2/regexp/regress-9141: FAIL_OK
+js1_5/Regress/regress-224956: FAIL_OK
+js1_5/Regress/regress-325925: FAIL_OK
+ecma_2/RegExp/regress-001: FAIL_OK
+
 # We do not check for bad surrogate pairs when quoting strings.
 js1_5/Regress/regress-315974: FAIL_OK
 
@@ -459,10 +469,6 @@
 js1_5/extensions/regress-452178: FAIL_OK
 
 
-# 'native' *is* a keyword in V8.
-js1_5/Regress/regress-240317: FAIL_OK
-
-
 # Requires Mozilla-specific strict mode or options() function.
 ecma_3/Object/8.6.1-01: FAIL_OK
 js1_5/Exceptions/regress-315147: FAIL_OK
@@ -528,6 +534,12 @@
 js1_5/LexicalConventions/regress-343675: FAIL_OK
 
 
+# Tests if future reserved keywords of ECMA-262, edition 3 emit warnings. We
+# implement the edition 5 behaviour and fail on use of edition 5 future reserved
+# keywords as identifiers.
+js1_5/Regress/regress-240317: FAIL_OK
+
+
 # Unsupported list comprehensions: [ ... for ... ] and for each.
 js1_5/Regress/regress-352009: FAIL_OK
 js1_5/Regress/regress-349648: FAIL_OK
@@ -570,7 +582,7 @@
 js1_5/Regress/regress-417893: FAIL_OK
 
 
-# Unsupported use of "[]" as function parameter. We match JSC. 
+# Unsupported use of "[]" as function parameter. We match JSC.
 js1_5/Regress/regress-416737-01: FAIL_OK
 js1_5/Regress/regress-416737-02: FAIL_OK
 
@@ -587,29 +599,13 @@
 js1_5/Array/regress-350256-02: FAIL
 
 
-# This fails because 'delete arguments[i]' does not disconnect the
-# argument from the arguments array.  See issue #900066.
-ecma_3/Function/regress-137181: FAIL
-
-
-# 'export' and 'import' are not keywords in V8.
-ecma_2/Exceptions/lexical-010: FAIL
-ecma_2/Exceptions/lexical-022: FAIL
-
-
-# Requires Mozilla-specific strict mode.
-ecma_2/Exceptions/lexical-011: FAIL
-ecma_2/Exceptions/lexical-014: FAIL
-ecma_2/Exceptions/lexical-016: FAIL
-ecma_2/Exceptions/lexical-021: FAIL
-ecma_2/LexicalConventions/keywords-001: FAIL
-js1_5/Regress/regress-306633: FAIL
-
-
 # This test seems designed to fail (it produces a 700Mbyte string).
 # We fail on out of memory.  The important thing is not to crash.
 js1_5/Regress/regress-303213: FAIL || TIMEOUT if $mode == debug
 
+# This test fails since we now throw in String.prototype.match when apply
+# is given null or undefined as this argument (and so does firefox nightly).
+js1_5/Regress/regress-295052: FAIL
 
 # Bug 1202592: New ecma_3/String/15.5.4.11 is failing.
 ecma_3/String/15.5.4.11: FAIL
@@ -672,7 +668,6 @@
 js1_5/extensions/regress-352094: FAIL_OK
 js1_5/extensions/regress-352261: FAIL_OK
 js1_5/extensions/regress-352281: FAIL_OK
-js1_5/extensions/regress-352372: FAIL_OK
 js1_5/extensions/regress-352455: FAIL_OK
 js1_5/extensions/regress-352604: FAIL_OK
 js1_5/extensions/regress-353214: FAIL_OK
diff --git a/test/mozilla/testcfg.py b/test/mozilla/testcfg.py
index 3728f79..587781d 100644
--- a/test/mozilla/testcfg.py
+++ b/test/mozilla/testcfg.py
@@ -125,7 +125,7 @@
     return tests
 
   def GetBuildRequirements(self):
-    return ['sample', 'sample=shell']
+    return ['d8']
 
   def GetTestStatus(self, sections, defs):
     status_file = join(self.root, 'mozilla.status')
diff --git a/test/preparser/duplicate-parameter.pyt b/test/preparser/duplicate-parameter.pyt
new file mode 100644
index 0000000..4dfb7d6
--- /dev/null
+++ b/test/preparser/duplicate-parameter.pyt
@@ -0,0 +1,90 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Templated tests with duplicate parameter names.
+
+# ----------------------------------------------------------------------
+# Constants and utility functions
+
+# A template that performs the same strict-mode test in different
+# scopes (global scope, function scope, and nested function scope),
+# and in non-strict mode too.
+def DuplicateParameterTest(name, source):
+  expectation = "strict_param_dupe"
+  non_selfstrict = {"selfstrict":"", "id":"selfnormal"}
+
+  Template(name, '"use strict";\n' + source)(non_selfstrict, expectation)
+  Template(name + '-infunc',
+           'function foo() {\n "use strict";\n' + source +'\n}\n')(
+               non_selfstrict, expectation)
+  Template(name + '-infunc2',
+           'function foo() {\n "use strict";\n  function bar() {\n' +
+           source +'\n }\n}\n')(non_selfstrict, expectation)
+
+  selfstrict = {"selfstrict": "\"use strict\";", "id": "selfstrict"}
+  nestedstrict = {"selfstrict": "function bar(){\"use strict\";}",
+                  "id": "nestedstrict"}
+  selfstrictnestedclean = {"selfstrict": """
+      "use strict";
+      function bar(){}
+    """, "id": "selfstrictnestedclean"}
+  selftest = Template(name + '-$id', source)
+  selftest(selfstrict, expectation)
+  selftest(selfstrictnestedclean, expectation)
+  selftest(nestedstrict, None)
+  selftest(non_selfstrict, None)
+
+
+# ----------------------------------------------------------------------
+# Test templates
+
+DuplicateParameterTest("dups", """
+  function foo(a, a) { $selfstrict }
+""");
+
+DuplicateParameterTest("dups-apart", """
+  function foo(a, b, c, d, e, f, g, h, i, j, k, l, m, n, a) { $selfstrict }
+""");
+
+DuplicateParameterTest("dups-escaped", """
+  function foo(\u0061, b, c, d, e, f, g, h, i, j, k, l, m, n, a) { $selfstrict }
+""");
+
+DuplicateParameterTest("triples", """
+  function foo(a, b, c, d, e, f, g, h, a, i, j, k, l, m, n, a) { $selfstrict }
+""");
+
+DuplicateParameterTest("escapes", """
+  function foo(a, \u0061) { $selfstrict }
+""");
+
+DuplicateParameterTest("long-names", """
+  function foo(arglebargleglopglyfarglebargleglopglyfarglebargleglopglyfa,
+               arglebargleglopglyfarglebargleglopglyfarglebargleglopglyfa) {
+    $selfstrict
+  }
+""");
diff --git a/test/preparser/duplicate-property.pyt b/test/preparser/duplicate-property.pyt
new file mode 100644
index 0000000..5abf9ad
--- /dev/null
+++ b/test/preparser/duplicate-property.pyt
@@ -0,0 +1,162 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Tests of duplicate properties in object literals.
+
+# ----------------------------------------------------------------------
+# Utility functions to generate a number of tests for each property
+# name pair.
+
+def PropertyTest(name, propa, propb, allow_strict = True):
+  replacement = {"id1": propa, "id2": propb, "name": name}
+
+  # Runs the same test in both strict and non-strict contexts.
+  def StrictTest(name, source, replacement, expectation):
+    if (allow_strict):
+      Template("strict-" + name,
+               "\"use strict\";\n" + source)(replacement, expectation)
+      Template(name, source)(replacement, expectation)
+
+  # This one is an error only in strict context.
+  if (allow_strict):
+    Template("strict-$name-data-data", """
+        "use strict";
+        var o = {$id1: 42, $id2: 42};
+      """)(replacement, "strict_duplicate_property")
+
+  Template("$name-data-data", """
+      var o = {$id1: 42, $id2: 42};
+    """)(replacement, None)
+
+  StrictTest("$name-data-get", """
+      var o = {$id1: 42, get $id2(){}};
+    """, replacement, "accessor_data_property")
+
+  StrictTest("$name-data-set", """
+      var o = {$id1: 42, set $id2(v){}};
+    """, replacement, "accessor_data_property")
+
+  StrictTest("$name-get-data", """
+      var o = {get $id1(){}, $id2: 42};
+    """, replacement, "accessor_data_property")
+
+  StrictTest("$name-set-data", """
+      var o = {set $id1(v){}, $id2: 42};
+    """, replacement, "accessor_data_property")
+
+  StrictTest("$name-get-get", """
+      var o = {get $id1(){}, get $id2(){}};
+    """, replacement, "accessor_get_set")
+
+  StrictTest("$name-set-set", """
+      var o = {set $id1(v){}, set $id2(v){}};
+    """, replacement, "accessor_get_set")
+
+  StrictTest("$name-nested-get", """
+      var o = {get $id1(){}, o: {get $id2(){} } };
+    """, replacement, None)
+
+  StrictTest("$name-nested-set", """
+      var o = {set $id1(){}, o: {set $id2(){} } };
+    """, replacement, None)
+
+
+def TestBothWays(name, propa, propb, allow_strict = True):
+  PropertyTest(name + "-1", propa, propb, allow_strict)
+  PropertyTest(name + "-2", propb, propa, allow_strict)
+
+def TestSame(name, prop, allow_strict = True):
+  PropertyTest(name, prop, prop, allow_strict)
+
+#-----------------------------------------------------------------------
+
+# Simple identifier property
+TestSame("a", "a")
+
+# Get/set identifiers
+TestSame("get-id", "get")
+TestSame("set-id", "set")
+
+# Number properties
+TestSame("0", "0")
+TestSame("0.1", "0.1")
+TestSame("1.0", "1.0")
+TestSame("42.33", "42.33")
+TestSame("2^32-2", "4294967294")
+TestSame("2^32", "4294967296")
+TestSame("2^53", "9007199254740992")
+TestSame("Hex20", "0x20")
+TestSame("exp10", "1e10")
+TestSame("exp20", "1e20")
+TestSame("Oct40", "040", False);
+
+
+# String properties
+TestSame("str-a", '"a"')
+TestSame("str-0", '"0"')
+TestSame("str-42", '"42"')
+TestSame("str-empty", '""')
+
+# Keywords
+TestSame("if", "if")
+TestSame("case", "case")
+
+# Future reserved keywords
+TestSame("public", "public")
+TestSame("class", "class")
+
+
+# Test that numbers are converted to string correctly.
+
+TestBothWays("hex-int", "0x20", "32")
+TestBothWays("oct-int", "040", "32", False)  # Octals disallowed in strict mode.
+TestBothWays("dec-int", "32.00", "32")
+TestBothWays("dec-underflow-int",
+             "32.00000000000000000000000000000000000000001", "32")
+TestBothWays("exp-int", "3.2e1", "32")
+TestBothWays("exp-int", "3200e-2", "32")
+TestBothWays("overflow-inf", "1e2000", "Infinity")
+TestBothWays("overflow-inf-exact", "1.797693134862315808e+308", "Infinity")
+TestBothWays("non-overflow-inf-exact", "1.797693134862315807e+308",
+                                       "1.7976931348623157e+308")
+TestBothWays("underflow-0", "1e-2000", "0")
+TestBothWays("underflow-0-exact", "2.4703282292062E-324", "0")
+TestBothWays("non-underflow-0-exact", "2.4703282292063E-324", "5e-324")
+TestBothWays("precission-loss-high", "9007199254740992", "9007199254740993")
+TestBothWays("precission-loss-low", "1.9999999999999998", "1.9999999999999997")
+TestBothWays("non-canonical-literal-int", "1.0", "1")
+TestBothWays("non-canonical-literal-frac", "1.50", "1.5")
+TestBothWays("rounding-down", "1.12512512512512452", "1.1251251251251244")
+TestBothWays("rounding-up", "1.12512512512512453", "1.1251251251251246")
+
+TestBothWays("hex-int-str", "0x20", '"32"')
+TestBothWays("dec-int-str", "32.00", '"32"')
+TestBothWays("exp-int-str", "3.2e1", '"32"')
+TestBothWays("overflow-inf-str", "1e2000", '"Infinity"')
+TestBothWays("underflow-0-str", "1e-2000", '"0"')
+TestBothWays("non-canonical-literal-int-str", "1.0", '"1"')
+TestBothWays("non-canonical-literal-frac-str", "1.50", '"1.5"')
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/non-use-strict-hex-escape.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/non-use-strict-hex-escape.js
index aa93b25..bf28923 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/non-use-strict-hex-escape.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// A string looking like "use strict", but with a hex escape in it,
+// doesn't trigger strict mode.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use\x20strict";
+  var x = "hello\040world";
+  return x;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/non-use-strict-octal-escape.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/non-use-strict-octal-escape.js
index aa93b25..9e00742 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/non-use-strict-octal-escape.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// A string looking like "use strict", but with an octal escape in it,
+// doesn't trigger strict mode.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use\040strict";
+  var x = "hello\040world";
+  return x;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/non-use-strict-uhex-escape.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/non-use-strict-uhex-escape.js
index aa93b25..5fba673 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/non-use-strict-uhex-escape.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// A string looking like "use strict", but with a long hex escape in it,
+// doesn't trigger strict mode.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use\u0020strict";
+  var x = "hello\040world";
+  return x;
+}
\ No newline at end of file
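
The three copies above test one rule from three angles: a directive prologue only enables strict mode if the string literal contains no escape sequences at all, so "use\x20strict", "use\040strict" and "use\u0020strict" are ordinary expression statements. A sketch of the consequence (function names are illustrative; console.assert stands in for test assertions):

  // The escaped directive is not "use strict", so the body stays sloppy and
  // the octal escape in the return value is legal.
  function notStrict() {
    "use\x20strict";
    return "hello\040world";   // \040 is a space in sloppy mode
  }
  console.assert(notStrict() === "hello world");

  // With the unescaped directive the same octal escape is a SyntaxError,
  // which we probe through eval.
  var threw = false;
  try {
    eval('function isStrict() { "use strict"; return "hello\\040world"; }');
  } catch (e) {
    threw = e instanceof SyntaxError;
  }
  console.assert(threw);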
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/nonstrict-arguments.js
similarity index 74%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/nonstrict-arguments.js
index aa93b25..890f62e 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/nonstrict-arguments.js
@@ -25,12 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Eval/arguments restrictions should not trigger outside of strict-mode code.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var arguments = 42;
+arguments = arguments++;
+arguments += --arguments;
+arguments -= ++arguments;
+arguments *= arguments--;
+function arguments(arguments) {};
+try {} catch (arguments) {}
+
+function strict() {
+  "use strict";
+  // Reading eval and arguments is allowed.
+  eval(arguments);
 }
 
-test();
+var arguments = 42;
+arguments = arguments++;
+arguments += --arguments;
+arguments -= ++arguments;
+arguments *= arguments--;
+function arguments(arguments) {};
+try {} catch (arguments) {}
+
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/nonstrict-eval.js
similarity index 78%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/nonstrict-eval.js
index aa93b25..ad994ab 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/nonstrict-eval.js
@@ -25,12 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Eval restrictions should not trigger outside of strict-mode code.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+var eval = 42;
+eval = eval++;
+eval += --eval;
+eval -= ++eval;
+eval *= eval--;
+function eval(eval) {};
+try {} catch (eval) {}
+
+function strict() {
+  "use strict";
+  // Reading eval and arguments is allowed.
+  eval(arguments);
 }
 
-test();
+var eval = 42;
+eval = eval++;
+eval += --eval;
+eval -= ++eval;
+eval *= eval--;
+function eval(eval) {};
+try {} catch (eval) {}
+
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/nonstrict-with.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/nonstrict-with.js
index aa93b25..12d05a0 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/nonstrict-with.js
@@ -25,12 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// The with statement is allowed in non-strict code, and even around
+// strict code.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
+with ({}) {}
+
+with ({x : 42}) {
+  var foo = function () {
+    "use strict";
+    return x;
+  };
 }
 
-test();
+with ({}) {}
+
+
+
diff --git a/test/preparser/preparser.expectation b/test/preparser/preparser.expectation
new file mode 100644
index 0000000..638f90e
--- /dev/null
+++ b/test/preparser/preparser.expectation
@@ -0,0 +1,14 @@
+# Expectations for .js preparser tests.
+# Only mentions tests that throw SyntaxError, and optionally specifies
+# the message and location expected in the exception.
+# Format:
+#   testname[:message[:beg_pos,end_pos]]
+strict-octal-number:strict_octal_literal
+strict-octal-string:strict_octal_literal
+strict-octal-regexp:strict_octal_literal
+strict-octal-use-strict-after:strict_octal_literal
+strict-octal-use-strict-before:strict_octal_literal
+
+strict-const:strict_const
+
+strict-with:strict_mode_with
diff --git a/test/preparser/preparser.status b/test/preparser/preparser.status
new file mode 100644
index 0000000..db17778
--- /dev/null
+++ b/test/preparser/preparser.status
@@ -0,0 +1,39 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+prefix preparser
+
+# We don't parse RegExps at scanning time, so we can't fail on octal
+# escapes (we need to parse to distinguish octal escapes from valid
+# back-references).
+strict-octal-regexp: FAIL
+
+##############################################################################
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-const.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-const.js
index aa93b25..91e9e39 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-const.js
@@ -25,12 +25,5 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
-
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+"use strict";
+const x = 42;
\ No newline at end of file
diff --git a/test/preparser/strict-function-statement.pyt b/test/preparser/strict-function-statement.pyt
new file mode 100644
index 0000000..08c4288
--- /dev/null
+++ b/test/preparser/strict-function-statement.pyt
@@ -0,0 +1,99 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# In strict mode, function declarations may only appear as source elements.
+
+# A template that performs the same strict-mode test in different
+# scopes (global scope, function scope, and nested function scope).
+def StrictTest(name, source):
+  Test(name, '"use strict";\n' + source, "strict_function")
+  Test(name + '-infunc',
+       'function foo() {\n "use strict";\n' + source +'\n}\n',
+       "strict_function")
+  Test(name + '-infunc2',
+       'function foo() {\n "use strict";\n  function bar() {\n' +
+       source +'\n }\n}\n',
+       "strict_function")
+
+# Not testing with-scope, since with is not allowed in strict mode at all.
+
+StrictTest("block", """
+  { function foo() { } }
+""")
+
+StrictTest("try-w-catch", """
+  try { function foo() { } } catch (e) { }
+""")
+
+StrictTest("try-w-finally", """
+  try { function foo() { } } finally { }
+""")
+
+StrictTest("catch", """
+  try { } catch (e) { function foo() { } }
+""")
+
+StrictTest("finally", """
+  try { } finally { function foo() { } }
+""")
+
+StrictTest("for", """
+  for (;;) { function foo() { } }
+""")
+
+StrictTest("while", """
+  while (true) { function foo() { } }
+""")
+
+StrictTest("do", """
+  do { function foo() { } } while (true);
+""")
+
+StrictTest("then", """
+  if (true) { function foo() { } }
+""")
+
+
+StrictTest("then-w-else", """
+  if (true) { function foo() { } } else { }
+""")
+
+
+StrictTest("else", """
+  if (true) { } else { function foo() { } }
+""")
+
+StrictTest("switch-case", """
+  switch (true) { case true: function foo() { } }
+""")
+
+StrictTest("labeled", """
+  label: function foo() { }
+""")
+
+
+
diff --git a/test/preparser/strict-identifiers.pyt b/test/preparser/strict-identifiers.pyt
new file mode 100644
index 0000000..72808e2
--- /dev/null
+++ b/test/preparser/strict-identifiers.pyt
@@ -0,0 +1,233 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Templated tests with eval/arguments/future reserved words.
+
+# ----------------------------------------------------------------------
+# Constants and utility functions
+
+reserved_words = [
+  'class',
+  'const', # Has other error message than other reserved words.
+  'enum',
+  'export',
+  'extends',
+  'import',
+  'super'
+  ]
+
+strict_reserved_words = [
+  'implements',
+  'interface',
+  'let',
+  'package',
+  'private',
+  'protected',
+  'public',
+  'static',
+  'yield'
+  ]
+
+assign_ops = {
+  "=": "assign",
+  "+=": "addeq",
+  "-=": "subeq",
+  "*=": "muleq",
+  "/=": "diveq",
+  "%=": "modeq",
+  "&=": "andeq",
+  "|=": "oreq",
+  "^=": "xoreq",
+  "<<=": "shleq",
+  ">>=": "asreq",
+  ">>>=": "lsreq"
+  }
+
+
+# A template that performs the same strict-mode test in different
+# scopes (global scope, function scope, and nested function scope).
+def StrictTemplate(name, source):
+  def MakeTests(replacement, expectation):
+    Template(name, '"use strict";\n' + source)(replacement, expectation)
+    Template(name + '-infunc',
+             'function foo() {\n "use strict";\n' + source +'\n}\n')(
+              replacement, expectation)
+    Template(name + '-infunc2',
+             'function foo() {\n "use strict";\n  function bar() {\n' +
+             source +'\n }\n}\n')(replacement, expectation)
+  return MakeTests
+
+# ----------------------------------------------------------------------
+# Test templates
+
+arg_name_own = Template("argument-name-own-$id", """
+  function foo($id) {
+    "use strict";
+  }
+""")
+
+arg_name_nested = Template("argument-name-nested-$id", """
+  function foo() {
+    "use strict";
+    function bar($id) { }
+  }
+""")
+
+func_name_own = Template("function-name-own-$id", """
+  function $id(foo) {
+    "use strict";
+  }
+""")
+
+func_name_nested = Template("function-name-nested-$id", """
+  function foo() {
+    "use strict";
+    function $id(bar) { }
+  }
+""")
+
+catch_var = StrictTemplate("catch-$id", """
+    try { } catch ($id) { }
+""")
+
+declare_var = StrictTemplate("var-$id", """
+  var $id = 42;
+""")
+
+assign_var = StrictTemplate("assign-$id-$opname", """
+  var x = $id $op 42;
+""")
+
+prefix_var = StrictTemplate("prefix-$opname-$id", """
+  var x = $op$id;
+""")
+
+postfix_var = StrictTemplate("postfix-$opname-$id", """
+  var x = $id$op;
+""")
+
+read_var = StrictTemplate("read-reserved-$id", """
+  var x = $id;
+""")
+
+setter_arg = StrictTemplate("setter-param-$id", """
+  var x = {set foo($id) { }};
+""")
+
+non_strict_use = Template("nonstrict-$id", """
+  var $id = 42;
+  $id++;
+  $id--;
+  ++$id;
+  --$id;
+  $id += 10;
+  $id -= 10;
+  try {} catch ($id) { }
+  function $id($id) { }
+  var x = {$id: 42};
+  x = {get $id() {}, set $id(value) {}};
+  function foo() { "use strict;" }
+  var $id = 42;
+  $id++;
+  $id--;
+  ++$id;
+  --$id;
+  $id += 10;
+  $id -= 10;
+  try {} catch ($id) { }
+  function $id($id) { }
+  x = {$id: 42};
+  x = {get $id() {}, set $id(value) {}};
+""")
+
+identifier_name_source = """
+  var x = {$id: 42};
+  x = {get $id() {}, set $id(value) {}};
+  x.$id = 42;
+  function foo() { "use strict;" }
+  x = {$id: 42};
+  x = {get $id() {}, set $id(value) {}};
+  x.$id = 42;
+"""
+
+identifier_name = Template("identifier_name-$id", identifier_name_source)
+identifier_name_strict = StrictTemplate("identifier_name_strict-$id",
+                                        identifier_name_source)
+
+# ----------------------------------------------------------------------
+# Run tests
+
+# eval and arguments have specific exceptions for different uses.
+for id in ["eval", "arguments"]:
+  arg_name_own({"id": id}, "strict_param_name")
+  arg_name_nested({"id": id}, "strict_param_name")
+  func_name_own({"id": id}, "strict_function_name")
+  func_name_nested({"id": id}, "strict_function_name")
+  setter_arg({"id": id}, "strict_param_name")
+  for op in assign_ops.keys():
+    assign_var({"id": id, "op":op, "opname": assign_ops[op]},
+               "strict_lhs_assignment")
+  catch_var({"id": id}, "strict_catch_variable")
+  declare_var({"id": id}, "strict_var_name")
+  prefix_var({"id": id, "op":"++", "opname":"inc"}, "strict_lhs_prefix")
+  prefix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_prefix")
+  postfix_var({"id": id, "op":"++", "opname":"inc"}, "strict_lhs_postfix")
+  postfix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_postfix")
+  non_strict_use({"id": id}, None)
+
+
+# Reserved words just throw the same exception in all cases
+# (with "const" being special, as usual).
+for reserved_word in reserved_words + strict_reserved_words:
+  if (reserved_word in strict_reserved_words):
+    message = "strict_reserved_word"
+  elif (reserved_word == "const"):
+    message = "unexpected_token"
+  else:
+    message = "reserved_word"
+  arg_name_own({"id":reserved_word}, message)
+  arg_name_nested({"id":reserved_word}, message)
+  setter_arg({"id": reserved_word}, message)
+  func_name_own({"id":reserved_word}, message)
+  func_name_nested({"id":reserved_word}, message)
+  for op in assign_ops.keys():
+    assign_var({"id":reserved_word, "op":op, "opname": assign_ops[op]}, message)
+  catch_var({"id":reserved_word}, message)
+  declare_var({"id":reserved_word}, message)
+  prefix_var({"id":reserved_word, "op":"++", "opname":"inc"}, message)
+  prefix_var({"id":reserved_word, "op":"--", "opname":"dec"}, message)
+  postfix_var({"id":reserved_word, "op":"++", "opname":"inc"}, message)
+  postfix_var({"id":reserved_word, "op":"--", "opname":"dec"}, message)
+  read_var({"id": reserved_word}, message)
+  identifier_name({"id": reserved_word}, None);
+  identifier_name_strict({"id": reserved_word}, None);
+
+
+# Future reserved words in strict mode behave like normal identifiers
+# in a non-strict context.
+for reserved_word in strict_reserved_words:
+  non_strict_use({"id": reserved_word}, None)
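
The eval/arguments half of these templates corresponds to restrictions that still hold: strict code may read eval and arguments but may not bind them or assign to them. A compact sketch that probes each rejected form through eval (the snippet list mirrors the templates above; console.assert stands in for the harness expectations):

  // Reading eval and arguments inside strict code is allowed.
  function reader() {
    "use strict";
    return typeof eval + " " + typeof arguments;
  }
  console.assert(reader() === "function object");

  // Binding or assigning to them is a SyntaxError, matching the
  // strict_var_name / strict_param_name / strict_lhs_* expectations above.
  ["var eval = 1;",
   "arguments = 1;",
   "eval++;",
   "function f(arguments) {}",
   "try {} catch (eval) {}"].forEach(function (src) {
    var threw = false;
    try {
      eval('"use strict"; ' + src);
    } catch (e) {
      threw = e instanceof SyntaxError;
    }
    console.assert(threw, src);
  });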
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-octal-indirect-regexp.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-octal-indirect-regexp.js
index aa93b25..122bd3d 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-octal-indirect-regexp.js
@@ -25,12 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Strict mode with call to RegExp containing octal escape:
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use strict";
+  var re = RegExp("Hello\\040World");
+  return re;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-octal-number.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-octal-number.js
index aa93b25..d387d6a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-octal-number.js
@@ -25,12 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Strict mode with octal number literal.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use strict";
+  var x = 012;
+  return x;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-octal-regexp.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-octal-regexp.js
index aa93b25..fded9bf 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-octal-regexp.js
@@ -25,12 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Strict mode with octal escape in RegExp literal.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use strict";
+  var re = /hello\040world/;
+  return re;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-octal-string.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-octal-string.js
index aa93b25..40408e6 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-octal-string.js
@@ -25,12 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Strict mode with octal escape in string literal.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use strict";
+  var x = "hello\040world";
+  return x;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-octal-use-strict-after.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-octal-use-strict-after.js
index aa93b25..1af078a 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-octal-use-strict-after.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Strict mode with octal escape in string/directive prologue looking like
+// "use strict", after "use strict" directive.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use strict";
+  "use\040strict";
+  return true;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-octal-use-strict-before.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-octal-use-strict-before.js
index aa93b25..1dbb571 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-octal-use-strict-before.js
@@ -25,12 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// Strict mode with octal escape in string/directive prologue looking like
+// "use strict, before "use strict" directive.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use\040strict";
+  "use strict";
+  return true;
+}
\ No newline at end of file
diff --git a/test/mjsunit/override-eval-with-non-function.js b/test/preparser/strict-with.js
similarity index 88%
copy from test/mjsunit/override-eval-with-non-function.js
copy to test/preparser/strict-with.js
index aa93b25..a19355e 100644
--- a/test/mjsunit/override-eval-with-non-function.js
+++ b/test/preparser/strict-with.js
@@ -25,12 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// When 'eval' is overridden with a non-function object we should
-// check whether the object is callable.
+// The with statement is not allowed in strict code.
 
-function test() {
-  eval = /foo/;
-  assertEquals("foo", eval("foobar"));
-}
-
-test();
+function foo() {
+  "use strict";
+  with ({}) {}
+}
\ No newline at end of file
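
strict-with.js is the counterpart of nonstrict-with.js earlier in this patch: with is a SyntaxError anywhere inside strict code (strict_mode_with in preparser.expectation), while sloppy code may use it freely, even around a strict function. A short sketch of both sides, assuming it runs as sloppy top-level code:

  // Strict code: with is rejected at parse time.
  var threw = false;
  try {
    eval('"use strict"; with ({}) {}');
  } catch (e) {
    threw = e instanceof SyntaxError;
  }
  console.assert(threw);

  // Sloppy code: with is accepted, and a strict function created inside it
  // still resolves x through the with-object on its scope chain.
  var f;
  with ({x: 42}) {
    f = function () { "use strict"; return x; };
  }
  console.assert(f() === 42);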
diff --git a/test/preparser/testcfg.py b/test/preparser/testcfg.py
index c78d03b..88c06a3 100644
--- a/test/preparser/testcfg.py
+++ b/test/preparser/testcfg.py
@@ -27,17 +27,19 @@
 
 import test
 import os
-from os.path import join, dirname, exists
+from os.path import join, dirname, exists, isfile
 import platform
 import utils
-
+import re
 
 class PreparserTestCase(test.TestCase):
 
-  def __init__(self, root, path, executable, mode, context):
+  def __init__(self, root, path, executable, mode, throws, context, source):
     super(PreparserTestCase, self).__init__(context, path, mode)
     self.executable = executable
     self.root = root
+    self.throws = throws
+    self.source = source
 
   def GetLabel(self):
     return "%s %s %s" % (self.mode, self.path[-2], self.path[-1])
@@ -45,9 +47,20 @@
   def GetName(self):
     return self.path[-1]
 
+  def HasSource(self):
+    return self.source is not None
+
+  def GetSource(self):
+    return self.source
+
   def BuildCommand(self, path):
-    testfile = join(self.root, self.GetName()) + ".js"
-    result = [self.executable, testfile]
+    if (self.source is not None):
+      result = [self.executable, "-e", self.source]
+    else:
+      testfile = join(self.root, self.GetName()) + ".js"
+      result = [self.executable, testfile]
+    if (self.throws):
+      result += ['throws'] + self.throws
     return result
 
   def GetCommand(self):
@@ -65,19 +78,77 @@
   def GetBuildRequirements(self):
     return ['preparser']
 
+  def GetExpectations(self):
+    expects_file = join(self.root, 'preparser.expectation')
+    map = {}
+    if exists(expects_file):
+      rule_regex = re.compile("^([\w\-]+)(?::([\w\-]+))?(?::(\d+),(\d+))?$")
+      for line in utils.ReadLinesFrom(expects_file):
+        if (line[0] == '#'): continue
+        rule_match = rule_regex.match(line)
+        if rule_match:
+          expects = []
+          if (rule_match.group(2)):
+            expects = expects + [rule_match.group(2)]
+            if (rule_match.group(3)):
+              expects = expects + [rule_match.group(3), rule_match.group(4)]
+          map[rule_match.group(1)] = expects
+    return map;
+
+  def ParsePythonTestTemplates(self, result, filename,
+                               executable, current_path, mode):
+    pathname = join(self.root, filename + ".pyt")
+    def Test(name, source, expectation):
+      throws = None
+      if (expectation is not None):
+        throws = [expectation]
+      test = PreparserTestCase(self.root,
+                               current_path + [filename, name],
+                               executable,
+                               mode, throws, self.context,
+                               source.replace("\n", " "))
+      result.append(test)
+    def Template(name, source):
+      def MkTest(replacement, expectation):
+        testname = name
+        testsource = source
+        for key in replacement.keys():
+          testname = testname.replace("$"+key, replacement[key]);
+          testsource = testsource.replace("$"+key, replacement[key]);
+        Test(testname, testsource, expectation)
+      return MkTest
+    execfile(pathname, {"Test": Test, "Template": Template})
+
   def ListTests(self, current_path, path, mode, variant_flags):
-    executable = join('obj', 'preparser', mode, 'preparser')
+    executable = 'preparser'
     if utils.IsWindows():
       executable += '.exe'
     executable = join(self.context.buildspace, executable)
+    if not isfile(executable):
+      executable = join('obj', 'preparser', mode, 'preparser')
+      if utils.IsWindows():
+        executable += '.exe'
+      executable = join(self.context.buildspace, executable)
+    expectations = self.GetExpectations()
+    result = []
     # Find all .js files in tests/preparser directory.
     filenames = [f[:-3] for f in os.listdir(self.root) if f.endswith(".js")]
     filenames.sort()
-    result = []
     for file in filenames:
+      throws = None;
+      if (file in expectations):
+        throws = expectations[file]
       result.append(PreparserTestCase(self.root,
                                       current_path + [file], executable,
-                                      mode, self.context))
+                                      mode, throws, self.context, None))
+    # Find all .pyt files in test/preparser directory.
+    filenames = [f[:-4] for f in os.listdir(self.root) if f.endswith(".pyt")]
+    filenames.sort()
+    for file in filenames:
+      # Each file is a python source file to be executed in a specially
+      # created environment (defining the Template and Test functions).
+      self.ParsePythonTestTemplates(result, file,
+                                    executable, current_path, mode)
     return result
 
   def GetTestStatus(self, sections, defs):
@@ -85,6 +156,9 @@
     if exists(status_file):
       test.ReadConfigurationInto(status_file, sections, defs)
 
+  def VariantFlags(self):
+    return [[]];
+
 
 def GetConfiguration(context, root):
   return PreparserTestConfiguration(context, root)
diff --git a/test/sputnik/README b/test/sputnik/README
index 50d721f..3656634 100644
--- a/test/sputnik/README
+++ b/test/sputnik/README
@@ -1,6 +1,6 @@
 To run the sputniktests you must check out the test suite from
 googlecode.com.  The test expectations are currently relative to
-version 94. To get the tests run the following command within
+version 97. To get the tests run the following command within
 v8/test/sputnik/
 
-  svn co http://sputniktests.googlecode.com/svn/trunk/ -r94 sputniktests
+  svn co http://sputniktests.googlecode.com/svn/trunk/ -r97 sputniktests
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
index 6da87ea..868509d 100644
--- a/test/sputnik/sputnik.status
+++ b/test/sputnik/sputnik.status
@@ -28,15 +28,16 @@
 prefix sputnik
 def FAIL_OK = FAIL, OKAY
 
-##################### DELIBERATE INCOMPATIBILITIES #####################
+############################### BUGS ###################################
 
-# 900066: Deleting elements in .arguments should disconnect the
-# element from the actual arguments.  Implementing this is nontrivial
-# and we have no indication that anything on the web depends on this
-# feature.
-S13_A13_T1: FAIL_OK
-S13_A13_T2: FAIL_OK
-S13_A13_T3: FAIL_OK
+# A bound function should fail on access to 'caller' and 'arguments'.
+S15.3.4.5_A1: FAIL
+S15.3.4.5_A2: FAIL
+
+# '__proto__' should be treated as a normal property in JSON.
+S15.12.2_A1: FAIL
+
+##################### DELIBERATE INCOMPATIBILITIES #####################
 
 # This tests precision of trigonometric functions.  We're slightly off
 # from the implementation in libc (~ 1e-17) but it's not clear if we
@@ -48,25 +49,13 @@
 # We allow calls to regexp exec() with no arguments to fail for
 # compatibility reasons.
 S15.10.6.2_A1_T16: FAIL_OK
+S15.10.6.2_A12: FAIL_OK
 S15.10.6.3_A1_T16: FAIL_OK
 
-# We allow regexps to be called as functions for compatibility reasons.
-S15.10.7_A1_T1: FAIL_OK
-S15.10.7_A1_T2: FAIL_OK
-
 # We are silent in some regexp cases where the spec wants us to give
 # errors, for compatibility.
 S15.10.2.11_A1_T2: FAIL
 S15.10.2.11_A1_T3: FAIL
-S15.10.4.1_A5_T1: FAIL
-S15.10.4.1_A5_T2: FAIL
-S15.10.4.1_A5_T3: FAIL
-S15.10.4.1_A5_T4: FAIL
-S15.10.4.1_A5_T5: FAIL
-S15.10.4.1_A5_T6: FAIL
-S15.10.4.1_A5_T7: FAIL
-S15.10.4.1_A5_T8: FAIL
-S15.10.4.1_A5_T9: FAIL
 
 # We are more lenient in which string character escapes we allow than
 # the spec (7.8.4 p. 19) wants us to be.  This is for compatibility.
@@ -101,22 +90,21 @@
 S7.8.4_A4.3_T5: FAIL_OK
 S7.8.4_A7.2_T5: FAIL_OK
 
-# We allow some keywords to be used as identifiers
-S7.5.3_A1.26: FAIL_OK
+# Sputnik expects unicode escape sequences in RegExp flags to be interpreted.
+# The specification requires them to be passed uninterpreted to the RegExp
+# constructor. We now implement that.
+S7.8.5_A3.1_T7: FAIL_OK
+S7.8.5_A3.1_T8: FAIL_OK
+S7.8.5_A3.1_T9: FAIL_OK
+
+# We allow some keywords to be used as identifiers.
+S7.5.3_A1.15: FAIL_OK
 S7.5.3_A1.18: FAIL_OK
-S7.5.3_A1.27: FAIL_OK
-S7.5.3_A1.5: FAIL_OK
-S7.5.3_A1.9: FAIL_OK
-S7.5.3_A1.10: FAIL_OK
-S7.5.3_A1.11: FAIL_OK
-# native
-S7.5.3_A1.20: FAIL_OK
 S7.5.3_A1.21: FAIL_OK
 S7.5.3_A1.22: FAIL_OK
 S7.5.3_A1.23: FAIL_OK
-S7.5.3_A1.15: FAIL_OK
 S7.5.3_A1.24: FAIL_OK
-S7.5.3_A1.16: FAIL_OK
+S7.5.3_A1.26: FAIL_OK
 
 # This checks for non-262 behavior
 S12.6.4_A14_T1: PASS || FAIL_OK
@@ -146,16 +134,8 @@
 S12.6.4_D1: PASS || FAIL_OK
 
 # We allow function declarations within statements
-S12.5_A9_T1: FAIL_OK
-S12.5_A9_T2: FAIL_OK
-# S12.6.2_A13_T3: FAIL_OK
-# S12.5_A9_T3: FAIL_OK
-# S12.6.1_A13_T3: FAIL_OK
-S12.1_A1: FAIL_OK
 S12.6.2_A13_T1: FAIL_OK
 S12.6.2_A13_T2: FAIL_OK
-S12.6.1_A13_T1: FAIL_OK
-S12.6.1_A13_T2: FAIL_OK
 S12.6.4_A13_T1: FAIL_OK
 S12.6.4_A13_T2: FAIL_OK
 #S12.6.4_A13_T3: FAIL_OK
@@ -179,6 +159,22 @@
 S9.9_A1: FAIL_OK
 S9.9_A2: FAIL_OK
 
+# These tests call builtins without an explicit receiver, which means that
+# undefined is passed to the builtin. The tests expect the global object to
+# be passed, which was true in ES3 but not in ES5.
+S11.1.1_A2: FAIL_OK
+S15.5.4.4_A1_T3: FAIL_OK
+S15.5.4.5_A1_T3: FAIL_OK
+S15.5.4.6_A1_T3: FAIL_OK
+S15.5.4.7_A1_T3: FAIL_OK
+S15.5.4.8_A1_T3: FAIL_OK
+S15.5.4.9_A1_T3: FAIL_OK
+S15.5.4.10_A1_T3: FAIL_OK
+S15.5.4.11_A1_T3: FAIL_OK
+S15.5.4.12_A1_T3: FAIL_OK
+S15.5.4.13_A1_T3: FAIL_OK
+S15.5.4.14_A1_T3: FAIL_OK
+S15.5.4.15_A1_T3: FAIL_OK
 
 ##################### SKIPPED TESTS #####################
 
@@ -189,26 +185,6 @@
 # V8 Bug: http://code.google.com/p/v8/issues/detail?id=1196
 S8.7_A5_T2: FAIL
 
-# V8 bugs: http://code.google.com/p/v8/issues/detail?id=1198
-# V8 should not wrap this when calling builtin functions
-S15.2.4.3_A12: FAIL
-S15.2.4.7_A13: FAIL
-# Object.prototype.toString
-S15.2.4.2_A12: FAIL
-S15.2.4.2_A13: FAIL
-# Object.prototype.toLocaleString
-S15.2.4.3_A13: FAIL
-S15.2.4.4_A13: FAIL
-S15.2.4.4_A12: FAIL
-# Object.prototype.propertyIsEnumerable
-S15.2.4.7_A12: FAIL
-# Object.prototype.hasOwnProperty
-S15.2.4.5_A12: FAIL
-S15.2.4.5_A13: FAIL
-# Object.prototype.isPrototypeOf
-S15.2.4.6_A13: FAIL
-S15.2.4.6_A12: FAIL
-
 # Invalid test case (recent change adding var changes semantics)
 S8.3_A1_T1: FAIL
 # Test bug: http://code.google.com/p/sputniktests/issues/detail?id=35
@@ -265,11 +241,6 @@
 S15.9.5.8_A1_T2: FAIL_OK
 S15.9.5.9_A1_T2: FAIL_OK
 
-# Regexps have type "function", not "object".
-S11.4.3_A3.6: FAIL_OK
-S15.10.7_A3_T2: FAIL_OK
-S15.10.7_A3_T1: FAIL_OK
-
 [ $arch == arm ]
 
 # BUG(3251225): Tests that timeout with --nocrankshaft.
diff --git a/test/sputnik/testcfg.py b/test/sputnik/testcfg.py
index c1e3c1b..1032c13 100644
--- a/test/sputnik/testcfg.py
+++ b/test/sputnik/testcfg.py
@@ -57,7 +57,7 @@
 
   def AfterRun(self, result):
     # Dispose the temporary file if everything looks okay.
-    if not result.HasPreciousOutput(): self.tmpfile.Dispose()
+    if result is None or not result.HasPreciousOutput(): self.tmpfile.Dispose()
     self.tmpfile = None
 
   def GetCommand(self):
@@ -101,7 +101,7 @@
     return result
 
   def GetBuildRequirements(self):
-    return ['sample', 'sample=shell']
+    return ['d8']
 
   def GetTestStatus(self, sections, defs):
     status_file = join(self.root, 'sputnik.status')
diff --git a/test/test262/README b/test/test262/README
index 6d9e56e..ea6b4a7 100644
--- a/test/test262/README
+++ b/test/test262/README
@@ -4,11 +4,11 @@
 
   http://hg.ecmascript.org/tests/test262
 
-at revision 62 as 'data' in this directory.  Using later version
+at revision 128 as 'data' in this directory.  Using a later version
 may be possible but the tests are only known to pass (and indeed run)
 with that revision.
 
-hg clone -r 62 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 128 http://hg.ecmascript.org/tests/test262 data
 
 If you do update to a newer revision you may have to change the test
 harness adapter code since it uses internal functionality from the
diff --git a/test/test262/harness-adapt.js b/test/test262/harness-adapt.js
index bc10a9d..52b5de7 100644
--- a/test/test262/harness-adapt.js
+++ b/test/test262/harness-adapt.js
@@ -43,7 +43,7 @@
   }
 
   Test262Error.prototype.toString = function() {
-    return this.result + " " + error;
+    return this.result + " " + this.error;
   }
 
   function registerTest(test) {
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 754984d..8cee210 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -46,24 +46,12 @@
 # Unanalyzed failures which may be bugs or deliberate differences
 #
 
-# BUG: 7.6 - SyntaxError expected: reserved words used as Identifier
-#      Names in UTF8: class (class)
-7.6-30: FAIL
-# BUG: 7.6 - SyntaxError expected: reserved words used as Identifier
-#      Names in UTF8: extends (extends)
-7.6-31: FAIL
-# BUG: 7.6 - SyntaxError expected: reserved words used as Identifier
-#      Names in UTF8: \u0065\u006e\u0075\u006d (enum)
-7.6-32: FAIL
-# BUG: 7.6 - SyntaxError expected: reserved words used as Identifier
-#      Names in UTF8: \u0073uper (super)
-7.6-33: FAIL
-# BUG: 7.6 - SyntaxError expected: reserved words used as Identifier
-#      Names in UTF8: expor\u0074 (export)
-7.6-35: FAIL
-# BUG: 7.6 - SyntaxError expected: reserved words used as Identifier
-#      Names in UTF8: \u0069\u006d\u0070\u006f\u0072\u0074 (import)
-7.6-36: FAIL
+# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
+#      Property of the Global Object under strict mode (NaN)
+10.2.1.1.3-4-16-s: FAIL
+# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
+#      Property of the Global Object under strict mode (undefined)
+10.2.1.1.3-4-18-s: FAIL
 # Invalid test: https://bugs.ecmascript.org/show_bug.cgi?id=76
 10.4.2-2-c-1: FAIL
 # BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
@@ -93,6 +81,12 @@
 # BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
 #      enforced when using Less-than-or-equal operator: valueOf <= valueOf
 11.8.3-5: FAIL
+# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
+#      in strict mode (Global.undefined)
+11.13.1-4-27-s: FAIL
+# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
+#      in strict mode (Global.Infinity)
+11.13.1-4-3-s: FAIL
 # BUG: Global.NaN is a data property with default attribute values
 15.1.1.1-0: FAIL
 # BUG: Global.Infinity is a data property with default attribute values
@@ -123,35 +117,6 @@
 # BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
 #      for properties on built-ins (RegExp.prototype.multiline)
 15.2.3.3-4-215: FAIL
-# Bug? Object.create - 'set' property of one property in 'Properties'
-#      is not present (8.10.5 step 8)
-# V8 throws.
-15.2.3.5-4-267: FAIL
-# Bug? Object.create - 'set' property of one property in 'Properties'
-#      is undefined (8.10.5 step 8.b)
-# V8 throws.
-15.2.3.5-4-292: FAIL
-# Bug? Object.defineProperty - 'set' property in 'Attributes' is not
-#      present (8.10.5 step 8)
-# V8 throws.
-15.2.3.6-3-236: FAIL
-# Bug? Object.defineProperty - 'set' property in 'Attributes' is own
-#      accessor property without a get function (8.10.5 step 8.a)
-# V8 throws.
-15.2.3.6-3-245: FAIL
-# Bug? Object.defineProperty - 'set' property in 'Attributes' is own
-#      accessor property(without a get function) that overrides an inherited
-#      accessor property (8.10.5 step 8.a)
-# V8 throws.
-15.2.3.6-3-246: FAIL
-# Bug? Object.defineProperty - 'set' property in 'Attributes' is an
-#      inherited accessor property without a get function (8.10.5 step 8.a)
-# V8 throws.
-15.2.3.6-3-247: FAIL
-# Bug? Object.defineProperty - value of 'set' property in 'Attributes'
-#      is undefined (8.10.5 step 8.b)
-# V8 throws.
-15.2.3.6-3-261: FAIL
 # Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
 #      property to true successfully when [[Enumerable]] attribute of 'name'
 #      is false and [[Configurable]] attribute of 'name' is true,  the 'desc'
@@ -464,11 +429,6 @@
 #      values of 'name' (15.4.5.1 step 4.c)
 15.2.3.6-4-209: FAIL
 # Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, 'name' is accessor property and assignment to the accessor
-#      property, fails to convert accessor property from accessor property to
-#      data property (15.4.5.1 step 4.c)
-15.2.3.6-4-243-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
 #      property, name is accessor property and 'desc' is accessor descriptor,
 #      test updating the [[Enumerable]] attribute value of 'name' (15.4.5.1 step
 #      4.c)
@@ -493,18 +453,6 @@
 #      (15.4.5.1 step 4.e.ii)
 15.2.3.6-4-276: FAIL
 # Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own property which is defined in both
-#      [[ParameterMap]] of 'O' and 'O', and is deleted afterwards, and 'desc' is
-#      data descriptor, test 'name' is redefined in 'O' with all correct
-#      attribute values (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.6-4-289-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own property which is defined in both
-#      [[ParameterMap]] of 'O' and 'O', is deleted afterwards, and 'desc' is
-#      accessor descriptor, test 'name' is redefined in 'O' with all correct
-#      attribute values (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.6-4-290-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
 #      formal parameters, 'name' is own accessor property of 'O' which is also
 #      defined in [[ParameterMap]] of 'O', and 'desc' is accessor descriptor,
 #      test updating multiple attribute values of 'name' (10.6
@@ -516,6 +464,19 @@
 #      step 3)
 15.2.3.6-4-291: FAIL
 # Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
+#      formal parameters, 'name' is own property of 'O' which is also defined in
+#      [[ParameterMap]] of 'O', and 'desc' is data descriptor, test updating
+#      multiple attribute values of 'name' (10.6 [[DefineOwnProperty]] step 3
+#      and 5.b)
+15.2.3.6-4-292-1: FAIL
+# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
+#      formal parameters, 'name' is own data property of 'O' which is also
+#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
+#      updating the [[Value]] attribute value of 'name' which is defined as
+#      unwritable and non-configurable (10.6 [[DefineOwnProperty]] step 4 and
+#      step 5b)
+15.2.3.6-4-293-2: FAIL
+# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
 #      formal parameters, 'name' is own data property of 'O' which is also
 #      defined in [[ParameterMap]] of 'O', test TypeError is not thrown when
 #      updating the [[Value]] attribute value of 'name' which is defined as
@@ -523,39 +484,44 @@
 #      5.b)
 15.2.3.6-4-293-3: FAIL
 # Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own accessor property of 'O' which is also
+#      formal parameters, 'name' is own data property of 'O' which is also
 #      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Get]] attribute value of 'name' which is defined as
-#      non-configurable (10.6 [[DefineOwnProperty]] step 4 and step 5a)
-15.2.3.6-4-297-1: FAIL
+#      updating the [[Writable]] attribute value of 'name' which is defined as
+#      non-configurable (10.6 [[DefineOwnProperty]] step 4 and 5b)
+15.2.3.6-4-294-1: FAIL
 # Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own accessor property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Set]] attribute value of 'name' which is defined as
-#      non-configurable (10.6 [[DefineOwnProperty]] steps 4 and 5a)
-15.2.3.6-4-298-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own accessor property of 'O' which is also
+#      formal parameters, 'name' is own data property of 'O' which is also
 #      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
 #      updating the [[Enumerable]] attribute value of 'name' which is defined as
-#      non-configurable (10.6 [[DefineOwnProperty]] steps 4 and 5a)
-15.2.3.6-4-299-1: FAIL
+#      non-configurable (10.6 [[DefineOwnProperty]] step 4 and step 5b)
+15.2.3.6-4-295-1: FAIL
 # Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is an index named property of 'O', and 'desc'
-#      is data descriptor, test 'name' is defined in 'O' with all correct
-#      attribute values (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.6-4-301-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is an index named property of 'O' but not
-#      defined in [[ParameterMap]] of 'O', and 'desc' is accessor descriptor,
-#      test 'name' is defined in 'O' with all correct attribute values (10.6
-#      [[DefineOwnProperty]] step 3 and step 5a)
-15.2.3.6-4-302-1: FAIL
+#      formal parameters, 'name' is own data property of 'O' which is also
+#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
+#      updating the [[Configurable]] attribute value of 'name' which is defined
+#      as non-configurable (10.6 [[DefineOwnProperty]] step 4 and step 5b)
+15.2.3.6-4-296-1: FAIL
 # Bug? Object.defineProperty - 'O' is an Arguments object, 'name' is an index
 #      named accessor property of 'O' but not defined in [[ParameterMap]] of
 #      'O', and 'desc' is accessor descriptor, test updating multiple attribute
 #      values of 'name' (10.6 [[DefineOwnProperty]] step 3)
 15.2.3.6-4-303: FAIL
+# Bug? ES5 Attributes - indexed property 'P' with attributes [[Writable]]: true,
+#      [[Enumerable]]: true, [[Configurable]]: false is writable using simple
+#      assignment, 'O' is an Arguments object
+15.2.3.6-4-333-11: FAIL
+# Bug? ES5 Attributes - Updating indexed data property 'P' whose attributes are
+#      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
+#      accessor property, 'A' is an Array object (8.12.9 - step 9.b.i)
+15.2.3.6-4-360-1: FAIL
+# Bug? ES5 Attributes - Updating indexed data property 'P' whose attributes are
+#      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
+#      accessor property, 'O' is an Arguments object (8.12.9 - step 9.b.i)
+15.2.3.6-4-360-6: FAIL
+# Bug? ES5 Attributes - Updating indexed data property 'P' whose attributes are
+#      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
+#      accessor property, 'O' is the global object (8.12.9 - step 9.b.i)
+15.2.3.6-4-360-7: FAIL
 # Bug? ES5 Attributes - [[Value]] attribute of data property is the activex host
 #      object
 15.2.3.6-4-401: FAIL
@@ -575,17 +541,6 @@
 #      prototype has a property with the same name and [[Writable]] set to
 #      false(Function.prototype.bind)
 15.2.3.6-4-420: FAIL
-# Bug? ES5 Attributes - Fail to add property into object (Number instance)
-15.2.3.6-4-581: FAIL
-# Bug? ES5 Attributes - Fail to update value of property into of [[Proptotype]]
-#      internal property (JSON)
-15.2.3.6-4-586: FAIL
-# Bug? ES5 Attributes - Fail to update value of property of [[Proptotype]]
-#      internal property (Object.create)
-15.2.3.6-4-591: FAIL
-# Bug? ES5 Attributes - Fail to update value of property into of [[Proptotype]]
-#      internal property (Function.prototype.bind)
-15.2.3.6-4-596: FAIL
 # Bug? ES5 Attributes - all attributes in Array.prototype.indexOf are correct
 15.2.3.6-4-612: FAIL
 # Bug? ES5 Attributes - all attributes in Object.lastIndexOf are correct
@@ -611,20 +566,6 @@
 15.2.3.6-4-623: FAIL
 # Bug? ES5 Attributes - all attributes in Date.prototype.toJSON are correct
 15.2.3.6-4-624: FAIL
-# Bug? Object.defineProperties - argument 'Properties' is an Error object
-#      props.description = obj1;
-15.2.3.7-2-15: FAIL
-# Bug? Object.defineProperties - 'Properties' is an Error object which
-#      implements its own [[Get]] method to get enumerable own property
-#      props.description = obj1;
-15.2.3.7-5-a-16: FAIL
-# Bug? Object.defineProperties - 'set' property of 'descObj' is not present
-#      (8.10.5 step 8)
-15.2.3.7-5-b-227: FAIL
-# Bug? Object.defineProperties - 'descObj' is an Error object which implements
-#      its own [[Get]] method to get 'set' property (8.10.5 step 8.a)
-#      descObj.description = { value: 11 };
-15.2.3.7-5-b-248: FAIL
 # Bug? Object.defineProperties - 'O' is an Array, test the length property of
 #      'O' is own data property (15.4.5.1 step 1)
 15.2.3.7-6-a-112: FAIL
@@ -922,23 +863,48 @@
 #      'desc' is accessor descriptor, test updating multiple attribute values of
 #      'P' (10.6 [[DefineOwnProperty]] step 3)
 15.2.3.7-6-a-280: FAIL
+# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
+#      property of 'O' which is also defined in [[ParameterMap]] of 'O', and
+#      'desc' is data descriptor, test updating multiple attribute values of 'P'
+#      (10.6 [[DefineOwnProperty]] step 3)
+15.2.3.7-6-a-281: FAIL
+# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
+#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
+#      TypeError is thrown when updating the [[Value]] attribute value of 'P'
+#      whose writable and configurable attributes are false (10.6
+#      [[DefineOwnProperty]] step 4)
+15.2.3.7-6-a-282: FAIL
+# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
+#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
+#      TypeError is thrown when updating the [[Writable]] attribute value of 'P'
+#      which is defined as non-configurable (10.6 [[DefineOwnProperty]] step 4)
+15.2.3.7-6-a-283: FAIL
+# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
+#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
+#      TypeError is thrown when updating the [[Enumerable]] attribute value of
+#      'P' which is defined as non-configurable (10.6 [[DefineOwnProperty]] step
+#      4)
+15.2.3.7-6-a-284: FAIL
+# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
+#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
+#      TypeError is thrown when updating the [[Configurable]] attribute value of
+#      'P' which is defined as non-configurable (10.6 [[DefineOwnProperty]] step
+#      4)
+15.2.3.7-6-a-285: FAIL
 # Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is an array
 #      index named accessor property of 'O' but not defined in [[ParameterMap]]
 #      of 'O', and 'desc' is accessor descriptor, test updating multiple
 #      attribute values of 'P' (10.6 [[DefineOwnProperty]] step 3)
 15.2.3.7-6-a-292: FAIL
-# Bug? Object.prototype.toString - '[object Undefined]' will be returned when
-#      'this' value is undefined
-15.2.4.2-1-1: FAIL
-# Bug? Object.prototype.toString - '[object Undefined]' will be returned when
-#      'this' value is undefined
-15.2.4.2-1-2: FAIL
-# Bug? Object.prototype.toString - '[object Null]' will be returned when 'this'
-#      value is null
-15.2.4.2-2-1: FAIL
-# Bug? Object.prototype.toString - '[object Null]' will be returned when 'this'
-#      value is null
-15.2.4.2-2-2: FAIL
+# Bug? Strict Mode - 'this' value is a string which cannot be converted to
+#      wrapper objects when the function is called with an array of arguments
+15.3.4.3-1-s: FAIL
+# Bug? Strict Mode - 'this' value is a number which cannot be converted to
+#      wrapper objects when the function is called with an array of arguments
+15.3.4.3-2-s: FAIL
+# Bug? Strict Mode - 'this' value is a boolean which cannot be converted to
+#      wrapper objects when the function is called with an array of arguments
+15.3.4.3-3-s: FAIL
 # Bug? Function.prototype.bind - [[Get]] attribute of 'caller' property in 'F'
 #      is thrower
 15.3.4.5-20-2: FAIL
@@ -951,16 +917,6 @@
 # Bug? Function.prototype.bind - [[Set]] attribute of 'arguments' property in
 #      'F' is thrower
 15.3.4.5-21-3: FAIL
-# Bug? Array.prototype.concat will concat an Array when index property
-#      (read-only) exists in Array.prototype (Step 5.c.i)
-15.4.4.4-5-c-i-1: FAIL
-# Bug? Array.prototype.indexOf applied to undefined throws a TypeError
-15.4.4.14-1-1: FAIL
-# Bug? Array.prototype.indexOf applied to null throws a TypeError
-15.4.4.14-1-2: FAIL
-# Bug? Array.prototype.indexOf - side effects produced by step 1 are visible
-#      when an exception occurs
-15.4.4.14-5-28: FAIL
 # Bug? Array.prototype.indexOf - decreasing length of array does not delete
 #      non-configurable properties
 15.4.4.14-9-a-19: FAIL
@@ -983,13 +939,6 @@
 # Bug? Array.prototype.indexOf - terminates iteration on unhandled exception on
 #      an Array
 15.4.4.14-9-b-i-30: FAIL
-# Bug? Array.prototype.lastIndexOf applied to undefined throws a TypeError
-15.4.4.15-1-1: FAIL
-# Bug? Array.prototype.lastIndexOf applied to null throws a TypeError
-15.4.4.15-1-2: FAIL
-# Bug? Array.prototype.lastIndexOf - side effects produced by step 1 are visible
-#      when an exception occurs
-15.4.4.15-5-28: FAIL
 # Bug? Array.prototype.lastIndexOf - deleting property of prototype causes
 #      prototype index property not to be visited on an Array
 15.4.4.15-8-a-14: FAIL
@@ -1011,6 +960,12 @@
 # Bug? Array.prototype.lastIndexOf terminates iteration on unhandled exception
 #      on an Array
 15.4.4.15-8-b-i-30: FAIL
+# Bug? Array.prototype.every applied to boolean primitive
+15.4.4.16-1-3: FAIL
+# Bug? Array.prototype.every applied to number primitive
+15.4.4.16-1-5: FAIL
+# Bug? Array.prototype.every applied to string primitive
+15.4.4.16-1-7: FAIL
 # Bug? Array.prototype.every - side effects produced by step 2 are visible when
 #      an exception occurs
 15.4.4.16-4-8: FAIL
@@ -1048,6 +1003,12 @@
 # Bug? Array.prototype.every - element changed by getter on previous iterations
 #      is observed on an Array
 15.4.4.16-7-c-i-28: FAIL
+# Bug? Array.prototype.some applied to boolean primitive
+15.4.4.17-1-3: FAIL
+# Bug? Array.prototype.some applied to number primitive
+15.4.4.17-1-5: FAIL
+# Bug? Array.prototype.some applied to string primitive
+15.4.4.17-1-7: FAIL
 # Bug? Array.prototype.some - side effects produced by step 2 are visible when
 #      an exception occurs
 15.4.4.17-4-8: FAIL
@@ -1085,6 +1046,12 @@
 # Bug? Array.prototype.some - element changed by getter on previous iterations
 #      is observed on an Array
 15.4.4.17-7-c-i-28: FAIL
+# Bug? Array.prototype.forEach applied to boolean primitive
+15.4.4.18-1-3: FAIL
+# Bug? Array.prototype.forEach applied to number primitive
+15.4.4.18-1-5: FAIL
+# Bug? Array.prototype.forEach applied to string primitive
+15.4.4.18-1-7: FAIL
 # Bug? Array.prototype.forEach - side effects produced by step 2 are visible
 #      when an exception occurs
 15.4.4.18-4-8: FAIL
@@ -1122,9 +1089,12 @@
 # Bug? Array.prototype.forEach - element changed by getter on previous
 #      iterations is observed on an Array
 15.4.4.18-7-c-i-28: FAIL
-# Bug? Array.prototype.map - applied to Array-like object, 'length' is an own
-#      data property that overrides an inherited accessor property
-15.4.4.19-2-5: FAIL
+# Bug? Array.prototype.map - applied to boolean primitive
+15.4.4.19-1-3: FAIL
+# Bug? Array.prototype.map - applied to number primitive
+15.4.4.19-1-5: FAIL
+# Bug? Array.prototype.map - applied to string primitive
+15.4.4.19-1-7: FAIL
 # Bug? Array.prototype.map - Side effects produced by step 2 are visible when an
 #      exception occurs
 15.4.4.19-4-8: FAIL
@@ -1162,33 +1132,12 @@
 # Bug? Array.prototype.map - element changed by getter on previous iterations is
 #      observed on an Array
 15.4.4.19-8-c-i-28: FAIL
-# Bug? Array.prototype.filter - value of 'length' is a number (value is
-#      negative)
-15.4.4.20-3-7: FAIL
-# Bug? Array.prototype.filter - value of 'length' is a number (value is
-#      Infinity)
-# V8 timeout
-15.4.4.20-3-8: SKIP
-# Bug? Array.prototype.filter - 'length' is a string containing a negative
-#      number
-15.4.4.20-3-12: FAIL
-# Bug? Array.prototype.filter - 'length' is a string containing a decimal number
-15.4.4.20-3-13: FAIL
-# Bug? Array.prototype.filter - 'length' is a string containing +/-Infinity
-15.4.4.20-3-14: SKIP
-# Bug? Array.prototype.filter - value of 'length' is a positive non-integer,
-#      ensure truncation occurs in the proper direction
-# V8 timeout
-15.4.4.20-3-24: FAIL
-# Bug? Array.prototype.filter - value of 'length' is a negative non-integer,
-#      ensure truncation occurs in the proper direction
-15.4.4.20-3-25: FAIL
-# Bug? Array.prototype.filter - value of 'length' is boundary value (2^32)
-# V8 timeout
-15.4.4.20-3-28: SKIP
-# Bug? Array.prototype.filter - value of 'length' is boundary value (2^32 + 1)
-# V8 timeout
-15.4.4.20-3-29: SKIP
+# Bug? Array.prototype.filter applied to boolean primitive
+15.4.4.20-1-3: FAIL
+# Bug? Array.prototype.filter applied to number primitive
+15.4.4.20-1-5: FAIL
+# Bug? Array.prototype.filter applied to string primitive
+15.4.4.20-1-7: FAIL
 # Bug? Array.prototype.filter - side effects produced by step 2 are visible when
 #      an exception occurs
 15.4.4.20-4-8: FAIL
@@ -1207,9 +1156,6 @@
 # Bug? Array.prototype.filter - properties can be added to prototype after
 #      current position are visited on an Array-like object
 15.4.4.20-9-b-6: FAIL
-# Bug? Array.prototype.filter - properties can be added to prototype after
-#      current position are visited on an Array
-15.4.4.20-9-b-7: FAIL
 # Bug? Array.prototype.filter - decreasing length of array does not delete
 #      non-configurable properties
 15.4.4.20-9-b-16: FAIL
@@ -1241,33 +1187,12 @@
 # Bug? Array.prototype.filter - element changed by getter on previous iterations
 #      is observed on an Array
 15.4.4.20-9-c-i-28: FAIL
-# Bug? Array.prototype.reduce - value of 'length' is a number (value is
-#      negative)
-15.4.4.21-3-7: FAIL
-# Bug? Array.prototype.reduce - value of 'length' is a number (value is
-#      Infinity)
-# V8 timeout.
-15.4.4.21-3-8: SKIP
-# Bug? Array.prototype.reduce - 'length' is a string containing a negative
-#      number
-15.4.4.21-3-12: FAIL
-# Bug? Array.prototype.reduce - 'length' is a string containing a decimal number
-15.4.4.21-3-13: FAIL
-# Bug? Array.prototype.reduce - 'length' is a string containing +/-Infinity
-# V8 timeout.
-15.4.4.21-3-14: SKIP
-# Bug? Array.prototype.reduce - value of 'length' is a positive non-integer,
-#      ensure truncation occurs in the proper direction
-15.4.4.21-3-24: FAIL
-# Bug? Array.prototype.reduce - value of 'length' is a negative non-integer,
-#      ensure truncation occurs in the proper direction
-15.4.4.21-3-25: FAIL
-# Bug? Array.prototype.reduce - value of 'length' is boundary value (2^32)
-# V8 timeout.
-15.4.4.21-3-28: SKIP
-# Bug? Array.prototype.reduce - value of 'length' is boundary value (2^32 + 1)
-# V8 timeout.
-15.4.4.21-3-29: SKIP
+# Bug? Array.prototype.reduce applied to boolean primitive
+15.4.4.21-1-3: FAIL
+# Bug? Array.prototype.reduce applied to number primitive
+15.4.4.21-1-5: FAIL
+# Bug? Array.prototype.reduce applied to string primitive
+15.4.4.21-1-7: FAIL
 # Bug? Array.prototype.reduce - side effects produced by step 2 are visible when
 #      an exception occurs
 15.4.4.21-4-8: FAIL
@@ -1289,36 +1214,12 @@
 # Bug? Array.prototype.reduce - decreasing length of array does not delete
 #      non-configurable properties
 15.4.4.21-9-b-29: FAIL
-# Bug? Array.prototype.reduceRight - value of 'length' is a number (value is
-#      negative)
-15.4.4.22-3-7: FAIL
-# Bug? Array.prototype.reduceRight - value of 'length' is a number (value is
-#      Infinity)
-# V8 timeout.
-15.4.4.22-3-8: SKIP
-# Bug? Array.prototype.reduceRight - value of 'length' is a string containing a
-#      negative number
-15.4.4.22-3-12: FAIL
-# Bug? Array.prototype.reduceRight - value of 'length' is a string containing a
-#      decimal number
-15.4.4.22-3-13: FAIL
-# Bug? Array.prototype.reduceRight - value of 'length' is a string containing
-#      +/-Infinity
-# V8 timeout.
-15.4.4.22-3-14: SKIP
-# Bug? Array.prototype.reduceRight - value of 'length' is a positive
-#      non-integer, ensure truncation occurs in the proper direction
-15.4.4.22-3-24: FAIL
-# Bug? Array.prototype.reduceRight - value of 'length' is a negative
-#      non-integer, ensure truncation occurs in the proper direction
-15.4.4.22-3-25: FAIL
-# Bug? Array.prototype.reduceRight - value of 'length' is boundary value (2^32)
-# V8 timeout.
-15.4.4.22-3-28: SKIP
-# Bug? Array.prototype.reduceRight - value of 'length' is boundary value (2^32 +
-#      1)
-# V8 timeout.
-15.4.4.22-3-29: SKIP
+# Bug? Array.prototype.reduceRight applied to boolean primitive
+15.4.4.22-1-3: FAIL
+# Bug? Array.prototype.reduceRight applied to number primitive
+15.4.4.22-1-5: FAIL
+# Bug? Array.prototype.reduceRight applied to string primitive
+15.4.4.22-1-7: FAIL
 # Bug? Array.prototype.reduceRight - side effects produced by step 2 are visible
 #      when an exception occurs
 15.4.4.22-4-8: FAIL
@@ -1378,10 +1279,6 @@
 # Bug? Array.prototype.reduceRight - modifications to length will change number
 #      of iterations
 15.4.4.22-9-9: FAIL
-# Bug? String.prototype.trim throws TypeError when string is undefined
-15.5.4.20-1-1: FAIL
-# Bug? String.prototype.trim throws TypeError when string is null
-15.5.4.20-1-2: FAIL
 # Bug? String.prototype.trim - 'S' is a string with all WhiteSpace
 15.5.4.20-3-2: FAIL
 # Bug? String.prototype.trim - 'S' is a string with all union of WhiteSpace and
@@ -1404,9 +1301,6 @@
 # Bug? String.prototype.trim handles whitespace and line terminators
 #      (\\uFEFF\\uFEFF)
 15.5.4.20-4-34: FAIL
-# Bug? Date Time String Format - specified default values will be set for all
-#      optional fields(MM, DD, mm, ss and time zone) when they are absent
-15.9.1.15-1: FAIL
 # Bug? Date.prototype.toISOString - RangeError is thrown when value of date is
 #      Date(1970, 0, -99999999, 0, 0, 0, -1), the time zone is UTC(0)
 15.9.5.43-0-8: FAIL
diff --git a/test/test262/testcfg.py b/test/test262/testcfg.py
index aa1212e..9482046 100644
--- a/test/test262/testcfg.py
+++ b/test/test262/testcfg.py
@@ -111,7 +111,7 @@
     return tests
 
   def GetBuildRequirements(self):
-    return ['sample', 'sample=shell']
+    return ['d8']
 
   def GetTestStatus(self, sections, defs):
     status_file = join(self.root, 'test262.status')
diff --git a/tools/codemap.js b/tools/codemap.js
index 71a99cc..129179e 100644
--- a/tools/codemap.js
+++ b/tools/codemap.js
@@ -79,6 +79,7 @@
  * @param {CodeMap.CodeEntry} codeEntry Code entry object.
  */
 CodeMap.prototype.addCode = function(start, codeEntry) {
+  this.deleteAllCoveredNodes_(this.dynamics_, start, start + codeEntry.size);
   this.dynamics_.insert(start, codeEntry);
 };
 
@@ -92,6 +93,7 @@
  */
 CodeMap.prototype.moveCode = function(from, to) {
   var removedNode = this.dynamics_.remove(from);
+  this.deleteAllCoveredNodes_(this.dynamics_, to, to + removedNode.value.size);
   this.dynamics_.insert(to, removedNode.value);
 };
 
@@ -146,6 +148,23 @@
 /**
  * @private
  */
+CodeMap.prototype.deleteAllCoveredNodes_ = function(tree, start, end) {
+  var to_delete = [];
+  var addr = end - 1;
+  while (addr >= start) {
+    var node = tree.findGreatestLessThan(addr);
+    if (!node) break;
+    var start2 = node.key, end2 = start2 + node.value.size;
+    if (start2 < end && start < end2) to_delete.push(start2);
+    addr = start2 - 1;
+  }
+  for (var i = 0, l = to_delete.length; i < l; ++i) tree.remove(to_delete[i]);
+};
+
+
+/**
+ * @private
+ */
 CodeMap.prototype.isAddressBelongsTo_ = function(addr, node) {
   return addr >= node.key && addr < (node.key + node.value.size);
 };
@@ -211,6 +230,14 @@
 
 
 /**
+ * Returns an array of pairs of all dynamic code entries and their addresses.
+ */
+CodeMap.prototype.getAllDynamicEntriesWithAddresses = function() {
+  return this.dynamics_.exportKeysAndValues();
+};
+
+
+/**
  * Returns an array of all static code entries.
  */
 CodeMap.prototype.getAllStaticEntries = function() {
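
Note on deleteAllCoveredNodes_ above: before inserting or moving a dynamic code
entry, CodeMap now walks backwards from the end of the new range using
findGreatestLessThan and removes every existing entry whose address range
overlaps it, so stale entries never shadow freshly added code. A rough Python
sketch of the same overlap-removal logic, using a plain dict of
{start address: size} as a stand-in for the splay tree (the helper name and the
dict representation are illustrative only):

  def delete_all_covered(entries, start, end):
      # Remove every entry whose [addr, addr + size) range overlaps [start, end).
      to_delete = []
      addr = end - 1
      while addr >= start:
          # Greatest existing start address at or below addr
          # (analogue of findGreatestLessThan on the splay tree).
          candidates = [a for a in entries if a <= addr]
          if not candidates:
              break
          start2 = max(candidates)
          end2 = start2 + entries[start2]
          if start2 < end and start < end2:
              to_delete.append(start2)
          addr = start2 - 1
      for a in to_delete:
          del entries[a]
      return entries

  # Both old entries overlap the new [0x1030, 0x1090) range and are removed.
  delete_all_covered({0x1000: 0x40, 0x1080: 0x20}, 0x1030, 0x1090)  # -> {}
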
diff --git a/tools/gc-nvp-trace-processor.py b/tools/gc-nvp-trace-processor.py
index 2c173ab..511ab2b 100755
--- a/tools/gc-nvp-trace-processor.py
+++ b/tools/gc-nvp-trace-processor.py
@@ -216,16 +216,26 @@
   return row['total_size_before'] - row['total_size_after']
 
 def other_scope(r):
-  return r['pause'] - r['mark'] - r['sweep'] - r['compact']
+  if r['gc'] == 's':
+    # there is no 'other' scope for scavenging collections.
+    return 0
+  return r['pause'] - r['mark'] - r['sweep'] - r['compact'] - r['external']
+
+def scavenge_scope(r):
+  if r['gc'] == 's':
+    return r['pause'] - r['external']
+  return 0
 
 plots = [
   [
     Set('style fill solid 0.5 noborder'),
     Set('style histogram rowstacked'),
     Set('style data histograms'),
-    Plot(Item('Marking', 'mark', lc = 'purple'),
+    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
+         Item('Marking', 'mark', lc = 'purple'),
          Item('Sweep', 'sweep', lc = 'blue'),
          Item('Compaction', 'compact', lc = 'red'),
+         Item('External', 'external', lc = '#489D43'),
          Item('Other', other_scope, lc = 'grey'))
   ],
   [
@@ -314,6 +324,10 @@
     stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
     stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
     stats(out, 'Compact', filter(lambda r: r['compact'] != 0, trace), 'compact')
+    stats(out,
+          'External',
+          filter(lambda r: r['external'] != 0, trace),
+          'external')
     out.write('</table>')
     for chart in charts:
       out.write('<img src="%s">' % chart)
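
Note on the scope accounting above: for scavenges (rows with gc == 's') the whole
pause minus external time is now attributed to a dedicated 'Scavenge' item, while
for full collections the remainder after mark, sweep, compaction and external time
goes into 'Other'. A worked example with made-up field values (the row contents
below are illustrative only; the formulas mirror scavenge_scope and other_scope
from the patch):

  scavenge_row = {'gc': 's', 'pause': 4.0, 'mark': 0, 'sweep': 0,
                  'compact': 0, 'external': 0.5}
  full_gc_row = {'gc': 'ms', 'pause': 30.0, 'mark': 12.0, 'sweep': 6.0,
                 'compact': 8.0, 'external': 1.5}

  def scavenge_scope(r):
      if r['gc'] == 's':
          return r['pause'] - r['external']
      return 0

  def other_scope(r):
      if r['gc'] == 's':
          # There is no 'other' scope for scavenging collections.
          return 0
      return r['pause'] - r['mark'] - r['sweep'] - r['compact'] - r['external']

  assert scavenge_scope(scavenge_row) == 3.5  # 4.0 - 0.5
  assert other_scope(full_gc_row) == 2.5      # 30 - 12 - 6 - 8 - 1.5
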
diff --git a/tools/gcmole/gccause.lua b/tools/gcmole/gccause.lua
new file mode 100644
index 0000000..a6fe542
--- /dev/null
+++ b/tools/gcmole/gccause.lua
@@ -0,0 +1,60 @@
+-- Copyright 2011 the V8 project authors. All rights reserved.
+-- Redistribution and use in source and binary forms, with or without
+-- modification, are permitted provided that the following conditions are
+-- met:
+--
+--     * Redistributions of source code must retain the above copyright
+--       notice, this list of conditions and the following disclaimer.
+--     * Redistributions in binary form must reproduce the above
+--       copyright notice, this list of conditions and the following
+--       disclaimer in the documentation and/or other materials provided
+--       with the distribution.
+--     * Neither the name of Google Inc. nor the names of its
+--       contributors may be used to endorse or promote products derived
+--       from this software without specific prior written permission.
+--
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+-- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+-- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+-- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+-- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-- This is an auxiliary tool that reads the gccauses file generated by
+-- gcmole.lua and prints a tree of the calls that can potentially cause a GC
+-- inside a given function.
+--
+-- Usage: lua tools/gcmole/gccause.lua <function-name-pattern>
+--
+
+assert(loadfile "gccauses")()
+
+local P = ...
+
+local T = {}
+
+local function TrackCause(name, lvl)
+   io.write(("  "):rep(lvl or 0), name, "\n")
+   if GC[name] then
+      local causes = GC[name]
+      for i = 1, #causes do
+	 local f = causes[i]
+	 if not T[f] then
+	    T[f] = true
+	    TrackCause(f, (lvl or 0) + 1)
+	 end
+      end
+   end
+end
+
+for name, _ in pairs(GC) do
+   if name:match(P) then
+      T = {}
+      TrackCause(name)
+   end
+end
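
The script above assumes that the generated gccauses file defines a table GC
mapping each function name to the list of callees that can potentially cause a
GC; it then prints, for every function matching the given pattern, the tree of
such callees, visiting each one only once. A rough Python equivalent of that
traversal (the GC table contents below are made up for illustration):

  import re
  import sys

  GC = {
      "Factory::NewFixedArray": ["Heap::AllocateFixedArray"],
      "Heap::AllocateFixedArray": [],
  }

  def track_cause(name, seen, level=0):
      print("  " * level + name)
      for callee in GC.get(name, []):
          if callee not in seen:
              seen.add(callee)
              track_cause(callee, seen, level + 1)

  pattern = sys.argv[1] if len(sys.argv) > 1 else ".*"
  for name in GC:
      if re.search(pattern, name):
          track_cause(name, set())
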
diff --git a/tools/gcmole/gcmole.cc b/tools/gcmole/gcmole.cc
index ad64c1d..71ba24a 100644
--- a/tools/gcmole/gcmole.cc
+++ b/tools/gcmole/gcmole.cc
@@ -69,6 +69,47 @@
 }
 
 
+struct Resolver {
+  explicit Resolver(clang::ASTContext& ctx)
+      : ctx_(ctx), decl_ctx_(ctx.getTranslationUnitDecl()) {
+  }
+
+  Resolver(clang::ASTContext& ctx, clang::DeclContext* decl_ctx)
+      : ctx_(ctx), decl_ctx_(decl_ctx) {
+  }
+
+  clang::DeclarationName ResolveName(const char* n) {
+    clang::IdentifierInfo* ident = &ctx_.Idents.get(n);
+    return ctx_.DeclarationNames.getIdentifier(ident);
+  }
+
+  Resolver ResolveNamespace(const char* n) {
+    return Resolver(ctx_, Resolve<clang::NamespaceDecl>(n));
+  }
+
+  template<typename T>
+  T* Resolve(const char* n) {
+    if (decl_ctx_ == NULL) return NULL;
+
+    clang::DeclContext::lookup_result result =
+        decl_ctx_->lookup(ResolveName(n));
+
+    clang::DeclContext::lookup_iterator end = result.second;
+    for (clang::DeclContext::lookup_iterator i = result.first;
+         i != end;
+         i++) {
+      if (isa<T>(*i)) return cast<T>(*i);
+    }
+
+    return NULL;
+  }
+
+ private:
+  clang::ASTContext& ctx_;
+  clang::DeclContext* decl_ctx_;
+};
+
+
 class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
  public:
   explicit CalleesPrinter(clang::MangleContext* ctx) : ctx_(ctx) {
@@ -140,12 +181,14 @@
   Callgraph callgraph_;
 };
 
+
 class FunctionDeclarationFinder
     : public clang::ASTConsumer,
       public clang::RecursiveASTVisitor<FunctionDeclarationFinder> {
  public:
   explicit FunctionDeclarationFinder(clang::Diagnostic& d,
-                                     clang::SourceManager& sm)
+                                     clang::SourceManager& sm,
+                                     const std::vector<std::string>& args)
       : d_(d), sm_(sm) { }
 
   virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
@@ -202,100 +245,807 @@
 }
 
 
-static bool IsHandleType(const clang::DeclarationName& handleDeclName,
-                         const clang::QualType& qtype) {
-  const clang::Type* canonical_type =
-      qtype.getTypePtr()->getCanonicalTypeUnqualified().getTypePtr();
+static const int kNoEffect = 0;
+static const int kCausesGC = 1;
+static const int kRawDef = 2;
+static const int kRawUse = 4;
+static const int kAllEffects = kCausesGC | kRawDef | kRawUse;
 
-  if (const clang::TemplateSpecializationType* type =
-          canonical_type->getAs<clang::TemplateSpecializationType>()) {
-    if (clang::TemplateDecl* decl =
-            type->getTemplateName().getAsTemplateDecl()) {
-      if (decl->getTemplatedDecl()->getDeclName() == handleDeclName) {
-        return true;
+class Environment;
+
+class ExprEffect {
+ public:
+  bool hasGC() { return (effect_ & kCausesGC) != 0; }
+  void setGC() { effect_ |= kCausesGC; }
+
+  bool hasRawDef() { return (effect_ & kRawDef) != 0; }
+  void setRawDef() { effect_ |= kRawDef; }
+
+  bool hasRawUse() { return (effect_ & kRawUse) != 0; }
+  void setRawUse() { effect_ |= kRawUse; }
+
+  static ExprEffect None() { return ExprEffect(kNoEffect, NULL); }
+  static ExprEffect NoneWithEnv(Environment* env) {
+    return ExprEffect(kNoEffect, env);
+  }
+  static ExprEffect RawUse() { return ExprEffect(kRawUse, NULL); }
+
+  static ExprEffect Merge(ExprEffect a, ExprEffect b);
+  static ExprEffect MergeSeq(ExprEffect a, ExprEffect b);
+  ExprEffect Define(const std::string& name);
+
+  Environment* env() {
+    return reinterpret_cast<Environment*>(effect_ & ~kAllEffects);
+  }
+
+ private:
+  ExprEffect(int effect, Environment* env)
+      : effect_((effect & kAllEffects) |
+                reinterpret_cast<intptr_t>(env)) { }
+
+  intptr_t effect_;
+};
+
+
+const std::string BAD_EXPR_MSG("Possible problem with evaluation order.");
+const std::string DEAD_VAR_MSG("Possibly dead variable.");
+
+
+class Environment {
+ public:
+  Environment() { }
+
+  static Environment Unreachable() {
+    Environment env;
+    env.live_.set();
+    return env;
+  }
+
+  static Environment Merge(const Environment& l,
+                           const Environment& r) {
+    return Environment(l, r);
+  }
+
+  Environment ApplyEffect(ExprEffect effect) const {
+    Environment out = effect.hasGC() ? Environment() : Environment(*this);
+    if (effect.env() != NULL) out.live_ |= effect.env()->live_;
+    return out;
+  }
+
+  typedef std::map<std::string, int> SymbolTable;
+
+  bool IsAlive(const std::string& name) const {
+    SymbolTable::iterator code = symbol_table_.find(name);
+    if (code == symbol_table_.end()) return false;
+    return live_[code->second];
+  }
+
+  bool Equal(const Environment& env) {
+    return live_ == env.live_;
+  }
+
+  Environment Define(const std::string& name) const {
+    return Environment(*this, SymbolToCode(name));
+  }
+
+  void MDefine(const std::string& name) {
+    live_.set(SymbolToCode(name));
+  }
+
+  static int SymbolToCode(const std::string& name) {
+    SymbolTable::iterator code = symbol_table_.find(name);
+
+    if (code == symbol_table_.end()) {
+      int new_code = symbol_table_.size();
+      symbol_table_.insert(std::make_pair(name, new_code));
+      return new_code;
+    }
+
+    return code->second;
+  }
+
+  static void ClearSymbolTable() {
+    std::vector<Environment*>::iterator end = envs_.end();
+    for (std::vector<Environment*>::iterator i = envs_.begin();
+         i != end;
+         ++i) {
+      delete *i;
+    }
+    envs_.clear();
+    symbol_table_.clear();
+  }
+
+  void Print() const {
+    bool comma = false;
+    std::cout << "{";
+    SymbolTable::iterator end = symbol_table_.end();
+    for (SymbolTable::iterator i = symbol_table_.begin();
+         i != end;
+         ++i) {
+      if (live_[i->second]) {
+        if (comma) std::cout << ", ";
+        std::cout << i->first;
+        comma = true;
       }
     }
-  } else if (const clang::RecordType* type =
-                 canonical_type->getAs<clang::RecordType>()) {
-    if (const clang::ClassTemplateSpecializationDecl* t =
-        dyn_cast<clang::ClassTemplateSpecializationDecl>(type->getDecl())) {
-      if (t->getSpecializedTemplate()->getDeclName() == handleDeclName) {
-        return true;
-      }
+    std::cout << "}";
+  }
+
+  static Environment* Allocate(const Environment& env) {
+    Environment* allocated_env = new Environment(env);
+    envs_.push_back(allocated_env);
+    return allocated_env;
+  }
+
+ private:
+  Environment(const Environment& l, const Environment& r)
+      : live_(l.live_ & r.live_) {
+  }
+
+  Environment(const Environment& l, int code)
+      : live_(l.live_) {
+    live_.set(code);
+  }
+
+  static SymbolTable symbol_table_;
+  static std::vector<Environment* > envs_;
+
+  static const int kMaxNumberOfLocals = 256;
+  std::bitset<kMaxNumberOfLocals> live_;
+
+  friend class ExprEffect;
+  friend class CallProps;
+};
+
+
+class CallProps {
+ public:
+  CallProps() : env_(NULL) { }
+
+  void SetEffect(int arg, ExprEffect in) {
+    if (in.hasGC()) gc_.set(arg);
+    if (in.hasRawDef()) raw_def_.set(arg);
+    if (in.hasRawUse()) raw_use_.set(arg);
+    if (in.env() != NULL) {
+      if (env_ == NULL) env_ = in.env();
+      env_->live_ |= in.env()->live_;
     }
   }
 
-  return false;
+  ExprEffect ComputeCumulativeEffect(bool result_is_raw) {
+    ExprEffect out = ExprEffect::NoneWithEnv(env_);
+    if (gc_.any()) out.setGC();
+    if (raw_use_.any()) out.setRawUse();
+    if (result_is_raw) out.setRawDef();
+    return out;
+  }
+
+  bool IsSafe() {
+    if (!gc_.any()) return true;
+    std::bitset<kMaxNumberOfArguments> raw = (raw_def_ | raw_use_);
+    if (!raw.any()) return true;
+    return gc_.count() == 1 && !((raw ^ gc_).any());
+  }
+
+ private:
+  static const int kMaxNumberOfArguments = 64;
+  std::bitset<kMaxNumberOfArguments> raw_def_;
+  std::bitset<kMaxNumberOfArguments> raw_use_;
+  std::bitset<kMaxNumberOfArguments> gc_;
+  Environment* env_;
+};
+
+
+Environment::SymbolTable Environment::symbol_table_;
+std::vector<Environment* > Environment::envs_;
+
+
+ExprEffect ExprEffect::Merge(ExprEffect a, ExprEffect b) {
+  Environment* a_env = a.env();
+  Environment* b_env = b.env();
+  Environment* out = NULL;
+  if (a_env != NULL && b_env != NULL) {
+    out = Environment::Allocate(*a_env);
+    out->live_ &= b_env->live_;
+  }
+  return ExprEffect(a.effect_ | b.effect_, out);
 }
 
 
-class ExpressionClassifier :
-    public clang::RecursiveASTVisitor<ExpressionClassifier> {
+ExprEffect ExprEffect::MergeSeq(ExprEffect a, ExprEffect b) {
+  Environment* a_env = b.hasGC() ? NULL : a.env();
+  Environment* b_env = b.env();
+  Environment* out = (b_env == NULL) ? a_env : b_env;
+  if (a_env != NULL && b_env != NULL) {
+    out = Environment::Allocate(*b_env);
+    out->live_ |= a_env->live_;
+  }
+  return ExprEffect(a.effect_ | b.effect_, out);
+}
+
+
+ExprEffect ExprEffect::Define(const std::string& name) {
+  Environment* e = env();
+  if (e == NULL) {
+    e = Environment::Allocate(Environment());
+  }
+  e->MDefine(name);
+  return ExprEffect(effect_, e);
+}
+
+
+static std::string THIS ("this");
+
+
+class FunctionAnalyzer {
  public:
-  ExpressionClassifier(clang::DeclarationName handleDeclName,
-                       clang::MangleContext* ctx,
-                       clang::CXXRecordDecl* objectDecl)
-      : handleDeclName_(handleDeclName),
-        ctx_(ctx),
-        objectDecl_(objectDecl) {
+  FunctionAnalyzer(clang::MangleContext* ctx,
+                   clang::DeclarationName handle_decl_name,
+                   clang::CXXRecordDecl* object_decl,
+                   clang::CXXRecordDecl* smi_decl,
+                   clang::Diagnostic& d,
+                   clang::SourceManager& sm,
+                   bool dead_vars_analysis)
+      : ctx_(ctx),
+        handle_decl_name_(handle_decl_name),
+        object_decl_(object_decl),
+        smi_decl_(smi_decl),
+        d_(d),
+        sm_(sm),
+        block_(NULL),
+        dead_vars_analysis_(dead_vars_analysis) {
   }
 
-  bool IsBadExpression(clang::Expr* expr) {
-    has_derefs_ = has_gc_ = false;
-    TraverseStmt(expr);
-    return has_derefs_ && has_gc_;
+
+  // --------------------------------------------------------------------------
+  // Expressions
+  // --------------------------------------------------------------------------
+
+  ExprEffect VisitExpr(clang::Expr* expr, const Environment& env) {
+#define VISIT(type) do {                                                \
+      clang::type* concrete_expr = dyn_cast_or_null<clang::type>(expr); \
+      if (concrete_expr != NULL) {                                      \
+        return Visit##type (concrete_expr, env);                        \
+      }                                                                 \
+    } while(0);
+
+    VISIT(AbstractConditionalOperator);
+    VISIT(AddrLabelExpr);
+    VISIT(ArraySubscriptExpr);
+    VISIT(BinaryOperator);
+    VISIT(BinaryTypeTraitExpr);
+    VISIT(BlockDeclRefExpr);
+    VISIT(BlockExpr);
+    VISIT(CallExpr);
+    VISIT(CastExpr);
+    VISIT(CharacterLiteral);
+    VISIT(ChooseExpr);
+    VISIT(CompoundLiteralExpr);
+    VISIT(CXXBindTemporaryExpr);
+    VISIT(CXXBoolLiteralExpr);
+    VISIT(CXXConstructExpr);
+    VISIT(CXXDefaultArgExpr);
+    VISIT(CXXDeleteExpr);
+    VISIT(CXXDependentScopeMemberExpr);
+    VISIT(CXXNewExpr);
+    VISIT(CXXNoexceptExpr);
+    VISIT(CXXNullPtrLiteralExpr);
+    VISIT(CXXPseudoDestructorExpr);
+    VISIT(CXXScalarValueInitExpr);
+    VISIT(CXXThisExpr);
+    VISIT(CXXThrowExpr);
+    VISIT(CXXTypeidExpr);
+    VISIT(CXXUnresolvedConstructExpr);
+    VISIT(CXXUuidofExpr);
+    VISIT(DeclRefExpr);
+    VISIT(DependentScopeDeclRefExpr);
+    VISIT(DesignatedInitExpr);
+    VISIT(ExprWithCleanups);
+    VISIT(ExtVectorElementExpr);
+    VISIT(FloatingLiteral);
+    VISIT(GNUNullExpr);
+    VISIT(ImaginaryLiteral);
+    VISIT(ImplicitValueInitExpr);
+    VISIT(InitListExpr);
+    VISIT(IntegerLiteral);
+    VISIT(MemberExpr);
+    VISIT(OffsetOfExpr);
+    VISIT(OpaqueValueExpr);
+    VISIT(OverloadExpr);
+    VISIT(PackExpansionExpr);
+    VISIT(ParenExpr);
+    VISIT(ParenListExpr);
+    VISIT(PredefinedExpr);
+    VISIT(ShuffleVectorExpr);
+    VISIT(SizeOfPackExpr);
+    VISIT(StmtExpr);
+    VISIT(StringLiteral);
+    VISIT(SubstNonTypeTemplateParmPackExpr);
+    VISIT(UnaryExprOrTypeTraitExpr);
+    VISIT(UnaryOperator);
+    VISIT(UnaryTypeTraitExpr);
+    VISIT(VAArgExpr);
+#undef VISIT
+
+    return ExprEffect::None();
   }
 
-  bool IsBadCallSite(clang::Expr* expr) {
-    if (isa<clang::CallExpr>(expr)) {
-      clang::CallExpr* call = cast<clang::CallExpr>(expr);
+#define DECL_VISIT_EXPR(type)                                           \
+  ExprEffect Visit##type (clang::type* expr, const Environment& env)
 
-      MarkGCSuspectAsArgument(call);
-      MarkHandleDereferenceAsArgument(call);
+#define IGNORE_EXPR(type)                                               \
+  ExprEffect Visit##type (clang::type* expr, const Environment& env) {  \
+    return ExprEffect::None();                                          \
+  }
 
-      return derefs_.any() &&
-          ((gc_.count() > 1) || (gc_.any() && (gc_ ^ derefs_).any()));
+  IGNORE_EXPR(AddrLabelExpr);
+  IGNORE_EXPR(BinaryTypeTraitExpr);
+  IGNORE_EXPR(BlockExpr);
+  IGNORE_EXPR(CharacterLiteral);
+  IGNORE_EXPR(ChooseExpr);
+  IGNORE_EXPR(CompoundLiteralExpr);
+  IGNORE_EXPR(CXXBoolLiteralExpr);
+  IGNORE_EXPR(CXXDependentScopeMemberExpr);
+  IGNORE_EXPR(CXXNullPtrLiteralExpr);
+  IGNORE_EXPR(CXXPseudoDestructorExpr);
+  IGNORE_EXPR(CXXScalarValueInitExpr);
+  IGNORE_EXPR(CXXNoexceptExpr);
+  IGNORE_EXPR(CXXTypeidExpr);
+  IGNORE_EXPR(CXXUnresolvedConstructExpr);
+  IGNORE_EXPR(CXXUuidofExpr);
+  IGNORE_EXPR(DependentScopeDeclRefExpr);
+  IGNORE_EXPR(DesignatedInitExpr);
+  IGNORE_EXPR(ExtVectorElementExpr);
+  IGNORE_EXPR(FloatingLiteral);
+  IGNORE_EXPR(ImaginaryLiteral);
+  IGNORE_EXPR(IntegerLiteral);
+  IGNORE_EXPR(OffsetOfExpr);
+  IGNORE_EXPR(ImplicitValueInitExpr);
+  IGNORE_EXPR(PackExpansionExpr);
+  IGNORE_EXPR(PredefinedExpr);
+  IGNORE_EXPR(ShuffleVectorExpr);
+  IGNORE_EXPR(SizeOfPackExpr);
+  IGNORE_EXPR(StmtExpr);
+  IGNORE_EXPR(StringLiteral);
+  IGNORE_EXPR(SubstNonTypeTemplateParmPackExpr);
+  IGNORE_EXPR(UnaryExprOrTypeTraitExpr);
+  IGNORE_EXPR(UnaryTypeTraitExpr);
+  IGNORE_EXPR(VAArgExpr);
+  IGNORE_EXPR(GNUNullExpr);
+  IGNORE_EXPR(OverloadExpr);
+
+  DECL_VISIT_EXPR(CXXThisExpr) {
+    return Use(expr, expr->getType(), THIS, env);
+  }
+
+  DECL_VISIT_EXPR(AbstractConditionalOperator) {
+    Environment after_cond = env.ApplyEffect(VisitExpr(expr->getCond(), env));
+    return ExprEffect::Merge(VisitExpr(expr->getTrueExpr(), after_cond),
+                             VisitExpr(expr->getFalseExpr(), after_cond));
+  }
+
+  DECL_VISIT_EXPR(ArraySubscriptExpr) {
+    clang::Expr* exprs[2] = {expr->getBase(), expr->getIdx()};
+    return Par(expr, 2, exprs, env);
+  }
+
+  bool IsRawPointerVar(clang::Expr* expr, std::string* var_name) {
+    if (isa<clang::BlockDeclRefExpr>(expr)) {
+      *var_name = cast<clang::BlockDeclRefExpr>(expr)->getDecl()->
+          getNameAsString();
+      return true;
+    } else if (isa<clang::DeclRefExpr>(expr)) {
+      *var_name = cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString();
+      return true;
     }
     return false;
   }
 
-  virtual bool VisitExpr(clang::Expr* expr) {
-    has_derefs_ = has_derefs_ || IsRawPointerType(expr);
-    return !has_gc_ || !has_derefs_;
+  DECL_VISIT_EXPR(BinaryOperator) {
+    clang::Expr* lhs = expr->getLHS();
+    clang::Expr* rhs = expr->getRHS();
+    clang::Expr* exprs[2] = {lhs, rhs};
+
+    switch (expr->getOpcode()) {
+      case clang::BO_Comma:
+        return Seq(expr, 2, exprs, env);
+
+      case clang::BO_LAnd:
+      case clang::BO_LOr:
+        return ExprEffect::Merge(VisitExpr(lhs, env), VisitExpr(rhs, env));
+
+      case clang::BO_Assign: {
+        std::string var_name;
+        if (IsRawPointerVar(lhs, &var_name)) {
+          return VisitExpr(rhs, env).Define(var_name);
+        }
+        return Par(expr, 2, exprs, env);
+      }
+
+      default:
+        return Par(expr, 2, exprs, env);
+    }
   }
 
-  virtual bool VisitCallExpr(clang::CallExpr* expr) {
-    has_gc_ = has_gc_ || CanCauseGC(expr);
-    return !has_gc_ || !has_derefs_;
+  DECL_VISIT_EXPR(CXXBindTemporaryExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
   }
- private:
-  void MarkHandleDereferenceAsArgument(clang::CallExpr* call) {
-    derefs_.reset();
 
-    if (clang::CXXMemberCallExpr* memcall =
-            dyn_cast<clang::CXXMemberCallExpr>(call)) {
-      if (ManipulatesRawPointers(memcall->getImplicitObjectArgument())) {
-        derefs_.set(0);
+  DECL_VISIT_EXPR(CXXConstructExpr) {
+    return VisitArguments<>(expr, env);
+  }
+
+  DECL_VISIT_EXPR(CXXDefaultArgExpr) {
+    return VisitExpr(expr->getExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(CXXDeleteExpr) {
+    return VisitExpr(expr->getArgument(), env);
+  }
+
+  DECL_VISIT_EXPR(CXXNewExpr) {
+    return Par(expr,
+               expr->getNumConstructorArgs(),
+               expr->getConstructorArgs(),
+               env);
+  }
+
+  DECL_VISIT_EXPR(ExprWithCleanups) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(CXXThrowExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(InitListExpr) {
+    return Seq(expr, expr->getNumInits(), expr->getInits(), env);
+  }
+
+  DECL_VISIT_EXPR(MemberExpr) {
+    return VisitExpr(expr->getBase(), env);
+  }
+
+  DECL_VISIT_EXPR(OpaqueValueExpr) {
+    return VisitExpr(expr->getSourceExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(ParenExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(ParenListExpr) {
+    return Par(expr, expr->getNumExprs(), expr->getExprs(), env);
+  }
+
+  DECL_VISIT_EXPR(UnaryOperator) {
+    // TODO: We are treating all expressions that look like &raw_pointer_var
+    //      as definitions of raw_pointer_var. This should be changed to
+    //      recognize a less generic pattern:
+    //
+    //         if (maybe_object->ToObject(&obj)) return maybe_object;
+    //
+    if (expr->getOpcode() == clang::UO_AddrOf) {
+      std::string var_name;
+      if (IsRawPointerVar(expr->getSubExpr(), &var_name)) {
+        return ExprEffect::None().Define(var_name);
       }
     }
+    return VisitExpr(expr->getSubExpr(), env);
+  }
 
+  DECL_VISIT_EXPR(CastExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(DeclRefExpr) {
+    return Use(expr, expr->getDecl(), env);
+  }
+
+  DECL_VISIT_EXPR(BlockDeclRefExpr) {
+    return Use(expr, expr->getDecl(), env);
+  }
+
+  ExprEffect Par(clang::Expr* parent,
+                 int n,
+                 clang::Expr** exprs,
+                 const Environment& env) {
+    CallProps props;
+
+    for (int i = 0; i < n; ++i) {
+      props.SetEffect(i, VisitExpr(exprs[i], env));
+    }
+
+    if (!props.IsSafe()) ReportUnsafe(parent, BAD_EXPR_MSG);
+
+    return props.ComputeCumulativeEffect(IsRawPointerType(parent->getType()));
+  }
+
+  ExprEffect Seq(clang::Stmt* parent,
+                 int n,
+                 clang::Expr** exprs,
+                 const Environment& env) {
+    ExprEffect out = ExprEffect::None();
+    Environment out_env = env;
+    for (int i = 0; i < n; ++i) {
+      out = ExprEffect::MergeSeq(out, VisitExpr(exprs[i], out_env));
+      out_env = out_env.ApplyEffect(out);
+    }
+    return out;
+  }
+
+  ExprEffect Use(const clang::Expr* parent,
+                 const clang::QualType& var_type,
+                 const std::string& var_name,
+                 const Environment& env) {
+    if (IsRawPointerType(var_type)) {
+      if (!env.IsAlive(var_name) && dead_vars_analysis_) {
+        ReportUnsafe(parent, DEAD_VAR_MSG);
+      }
+      return ExprEffect::RawUse();
+    }
+    return ExprEffect::None();
+  }
+
+  ExprEffect Use(const clang::Expr* parent,
+                 const clang::ValueDecl* var,
+                 const Environment& env) {
+    return Use(parent, var->getType(), var->getNameAsString(), env);
+  }
+
+
+  template<typename ExprType>
+  ExprEffect VisitArguments(ExprType* call, const Environment& env) {
+    CallProps props;
+    VisitArguments<>(call, &props, env);
+    if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
+    return props.ComputeCumulativeEffect(IsRawPointerType(call->getType()));
+  }
+
+  template<typename ExprType>
+  void VisitArguments(ExprType* call,
+                      CallProps* props,
+                      const Environment& env) {
     for (unsigned arg = 0; arg < call->getNumArgs(); arg++) {
-      if (ManipulatesRawPointers(call->getArg(arg))) derefs_.set(arg + 1);
+      props->SetEffect(arg + 1, VisitExpr(call->getArg(arg), env));
     }
   }
 
-  void MarkGCSuspectAsArgument(clang::CallExpr* call) {
-    gc_.reset();
+
+  ExprEffect VisitCallExpr(clang::CallExpr* call,
+                           const Environment& env) {
+    CallProps props;
 
     clang::CXXMemberCallExpr* memcall =
         dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
-    if (memcall != NULL && CanCauseGC(memcall->getImplicitObjectArgument())) {
-      gc_.set(0);
+    if (memcall != NULL) {
+      clang::Expr* receiver = memcall->getImplicitObjectArgument();
+      props.SetEffect(0, VisitExpr(receiver, env));
     }
 
-    for (unsigned arg = 0; arg < call->getNumArgs(); arg++) {
-      if (CanCauseGC(call->getArg(arg))) gc_.set(arg + 1);
+    VisitArguments<>(call, &props, env);
+
+    if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
+
+    ExprEffect out =
+        props.ComputeCumulativeEffect(IsRawPointerType(call->getType()));
+
+    clang::FunctionDecl* callee = call->getDirectCallee();
+    if ((callee != NULL) && KnownToCauseGC(ctx_, callee)) {
+      out.setGC();
     }
+
+    return out;
+  }
+
+  // --------------------------------------------------------------------------
+  // Statements
+  // --------------------------------------------------------------------------
+
+  Environment VisitStmt(clang::Stmt* stmt, const Environment& env) {
+#define VISIT(type) do {                                                \
+      clang::type* concrete_stmt = dyn_cast_or_null<clang::type>(stmt); \
+      if (concrete_stmt != NULL) {                                      \
+        return Visit##type (concrete_stmt, env);                        \
+      }                                                                 \
+    } while(0);
+
+    if (clang::Expr* expr = dyn_cast_or_null<clang::Expr>(stmt)) {
+      return env.ApplyEffect(VisitExpr(expr, env));
+    }
+
+    VISIT(AsmStmt);
+    VISIT(BreakStmt);
+    VISIT(CompoundStmt);
+    VISIT(ContinueStmt);
+    VISIT(CXXCatchStmt);
+    VISIT(CXXTryStmt);
+    VISIT(DeclStmt);
+    VISIT(DoStmt);
+    VISIT(ForStmt);
+    VISIT(GotoStmt);
+    VISIT(IfStmt);
+    VISIT(IndirectGotoStmt);
+    VISIT(LabelStmt);
+    VISIT(NullStmt);
+    VISIT(ReturnStmt);
+    VISIT(CaseStmt);
+    VISIT(DefaultStmt);
+    VISIT(SwitchStmt);
+    VISIT(WhileStmt);
+#undef VISIT
+
+    return env;
+  }
+
+#define DECL_VISIT_STMT(type)                                           \
+  Environment Visit##type (clang::type* stmt, const Environment& env)
+
+#define IGNORE_STMT(type)                                               \
+  Environment Visit##type (clang::type* stmt, const Environment& env) { \
+    return env;                                                         \
+  }
+
+  IGNORE_STMT(IndirectGotoStmt);
+  IGNORE_STMT(NullStmt);
+  IGNORE_STMT(AsmStmt);
+
+  // We are ignoring control flow for simplicity.
+  IGNORE_STMT(GotoStmt);
+  IGNORE_STMT(LabelStmt);
+
+  // We are ignoring try/catch because V8 does not use them.
+  IGNORE_STMT(CXXCatchStmt);
+  IGNORE_STMT(CXXTryStmt);
+
+  class Block {
+   public:
+    Block(const Environment& in,
+          FunctionAnalyzer* owner)
+        : in_(in),
+          out_(Environment::Unreachable()),
+          changed_(false),
+          owner_(owner) {
+      parent_ = owner_->EnterBlock(this);
+    }
+
+    ~Block() {
+      owner_->LeaveBlock(parent_);
+    }
+
+    void MergeIn(const Environment& env) {
+      Environment old_in = in_;
+      in_ = Environment::Merge(in_, env);
+      changed_ = !old_in.Equal(in_);
+    }
+
+    bool changed() {
+      if (changed_) {
+        changed_ = false;
+        return true;
+      }
+      return false;
+    }
+
+    const Environment& in() {
+      return in_;
+    }
+
+    const Environment& out() {
+      return out_;
+    }
+
+    void MergeOut(const Environment& env) {
+      out_ = Environment::Merge(out_, env);
+    }
+
+    void Seq(clang::Stmt* a, clang::Stmt* b, clang::Stmt* c) {
+      Environment a_out = owner_->VisitStmt(a, in());
+      Environment b_out = owner_->VisitStmt(b, a_out);
+      Environment c_out = owner_->VisitStmt(c, b_out);
+      MergeOut(c_out);
+    }
+
+    void Seq(clang::Stmt* a, clang::Stmt* b) {
+      Environment a_out = owner_->VisitStmt(a, in());
+      Environment b_out = owner_->VisitStmt(b, a_out);
+      MergeOut(b_out);
+    }
+
+    void Loop(clang::Stmt* a, clang::Stmt* b, clang::Stmt* c) {
+      Seq(a, b, c);
+      MergeIn(out());
+    }
+
+    void Loop(clang::Stmt* a, clang::Stmt* b) {
+      Seq(a, b);
+      MergeIn(out());
+    }
+
+
+   private:
+    Environment in_;
+    Environment out_;
+    bool changed_;
+    FunctionAnalyzer* owner_;
+    Block* parent_;
+  };
+
+
+  DECL_VISIT_STMT(BreakStmt) {
+    block_->MergeOut(env);
+    return Environment::Unreachable();
+  }
+
+  DECL_VISIT_STMT(ContinueStmt) {
+    block_->MergeIn(env);
+    return Environment::Unreachable();
+  }
+
+  DECL_VISIT_STMT(CompoundStmt) {
+    Environment out = env;
+    clang::CompoundStmt::body_iterator end = stmt->body_end();
+    for (clang::CompoundStmt::body_iterator s = stmt->body_begin();
+         s != end;
+         ++s) {
+      out = VisitStmt(*s, out);
+    }
+    return out;
+  }
+
+  DECL_VISIT_STMT(WhileStmt) {
+    Block block (env, this);
+    do {
+      block.Loop(stmt->getCond(), stmt->getBody());
+    } while (block.changed());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(DoStmt) {
+    Block block (env, this);
+    do {
+      block.Loop(stmt->getBody(), stmt->getCond());
+    } while (block.changed());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(ForStmt) {
+    Block block (VisitStmt(stmt->getInit(), env), this);
+    do {
+      block.Loop(stmt->getCond(),
+                 stmt->getBody(),
+                 stmt->getInc());
+    } while (block.changed());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(IfStmt) {
+    Environment cond_out = VisitStmt(stmt->getCond(), env);
+    Environment then_out = VisitStmt(stmt->getThen(), cond_out);
+    Environment else_out = VisitStmt(stmt->getElse(), cond_out);
+    return Environment::Merge(then_out, else_out);
+  }
+
+  DECL_VISIT_STMT(SwitchStmt) {
+    Block block (env, this);
+    block.Seq(stmt->getCond(), stmt->getBody());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(CaseStmt) {
+    Environment in = Environment::Merge(env, block_->in());
+    Environment after_lhs = VisitStmt(stmt->getLHS(), in);
+    return VisitStmt(stmt->getSubStmt(), after_lhs);
+  }
+
+  DECL_VISIT_STMT(DefaultStmt) {
+    Environment in = Environment::Merge(env, block_->in());
+    return VisitStmt(stmt->getSubStmt(), in);
+  }
+
+  DECL_VISIT_STMT(ReturnStmt) {
+    VisitExpr(stmt->getRetValue(), env);
+    return Environment::Unreachable();
   }
 
   const clang::TagType* ToTagType(const clang::Type* t) {
@@ -311,11 +1061,14 @@
     }
   }
 
-  bool IsRawPointerType(clang::Expr* expr) {
-    clang::QualType result = expr->getType();
+  bool IsDerivedFrom(clang::CXXRecordDecl* record,
+                     clang::CXXRecordDecl* base) {
+    return (record == base) || record->isDerivedFrom(base);
+  }
 
+  bool IsRawPointerType(clang::QualType qtype) {
     const clang::PointerType* type =
-        dyn_cast_or_null<clang::PointerType>(expr->getType().getTypePtr());
+        dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull());
     if (type == NULL) return false;
 
     const clang::TagType* pointee =
@@ -326,146 +1079,154 @@
         dyn_cast_or_null<clang::CXXRecordDecl>(pointee->getDecl());
     if (record == NULL) return false;
 
-    return InV8Namespace(record) &&
-        record->hasDefinition() &&
-        ((record == objectDecl_) || record->isDerivedFrom(objectDecl_));
+    if (!InV8Namespace(record)) return false;
+
+    if (!record->hasDefinition()) return false;
+
+    record = record->getDefinition();
+
+    return IsDerivedFrom(record, object_decl_) &&
+        !IsDerivedFrom(record, smi_decl_);
   }
 
-  bool IsHandleDereference(clang::Expr* expr) {
-    if (expr == NULL) {
-      return false;
-    } else if (isa<clang::UnaryOperator>(expr)) {
-      clang::UnaryOperator* unop = cast<clang::UnaryOperator>(expr);
-      return unop->getOpcode() == clang::UO_Deref &&
-          IsHandleType(handleDeclName_, unop->getSubExpr()->getType());
-    } else if (isa<clang::CXXOperatorCallExpr>(expr)) {
-      clang::CXXOperatorCallExpr* op = cast<clang::CXXOperatorCallExpr>(expr);
-      return (op->getOperator() == clang::OO_Star ||
-              op->getOperator() == clang::OO_Arrow) &&
-          IsHandleType(handleDeclName_, op->getArg(0)->getType());
-    } else {
-      return false;
-    }
-  }
+  Environment VisitDecl(clang::Decl* decl, const Environment& env) {
+    if (clang::VarDecl* var = dyn_cast<clang::VarDecl>(decl)) {
+      Environment out = var->hasInit() ? VisitStmt(var->getInit(), env) : env;
 
-  bool CanCauseGC(clang::Expr* expr) {
-    if (expr == NULL) return false;
-
-    has_gc_ = false;
-    has_derefs_ = true;
-    TraverseStmt(expr);
-    return has_gc_;
-  }
-
-  bool ManipulatesRawPointers(clang::Expr* expr) {
-    if (expr == NULL) return false;
-
-    has_gc_ = true;
-    has_derefs_ = false;
-    TraverseStmt(expr);
-    return has_derefs_;
-  }
-
-  bool CanCauseGC(const clang::CallExpr* call) {
-    const clang::FunctionDecl* fn = call->getDirectCallee();
-    return (fn != NULL) && KnownToCauseGC(ctx_, fn);
-  }
-
-  // For generic expression classification.
-  bool has_derefs_;
-  bool has_gc_;
-
-  // For callsite classification.
-  static const int kMaxNumberOfArguments = 64;
-  std::bitset<kMaxNumberOfArguments> derefs_;
-  std::bitset<kMaxNumberOfArguments> gc_;
-
-  clang::DeclarationName handleDeclName_;
-  clang::MangleContext* ctx_;
-  clang::CXXRecordDecl* objectDecl_;
-};
-
-const std::string BAD_EXPRESSION_MSG("Possible problem with evaluation order.");
-
-class ExpressionsFinder : public clang::ASTConsumer,
-                          public clang::RecursiveASTVisitor<ExpressionsFinder> {
- public:
-  explicit ExpressionsFinder(clang::Diagnostic& d, clang::SourceManager& sm)
-      : d_(d), sm_(sm) { }
-
-  struct Resolver {
-    explicit Resolver(clang::ASTContext& ctx)
-        : ctx_(ctx), decl_ctx_(ctx.getTranslationUnitDecl()) {
-    }
-
-    Resolver(clang::ASTContext& ctx, clang::DeclContext* decl_ctx)
-        : ctx_(ctx), decl_ctx_(decl_ctx) {
-    }
-
-    clang::DeclarationName ResolveName(const char* n) {
-      clang::IdentifierInfo* ident = &ctx_.Idents.get(n);
-      return ctx_.DeclarationNames.getIdentifier(ident);
-    }
-
-    Resolver ResolveNamespace(const char* n) {
-      return Resolver(ctx_, Resolve<clang::NamespaceDecl>(n));
-    }
-
-    template<typename T>
-    T* Resolve(const char* n) {
-      if (decl_ctx_ == NULL) return NULL;
-
-      clang::DeclContext::lookup_result result =
-          decl_ctx_->lookup(ResolveName(n));
-
-      for (clang::DeclContext::lookup_iterator i = result.first,
-               e = result.second;
-           i != e;
-           i++) {
-        if (isa<T>(*i)) return cast<T>(*i);
+      if (IsRawPointerType(var->getType())) {
+        out = out.Define(var->getNameAsString());
       }
 
-      return NULL;
+      return out;
     }
+    // TODO: handle other declarations?
+    return env;
+  }
 
-   private:
-    clang::ASTContext& ctx_;
-    clang::DeclContext* decl_ctx_;
-  };
+  DECL_VISIT_STMT(DeclStmt) {
+    Environment out = env;
+    clang::DeclStmt::decl_iterator end = stmt->decl_end();
+    for (clang::DeclStmt::decl_iterator decl = stmt->decl_begin();
+         decl != end;
+         ++decl) {
+      out = VisitDecl(*decl, out);
+    }
+    return out;
+  }
+
+
+  void DefineParameters(const clang::FunctionDecl* f,
+                        Environment* env) {
+    env->MDefine(THIS);
+    clang::FunctionDecl::param_const_iterator end = f->param_end();
+    for (clang::FunctionDecl::param_const_iterator p = f->param_begin();
+         p != end;
+         ++p) {
+      env->MDefine((*p)->getNameAsString());
+    }
+  }
+
+
+  void AnalyzeFunction(const clang::FunctionDecl* f) {
+    const clang::FunctionDecl* body = NULL;
+    if (f->hasBody(body)) {
+      Environment env;
+      DefineParameters(body, &env);
+      VisitStmt(body->getBody(), env);
+      Environment::ClearSymbolTable();
+    }
+  }
+
+  Block* EnterBlock(Block* block) {
+    Block* parent = block_;
+    block_ = block;
+    return parent;
+  }
+
+  void LeaveBlock(Block* block) {
+    block_ = block;
+  }
+
+ private:
+  void ReportUnsafe(const clang::Expr* expr, const std::string& msg) {
+    d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
+              d_.getCustomDiagID(clang::Diagnostic::Warning, msg));
+  }
+
+
+  clang::MangleContext* ctx_;
+  clang::DeclarationName handle_decl_name_;
+  clang::CXXRecordDecl* object_decl_;
+  clang::CXXRecordDecl* smi_decl_;
+
+  clang::Diagnostic& d_;
+  clang::SourceManager& sm_;
+
+  Block* block_;
+  bool dead_vars_analysis_;
+};
+
+
+class ProblemsFinder : public clang::ASTConsumer,
+                       public clang::RecursiveASTVisitor<ProblemsFinder> {
+ public:
+  ProblemsFinder(clang::Diagnostic& d,
+                 clang::SourceManager& sm,
+                 const std::vector<std::string>& args)
+      : d_(d), sm_(sm), dead_vars_analysis_(false) {
+    for (unsigned i = 0; i < args.size(); ++i) {
+      if (args[i] == "--dead-vars") {
+        dead_vars_analysis_ = true;
+      }
+    }
+  }
 
   virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
     Resolver r(ctx);
 
-    clang::CXXRecordDecl* objectDecl =
+    clang::CXXRecordDecl* object_decl =
         r.ResolveNamespace("v8").ResolveNamespace("internal").
             Resolve<clang::CXXRecordDecl>("Object");
 
-    if (objectDecl != NULL) {
-      expression_classifier_ =
-          new ExpressionClassifier(r.ResolveName("Handle"),
-                                   clang::createItaniumMangleContext(ctx, d_),
-                                   objectDecl);
+    clang::CXXRecordDecl* smi_decl =
+        r.ResolveNamespace("v8").ResolveNamespace("internal").
+            Resolve<clang::CXXRecordDecl>("Smi");
+
+    if (object_decl != NULL) object_decl = object_decl->getDefinition();
+
+    if (smi_decl != NULL) smi_decl = smi_decl->getDefinition();
+
+    if (object_decl != NULL && smi_decl != NULL) {
+      function_analyzer_ =
+          new FunctionAnalyzer(clang::createItaniumMangleContext(ctx, d_),
+                               r.ResolveName("Handle"),
+                               object_decl,
+                               smi_decl,
+                               d_,
+                               sm_,
+                               dead_vars_analysis_);
       TraverseDecl(ctx.getTranslationUnitDecl());
     } else {
-      std::cerr << "Failed to resolve v8::internal::Object" << std::endl;
+      if (object_decl == NULL) {
+        llvm::errs() << "Failed to resolve v8::internal::Object\n";
+      }
+      if (smi_decl == NULL) {
+        llvm::errs() << "Failed to resolve v8::internal::Smi\n";
+      }
     }
   }
 
-  virtual bool VisitExpr(clang::Expr* expr) {
-    if ( expression_classifier_->IsBadCallSite(expr) ) {
-      d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
-                d_.getCustomDiagID(clang::Diagnostic::Warning,
-                                   BAD_EXPRESSION_MSG));
-    }
-
+  virtual bool VisitFunctionDecl(clang::FunctionDecl* decl) {
+    function_analyzer_->AnalyzeFunction(decl);
     return true;
   }
 
  private:
   clang::Diagnostic& d_;
   clang::SourceManager& sm_;
+  bool dead_vars_analysis_;
 
-  ExpressionClassifier* expression_classifier_;
+  FunctionAnalyzer* function_analyzer_;
 };
 
 
@@ -474,22 +1235,27 @@
  protected:
   clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &CI,
                                         llvm::StringRef InFile) {
-    return new ConsumerType(CI.getDiagnostics(), CI.getSourceManager());
+    return new ConsumerType(CI.getDiagnostics(), CI.getSourceManager(), args_);
   }
 
   bool ParseArgs(const clang::CompilerInstance &CI,
                  const std::vector<std::string>& args) {
+    args_ = args;
     return true;
   }
 
-  void PrintHelp(llvm::raw_ostream& ros) { }
+  void PrintHelp(llvm::raw_ostream& ros) {
+  }
+ private:
+  std::vector<std::string> args_;
 };
 
 
 }
 
-static clang::FrontendPluginRegistry::Add<Action<ExpressionsFinder> >
-FindProblems("find-problems", "Find possible problems with evaluations order.");
+static clang::FrontendPluginRegistry::Add<Action<ProblemsFinder> >
+FindProblems("find-problems", "Find GC-unsafe places.");
 
-static clang::FrontendPluginRegistry::Add<Action<FunctionDeclarationFinder> >
+static clang::FrontendPluginRegistry::Add<
+  Action<FunctionDeclarationFinder> >
 DumpCallees("dump-callees", "Dump callees for each function.");
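
The find-problems plugin above reduces each expression to a small effect (can it trigger GC? does it touch a raw pointer?) and then combines those effects differently for sequenced subexpressions (Seq) and unordered ones (Par), reporting BAD_EXPR_MSG when the unordered case is unsafe. A loose, illustrative Python sketch of that combination idea follows; the names are hypothetical and the real rules live in the C++ above.

class Effect(object):
    """Hypothetical stand-in for the plugin's ExprEffect: a per-expression summary."""
    def __init__(self, gc=False, raw_use=False):
        self.gc = gc            # expression may trigger a garbage collection
        self.raw_use = raw_use  # expression reads a raw (non-handle) pointer

def merge_alternative(a, b):
    # Either branch may run (the two sides of || or &&): union the effects.
    return Effect(a.gc or b.gc, a.raw_use or b.raw_use)

def parallel_is_safe(effects):
    # Unordered subexpressions (call arguments, binary operands): unsafe when
    # one of them may cause GC while a *different* one uses a raw pointer,
    # because their evaluation order is unspecified.
    for i, a in enumerate(effects):
        for j, b in enumerate(effects):
            if i != j and a.gc and b.raw_use:
                return False
    return True

# Foo(CanCauseGC(), *raw_ptr): flagged, since the argument order is unspecified.
assert not parallel_is_safe([Effect(gc=True), Effect(raw_use=True)])
assert parallel_is_safe([Effect(gc=True, raw_use=True)])
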
diff --git a/tools/gcmole/gcmole.lua b/tools/gcmole/gcmole.lua
index 7fb8de0..f8d3b62 100644
--- a/tools/gcmole/gcmole.lua
+++ b/tools/gcmole/gcmole.lua
@@ -29,8 +29,44 @@
 -- Usage: CLANG_BIN=clang-bin-dir lua tools/gcmole/gcmole.lua [arm|ia32|x64]
 
 local DIR = arg[0]:match("^(.+)/[^/]+$")
- 
-local ARCHS = arg[1] and { arg[1] } or { 'ia32', 'arm', 'x64' }
+
+local FLAGS = {
+   -- Do not rebuild the gcsuspects file; reuse the previously generated one.
+   reuse_gcsuspects = false;
+
+   -- Print commands to console before executing them.
+   verbose = false;
+
+   -- Perform dead variable analysis (generates many false positives).
+   -- TODO add some sort of whitelist to filter out false positives.
+   dead_vars = false;
+
+   -- When building gcsuspects, whitelist certain functions (i.e. do not
+   -- treat them as GC suspects even though they can cause GC). Currently
+   -- used to reduce the number of false positives in the dead variable
+   -- analysis. See the TODO for WHITELIST below.
+   whitelist = true;
+}
+local ARGS = {}
+
+for i = 1, #arg do
+   local flag = arg[i]:match "^%-%-([%w_-]+)$"
+   if flag then
+      local no, real_flag = flag:match "^(no)([%w_-]+)$"
+      if real_flag then flag = real_flag end
+
+      flag = flag:gsub("%-", "_")
+      if FLAGS[flag] ~= nil then
+         FLAGS[flag] = (no ~= "no")
+      else
+         error("Unknown flag: " .. flag)
+      end
+   else
+      table.insert(ARGS, arg[i])
+   end
+end
+
+local ARCHS = ARGS[1] and { ARGS[1] } or { 'ia32', 'arm', 'x64' }
 
 local io = require "io"
 local os = require "os"
@@ -43,33 +79,38 @@
 -------------------------------------------------------------------------------
 -- Clang invocation
 
-local CLANG_BIN = os.getenv "CLANG_BIN" 
+local CLANG_BIN = os.getenv "CLANG_BIN"
 
 if not CLANG_BIN or CLANG_BIN == "" then
    error "CLANG_BIN not set"
-end 
+end
 
-local function MakeClangCommandLine(plugin, triple, arch_define)
-   return CLANG_BIN .. "/clang -cc1 -load " .. DIR .. "/libgcmole.so" 
+local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
+   if plugin_args then
+     for i = 1, #plugin_args do
+        plugin_args[i] = "-plugin-arg-" .. plugin .. " " .. plugin_args[i]
+     end
+     plugin_args = " " .. table.concat(plugin_args, " ")
+   end
+   return CLANG_BIN .. "/clang -cc1 -load " .. DIR .. "/libgcmole.so"
       .. " -plugin "  .. plugin
-      .. " -triple " .. triple 
+      .. (plugin_args or "")
+      .. " -triple " .. triple
       .. " -D" .. arch_define
-      .. " -DENABLE_VMSTATE_TRACKING" 
-      .. " -DENABLE_LOGGING_AND_PROFILING" 
       .. " -DENABLE_DEBUGGER_SUPPORT"
       .. " -Isrc"
 end
 
 function InvokeClangPluginForEachFile(filenames, cfg, func)
    local cmd_line = MakeClangCommandLine(cfg.plugin,
-					 cfg.triple,
-					 cfg.arch_define)
+                                         cfg.plugin_args,
+                                         cfg.triple,
+                                         cfg.arch_define)
 
-   for _, filename in ipairs(filenames) do 
+   for _, filename in ipairs(filenames) do
       log("-- %s", filename)
-
       local action = cmd_line .. " src/" .. filename .. " 2>&1"
-
+      if FLAGS.verbose then print('popen ', action) end
       local pipe = io.popen(action)
       func(filename, pipe:lines())
       pipe:close()
@@ -84,7 +125,7 @@
    local sconscript = f:read('*a')
    f:close()
 
-   local SOURCES = sconscript:match "SOURCES = {(.-)}"; 
+   local SOURCES = sconscript:match "SOURCES = {(.-)}";
 
    local sources = {}
 
@@ -93,13 +134,13 @@
       local files = {}
       for file in list:gmatch "[^%s]+" do table.insert(files, file) end
       sources[condition] = files
-   end 
+   end
 
    for condition, list in SOURCES:gmatch "'([^']-)': %[(.-)%]" do
       local files = {}
       for file in list:gmatch "'([^']-)'" do table.insert(files, file) end
       sources[condition] = files
-   end 
+   end
 
    return sources
 end
@@ -119,7 +160,7 @@
    local list = {}
    for condition, files in pairs(sources) do
       if EvaluateCondition(condition, props) then
-	 for i = 1, #files do table.insert(list, files[i]) end
+         for i = 1, #files do table.insert(list, files[i]) end
       end
    end
    return list
@@ -129,9 +170,9 @@
 
 local function FilesForArch(arch)
    return BuildFileList(sources, { os = 'linux',
-				   arch = arch,
-				   mode = 'debug',
-				   simulator = ''})
+                                   arch = arch,
+                                   mode = 'debug',
+                                   simulator = ''})
 end
 
 local mtConfig = {}
@@ -149,29 +190,67 @@
 
 local ARCHITECTURES = {
    ia32 = config { triple = "i586-unknown-linux",
-		   arch_define = "V8_TARGET_ARCH_IA32" },
+                   arch_define = "V8_TARGET_ARCH_IA32" },
    arm = config { triple = "i586-unknown-linux",
-		  arch_define = "V8_TARGET_ARCH_ARM" },
+                  arch_define = "V8_TARGET_ARCH_ARM" },
    x64 = config { triple = "x86_64-unknown-linux",
-		  arch_define = "V8_TARGET_ARCH_X64" }
+                  arch_define = "V8_TARGET_ARCH_X64" }
 }
 
 -------------------------------------------------------------------------------
--- GCSuspects Generation 
+-- GCSuspects Generation
 
-local gc = {}
-local funcs = {}
+local gc, gc_caused, funcs
+
+local WHITELIST = {
+   -- The following functions call CEntryStub which is always present.
+   "MacroAssembler.*CallExternalReference",
+   "MacroAssembler.*CallRuntime",
+   "CompileCallLoadPropertyWithInterceptor",
+   "CallIC.*GenerateMiss",
+
+   -- DirectCEntryStub is a special stub used on ARM.
+   -- It is pinned and always present.
+   "DirectCEntryStub.*GenerateCall",
+
+   -- TODO GCMole currently is not sensitive enough to understand that
+   --      certain functions only cause GC and return Failure simultaneously.
+   --      Callsites of such functions are safe as long as they properly
+   --      check the return value and propagate the Failure to the caller.
+   --      It should be possible to extend GCMole to understand this.
+   "Heap.*AllocateFunctionPrototype"
+};
+
+local function AddCause(name, cause)
+   local t = gc_caused[name]
+   if not t then
+      t = {}
+      gc_caused[name] = t
+   end
+   table.insert(t, cause)
+end
 
 local function resolve(name)
    local f = funcs[name]
-   
-   if not f then 
+
+   if not f then
       f = {}
       funcs[name] = f
-      
-      if name:match "Collect.*Garbage" then gc[name] = true end
+
+      if name:match "Collect.*Garbage" then
+         gc[name] = true
+         AddCause(name, "<GC>")
+      end
+
+      if FLAGS.whitelist then
+         for i = 1, #WHITELIST do
+            if name:match(WHITELIST[i]) then
+               gc[name] = false
+            end
+         end
+      end
    end
-   
+
     return f
 end
 
@@ -180,11 +259,11 @@
 
    for funcname in lines do
       if funcname:sub(1, 1) ~= '\t' then
-	 resolve(funcname)
-	 scope = funcname
+         resolve(funcname)
+         scope = funcname
       else
-	 local name = funcname:sub(2)
-	 resolve(name)[scope] = true
+         local name = funcname:sub(2)
+         resolve(name)[scope] = true
       end
    end
 end
@@ -192,60 +271,82 @@
 local function propagate ()
    log "** Propagating GC information"
 
-   local function mark(callers)
-      for caller, _ in pairs(callers) do 
-	 if not gc[caller] then
-	    gc[caller] = true
-	    mark(funcs[caller]) 
-	 end
+   local function mark(from, callers)
+      for caller, _ in pairs(callers) do
+         if gc[caller] == nil then
+            gc[caller] = true
+            mark(caller, funcs[caller])
+         end
+         AddCause(caller, from)
       end
    end
 
    for funcname, callers in pairs(funcs) do
-      if gc[funcname] then mark(callers) end
+      if gc[funcname] then mark(funcname, callers) end
    end
 end
 
 local function GenerateGCSuspects(arch, files, cfg)
+   -- Reset the global state.
+   gc, gc_caused, funcs = {}, {}, {}
+
    log ("** Building GC Suspects for %s", arch)
    InvokeClangPluginForEachFile (files,
                                  cfg:extend { plugin = "dump-callees" },
                                  parse)
-   
+
    propagate()
 
    local out = assert(io.open("gcsuspects", "w"))
-   for name, _ in pairs(gc) do out:write (name, '\n') end
+   for name, value in pairs(gc) do if value then out:write (name, '\n') end end
    out:close()
+
+   local out = assert(io.open("gccauses", "w"))
+   out:write "GC = {"
+   for name, causes in pairs(gc_caused) do
+      out:write("['", name, "'] = {")
+      for i = 1, #causes do out:write ("'", causes[i], "';") end
+      out:write("};\n")
+   end
+   out:write "}"
+   out:close()
+
    log ("** GCSuspects generated for %s", arch)
 end
 
--------------------------------------------------------------------------------
+--------------------------------------------------------------------------------
 -- Analysis
 
-local function CheckCorrectnessForArch(arch) 
+local function CheckCorrectnessForArch(arch)
    local files = FilesForArch(arch)
    local cfg = ARCHITECTURES[arch]
 
-   GenerateGCSuspects(arch, files, cfg)
+   if not FLAGS.reuse_gcsuspects then
+      GenerateGCSuspects(arch, files, cfg)
+   end
 
    local processed_files = 0
    local errors_found = false
    local function SearchForErrors(filename, lines)
       processed_files = processed_files + 1
       for l in lines do
-	 errors_found = errors_found or
-	    l:match "^[^:]+:%d+:%d+:" or
-	    l:match "error" or
-	    l:match "warning"
+         errors_found = errors_found or
+            l:match "^[^:]+:%d+:%d+:" or
+            l:match "error" or
+            l:match "warning"
          print(l)
       end
    end
 
-   log("** Searching for evaluation order problems for %s", arch)
+   log("** Searching for evaluation order problems%s for %s",
+       FLAGS.dead_vars and " and dead variables" or "",
+       arch)
+   local plugin_args
+   if FLAGS.dead_vars then plugin_args = { "--dead-vars" } end
    InvokeClangPluginForEachFile(files,
-				cfg:extend { plugin = "find-problems" },
-			        SearchForErrors)
+                                cfg:extend { plugin = "find-problems",
+                                             plugin_args = plugin_args },
+                                SearchForErrors)
    log("** Done processing %d files. %s",
        processed_files,
        errors_found and "Errors found" or "No errors found")
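
The gcsuspects pass above works in two steps: the dump-callees plugin lists each function's callees, parse() inverts that into a map from callee to its callers, and propagate() then transitively marks every caller of a known GC-causing function as a suspect. A minimal Python sketch of that fixed-point idea (the real implementation is the Lua above; the function names below are made up):

def propagate(gc, callers_of):
    # gc: function name -> True for known GC-causing functions.
    # callers_of: callee name -> list of its callers (what dump-callees yields
    #             after inversion).
    def mark(callee):
        for caller in callers_of.get(callee, ()):
            if caller not in gc:
                gc[caller] = True
                mark(caller)
    for name in [n for n, suspect in gc.items() if suspect]:
        mark(name)
    return gc

# Hypothetical call graph: anything that (transitively) calls CollectGarbage
# ends up in the suspects set.
suspects = propagate({"CollectGarbage": True},
                     {"CollectGarbage": ["Heap::AllocateRaw"],
                      "Heap::AllocateRaw": ["Factory::NewFixedArray"]})
assert suspects["Factory::NewFixedArray"]
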
diff --git a/tools/gdb-v8-support.py b/tools/gdb-v8-support.py
new file mode 100644
index 0000000..9cc046c
--- /dev/null
+++ b/tools/gdb-v8-support.py
@@ -0,0 +1,154 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+kSmiTag = 0
+kSmiTagSize = 1
+kSmiTagMask = (1 << kSmiTagSize) - 1
+
+
+kHeapObjectTag = 1
+kHeapObjectTagSize = 2
+kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1
+
+
+kFailureTag = 3
+kFailureTagSize = 2
+kFailureTagMask = (1 << kFailureTagSize) - 1
+
+
+kSmiShiftSize32 = 0
+kSmiValueSize32 = 31
+kSmiShiftBits32 = kSmiTagSize + kSmiShiftSize32
+
+
+kSmiShiftSize64 = 31
+kSmiValueSize64 = 32
+kSmiShiftBits64 = kSmiTagSize + kSmiShiftSize64
+
+
+kAllBits = 0xFFFFFFFF
+kTopBit32 = 0x80000000
+kTopBit64 = 0x8000000000000000
+
+
+t_u32 = gdb.lookup_type('unsigned int')
+t_u64 = gdb.lookup_type('unsigned long long')
+
+
+def has_smi_tag(v):
+  return v & kSmiTagMask == kSmiTag
+
+
+def has_failure_tag(v):
+  return v & kFailureTagMask == kFailureTag
+
+
+def has_heap_object_tag(v):
+  return v & kHeapObjectTagMask == kHeapObjectTag
+
+
+def raw_heap_object(v):
+  return v - kHeapObjectTag
+
+
+def smi_to_int_32(v):
+  v = v & kAllBits
+  if (v & kTopBit32) == kTopBit32:
+    return ((v & kAllBits) >> kSmiShiftBits32) - 2147483648
+  else:
+    return (v & kAllBits) >> kSmiShiftBits32
+
+
+def smi_to_int_64(v):
+  return (v >> kSmiShiftBits64)
+
+
+def decode_v8_value(v, bitness):
+  base_str = 'v8[%x]' % v
+  if has_smi_tag(v):
+    if bitness == 32:
+      return base_str + (" SMI(%d)" % smi_to_int_32(v))
+    else:
+      return base_str + (" SMI(%d)" % smi_to_int_64(v))
+  elif has_failure_tag(v):
+    return base_str + " (failure)"
+  elif has_heap_object_tag(v):
+    return base_str + (" H(0x%x)" % raw_heap_object(v))
+  else:
+    return base_str
+
+
+class V8ValuePrinter(object):
+  "Print a v8value."
+  def __init__(self, val):
+    self.val = val
+  def to_string(self):
+    if self.val.type.sizeof == 4:
+      v_u32 = self.val.cast(t_u32)
+      return decode_v8_value(int(v_u32), 32)
+    elif self.val.type.sizeof == 8:
+      v_u64 = self.val.cast(t_u64)
+      return decode_v8_value(int(v_u64), 64)
+    else:
+      return 'v8value?'
+  def display_hint(self):
+    return 'v8value'
+
+
+def v8_pretty_printers(val):
+  lookup_tag = val.type.tag
+  if lookup_tag is None:
+    return None
+  elif lookup_tag == 'v8value':
+    return V8ValuePrinter(val)
+  return None
+gdb.pretty_printers.append(v8_pretty_printers)
+
+
+def v8_to_int(v):
+  if v.type.sizeof == 4:
+    return int(v.cast(t_u32))
+  elif v.type.sizeof == 8:
+    return int(v.cast(t_u64))
+  else:
+    return '?'
+
+
+def v8_get_value(vstring):
+  v = gdb.parse_and_eval(vstring)
+  return v8_to_int(v)
+
+
+class V8PrintObject (gdb.Command):
+  """Prints a v8 object."""
+  def __init__ (self):
+    super (V8PrintObject, self).__init__ ("v8print", gdb.COMMAND_DATA)
+  def invoke (self, arg, from_tty):
+    v = v8_get_value(arg)
+    gdb.execute('call __gdb_print_v8_object(%d)' % v)
+V8PrintObject()
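
For reference, the tagging scheme the new gdb script decodes: bit 0 clear marks a small integer (the payload sits above the tag bit), while low bits 01 and 11 mark heap objects and failures respectively. A standalone snippet mirroring those checks (no gdb required; unlike smi_to_int_32/64 it only handles small non-negative SMIs):

kSmiTagMask, kSmiTag = 1, 0
kHeapObjectTagMask, kHeapObjectTag = 3, 1
kFailureTagMask, kFailureTag = 3, 3

def classify32(v):
    if v & kSmiTagMask == kSmiTag:
        return "SMI(%d)" % (v >> 1)               # payload above the one tag bit
    if v & kFailureTagMask == kFailureTag:
        return "failure"
    if v & kHeapObjectTagMask == kHeapObjectTag:
        return "heap object at 0x%x" % (v - kHeapObjectTag)
    return "unknown"

assert classify32(0x6) == "SMI(3)"                # 3 << 1, tag bit clear
assert classify32(0x12345679).startswith("heap object")
assert classify32(0x7) == "failure"               # low bits 11
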
diff --git a/tools/grokdump.py b/tools/grokdump.py
index de681b2..6bc49c6 100755
--- a/tools/grokdump.py
+++ b/tools/grokdump.py
@@ -291,6 +291,7 @@
     self.exception = None
     self.exception_context = None
     self.memory_list = None
+    self.memory_list64 = None
     self.thread_map = {}
     for d in directories:
       DebugPrint(d)
@@ -311,16 +312,17 @@
           self.thread_map[thread.id] = thread
       elif d.stream_type == MD_MEMORY_LIST_STREAM:
         print >>sys.stderr, "Warning: not a full minidump"
-        ml = MINIDUMP_MEMORY_LIST.Read(self.minidump, d.location.rva)
-        DebugPrint(ml)
-        for m in ml.ranges:
-          DebugPrint(m)
-      elif d.stream_type == MD_MEMORY_64_LIST_STREAM:
         assert self.memory_list is None
-        self.memory_list = MINIDUMP_MEMORY_LIST64.Read(
+        self.memory_list = MINIDUMP_MEMORY_LIST.Read(
           self.minidump, d.location.rva)
         assert ctypes.sizeof(self.memory_list) == d.location.data_size
         DebugPrint(self.memory_list)
+      elif d.stream_type == MD_MEMORY_64_LIST_STREAM:
+        assert self.memory_list64 is None
+        self.memory_list64 = MINIDUMP_MEMORY_LIST64.Read(
+          self.minidump, d.location.rva)
+        assert ctypes.sizeof(self.memory_list64) == d.location.data_size
+        DebugPrint(self.memory_list64)
 
   def IsValidAddress(self, address):
     return self.FindLocation(address) is not None
@@ -338,12 +340,16 @@
     return self.minidump[location:location + size]
 
   def FindLocation(self, address):
-    # TODO(vitalyr): only works for full minidumps (...64 structure variants).
     offset = 0
-    for r in self.memory_list.ranges:
-      if r.start <= address < r.start + r.size:
-        return self.memory_list.base_rva + offset + address - r.start
-      offset += r.size
+    if self.memory_list64 is not None:
+      for r in self.memory_list64.ranges:
+        if r.start <= address < r.start + r.size:
+          return self.memory_list64.base_rva + offset + address - r.start
+        offset += r.size
+    if self.memory_list is not None:
+      for r in self.memory_list.ranges:
+        if r.start <= address < r.start + r.memory.data_size:
+          return r.memory.rva + address - r.start
     return None
 
   def GetDisasmLines(self, address, size):
@@ -393,41 +399,45 @@
   130: "ODDBALL_TYPE",
   131: "JS_GLOBAL_PROPERTY_CELL_TYPE",
   132: "HEAP_NUMBER_TYPE",
-  133: "PROXY_TYPE",
+  133: "FOREIGN_TYPE",
   134: "BYTE_ARRAY_TYPE",
-  135: "PIXEL_ARRAY_TYPE",
-  136: "EXTERNAL_BYTE_ARRAY_TYPE",
-  137: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
-  138: "EXTERNAL_SHORT_ARRAY_TYPE",
-  139: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
-  140: "EXTERNAL_INT_ARRAY_TYPE",
-  141: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
-  142: "EXTERNAL_FLOAT_ARRAY_TYPE",
-  143: "FILLER_TYPE",
-  144: "ACCESSOR_INFO_TYPE",
-  145: "ACCESS_CHECK_INFO_TYPE",
-  146: "INTERCEPTOR_INFO_TYPE",
-  147: "CALL_HANDLER_INFO_TYPE",
-  148: "FUNCTION_TEMPLATE_INFO_TYPE",
-  149: "OBJECT_TEMPLATE_INFO_TYPE",
-  150: "SIGNATURE_INFO_TYPE",
-  151: "TYPE_SWITCH_INFO_TYPE",
-  152: "SCRIPT_TYPE",
-  153: "CODE_CACHE_TYPE",
-  156: "FIXED_ARRAY_TYPE",
-  157: "SHARED_FUNCTION_INFO_TYPE",
-  158: "JS_MESSAGE_OBJECT_TYPE",
-  159: "JS_VALUE_TYPE",
-  160: "JS_OBJECT_TYPE",
-  161: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
-  162: "JS_GLOBAL_OBJECT_TYPE",
-  163: "JS_BUILTINS_OBJECT_TYPE",
-  164: "JS_GLOBAL_PROXY_TYPE",
-  165: "JS_ARRAY_TYPE",
-  166: "JS_REGEXP_TYPE",
-  167: "JS_FUNCTION_TYPE",
-  154: "DEBUG_INFO_TYPE",
-  155: "BREAK_POINT_INFO_TYPE",
+  135: "EXTERNAL_BYTE_ARRAY_TYPE",
+  136: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
+  137: "EXTERNAL_SHORT_ARRAY_TYPE",
+  138: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
+  139: "EXTERNAL_INT_ARRAY_TYPE",
+  140: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
+  141: "EXTERNAL_FLOAT_ARRAY_TYPE",
+  143: "EXTERNAL_PIXEL_ARRAY_TYPE",
+  145: "FILLER_TYPE",
+  146: "ACCESSOR_INFO_TYPE",
+  147: "ACCESS_CHECK_INFO_TYPE",
+  148: "INTERCEPTOR_INFO_TYPE",
+  149: "CALL_HANDLER_INFO_TYPE",
+  150: "FUNCTION_TEMPLATE_INFO_TYPE",
+  151: "OBJECT_TEMPLATE_INFO_TYPE",
+  152: "SIGNATURE_INFO_TYPE",
+  153: "TYPE_SWITCH_INFO_TYPE",
+  154: "SCRIPT_TYPE",
+  155: "CODE_CACHE_TYPE",
+  156: "POLYMORPHIC_CODE_CACHE_TYPE",
+  159: "FIXED_ARRAY_TYPE",
+  160: "SHARED_FUNCTION_INFO_TYPE",
+  161: "JS_MESSAGE_OBJECT_TYPE",
+  162: "JS_VALUE_TYPE",
+  163: "JS_OBJECT_TYPE",
+  164: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+  165: "JS_GLOBAL_OBJECT_TYPE",
+  166: "JS_BUILTINS_OBJECT_TYPE",
+  167: "JS_GLOBAL_PROXY_TYPE",
+  168: "JS_ARRAY_TYPE",
+  169: "JS_PROXY_TYPE",
+  170: "JS_WEAK_MAP_TYPE",
+  171: "JS_REGEXP_TYPE",
+  172: "JS_FUNCTION_TYPE",
+  173: "JS_FUNCTION_PROXY_TYPE",
+  157: "DEBUG_INFO_TYPE",
+  158: "BREAK_POINT_INFO_TYPE",
 }
 
 
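
The FindLocation() change above handles both minidump flavours: a full dump carries a 64-bit memory list whose ranges are packed back to back after base_rva, while a partial dump's 32-bit list stores a file offset (rva) per range. A simplified Python sketch of that address-to-file-offset lookup; the Range* shapes are illustrative stand-ins, not the real MINIDUMP ctypes structures:

from collections import namedtuple

Range64 = namedtuple("Range64", "start size")        # data packed after base_rva
Range32 = namedtuple("Range32", "start size rva")    # each range has its own rva

def find_location(address, list64=None, base_rva=0, list32=None):
    offset = 0
    if list64 is not None:                           # full minidump
        for r in list64:
            if r.start <= address < r.start + r.size:
                return base_rva + offset + (address - r.start)
            offset += r.size
    if list32 is not None:                           # partial minidump
        for r in list32:
            if r.start <= address < r.start + r.size:
                return r.rva + (address - r.start)
    return None

# A full dump whose memory data starts at file offset 0x400.
assert find_location(0x1010, list64=[Range64(0x1000, 0x100)], base_rva=0x400) == 0x410
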
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 77a6194..5014417 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -26,180 +26,59 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
-  'variables': {
-    'use_system_v8%': 0,
-    'msvs_use_common_release': 0,
-    'gcc_version%': 'unknown',
-    'v8_target_arch%': '<(target_arch)',
-    'v8_use_snapshot%': 'true',
-    'v8_use_liveobjectlist%': 'false',
-  },
+  'includes': ['../../build/common.gypi'],
   'conditions': [
     ['use_system_v8==0', {
-      'target_defaults': {
-        'defines': [
-          'ENABLE_LOGGING_AND_PROFILING',
-          'ENABLE_DEBUGGER_SUPPORT',
-          'ENABLE_VMSTATE_TRACKING',
-          'V8_FAST_TLS',
-        ],
-        'conditions': [
-          ['OS!="mac"', {
-            # TODO(mark): The OS!="mac" conditional is temporary. It can be
-            # removed once the Mac Chromium build stops setting target_arch to
-            # ia32 and instead sets it to mac. Other checks in this file for
-            # OS=="mac" can be removed at that time as well. This can be cleaned
-            # up once http://crbug.com/44205 is fixed.
-            'conditions': [
-              ['v8_target_arch=="arm"', {
-                'defines': [
-                  'V8_TARGET_ARCH_ARM',
-                ],
-              }],
-              ['v8_target_arch=="ia32"', {
-                'defines': [
-                  'V8_TARGET_ARCH_IA32',
-                ],
-              }],
-              ['v8_target_arch=="x64"', {
-                'defines': [
-                  'V8_TARGET_ARCH_X64',
-                ],
-              }],
-            ],
-          }],
-          ['v8_use_liveobjectlist=="true"', {
-            'defines': [
-              'ENABLE_DEBUGGER_SUPPORT',
-              'INSPECTOR',
-              'OBJECT_PRINT',
-              'LIVEOBJECTLIST',
-            ],
-          }],
-        ],
-        'configurations': {
-          'Debug': {
-            'defines': [
-              'DEBUG',
-              '_DEBUG',
-              'ENABLE_DISASSEMBLER',
-              'V8_ENABLE_CHECKS',
-              'OBJECT_PRINT',
-            ],
-            'msvs_settings': {
-              'VCCLCompilerTool': {
-                'Optimization': '0',
-
-                'conditions': [
-                  ['OS=="win" and component=="shared_library"', {
-                    'RuntimeLibrary': '3',  # /MDd
-                  }, {
-                    'RuntimeLibrary': '1',  # /MTd
-                  }],
-                ],
-              },
-              'VCLinkerTool': {
-                'LinkIncremental': '2',
-              },
-            },
-            'conditions': [
-             ['OS=="freebsd" or OS=="openbsd"', {
-               'cflags': [ '-I/usr/local/include' ],
-             }],
-           ],
-          },
-          'Release': {
-            'conditions': [
-              ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-                'cflags!': [
-                  '-O2',
-                  '-Os',
-                ],
-                'cflags': [
-                  '-fomit-frame-pointer',
-                  '-O3',
-                ],
-                'conditions': [
-                  [ 'gcc_version==44', {
-                    'cflags': [
-                      # Avoid crashes with gcc 4.4 in the v8 test suite.
-                      '-fno-tree-vrp',
-                    ],
-                  }],
-                ],
-              }],
-             ['OS=="freebsd" or OS=="openbsd"', {
-               'cflags': [ '-I/usr/local/include' ],
-             }],
-              ['OS=="mac"', {
-                'xcode_settings': {
-                  'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
-
-                  # -fstrict-aliasing.  Mainline gcc
-                  # enables this at -O2 and above,
-                  # but Apple gcc does not unless it
-                  # is specified explicitly.
-                  'GCC_STRICT_ALIASING': 'YES',
-                },
-              }],
-              ['OS=="win"', {
-                'msvs_configuration_attributes': {
-                  'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
-                  'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
-                  'CharacterSet': '1',
-                },
-                'msvs_settings': {
-                  'VCCLCompilerTool': {
-                    'Optimization': '2',
-                    'InlineFunctionExpansion': '2',
-                    'EnableIntrinsicFunctions': 'true',
-                    'FavorSizeOrSpeed': '0',
-                    'OmitFramePointers': 'true',
-                    'StringPooling': 'true',
-
-                    'conditions': [
-                      ['OS=="win" and component=="shared_library"', {
-                        'RuntimeLibrary': '2',  #/MD
-                      }, {
-                        'RuntimeLibrary': '0',  #/MT
-                      }],
-                    ],
-                  },
-                  'VCLinkerTool': {
-                    'LinkIncremental': '1',
-                    'OptimizeReferences': '2',
-                    'OptimizeForWindows98': '1',
-                    'EnableCOMDATFolding': '2',
-                  },
-                },
-              }],
-            ],
-          },
-        },
-      },
       'targets': [
         {
           'target_name': 'v8',
           'conditions': [
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host', 'target'],
+            }, {
+              'toolsets': ['target'],
+            }],
             ['v8_use_snapshot=="true"', {
               'dependencies': ['v8_snapshot'],
             },
             {
               'dependencies': ['v8_nosnapshot'],
             }],
-            ['OS=="win" and component=="shared_library"', {
+            ['component=="shared_library"', {
               'type': '<(component)',
               'sources': [
+                # Note: on non-Windows we still build this file so that gyp
+                # has some sources to link into the component.
                 '../../src/v8dll-main.cc',
               ],
-              'defines': [
-                'BUILDING_V8_SHARED'
+              'conditions': [
+                ['OS=="win"', {
+                  'defines': [
+                    'BUILDING_V8_SHARED',
+                  ],
+                  'direct_dependent_settings': {
+                    'defines': [
+                      'USING_V8_SHARED',
+                    ],
+                  },
+                }, {
+                  'defines': [
+                    'V8_SHARED',
+                  ],
+                  'direct_dependent_settings': {
+                    'defines': [
+                      'V8_SHARED',
+                    ],
+                  },
+                }],
+                ['soname_version!=""', {
+                  # Ideally, we'd like to specify the full filename for the
+                  # library and set it to "libv8.so.<(soname_version)",
+                  # but currently the best we can do is use 'product_name' and
+                  # get "libv8-<(soname_version).so".
+                  'product_name': 'v8-<(soname_version)',
+                }],
               ],
-              'direct_dependent_settings': {
-                'defines': [
-                  'USING_V8_SHARED',
-                ],
-              },
             },
             {
               'type': 'none',
@@ -207,7 +86,7 @@
           ],
           'direct_dependent_settings': {
             'include_dirs': [
-               '../../include',
+              '../../include',
             ],
           },
         },
@@ -215,22 +94,46 @@
           'target_name': 'v8_snapshot',
           'type': '<(library)',
           'conditions': [
-            ['OS=="win" and component=="shared_library"', {
-              'defines': [
-                'BUILDING_V8_SHARED',
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host', 'target'],
+              'dependencies': ['mksnapshot#host', 'js2c#host'],
+            }, {
+              'toolsets': ['target'],
+              'dependencies': ['mksnapshot', 'js2c'],
+            }],
+            ['component=="shared_library"', {
+              'conditions': [
+                ['OS=="win"', {
+                  'defines': [
+                    'BUILDING_V8_SHARED',
+                  ],
+                  'direct_dependent_settings': {
+                    'defines': [
+                      'USING_V8_SHARED',
+                    ],
+                  },
+                }, {
+                  'defines': [
+                    'V8_SHARED',
+                  ],
+                  'direct_dependent_settings': {
+                    'defines': [
+                      'V8_SHARED',
+                    ],
+                  },
+                }],
               ],
             }],
           ],
           'dependencies': [
-            'mksnapshot#host',
-            'js2c#host',
             'v8_base',
           ],
           'include_dirs+': [
             '../../src',
           ],
           'sources': [
-            '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc',
+            '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
             '<(INTERMEDIATE_DIR)/snapshot.cc',
           ],
           'actions': [
@@ -242,16 +145,57 @@
               'outputs': [
                 '<(INTERMEDIATE_DIR)/snapshot.cc',
               ],
-              'action': ['<@(_inputs)', '<@(_outputs)'],
+              'variables': {
+                'mksnapshot_flags': [
+                  '--log-snapshot-positions',
+                  '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
+                ],
+              },
+              'conditions': [
+                ['v8_target_arch=="arm"', {
+                  # The following rules should be consistent with chromium's
+                  # common.gypi and V8's runtime rule to ensure they all generate
+                  # the same correct machine code. The following issue is about
+                  # V8's runtime rule about vfpv3 and neon:
+                  # http://code.google.com/p/v8/issues/detail?id=914
+                  'conditions': [
+                    ['armv7==1', {
+                      # The ARM Architecture Manual mandates VFPv3 if NEON is
+                      # available.
+                      # The current V8 doesn't use d16-d31, so for vfpv3-d16 we can
+                      # also enable vfp3 for better performance.
+                      'conditions': [
+                        ['arm_neon!=1 and arm_fpu!="vfpv3" and arm_fpu!="vfpv3-d16"', {
+                          'variables': {
+                            'mksnapshot_flags': [
+                              '--noenable_vfp3',
+                            ],
+                          },
+                        }],
+                      ],
+                    },{ # else: armv7!=1
+                      'variables': {
+                        'mksnapshot_flags': [
+                          '--noenable_armv7',
+                          '--noenable_vfp3',
+                        ],
+                      },
+                    }],
+                  ],
+                }],
+              ],
+              'action': [
+                '<@(_inputs)',
+                '<@(mksnapshot_flags)',
+                '<@(_outputs)'
+              ],
             },
           ],
         },
         {
           'target_name': 'v8_nosnapshot',
           'type': '<(library)',
-          'toolsets': ['host', 'target'],
           'dependencies': [
-            'js2c#host',
             'v8_base',
           ],
           'include_dirs+': [
@@ -259,18 +203,21 @@
           ],
           'sources': [
             '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
             '../../src/snapshot-empty.cc',
           ],
           'conditions': [
-            # The ARM assembler assumes the host is 32 bits, so force building
-            # 32-bit host tools.
-            ['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
-              'cflags': ['-m32'],
-              'ldflags': ['-m32'],
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host', 'target'],
+              'dependencies': ['js2c#host'],
+            }, {
+              'toolsets': ['target'],
+              'dependencies': ['js2c'],
             }],
-            ['OS=="win" and component=="shared_library"', {
+            ['component=="shared_library"', {
               'defines': [
                 'BUILDING_V8_SHARED',
+                'V8_SHARED',
               ],
             }],
           ]
@@ -278,7 +225,6 @@
         {
           'target_name': 'v8_base',
           'type': '<(library)',
-          'toolsets': ['host', 'target'],
           'include_dirs+': [
             '../../src',
           ],
@@ -318,7 +264,6 @@
             '../../src/code-stubs.cc',
             '../../src/code-stubs.h',
             '../../src/code.h',
-            '../../src/codegen-inl.h',
             '../../src/codegen.cc',
             '../../src/codegen.h',
             '../../src/compilation-cache.cc',
@@ -350,11 +295,13 @@
             '../../src/disasm.h',
             '../../src/disassembler.cc',
             '../../src/disassembler.h',
-            '../../src/dtoa.cc',
-            '../../src/dtoa.h',
             '../../src/diy-fp.cc',
             '../../src/diy-fp.h',
             '../../src/double.h',
+            '../../src/dtoa.cc',
+            '../../src/dtoa.h',
+            '../../src/elements.cc',
+            '../../src/elements.h',
             '../../src/execution.cc',
             '../../src/execution.h',
             '../../src/factory.cc',
@@ -366,8 +313,6 @@
             '../../src/fixed-dtoa.h',
             '../../src/flags.cc',
             '../../src/flags.h',
-            '../../src/frame-element.cc',
-            '../../src/frame-element.h',
             '../../src/frames-inl.h',
             '../../src/frames.cc',
             '../../src/frames.h',
@@ -399,6 +344,7 @@
             '../../src/inspector.h',
             '../../src/interpreter-irregexp.cc',
             '../../src/interpreter-irregexp.h',
+            '../../src/json-parser.h',
             '../../src/jsregexp.cc',
             '../../src/jsregexp.h',
             '../../src/isolate.cc',
@@ -439,6 +385,7 @@
             '../../src/platform-tls-win32.h',
             '../../src/platform-tls.h',
             '../../src/platform.h',
+            '../../src/preparse-data-format.h',
             '../../src/preparse-data.cc',
             '../../src/preparse-data.h',
             '../../src/preparser.cc',
@@ -467,19 +414,18 @@
             '../../src/runtime-profiler.h',
             '../../src/safepoint-table.cc',
             '../../src/safepoint-table.h',
-            '../../src/scanner-base.cc',
-            '../../src/scanner-base.h',
             '../../src/scanner.cc',
             '../../src/scanner.h',
+            '../../src/scanner-character-streams.cc',
+            '../../src/scanner-character-streams.h',
             '../../src/scopeinfo.cc',
             '../../src/scopeinfo.h',
             '../../src/scopes.cc',
             '../../src/scopes.h',
             '../../src/serialize.cc',
             '../../src/serialize.h',
-            '../../src/shell.h',
             '../../src/small-pointer-list.h',
-            '../../src/smart-pointer.h',
+            '../../src/smart-array-pointer.h',
             '../../src/snapshot-common.cc',
             '../../src/snapshot.h',
             '../../src/spaces-inl.h',
@@ -495,8 +441,6 @@
             '../../src/stub-cache.h',
             '../../src/token.cc',
             '../../src/token.h',
-            '../../src/top.cc',
-            '../../src/top.h',
             '../../src/type-info.cc',
             '../../src/type-info.h',
             '../../src/unbound-queue-inl.h',
@@ -504,6 +448,7 @@
             '../../src/unicode-inl.h',
             '../../src/unicode.cc',
             '../../src/unicode.h',
+            '../../src/utils-inl.h',
             '../../src/utils.cc',
             '../../src/utils.h',
             '../../src/v8-counters.cc',
@@ -511,10 +456,13 @@
             '../../src/v8.cc',
             '../../src/v8.h',
             '../../src/v8checks.h',
+            '../../src/v8conversions.cc',
+            '../../src/v8conversions.h',
             '../../src/v8globals.h',
             '../../src/v8memory.h',
             '../../src/v8threads.cc',
             '../../src/v8threads.h',
+            '../../src/v8utils.cc',
             '../../src/v8utils.h',
             '../../src/variables.cc',
             '../../src/variables.h',
@@ -531,10 +479,12 @@
             '../../src/extensions/gc-extension.h',
           ],
           'conditions': [
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host', 'target'],
+            }, {
+              'toolsets': ['target'],
+            }],
             ['v8_target_arch=="arm"', {
-              'include_dirs+': [
-                '../../src/arm',
-              ],
               'sources': [
                 '../../src/arm/assembler-arm-inl.h',
                 '../../src/arm/assembler-arm.cc',
@@ -567,19 +517,8 @@
                 '../../src/arm/simulator-arm.cc',
                 '../../src/arm/stub-cache-arm.cc',
               ],
-              'conditions': [
-                # The ARM assembler assumes the host is 32 bits,
-                # so force building 32-bit host tools.
-                ['host_arch=="x64" and _toolset=="host"', {
-                  'cflags': ['-m32'],
-                  'ldflags': ['-m32'],
-                }]
-              ]
             }],
             ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
-              'include_dirs+': [
-                '../../src/ia32',
-              ],
               'sources': [
                 '../../src/ia32/assembler-ia32-inl.h',
                 '../../src/ia32/assembler-ia32.cc',
@@ -611,9 +550,6 @@
               ],
             }],
             ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
-              'include_dirs+': [
-                '../../src/x64',
-              ],
               'sources': [
                 '../../src/x64/assembler-x64-inl.h',
                 '../../src/x64/assembler-x64.cc',
@@ -646,16 +582,43 @@
             }],
             ['OS=="linux"', {
                 'link_settings': {
-                  'libraries': [
-                    # Needed for clock_gettime() used by src/platform-linux.cc.
-                    '-lrt',
-                ]},
+                  'conditions': [
+                    ['v8_compress_startup_data=="bz2"', {
+                      'libraries': [
+                        '-lbz2',
+                    ]}],
+                  ],
+                },
                 'sources': [
                   '../../src/platform-linux.cc',
                   '../../src/platform-posix.cc'
                 ],
               }
             ],
+            ['OS=="android"', {
+                'sources': [
+                  '../../src/platform-posix.cc',
+                ],
+                'conditions': [
+                  ['host_os=="mac" and _toolset!="target"', {
+                    'sources': [
+                      '../../src/platform-macos.cc'
+                    ]
+                  }, {
+                    'sources': [
+                      '../../src/platform-linux.cc'
+                    ]
+                  }],
+                  ['_toolset=="target"', {
+                    'link_settings': {
+                      'libraries': [
+                        '-llog',
+                       ],
+                     }
+                  }],
+                ],
+              },
+            ],
             ['OS=="freebsd"', {
                 'link_settings': {
                   'libraries': [
@@ -687,15 +650,18 @@
             ['OS=="win"', {
               'sources': [
                 '../../src/platform-win32.cc',
+                '../../src/win32-math.cc',
+                '../../src/win32-math.h',
               ],
               'msvs_disabled_warnings': [4351, 4355, 4800],
               'link_settings':  {
-                'libraries': [ '-lwinmm.lib' ],
+                'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
               },
             }],
-            ['OS=="win" and component=="shared_library"', {
+            ['component=="shared_library"', {
               'defines': [
-                'BUILDING_V8_SHARED'
+                'BUILDING_V8_SHARED',
+                'V8_SHARED',
               ],
             }],
           ],
@@ -703,7 +669,13 @@
         {
           'target_name': 'js2c',
           'type': 'none',
-          'toolsets': ['host'],
+          'conditions': [
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host'],
+            }, {
+              'toolsets': ['target'],
+            }],
+          ],
           'variables': {
             'library_files': [
               '../../src/runtime.js',
@@ -722,6 +694,11 @@
               '../../src/regexp.js',
               '../../src/macros.py',
             ],
+            'experimental_library_files': [
+              '../../src/macros.py',
+              '../../src/proxy.js',
+              '../../src/weakmap.js',
+            ],
           },
           'actions': [
             {
@@ -732,22 +709,39 @@
               ],
               'outputs': [
                 '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
-                '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc',
               ],
               'action': [
                 'python',
                 '../../tools/js2c.py',
                 '<@(_outputs)',
                 'CORE',
+                '<(v8_compress_startup_data)',
                 '<@(library_files)'
               ],
             },
+            {
+              'action_name': 'js2c_experimental',
+              'inputs': [
+                '../../tools/js2c.py',
+                '<@(experimental_library_files)',
+              ],
+              'outputs': [
+                '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+              ],
+              'action': [
+                'python',
+                '../../tools/js2c.py',
+                '<@(_outputs)',
+                'EXPERIMENTAL',
+                '<(v8_compress_startup_data)',
+                '<@(experimental_library_files)'
+              ],
+            },
           ],
         },
         {
           'target_name': 'mksnapshot',
           'type': 'executable',
-          'toolsets': ['host'],
           'dependencies': [
             'v8_nosnapshot',
           ],
@@ -758,13 +752,17 @@
             '../../src/mksnapshot.cc',
           ],
           'conditions': [
-            # The ARM assembler assumes the host is 32 bits, so force building
-            # 32-bit host tools.
-            ['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
-              'cflags': ['-m32'],
-              'ldflags': ['-m32'],
-            }]
-          ]
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host'],
+            }, {
+              'toolsets': ['target'],
+            }],
+            ['v8_compress_startup_data=="bz2"', {
+              'libraries': [
+                '-lbz2',
+              ]}
+            ],
+          ],
         },
         {
           'target_name': 'v8_shell',
@@ -776,10 +774,83 @@
             '../../samples/shell.cc',
           ],
           'conditions': [
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host'],
+            }, {
+              'toolsets': ['target'],
+            }],
             ['OS=="win"', {
               # This could be gotten by not setting chromium_code, if that's OK.
               'defines': ['_CRT_SECURE_NO_WARNINGS'],
             }],
+            ['v8_compress_startup_data=="bz2"', {
+              'libraries': [
+                '-lbz2',
+              ]}],
+          ],
+        },
+        {
+          'target_name': 'preparser_lib',
+          'type': '<(library)',
+          'include_dirs+': [
+            '../../src',
+          ],
+          'sources': [
+            '../../include/v8-preparser.h',
+            '../../include/v8stdint.h',
+            '../../src/allocation.cc',
+            '../../src/allocation.h',
+            '../../src/bignum.cc',
+            '../../src/bignum.h',
+            '../../src/bignum-dtoa.cc',
+            '../../src/bignum-dtoa.h',
+            '../../src/cached-powers.cc',
+            '../../src/cached-powers.h',
+            '../../src/char-predicates-inl.h',
+            '../../src/char-predicates.h',
+            '../../src/checks.h',
+            '../../src/conversions-inl.h',
+            '../../src/conversions.cc',
+            '../../src/conversions.h',
+            '../../src/diy-fp.cc',
+            '../../src/diy-fp.h',
+            '../../src/double.h',
+            '../../src/dtoa.cc',
+            '../../src/dtoa.h',
+            '../../src/fast-dtoa.cc',
+            '../../src/fast-dtoa.h',
+            '../../src/fixed-dtoa.cc',
+            '../../src/fixed-dtoa.h',
+            '../../src/globals.h',
+            '../../src/hashmap.cc',
+            '../../src/hashmap.h',
+            '../../src/list-inl.h',
+            '../../src/list.h',
+            '../../src/preparse-data-format.h',
+            '../../src/preparse-data.cc',
+            '../../src/preparse-data.h',
+            '../../src/preparser.cc',
+            '../../src/preparser.h',
+            '../../src/preparser-api.cc',
+            '../../src/scanner.cc',
+            '../../src/scanner.h',
+            '../../src/strtod.cc',
+            '../../src/strtod.h',
+            '../../src/token.cc',
+            '../../src/token.h',
+            '../../src/unicode-inl.h',
+            '../../src/unicode.cc',
+            '../../src/unicode.h',
+            '../../src/utils-inl.h',
+            '../../src/utils.cc',
+            '../../src/utils.h',
+          ],
+          'conditions': [
+            ['OS=="win"', {
+              'sources': [
+                '../../src/win32-math.cc',
+                '../../src/win32-math.h',
+              ]}],
           ],
         },
       ],
@@ -788,6 +859,13 @@
         {
           'target_name': 'v8',
           'type': 'settings',
+          'conditions': [
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host', 'target'],
+            }, {
+              'toolsets': ['target'],
+            }],
+          ],
           'link_settings': {
             'libraries': [
               '-lv8',
@@ -797,6 +875,13 @@
         {
           'target_name': 'v8_shell',
           'type': 'none',
+          'conditions': [
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host'],
+            }, {
+              'toolsets': ['target'],
+            }],
+          ],
           'dependencies': [
             'v8'
           ],
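
The toolset handling above follows one pattern throughout the .gyp file: instead of hard-coding 'toolsets': ['host'], every target now chooses its toolsets through a condition on want_separate_host_toolset, and the old per-target -m32 workaround for building 32-bit ARM host tools is dropped. The sketch below is only a reading of that pattern (gyp itself evaluates the conditions; the helper name and the is_tool flag are invented for illustration): tool targets such as js2c, mksnapshot and v8_shell build for the host when a separate host toolset is requested, while library and settings targets build for both host and target.

    # Illustrative sketch of the recurring toolset condition (names invented).
    def toolsets_for(variables, is_tool):
        if variables.get('want_separate_host_toolset') == 1:
            # Cross builds: build-time tools run on the host, libraries build twice.
            return ['host'] if is_tool else ['host', 'target']
        # Single-toolchain builds: everything uses the target toolset.
        return ['target']

    assert toolsets_for({'want_separate_host_toolset': 1}, is_tool=True) == ['host']
    assert toolsets_for({}, is_tool=False) == ['target']
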
diff --git a/tools/js2c.py b/tools/js2c.py
old mode 100755
new mode 100644
index 2da132f..a2ea8ea
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -33,15 +33,22 @@
 
 import os, re, sys, string
 import jsmin
+import bz2
 
 
-def ToCArray(lines):
+def ToCAsciiArray(lines):
   result = []
   for chr in lines:
     value = ord(chr)
     assert value < 128
     result.append(str(value))
-  result.append("0")
+  return ", ".join(result)
+
+
+def ToCArray(lines):
+  result = []
+  for chr in lines:
+    result.append(str(ord(chr)))
   return ", ".join(result)
 
 
@@ -87,8 +94,8 @@
     return string
 
 
-EVAL_PATTERN = re.compile(r'\beval\s*\(');
-WITH_PATTERN = re.compile(r'\bwith\s*\(');
+EVAL_PATTERN = re.compile(r'\beval\s*\(')
+WITH_PATTERN = re.compile(r'\bwith\s*\(')
 
 
 def Validate(lines, file):
@@ -204,7 +211,7 @@
 
 
 HEADER_TEMPLATE = """\
-// Copyright 2008 Google Inc. All Rights Reserved.
+// Copyright 2011 Google Inc. All Rights Reserved.
 
 // This file was generated from .js source files by SCons.  If you
 // want to make changes to this file you should either change the
@@ -212,11 +219,14 @@
 
 #include "v8.h"
 #include "natives.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
-%(source_lines)s\
+  static const byte sources[] = { %(sources_data)s };
+
+%(raw_sources_declaration)s\
 
   template <>
   int NativesCollection<%(type)s>::GetBuiltinsCount() {
@@ -235,8 +245,13 @@
   }
 
   template <>
-  Vector<const char> NativesCollection<%(type)s>::GetScriptSource(int index) {
-%(get_script_source_cases)s\
+  int NativesCollection<%(type)s>::GetRawScriptsSize() {
+    return %(raw_total_length)i;
+  }
+
+  template <>
+  Vector<const char> NativesCollection<%(type)s>::GetRawScriptSource(int index) {
+%(get_raw_script_source_cases)s\
     return Vector<const char>("", 0);
   }
 
@@ -246,27 +261,43 @@
     return Vector<const char>("", 0);
   }
 
+  template <>
+  Vector<const byte> NativesCollection<%(type)s>::GetScriptsSource() {
+    return Vector<const byte>(sources, %(total_length)i);
+  }
+
+  template <>
+  void NativesCollection<%(type)s>::SetRawScriptsSource(Vector<const char> raw_source) {
+    ASSERT(%(raw_total_length)i == raw_source.length());
+    raw_sources = raw_source.start();
+  }
+
 }  // internal
 }  // v8
 """
 
 
-SOURCE_DECLARATION = """\
-  static const char %(id)s[] = { %(data)s };
+RAW_SOURCES_COMPRESSION_DECLARATION = """\
+  static const char* raw_sources = NULL;
 """
 
 
-GET_DEBUGGER_INDEX_CASE = """\
+RAW_SOURCES_DECLARATION = """\
+  static const char* raw_sources = reinterpret_cast<const char*>(sources);
+"""
+
+
+GET_INDEX_CASE = """\
     if (strcmp(name, "%(id)s") == 0) return %(i)i;
 """
 
 
-GET_DEBUGGER_SCRIPT_SOURCE_CASE = """\
-    if (index == %(i)i) return Vector<const char>(%(id)s, %(length)i);
+GET_RAW_SCRIPT_SOURCE_CASE = """\
+    if (index == %(i)i) return Vector<const char>(raw_sources + %(offset)i, %(raw_length)i);
 """
 
 
-GET_DEBUGGER_SCRIPT_NAME_CASE = """\
+GET_SCRIPT_NAME_CASE = """\
     if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
 """
 
@@ -283,12 +314,10 @@
     else:
       modules.append(s)
 
-  # Build source code lines
-  source_lines = [ ]
-
   minifier = jsmin.JavaScriptMinifier()
 
-  source_lines_empty = []
+  module_offset = 0
+  all_sources = []
   for module in modules:
     filename = str(module)
     debugger = filename.endswith('-debugger.js')
@@ -297,50 +326,46 @@
     lines = ExpandMacros(lines, macros)
     Validate(lines, filename)
     lines = minifier.JSMinify(lines)
-    data = ToCArray(lines)
     id = (os.path.split(filename)[1])[:-3]
     if debugger: id = id[:-9]
+    raw_length = len(lines)
     if debugger:
-      debugger_ids.append((id, len(lines)))
+      debugger_ids.append((id, raw_length, module_offset))
     else:
-      ids.append((id, len(lines)))
-    source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
-    source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
+      ids.append((id, raw_length, module_offset))
+    all_sources.append(lines)
+    module_offset += raw_length
+  total_length = raw_total_length = module_offset
+
+  if env['COMPRESSION'] == 'off':
+    raw_sources_declaration = RAW_SOURCES_DECLARATION
+    sources_data = ToCAsciiArray("".join(all_sources))
+  else:
+    raw_sources_declaration = RAW_SOURCES_COMPRESSION_DECLARATION
+    if env['COMPRESSION'] == 'bz2':
+      all_sources = bz2.compress("".join(all_sources))
+    total_length = len(all_sources)
+    sources_data = ToCArray(all_sources)
 
   # Build debugger support functions
   get_index_cases = [ ]
-  get_script_source_cases = [ ]
+  get_raw_script_source_cases = [ ]
   get_script_name_cases = [ ]
 
   i = 0
-  for (id, length) in debugger_ids:
+  for (id, raw_length, module_offset) in debugger_ids + ids:
     native_name = "native %s.js" % id
-    get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
-    get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
-      'id': id,
-      'length': length,
-      'i': i
-    })
-    get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
-      'name': native_name,
-      'length': len(native_name),
-      'i': i
-    });
-    i = i + 1
-
-  for (id, length) in ids:
-    native_name = "native %s.js" % id
-    get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
-    get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
-      'id': id,
-      'length': length,
-      'i': i
-    })
-    get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
-      'name': native_name,
-      'length': len(native_name),
-      'i': i
-    });
+    get_index_cases.append(GET_INDEX_CASE % { 'id': id, 'i': i })
+    get_raw_script_source_cases.append(GET_RAW_SCRIPT_SOURCE_CASE % {
+        'offset': module_offset,
+        'raw_length': raw_length,
+        'i': i
+        })
+    get_script_name_cases.append(GET_SCRIPT_NAME_CASE % {
+        'name': native_name,
+        'length': len(native_name),
+        'i': i
+        })
     i = i + 1
 
   # Emit result
@@ -348,33 +373,23 @@
   output.write(HEADER_TEMPLATE % {
     'builtin_count': len(ids) + len(debugger_ids),
     'debugger_count': len(debugger_ids),
-    'source_lines': "\n".join(source_lines),
+    'sources_data': sources_data,
+    'raw_sources_declaration': raw_sources_declaration,
+    'raw_total_length': raw_total_length,
+    'total_length': total_length,
     'get_index_cases': "".join(get_index_cases),
-    'get_script_source_cases': "".join(get_script_source_cases),
+    'get_raw_script_source_cases': "".join(get_raw_script_source_cases),
     'get_script_name_cases': "".join(get_script_name_cases),
     'type': env['TYPE']
   })
   output.close()
 
-  if len(target) > 1:
-    output = open(str(target[1]), "w")
-    output.write(HEADER_TEMPLATE % {
-      'builtin_count': len(ids) + len(debugger_ids),
-      'debugger_count': len(debugger_ids),
-      'source_lines': "\n".join(source_lines_empty),
-      'get_index_cases': "".join(get_index_cases),
-      'get_script_source_cases': "".join(get_script_source_cases),
-      'get_script_name_cases': "".join(get_script_name_cases),
-      'type': env['TYPE']
-    })
-    output.close()
-
 def main():
   natives = sys.argv[1]
-  natives_empty = sys.argv[2]
-  type = sys.argv[3]
+  type = sys.argv[2]
+  compression = sys.argv[3]
   source_files = sys.argv[4:]
-  JS2C(source_files, [natives, natives_empty], { 'TYPE': type })
+  JS2C(source_files, [natives], { 'TYPE': type, 'COMPRESSION': compression })
 
 if __name__ == "__main__":
   main()
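
With this change js2c.py no longer emits a libraries-empty.cc; it takes the output file, the natives type and the compression mode, concatenates all minified sources into one byte array, records each script as an (id, raw_length, module_offset) triple into that array, and bz2-compresses the blob when compression is enabled. A minimal sketch of that bookkeeping, with hypothetical source contents and illustrative output paths in the command lines:

    import bz2

    # Hypothetical minified sources keyed by id.
    modules = [("runtime", "var a=1;"), ("math", "var b=2;")]
    ids, offset, all_sources = [], 0, []
    for name, source in modules:
        ids.append((name, len(source), offset))   # (id, raw_length, module_offset)
        all_sources.append(source)
        offset += len(source)

    raw = "".join(all_sources)                    # what raw_sources points at
    blob = bz2.compress(raw.encode("utf-8"))      # the COMPRESSION == 'bz2' case
    # GetRawScriptSource(i) returns raw[module_offset : module_offset + raw_length];
    # with compression, the embedder decompresses the blob and hands it back via
    # SetRawScriptsSource() first.
    assert ids == [("runtime", 8, 0), ("math", 8, 8)]

    # Command lines matching the new main() (output, type, compression, sources):
    #   python tools/js2c.py libraries.cc CORE off src/runtime.js ... src/macros.py
    #   python tools/js2c.py experimental-libraries.cc EXPERIMENTAL off \
    #       src/macros.py src/proxy.js src/weakmap.js
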
diff --git a/tools/linux-tick-processor b/tools/linux-tick-processor
index 9789697..0b0a1fb 100755
--- a/tools/linux-tick-processor
+++ b/tools/linux-tick-processor
@@ -8,12 +8,10 @@
 [ "$D8_PATH" ] || D8_PATH=$tools_path/..
 d8_exec=$D8_PATH/d8
 
-if [ "$1" = "--no-build" ]; then
-  shift
-else
-# compile d8 if it doesn't exist, assuming this script
-# resides in the repository.
-  [ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
+if [ ! -x $d8_exec ]; then
+  echo "d8 shell not found in $D8_PATH"
+  echo "To build, execute 'scons <flags> d8' from the V8 directory"
+  exit 1
 fi
 
 
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
index 7f12c13..58cbb95 100755
--- a/tools/ll_prof.py
+++ b/tools/ll_prof.py
@@ -124,7 +124,7 @@
       self.callee_ticks = collections.defaultdict(lambda: 0)
     self.callee_ticks[callee] += 1
 
-  def PrintAnnotated(self, code_info, options):
+  def PrintAnnotated(self, arch, options):
     if self.self_ticks_map is None:
       ticks_map = []
     else:
@@ -135,7 +135,7 @@
     ticks_offsets = [t[0] for t in ticks_map]
     ticks_counts = [t[1] for t in ticks_map]
     # Get a list of disassembled lines and their addresses.
-    lines = self._GetDisasmLines(code_info, options)
+    lines = self._GetDisasmLines(arch, options)
     if len(lines) == 0:
       return
     # Print annotated lines.
@@ -174,17 +174,17 @@
       self.end_address - self.start_address,
       self.origin)
 
-  def _GetDisasmLines(self, code_info, options):
+  def _GetDisasmLines(self, arch, options):
     if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
       inplace = False
-      filename = options.log + ".code"
+      filename = options.log + ".ll"
     else:
       inplace = True
       filename = self.origin
     return disasm.GetDisasmLines(filename,
                                  self.origin_offset,
                                  self.end_address - self.start_address,
-                                 code_info.arch,
+                                 arch,
                                  inplace)
 
 
@@ -304,76 +304,102 @@
     self.header_size = header_size
 
 
-class CodeLogReader(object):
-  """V8 code event log reader."""
+class SnapshotLogReader(object):
+  """V8 snapshot log reader."""
 
-  _CODE_INFO_RE = re.compile(
-    r"code-info,([^,]+),(\d+)")
+  _SNAPSHOT_CODE_NAME_RE = re.compile(
+    r"snapshot-code-name,(\d+),\"(.*)\"")
 
-  _CODE_CREATE_RE = re.compile(
-    r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"(.*)\"(?:,(0x[a-f0-9]+),([~*])?)?(?:,(\d+))?")
+  def __init__(self, log_name):
+    self.log_name = log_name
 
-  _CODE_MOVE_RE = re.compile(
-    r"code-move,(0x[a-f0-9]+),(0x[a-f0-9]+)")
+  def ReadNameMap(self):
+    log = open(self.log_name, "r")
+    try:
+      snapshot_pos_to_name = {}
+      for line in log:
+        match = SnapshotLogReader._SNAPSHOT_CODE_NAME_RE.match(line)
+        if match:
+          pos = int(match.group(1))
+          name = match.group(2)
+          snapshot_pos_to_name[pos] = name
+    finally:
+      log.close()
+    return snapshot_pos_to_name
 
-  _CODE_DELETE_RE = re.compile(
-    r"code-delete,(0x[a-f0-9]+)")
 
-  _SNAPSHOT_POS_RE = re.compile(
-    r"snapshot-pos,(0x[a-f0-9]+),(\d+)")
+class LogReader(object):
+  """V8 low-level (binary) log reader."""
 
-  _CODE_MOVING_GC = "code-moving-gc"
+  _ARCH_TO_POINTER_TYPE_MAP = {
+    "ia32": ctypes.c_uint32,
+    "arm": ctypes.c_uint32,
+    "x64": ctypes.c_uint64
+  }
 
-  def __init__(self, log_name, code_map, is_snapshot, snapshot_pos_to_name):
-    self.log = open(log_name, "r")
+  _CODE_CREATE_TAG = "C"
+  _CODE_MOVE_TAG = "M"
+  _CODE_DELETE_TAG = "D"
+  _SNAPSHOT_POSITION_TAG = "P"
+  _CODE_MOVING_GC_TAG = "G"
+
+  def __init__(self, log_name, code_map, snapshot_pos_to_name):
+    self.log_file = open(log_name, "r")
+    self.log = mmap.mmap(self.log_file.fileno(), 0, mmap.MAP_PRIVATE)
+    self.log_pos = 0
     self.code_map = code_map
-    self.is_snapshot = is_snapshot
     self.snapshot_pos_to_name = snapshot_pos_to_name
     self.address_to_snapshot_name = {}
 
-  def ReadCodeInfo(self):
-    line = self.log.readline() or ""
-    match = CodeLogReader._CODE_INFO_RE.match(line)
-    assert match, "No code info in log"
-    return CodeInfo(arch=match.group(1), header_size=int(match.group(2)))
+    self.arch = self.log[:self.log.find("\0")]
+    self.log_pos += len(self.arch) + 1
+    assert self.arch in LogReader._ARCH_TO_POINTER_TYPE_MAP, \
+        "Unsupported architecture %s" % self.arch
+    pointer_type = LogReader._ARCH_TO_POINTER_TYPE_MAP[self.arch]
 
-  def ReadUpToGC(self, code_info):
-    made_progress = False
-    code_header_size = code_info.header_size
-    while True:
-      line = self.log.readline()
-      if not line:
-        return made_progress
-      made_progress = True
+    self.code_create_struct = LogReader._DefineStruct([
+        ("name_size", ctypes.c_int32),
+        ("code_address", pointer_type),
+        ("code_size", ctypes.c_int32)])
 
-      if line.startswith(CodeLogReader._CODE_MOVING_GC):
+    self.code_move_struct = LogReader._DefineStruct([
+        ("from_address", pointer_type),
+        ("to_address", pointer_type)])
+
+    self.code_delete_struct = LogReader._DefineStruct([
+        ("address", pointer_type)])
+
+    self.snapshot_position_struct = LogReader._DefineStruct([
+        ("address", pointer_type),
+        ("position", ctypes.c_int32)])
+
+  def ReadUpToGC(self):
+    while self.log_pos < self.log.size():
+      tag = self.log[self.log_pos]
+      self.log_pos += 1
+
+      if tag == LogReader._CODE_MOVING_GC_TAG:
         self.address_to_snapshot_name.clear()
-        return made_progress
+        return
 
-      match = CodeLogReader._CODE_CREATE_RE.match(line)
-      if match:
-        start_address = int(match.group(2), 16) + code_header_size
-        end_address = start_address + int(match.group(3)) - code_header_size
+      if tag == LogReader._CODE_CREATE_TAG:
+        event = self.code_create_struct.from_buffer(self.log, self.log_pos)
+        self.log_pos += ctypes.sizeof(event)
+        start_address = event.code_address
+        end_address = start_address + event.code_size
         if start_address in self.address_to_snapshot_name:
           name = self.address_to_snapshot_name[start_address]
           origin = JS_SNAPSHOT_ORIGIN
         else:
-          tag = match.group(1)
-          optimization_status = match.group(6)
-          func_name = match.group(4)
-          if optimization_status:
-            name = "%s:%s%s" % (tag, optimization_status, func_name)
-          else:
-            name = "%s:%s" % (tag, func_name)
+          name = self.log[self.log_pos:self.log_pos + event.name_size]
           origin = JS_ORIGIN
-        if self.is_snapshot:
-          origin_offset = 0
-        else:
-          origin_offset = int(match.group(7))
+        self.log_pos += event.name_size
+        origin_offset = self.log_pos
+        self.log_pos += event.code_size
         code = Code(name, start_address, end_address, origin, origin_offset)
         conficting_code = self.code_map.Find(start_address)
         if conficting_code:
-          CodeLogReader._HandleCodeConflict(conficting_code, code)
+          LogReader._HandleCodeConflict(conficting_code, code)
           # TODO(vitalyr): this warning is too noisy because of our
           # attempts to reconstruct code log from the snapshot.
           # print >>sys.stderr, \
@@ -382,10 +408,11 @@
         self.code_map.Add(code)
         continue
 
-      match = CodeLogReader._CODE_MOVE_RE.match(line)
-      if match:
-        old_start_address = int(match.group(1), 16) + code_header_size
-        new_start_address = int(match.group(2), 16) + code_header_size
+      if tag == LogReader._CODE_MOVE_TAG:
+        event = self.code_move_struct.from_buffer(self.log, self.log_pos)
+        self.log_pos += ctypes.sizeof(event)
+        old_start_address = event.from_address
+        new_start_address = event.to_address
         if old_start_address == new_start_address:
           # Skip useless code move entries.
           continue
@@ -402,9 +429,10 @@
         self.code_map.Add(code)
         continue
 
-      match = CodeLogReader._CODE_DELETE_RE.match(line)
-      if match:
-        old_start_address = int(match.group(1), 16) + code_header_size
+      if tag == LogReader._CODE_DELETE_TAG:
+        event = self.code_delete_struct.from_buffer(self.log, self.log_pos)
+        self.log_pos += ctypes.sizeof(event)
+        old_start_address = event.address
         code = self.code_map.Find(old_start_address)
         if not code:
           print >>sys.stderr, "Warning: Not found %x" % old_start_address
@@ -414,40 +442,36 @@
         self.code_map.Remove(code)
         continue
 
-      match = CodeLogReader._SNAPSHOT_POS_RE.match(line)
-      if match:
-        start_address = int(match.group(1), 16) + code_header_size
-        snapshot_pos = int(match.group(2))
-        if self.is_snapshot:
-          code = self.code_map.Find(start_address)
-          if code:
-            assert code.start_address == start_address, \
-                "Inexact snapshot address %x for %s" % (start_address, code)
-            self.snapshot_pos_to_name[snapshot_pos] = code.name
-        else:
-          if snapshot_pos in self.snapshot_pos_to_name:
-            self.address_to_snapshot_name[start_address] = \
-                self.snapshot_pos_to_name[snapshot_pos]
+      if tag == LogReader._SNAPSHOT_POSITION_TAG:
+        event = self.snapshot_position_struct.from_buffer(self.log,
+                                                          self.log_pos)
+        self.log_pos += ctypes.sizeof(event)
+        start_address = event.address
+        snapshot_pos = event.position
+        if snapshot_pos in self.snapshot_pos_to_name:
+          self.address_to_snapshot_name[start_address] = \
+              self.snapshot_pos_to_name[snapshot_pos]
+        continue
+
+      assert False, "Unknown tag %s" % tag
 
   def Dispose(self):
     self.log.close()
+    self.log_file.close()
+
+  @staticmethod
+  def _DefineStruct(fields):
+    class Struct(ctypes.Structure):
+      _fields_ = fields
+    return Struct
 
   @staticmethod
   def _HandleCodeConflict(old_code, new_code):
     assert (old_code.start_address == new_code.start_address and
             old_code.end_address == new_code.end_address), \
         "Conficting code log entries %s and %s" % (old_code, new_code)
-    CodeLogReader._UpdateNames(old_code, new_code)
-
-  @staticmethod
-  def _UpdateNames(old_code, new_code):
     if old_code.name == new_code.name:
       return
-    # Kludge: there are code objects with custom names that don't
-    # match their flags.
-    misnamed_code = set(["Builtin:CpuFeatures::Probe"])
-    if old_code.name in misnamed_code:
-      return
     # Code object may be shared by a few functions. Collect the full
     # set of names.
     old_code.AddName(new_code.name)
@@ -607,10 +631,10 @@
   def ReadMmap(self, header, offset):
     mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
                                                offset + self.header_size)
-    # Read null-padded filename.
+    # Read null-terminated filename.
     filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
-                          offset + header.size].rstrip(chr(0))
-    mmap_info.filename = filename
+                          offset + header.size]
+    mmap_info.filename = filename[:filename.find(chr(0))]
     return mmap_info
 
   def ReadSample(self, header, offset):
@@ -756,20 +780,24 @@
     return True
 
 
-def PrintReport(code_map, library_repo, code_info, options):
+def PrintReport(code_map, library_repo, arch, ticks, options):
   print "Ticks per symbol:"
   used_code = [code for code in code_map.UsedCode()]
   used_code.sort(key=lambda x: x.self_ticks, reverse=True)
   for i, code in enumerate(used_code):
-    print "%10d %s [%s]" % (code.self_ticks, code.FullName(), code.origin)
+    code_ticks = code.self_ticks
+    print "%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
+                                    code.FullName(), code.origin)
     if options.disasm_all or i < options.disasm_top:
-      code.PrintAnnotated(code_info, options)
+      code.PrintAnnotated(arch, options)
   print
   print "Ticks per library:"
   mmap_infos = [m for m in library_repo.infos]
   mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
   for mmap_info in mmap_infos:
-    print "%10d %s" % (mmap_info.ticks, mmap_info.unique_name)
+    mmap_ticks = mmap_info.ticks
+    print "%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
+                               mmap_info.unique_name)
 
 
 def PrintDot(code_map, options):
@@ -825,11 +853,11 @@
 
   if not options.quiet:
     if options.snapshot:
-      print "V8 logs: %s, %s, %s.code" % (options.snapshot_log,
-                                          options.log,
-                                          options.log)
+      print "V8 logs: %s, %s, %s.ll" % (options.snapshot_log,
+                                        options.log,
+                                        options.log)
     else:
-      print "V8 log: %s, %s.code (no snapshot)" % (options.log, options.log)
+      print "V8 log: %s, %s.ll (no snapshot)" % (options.log, options.log)
     print "Perf trace file: %s" % options.trace
 
   # Stats.
@@ -840,30 +868,25 @@
   mmap_time = 0
   sample_time = 0
 
-  # Initialize the log reader and get the code info.
-  code_map = CodeMap()
-  snapshot_name_map = {}
-  log_reader = CodeLogReader(log_name=options.log,
-                             code_map=code_map,
-                             is_snapshot=False,
-                             snapshot_pos_to_name=snapshot_name_map)
-  code_info = log_reader.ReadCodeInfo()
-  if not options.quiet:
-    print "Generated code architecture: %s" % code_info.arch
-    print
-
   # Process the snapshot log to fill the snapshot name map.
+  snapshot_name_map = {}
   if options.snapshot:
-    snapshot_log_reader = CodeLogReader(log_name=options.snapshot_log,
-                                        code_map=CodeMap(),
-                                        is_snapshot=True,
-                                        snapshot_pos_to_name=snapshot_name_map)
-    while snapshot_log_reader.ReadUpToGC(code_info):
-      pass
+    snapshot_log_reader = SnapshotLogReader(log_name=options.snapshot_log)
+    snapshot_name_map = snapshot_log_reader.ReadNameMap()
+
+  # Initialize the log reader.
+  code_map = CodeMap()
+  log_reader = LogReader(log_name=options.log + ".ll",
+                         code_map=code_map,
+                         snapshot_pos_to_name=snapshot_name_map)
+  if not options.quiet:
+    print "Generated code architecture: %s" % log_reader.arch
+    print
+    sys.stdout.flush()
 
   # Process the code and trace logs.
   library_repo = LibraryRepo()
-  log_reader.ReadUpToGC(code_info)
+  log_reader.ReadUpToGC()
   trace_reader = TraceReader(options.trace)
   while True:
     header, offset = trace_reader.ReadEventHeader()
@@ -874,7 +897,7 @@
       start = time.time()
       mmap_info = trace_reader.ReadMmap(header, offset)
       if mmap_info.filename == V8_GC_FAKE_MMAP:
-        log_reader.ReadUpToGC(code_info)
+        log_reader.ReadUpToGC()
       else:
         library_repo.Load(mmap_info, code_map, options)
       mmap_time += time.time() - start
@@ -901,7 +924,7 @@
   if options.dot:
     PrintDot(code_map, options)
   else:
-    PrintReport(code_map, library_repo, code_info, options)
+    PrintReport(code_map, library_repo, log_reader.arch, ticks, options)
 
     if not options.quiet:
       print
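
The profiler now reads a binary low-level log (<log>.ll) instead of the textual code log: the file begins with a NUL-terminated architecture string, followed by one-byte event tags ('C', 'M', 'D', 'P', 'G'), each tag followed by a fixed-size struct and, for code creation, the name bytes and the machine code itself, which is why origin_offset can point straight into the mmapped log. A minimal decoding sketch for one code-creation record; the buffer contents are invented, and the 32-bit pointer type matches the ia32/arm entry of _ARCH_TO_POINTER_TYPE_MAP:

    import ctypes

    class CodeCreate(ctypes.Structure):  # mirrors code_create_struct for ia32/arm
        _fields_ = [("name_size", ctypes.c_int32),
                    ("code_address", ctypes.c_uint32),
                    ("code_size", ctypes.c_int32)]

    # Hypothetical log: arch header, one 'C' record with a 3-byte name and
    # 2 bytes of code.
    record = CodeCreate(3, 0x1000, 2)
    buf = bytearray(b"ia32\0C")
    buf += ctypes.string_at(ctypes.byref(record), ctypes.sizeof(record))
    buf += b"foo" + b"\x90\x90"

    pos = buf.index(b"\0") + 1             # skip the architecture string
    assert buf[pos:pos + 1] == b"C"
    pos += 1
    event = CodeCreate.from_buffer(buf, pos)
    pos += ctypes.sizeof(event)
    name = bytes(buf[pos:pos + event.name_size])
    origin_offset = pos + event.name_size  # disassembly later reads the code here
    assert (name, event.code_size) == (b"foo", 2)
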
diff --git a/tools/oom_dump/README b/tools/oom_dump/README
index 0be7511..1d840b9 100644
--- a/tools/oom_dump/README
+++ b/tools/oom_dump/README
@@ -16,7 +16,9 @@
 
 Next step is to build v8.  Note: you should build x64 version of v8,
 if you're on 64-bit platform, otherwise you would get a link error when
-building oom_dump.
+building oom_dump.  Also, if you are testing against an older version of Chrome,
+you should build the corresponding version of V8 to make sure that the type-id
+enum has the correct values.
 
 The last step is to build oom_dump itself.  The following command should work:
 
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 1d80f92..fda7ba9 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2008 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -42,6 +42,7 @@
 import re
 import sys
 import subprocess
+from subprocess import PIPE
 
 # Disabled LINT rules and reason.
 # build/include_what_you_use: Started giving false positives for variables
@@ -88,7 +89,6 @@
 whitespace/braces
 whitespace/comma
 whitespace/comments
-whitespace/end_of_line
 whitespace/ending_newline
 whitespace/indent
 whitespace/labels
@@ -231,11 +231,29 @@
 
 class SourceProcessor(SourceFileProcessor):
   """
-  Check that all files include a copyright notice.
+  Check that all files include a copyright notice and no trailing whitespaces.
   """
 
   RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript',
-      'SConstruct', '.status']
+      'SConstruct', '.status', '.gyp', '.gypi']
+
+  # Overwriting the one in the parent class.
+  def FindFilesIn(self, path):
+    if os.path.exists(path+'/.git'):
+      output = subprocess.Popen('git ls-files --full-name',
+                                stdout=PIPE, cwd=path, shell=True)
+      result = []
+      for file in output.stdout.read().split():
+        for dir_part in os.path.dirname(file).split(os.sep):
+          if self.IgnoreDir(dir_part):
+            break
+        else:
+          if self.IsRelevant(file) and not self.IgnoreFile(file):
+            result.append(join(path, file))
+      if output.wait() == 0:
+        return result
+    return super(SourceProcessor, self).FindFilesIn(path)
+
   def IsRelevant(self, name):
     for ext in SourceProcessor.RELEVANT_EXTENSIONS:
       if name.endswith(ext):
@@ -248,12 +266,19 @@
   def IgnoreDir(self, name):
     return (super(SourceProcessor, self).IgnoreDir(name)
               or (name == 'third_party')
+              or (name == 'gyp')
+              or (name == 'out')
               or (name == 'obj'))
 
-  IGNORE_COPYRIGHTS = ['earley-boyer.js', 'raytrace.js', 'crypto.js',
-      'libraries.cc', 'libraries-empty.cc', 'jsmin.py', 'regexp-pcre.js']
-  IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js',
-      'html-comments.js']
+  IGNORE_COPYRIGHTS = ['cpplint.py',
+                       'earley-boyer.js',
+                       'raytrace.js',
+                       'crypto.js',
+                       'libraries.cc',
+                       'libraries-empty.cc',
+                       'jsmin.py',
+                       'regexp-pcre.js']
+  IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
 
   def ProcessContents(self, name, contents):
     result = True
@@ -266,17 +291,37 @@
       if not COPYRIGHT_HEADER_PATTERN.search(contents):
         print "%s is missing a correct copyright header." % name
         result = False
+    ext = base.split('.').pop()
+    if ' \n' in contents or contents.endswith(' '):
+      line = 0
+      lines = []
+      parts = contents.split(' \n')
+      if not contents.endswith(' '):
+        parts.pop()
+      for part in parts:
+        line += part.count('\n') + 1
+        lines.append(str(line))
+      linenumbers = ', '.join(lines)
+      if len(lines) > 1:
+        print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
+      else:
+        print "%s has trailing whitespaces in line %s." % (name, linenumbers)
+      result = False
     return result
 
   def ProcessFiles(self, files, path):
     success = True
+    violations = 0
     for file in files:
       try:
         handle = open(file)
         contents = handle.read()
-        success = self.ProcessContents(file, contents) and success
+        if not self.ProcessContents(file, contents):
+          success = False
+          violations += 1
       finally:
         handle.close()
+    print "Total violating files: %s" % violations
     return success
 
 
@@ -292,8 +337,10 @@
   parser = GetOptions()
   (options, args) = parser.parse_args()
   success = True
+  print "Running C++ lint check..."
   if not options.no_lint:
     success = CppLintProcessor().Run(workspace) and success
+  print "Running copyright header and trailing whitespaces check..."
   success = SourceProcessor().Run(workspace) and success
   if success:
     return 0
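
The new trailing-whitespace check finds the offending line numbers by splitting the file contents on " \n" and counting the newlines in each fragment; the final fragment is dropped unless the file itself ends with a space, since it holds no violation. A small sketch of that bookkeeping on an invented three-line string:

    # Hypothetical input: only line 2 ends with a space.
    contents = "clean line\nbad line \nanother clean line\n"
    line, lines = 0, []
    parts = contents.split(' \n')
    if not contents.endswith(' '):
        parts.pop()                  # tail after the last " \n" holds no violation
    for part in parts:
        line += part.count('\n') + 1
        lines.append(str(line))
    assert lines == ['2']
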
diff --git a/tools/process-heap-prof.py b/tools/process-heap-prof.py
index 6a2c397..a26cbf1 100755
--- a/tools/process-heap-prof.py
+++ b/tools/process-heap-prof.py
@@ -47,7 +47,7 @@
     itemname = 'heap-js-cons-item'
   else:
     itemname = 'heap-sample-item'
-  
+
   first_call_time = None
   sample_time = 0.0
   sampling = False
@@ -108,11 +108,11 @@
   if not ProcessOptions(options):
     parser.print_help()
     sys.exit();
-  
+
   if not args:
     print "Missing logfile"
     sys.exit();
-    
+
   ProcessLogFile(args[0], options)
 
 
diff --git a/tools/profile.js b/tools/profile.js
index c9c9437..10a07f8 100644
--- a/tools/profile.js
+++ b/tools/profile.js
@@ -162,8 +162,16 @@
     // Function object has been overwritten with a new one.
     func.name = name;
   }
-  var entry = new Profile.DynamicFuncCodeEntry(size, type, func, state);
-  this.codeMap_.addCode(start, entry);
+  var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+  if (entry) {
+    if (entry.size === size && entry.func === func) {
+      // Entry state has changed.
+      entry.state = state;
+    }
+  } else {
+    entry = new Profile.DynamicFuncCodeEntry(size, type, func, state);
+    this.codeMap_.addCode(start, entry);
+  }
   return entry;
 };
 
@@ -374,6 +382,31 @@
 
 
 /**
+ * Cleans up function entries that are not referenced by code entries.
+ */
+Profile.prototype.cleanUpFuncEntries = function() {
+  var referencedFuncEntries = [];
+  var entries = this.codeMap_.getAllDynamicEntriesWithAddresses();
+  for (var i = 0, l = entries.length; i < l; ++i) {
+    if (entries[i][1].constructor === Profile.FunctionEntry) {
+      entries[i][1].used = false;
+    }
+  }
+  for (var i = 0, l = entries.length; i < l; ++i) {
+    if ("func" in entries[i][1]) {
+      entries[i][1].func.used = true;
+    }
+  }
+  for (var i = 0, l = entries.length; i < l; ++i) {
+    if (entries[i][1].constructor === Profile.FunctionEntry &&
+        !entries[i][1].used) {
+      this.codeMap_.deleteCode(entries[i][0]);
+    }
+  }
+};
+
+
+/**
  * Creates a dynamic code entry.
  *
  * @param {number} size Code size.
@@ -408,6 +441,11 @@
 };
 
 
+Profile.DynamicCodeEntry.prototype.toString = function() {
+  return this.getName() + ': ' + this.size.toString(16);
+};
+
+
 /**
  * Creates a dynamic code entry.
  *
@@ -448,6 +486,11 @@
 };
 
 
+Profile.DynamicFuncCodeEntry.prototype.toString = function() {
+  return this.getName() + ': ' + this.size.toString(16);
+};
+
+
 /**
  * Creates a shared function object entry.
  *
@@ -473,6 +516,7 @@
   return name;
 };
 
+Profile.FunctionEntry.prototype.toString = CodeMap.CodeEntry.prototype.toString;
 
 /**
  * Constructs a call graph.
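
cleanUpFuncEntries above is a small mark-and-sweep over the code map: mark every FunctionEntry as unused, re-mark those still referenced through some code entry's func field, then delete the unreferenced ones. The same idea in a short Python sketch (the classes and addresses are invented; the real code walks CodeMap's dynamic entries):

    class FunctionEntry(object):
        def __init__(self, name): self.name, self.used = name, False

    class CodeEntry(object):
        def __init__(self, func): self.func = func

    func_a, func_b = FunctionEntry("a"), FunctionEntry("b")
    code_map = {0x100: func_a, 0x200: func_b, 0x300: CodeEntry(func_a)}

    for entry in code_map.values():            # phase 1: assume every func unused
        if isinstance(entry, FunctionEntry):
            entry.used = False
    for entry in code_map.values():            # phase 2: follow func references
        if hasattr(entry, "func"):
            entry.func.used = True
    dead = [a for a, e in code_map.items()     # sweep: unreferenced FunctionEntries
            if isinstance(e, FunctionEntry) and not e.used]
    for addr in dead:
        del code_map[addr]

    assert 0x100 in code_map and 0x200 not in code_map
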
diff --git a/tools/push-to-trunk.sh b/tools/push-to-trunk.sh
new file mode 100755
index 0000000..761b733
--- /dev/null
+++ b/tools/push-to-trunk.sh
@@ -0,0 +1,431 @@
+#!/bin/bash
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+########## Global variable definitions
+
+BRANCHNAME=prepare-push
+TRUNKBRANCH=trunk-push
+TEMP_BRANCH=v8-push-to-trunk-script-temporary-branch
+VERSION_FILE="src/version.cc"
+PERSISTFILE_BASENAME=/tmp/v8-push-to-trunk-tempfile
+CHANGELOG_ENTRY_FILE="$PERSISTFILE_BASENAME-changelog-entry"
+PATCH_FILE="$PERSISTFILE_BASENAME-patch"
+COMMITMSG_FILE="$PERSISTFILE_BASENAME-commitmsg"
+TOUCHED_FILES_FILE="$PERSISTFILE_BASENAME-touched-files"
+STEP=0
+
+
+########## Function definitions
+
+usage() {
+cat << EOF
+usage: $0 OPTIONS
+
+Performs the necessary steps for a V8 push to trunk. Only works for \
+git checkouts.
+
+OPTIONS:
+  -h    Show this message
+  -s    Specify the step where to start work. Default: 0.
+  -l    Manually specify the git commit ID of the last push to trunk.
+EOF
+}
+
+die() {
+  [[ -n "$1" ]] && echo "Error: $1"
+  echo "Exiting."
+  exit 1
+}
+
+confirm() {
+  echo -n "$1 [Y/n] "
+  read ANSWER
+  if [[ -z "$ANSWER" || "$ANSWER" == "Y" || "$ANSWER" == "y" ]] ; then
+    return 0
+  else
+    return 1
+  fi
+}
+
+delete_branch() {
+  local MATCH=$(git branch | grep $1 | awk '{print $NF}' )
+  if [ "$MATCH" == "$1" ] ; then
+    confirm "Branch $1 exists, do you want to delete it?"
+    if [ $? -eq 0 ] ; then
+      git branch -D $1 || die "Deleting branch '$1' failed."
+      echo "Branch $1 deleted."
+    else
+      die "Can't continue. Please delete branch $1 and try again."
+    fi
+  fi
+}
+
+# Persist and restore variables to support canceling/resuming execution
+# of this script.
+persist() {
+  local VARNAME=$1
+  local FILE="$PERSISTFILE_BASENAME-$VARNAME"
+  echo "${!VARNAME}" > $FILE
+}
+
+restore() {
+  local VARNAME=$1
+  local FILE="$PERSISTFILE_BASENAME-$VARNAME"
+  local VALUE="$(cat $FILE)"
+  eval "$VARNAME=\"$VALUE\""
+}
+
+restore_if_unset() {
+  local VARNAME=$1
+  [[ -z "${!VARNAME}" ]] && restore "$VARNAME"
+  [[ -z "${!VARNAME}" ]] && die "Variable '$VARNAME' could not be restored."
+}
+
+
+########## Option parsing
+
+while getopts ":hs:l:" OPTION ; do
+  case $OPTION in
+    h)  usage
+        exit 0
+        ;;
+    s)  STEP=$OPTARG
+        ;;
+    l)  LASTPUSH=$OPTARG
+        ;;
+    ?)  echo "Illegal option: -$OPTARG"
+        usage
+        exit 1
+        ;;
+  esac
+done
+
+
+########## Regular workflow
+
+# Cancel if this is not a git checkout.
+[[ -d .git ]] \
+  || die "This is not a git checkout, this script won't work for you."
+
+# Cancel if EDITOR is unset or not executable.
+[[ -n "$EDITOR" && -x "$(which $EDITOR)" ]] \
+  || die "Please set your EDITOR environment variable, you'll need it."
+
+if [ $STEP -le 0 ] ; then
+  echo ">>> Step 0: Preparation"
+  # Check for a clean workdir.
+  [[ -z "$(git status -s -uno)" ]] \
+    || die "Workspace is not clean. Please commit or undo your changes."
+
+  # Persist current branch.
+  CURRENT_BRANCH=$(git status -s -b -uno | grep "^##" | awk '{print $2}')
+  persist "CURRENT_BRANCH"
+  # Get ahold of a safe temporary branch and check it out.
+  if [ "$CURRENT_BRANCH" != "$TEMP_BRANCH" ] ; then
+    delete_branch $TEMP_BRANCH
+    git checkout -b $TEMP_BRANCH
+  fi
+  # Delete branches if they exist.
+  delete_branch $BRANCHNAME
+  delete_branch $TRUNKBRANCH
+fi
+
+if [ $STEP -le 1 ] ; then
+  echo ">>> Step 1: Fetch unfetched revisions."
+  git svn fetch || die "'git svn fetch' failed."
+fi
+
+if [ $STEP -le 2 ] ; then
+  echo ">>> Step 2: Create a fresh branch."
+  git checkout -b $BRANCHNAME svn/bleeding_edge \
+    || die "Creating branch $BRANCHNAME failed."
+fi
+
+if [ $STEP -le 3 ] ; then
+  echo ">>> Step 3: Detect commit ID of last push to trunk."
+  [[ -n "$LASTPUSH" ]] || LASTPUSH=$(git log -1 --format=%H ChangeLog)
+  LOOP=1
+  while [ $LOOP -eq 1 ] ; do
+    # Print assumed commit, circumventing git's pager.
+    git log -1 $LASTPUSH | cat
+    confirm "Is the commit printed above the last push to trunk?"
+    if [ $? -eq 0 ] ; then
+      LOOP=0
+    else
+      LASTPUSH=$(git log -1 --format=%H $LASTPUSH^ ChangeLog)
+    fi
+  done
+  persist "LASTPUSH"
+fi
+
+if [ $STEP -le 4 ] ; then
+  echo ">>> Step 4: Prepare raw ChangeLog entry."
+# These version numbers are used again later for the trunk commit.
+  MAJOR=$(grep "#define MAJOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
+  persist "MAJOR"
+  MINOR=$(grep "#define MINOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
+  persist "MINOR"
+  BUILD=$(grep "#define BUILD_NUMBER" "$VERSION_FILE" | awk '{print $NF}')
+  persist "BUILD"
+
+  DATE=$(date +%Y-%m-%d)
+  persist "DATE"
+  echo "$DATE: Version $MAJOR.$MINOR.$BUILD" > "$CHANGELOG_ENTRY_FILE"
+  echo "" >> "$CHANGELOG_ENTRY_FILE"
+  COMMITS=$(git log $LASTPUSH..HEAD --format=%H)
+  for commit in $COMMITS ; do
+    # Get the commit's title line.
+    git log -1 $commit --format="%w(80,8,8)%s" >> "$CHANGELOG_ENTRY_FILE"
+    # Grep for "BUG=xxxx" lines in the commit message.
+    git log -1 $commit --format="%b" | grep BUG= | grep -v "BUG=$" \
+                                     | sed -e 's/^/        /' \
+                                     >> "$CHANGELOG_ENTRY_FILE"
+    # Append the commit's author for reference.
+    git log -1 $commit --format="%w(80,8,8)(%an)" >> "$CHANGELOG_ENTRY_FILE"
+    echo "" >> "$CHANGELOG_ENTRY_FILE"
+  done
+fi
+
+if [ $STEP -le 5 ] ; then
+  echo ">>> Step 5: Edit ChangeLog entry."
+  echo -n "Please press <Return> to have your EDITOR open the ChangeLog entry, \
+then edit its contents to your liking. When you're done, save the file and \
+exit your EDITOR. "
+  read ANSWER
+  $EDITOR "$CHANGELOG_ENTRY_FILE"
+  NEWCHANGELOG=$(mktemp)
+  # Eliminate any trailing newlines by going through a shell variable.
+  CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE")
+  [[ -n "$CHANGELOGENTRY" ]] || die "Empty ChangeLog entry."
+  echo "$CHANGELOGENTRY" > "$NEWCHANGELOG"
+  echo "" >> "$NEWCHANGELOG" # Explicitly insert two empty lines.
+  echo "" >> "$NEWCHANGELOG"
+  cat ChangeLog >> "$NEWCHANGELOG"
+  mv "$NEWCHANGELOG" ChangeLog
+fi
+
+if [ $STEP -le 6 ] ; then
+  echo ">>> Step 6: Increment version number."
+  restore_if_unset "BUILD"
+  NEWBUILD=$(($BUILD + 1))
+  confirm "Automatically increment BUILD_NUMBER? (Saying 'n' will fire up \
+your EDITOR on $VERSION_FILE so you can make arbitrary changes. When \
+you're done, save the file and exit your EDITOR.)"
+  if [ $? -eq 0 ] ; then
+    sed -e "/#define BUILD_NUMBER/s/[0-9]*$/$NEWBUILD/" \
+        -i "$VERSION_FILE"
+  else
+    $EDITOR "$VERSION_FILE"
+  fi
+  NEWMAJOR=$(grep "#define MAJOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
+  persist "NEWMAJOR"
+  NEWMINOR=$(grep "#define MINOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
+  persist "NEWMINOR"
+  NEWBUILD=$(grep "#define BUILD_NUMBER" "$VERSION_FILE" | awk '{print $NF}')
+  persist "NEWBUILD"
+fi
+
+if [ $STEP -le 7 ] ; then
+  echo ">>> Step 7: Commit to local branch."
+  restore_if_unset "NEWMAJOR"
+  restore_if_unset "NEWMINOR"
+  restore_if_unset "NEWBUILD"
+  git commit -a -m "Prepare push to trunk.  \
+Now working on version $NEWMAJOR.$NEWMINOR.$NEWBUILD." \
+    || die "'git commit -a' failed."
+fi
+
+if [ $STEP -le 8 ] ; then
+  echo ">>> Step 8: Upload for code review."
+  echo -n "Please enter the email address of a V8 reviewer for your patch: "
+  read REVIEWER
+  git cl upload -r $REVIEWER --send-mail \
+    || die "'git cl upload' failed, please try again."
+fi
+
+if [ $STEP -le 9 ] ; then
+  echo ">>> Step 9: Commit to the repository."
+  echo "Please wait for an LGTM, then type \"LGTM<Return>\" to commit your \
+change. (If you need to iterate on the patch, do so in another shell.)"
+  unset ANSWER
+  while [ "$ANSWER" != "LGTM" ] ; do
+    [[ -n "$ANSWER" ]] && echo "That was not 'LGTM'."
+    echo -n "> "
+    read ANSWER
+  done
+  # Re-read the ChangeLog entry (to pick up possible changes).
+  cat ChangeLog | awk --posix '{
+    if ($0 ~ /^[0-9]{4}-[0-9]{2}-[0-9]{2}:/) {
+      if (in_firstblock == 1) {
+        exit 0;
+      } else {
+        in_firstblock = 1;
+      }
+    };
+    print $0;
+  }' > "$CHANGELOG_ENTRY_FILE"
+  git cl dcommit || die "'git cl dcommit' failed, please try again."
+fi
+
+if [ $STEP -le 10 ] ; then
+  echo ">>> Step 10: NOP"
+  # Present in the manual guide, not necessary (even harmful!) for this script.
+fi
+
+if [ $STEP -le 11 ] ; then
+  echo ">>> Step 11: Squash commits into one."
+  # Instead of relying on "git rebase -i", we'll just create a diff, because
+  # that's easier to automate.
+  git diff svn/trunk > "$PATCH_FILE"
+  # Convert the ChangeLog entry to commit message format:
+  # - remove date
+  # - remove indentation
+  # - merge paragraphs into single long lines, keeping empty lines between them.
+  restore_if_unset "DATE"
+  CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE")
+  echo "$CHANGELOGENTRY" \
+    | sed -e "s/^$DATE: //" \
+    | sed -e 's/^ *//' \
+    | awk '{
+        if (need_space == 1) {
+          printf(" ");
+        };
+        printf("%s", $0);
+        if ($0 ~ /^$/) {
+          printf("\n\n");
+          need_space = 0;
+        } else {
+          need_space = 1;
+        }
+      }' > "$COMMITMSG_FILE" || die "Commit message editing failed."
+  LOOP=1
+  while [ $LOOP -eq 1 ] ; do
+    echo "This is the trunk commit message:"
+    echo "--------------------"
+    cat "$COMMITMSG_FILE"
+    echo -e "\n--------------------"
+    confirm "Does this look good to you? (Saying 'n' will fire up your \
+EDITOR so you can change the commit message. When you're done, save the \
+file and exit your EDITOR.)"
+    if [ $? -eq 0 ] ; then
+      LOOP=0
+    else
+      $EDITOR "$COMMITMSG_FILE"
+    fi
+  done
+  rm -f "$CHANGELOG_ENTRY_FILE"
+fi
+
+if [ $STEP -le 12 ] ; then
+  echo ">>> Step 12: Create a new branch from trunk."
+  git checkout -b $TRUNKBRANCH svn/trunk \
+    || die "Checking out a new branch '$TRUNKBRANCH' failed."
+fi
+
+if [ $STEP -le 13 ] ; then
+  echo ">>> Step 13: Apply squashed changes."
+  patch -p1 < "$PATCH_FILE" | tee >(awk '{print $NF}' >> "$TOUCHED_FILES_FILE")
+  [[ $? -eq 0 ]] || die "Applying the patch to trunk failed."
+  # Stage added and modified files.
+  TOUCHED_FILES=$(cat "$TOUCHED_FILES_FILE")
+  for FILE in $TOUCHED_FILES ; do
+    git add "$FILE"
+  done
+  # Stage deleted files.
+  DELETED_FILES=$(git status -s -uno --porcelain | grep "^ D" \
+                                                 | awk '{print $NF}')
+  for FILE in $DELETED_FILES ; do
+    git rm "$FILE"
+  done
+  rm -f "$PATCH_FILE"
+  rm -f "$TOUCHED_FILES_FILE"
+fi
+
+if [ $STEP -le 14 ] ; then
+  echo ">>> Step 14: Set correct version for trunk."
+  restore_if_unset "MAJOR"
+  restore_if_unset "MINOR"
+  restore_if_unset "BUILD"
+  sed -e "/#define MAJOR_VERSION/s/[0-9]*$/$MAJOR/" \
+      -e "/#define MINOR_VERSION/s/[0-9]*$/$MINOR/" \
+      -e "/#define BUILD_NUMBER/s/[0-9]*$/$BUILD/" \
+      -e "/#define PATCH_LEVEL/s/[0-9]*$/0/" \
+      -e "/#define IS_CANDIDATE_VERSION/s/[0-9]*$/0/" \
+      -i "$VERSION_FILE" || die "Patching $VERSION_FILE failed."
+fi
+
+if [ $STEP -le 15 ] ; then
+  echo ">>> Step 15: Commit to local trunk branch."
+  git add "$VERSION_FILE"
+  git commit -F "$COMMITMSG_FILE" || die "'git commit' failed."
+  rm -f "$COMMITMSG_FILE"
+fi
+
+if [ $STEP -le 16 ] ; then
+  echo ">>> Step 16: Sanity check."
+  confirm "Please check if your local checkout is sane: Inspect $VERSION_FILE, \
+compile, run tests. Do you want to commit this new trunk revision to the \
+repository?"
+  [[ $? -eq 0 ]] || die "Execution canceled."
+fi
+
+if [ $STEP -le 17 ] ; then
+  echo ">>> Step 17. Commit to SVN."
+  git svn dcommit || die "'git svn dcommit' failed."
+fi
+
+if [ $STEP -le 18 ] ; then
+  echo ">>> Step 18: Tag the new revision."
+  restore_if_unset "MAJOR"
+  restore_if_unset "MINOR"
+  restore_if_unset "BUILD"
+  git svn tag $MAJOR.$MINOR.$BUILD -m "Tagging version $MAJOR.$MINOR.$BUILD" \
+    || die "'git svn tag' failed."
+fi
+
+if [ $STEP -le 19 ] ; then
+  echo ">>> Step 19: Cleanup."
+  restore_if_unset "CURRENT_BRANCH"
+  git checkout -f $CURRENT_BRANCH
+  [[ "$TEMP_BRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TEMP_BRANCH
+  [[ "$BRANCHNAME" != "$CURRENT_BRANCH" ]] && git branch -D $BRANCHNAME
+  [[ "$TRUNKBRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TRUNKBRANCH
+fi
+
+if [ $STEP -le 20 ] ; then
+  echo ">>> Step 20: Done!"
+  restore_if_unset "MAJOR"
+  restore_if_unset "MINOR"
+  restore_if_unset "BUILD"
+  echo "Congratulations, you have successfully created the trunk revision \
+$MAJOR.$MINOR.$BUILD. Please don't forget to update the v8rel spreadsheet, \
+and to roll this new version into Chromium."
+  # Clean up all temporary files.
+  rm -f "$PERSISTFILE_BASENAME"*
+fi
diff --git a/tools/splaytree.js b/tools/splaytree.js
index 1c9aab9..d272a9e 100644
--- a/tools/splaytree.js
+++ b/tools/splaytree.js
@@ -191,6 +191,17 @@
 
 
 /**
+ * @return {Array<*>} An array containing all the values of tree's nodes paired
+ *     with keys.
+ */
+SplayTree.prototype.exportKeysAndValues = function() {
+  var result = [];
+  this.traverse_(function(node) { result.push([node.key, node.value]); });
+  return result;
+};
+
+
+/**
  * @return {Array<*>} An array containing all the values of tree's nodes.
  */
 SplayTree.prototype.exportValues = function() {
diff --git a/tools/stats-viewer.py b/tools/stats-viewer.py
index 05cb762..ab8e287 100755
--- a/tools/stats-viewer.py
+++ b/tools/stats-viewer.py
@@ -104,10 +104,12 @@
         sys.exit(1)
       maps_file = open(maps_name, "r")
       try:
-        m = re.search(r"/dev/shm/\S*", maps_file.read())
-        if m is not None and os.path.exists(m.group(0)):
-          self.data_name = m.group(0)
-        else:
+        self.data_name = None
+        for m in re.finditer(r"/dev/shm/\S*", maps_file.read()):
+          if os.path.exists(m.group(0)):
+            self.data_name = m.group(0)
+            break
+        if self.data_name is None:
           print "Can't find counter file in maps for PID %s." % self.data_name
           sys.exit(1)
       finally:
@@ -414,7 +416,8 @@
   individual counters contained in the file."""
 
   _HEADER_SIZE = 4 * 4
-  _NAME_SIZE = 32
+  _COUNTER_NAME_SIZE = 64
+  _THREAD_NAME_SIZE = 32
 
   def __init__(self, data):
     """Create a new instance.
@@ -426,22 +429,23 @@
     self.max_counters = data.IntAt(8)
     self.max_threads = data.IntAt(12)
     self.counter_names_offset = \
-        self._HEADER_SIZE + self.max_threads * (self._NAME_SIZE + 2 * 4)
+        self._HEADER_SIZE + self.max_threads * (self._THREAD_NAME_SIZE + 2 * 4)
     self.counter_values_offset = \
-        self.counter_names_offset + self.max_counters * self._NAME_SIZE
+        self.counter_names_offset + self.max_counters * self._COUNTER_NAME_SIZE
 
   def CountersInUse(self):
     """Return the number of counters in active use."""
     for i in xrange(self.max_counters):
-      if self.data.ByteAt(self.counter_names_offset + i * self._NAME_SIZE) == 0:
+      name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
+      if self.data.ByteAt(name_offset) == 0:
         return i
     return self.max_counters
 
   def Counter(self, i):
     """Return the i'th counter."""
-    return ChromeCounter(self.data,
-                         self.counter_names_offset + i * self._NAME_SIZE,
-                         self.counter_values_offset + i * self.max_threads * 4)
+    name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
+    value_offset = self.counter_values_offset + i * self.max_threads * 4
+    return ChromeCounter(self.data, name_offset, value_offset)
 
 
 def Main(data_file, name_filter):
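
The split into two name sizes encodes the counters file layout stats-viewer
reads: a 16-byte header (four 4-byte fields), then max_threads thread slots of
a 32-byte name plus two 4-byte fields, then max_counters 64-byte counter
names, and finally 4-byte counter values, one per thread per counter. A small
sketch of the same offset arithmetic (the function name is illustrative):

    HEADER_SIZE = 4 * 4        # four 4-byte header fields
    THREAD_NAME_SIZE = 32
    COUNTER_NAME_SIZE = 64

    def chrome_counter_offsets(i, max_threads, max_counters):
      # Where the i'th counter's name and its per-thread value array start.
      counter_names_offset = HEADER_SIZE + max_threads * (THREAD_NAME_SIZE + 2 * 4)
      counter_values_offset = (counter_names_offset +
                               max_counters * COUNTER_NAME_SIZE)
      name_offset = counter_names_offset + i * COUNTER_NAME_SIZE
      value_offset = counter_values_offset + i * max_threads * 4
      return name_offset, value_offset
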
diff --git a/tools/test-wrapper-gypbuild.py b/tools/test-wrapper-gypbuild.py
new file mode 100755
index 0000000..ad5449a
--- /dev/null
+++ b/tools/test-wrapper-gypbuild.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# This is a convenience script to run the existing tools/test.py script
+# when using the gyp/make based build.
+# It is intended as a stop-gap rather than a long-term solution.
+
+
+import optparse
+import os
+from os.path import join, dirname, abspath
+import subprocess
+import sys
+
+
+PROGRESS_INDICATORS = ['verbose', 'dots', 'color', 'mono']
+
+
+def BuildOptions():
+  result = optparse.OptionParser()
+
+  # Flags specific to this wrapper script:
+  result.add_option("--arch-and-mode",
+                    help='Architecture and mode in the format "arch.mode"',
+                    default=None)
+  result.add_option("--outdir",
+                    help='Base output directory',
+                    default='out')
+  result.add_option("--no-presubmit",
+                    help='Skip presubmit checks',
+                    default=False, action="store_true")
+
+  # Flags this wrapper script handles itself:
+  result.add_option("-m", "--mode",
+                    help="The test modes in which to run (comma-separated)",
+                    default='release,debug')
+  result.add_option("--arch",
+                    help='The architectures to run tests for (comma-separated)',
+                    default='ia32,x64,arm')
+
+  # Flags that are passed on to the wrapped test.py script:
+  result.add_option("-v", "--verbose", help="Verbose output",
+      default=False, action="store_true")
+  result.add_option("-p", "--progress",
+      help="The style of progress indicator (verbose, dots, color, mono)",
+      choices=PROGRESS_INDICATORS, default="mono")
+  result.add_option("--report", help="Print a summary of the tests to be run",
+      default=False, action="store_true")
+  result.add_option("-s", "--suite", help="A test suite",
+      default=[], action="append")
+  result.add_option("-t", "--timeout", help="Timeout in seconds",
+      default=60, type="int")
+  result.add_option("--snapshot", help="Run the tests with snapshot turned on",
+      default=False, action="store_true")
+  result.add_option("--special-command", default=None)
+  result.add_option("--valgrind", help="Run tests through valgrind",
+      default=False, action="store_true")
+  result.add_option("--cat", help="Print the source of the tests",
+      default=False, action="store_true")
+  result.add_option("--warn-unused", help="Report unused rules",
+      default=False, action="store_true")
+  result.add_option("-j", help="The number of parallel tasks to run",
+      default=1, type="int")
+  result.add_option("--time", help="Print timing information after running",
+      default=False, action="store_true")
+  result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
+        dest="suppress_dialogs", default=True, action="store_true")
+  result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
+        dest="suppress_dialogs", action="store_false")
+  result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
+  result.add_option("--store-unexpected-output",
+      help="Store the temporary JS files from tests that fails",
+      dest="store_unexpected_output", default=True, action="store_true")
+  result.add_option("--no-store-unexpected-output",
+      help="Deletes the temporary JS files from tests that fails",
+      dest="store_unexpected_output", action="store_false")
+  result.add_option("--stress-only",
+                    help="Only run tests with --always-opt --stress-opt",
+                    default=False, action="store_true")
+  result.add_option("--nostress",
+                    help="Don't run crankshaft --always-opt --stress-op test",
+                    default=False, action="store_true")
+  result.add_option("--crankshaft",
+                    help="Run with the --crankshaft flag",
+                    default=False, action="store_true")
+  result.add_option("--shard-count",
+                    help="Split testsuites into this number of shards",
+                    default=1, type="int")
+  result.add_option("--shard-run",
+                    help="Run this shard from the split up tests.",
+                    default=1, type="int")
+  result.add_option("--noprof", help="Disable profiling support",
+                    default=False, action="store_true")
+
+  # Flags present in the original test.py that are unsupported in this wrapper:
+  # -S [-> scons_flags] (we build with gyp/make, not scons)
+  # --no-build (always true)
+  # --build-only (always false)
+  # --build-system (always 'gyp')
+  # --simulator (always true if arch==arm, always false otherwise)
+  # --shell (automatically chosen depending on arch and mode)
+
+  return result
+
+
+def ProcessOptions(options):
+  if options.arch_and_mode != None and options.arch_and_mode != "":
+    tokens = options.arch_and_mode.split(".")
+    options.arch = tokens[0]
+    options.mode = tokens[1]
+  options.mode = options.mode.split(',')
+  for mode in options.mode:
+    if not mode in ['debug', 'release']:
+      print "Unknown mode %s" % mode
+      return False
+  options.arch = options.arch.split(',')
+  for arch in options.arch:
+    if not arch in ['ia32', 'x64', 'arm']:
+      print "Unknown architecture %s" % arch
+      return False
+
+  return True
+
+
+def PassOnOptions(options):
+  result = []
+  if options.verbose:
+    result += ['--verbose']
+  if options.progress != 'mono':
+    result += ['--progress=' + options.progress]
+  if options.report:
+    result += ['--report']
+  if options.suite != []:
+    for suite in options.suite:
+      result += ['--suite=../../test/' + suite]
+  if options.timeout != 60:
+    result += ['--timeout=%s' % options.timeout]
+  if options.snapshot:
+    result += ['--snapshot']
+  if options.special_command:
+    result += ['--special-command=' + options.special_command]
+  if options.valgrind:
+    result += ['--valgrind']
+  if options.cat:
+    result += ['--cat']
+  if options.warn_unused:
+    result += ['--warn-unused']
+  if options.j != 1:
+    result += ['-j%s' % options.j]
+  if options.time:
+    result += ['--time']
+  if not options.suppress_dialogs:
+    result += ['--no-suppress-dialogs']
+  if options.isolates:
+    result += ['--isolates']
+  if not options.store_unexpected_output:
+    result += ['--no-store-unexpected-output']
+  if options.stress_only:
+    result += ['--stress-only']
+  if options.nostress:
+    result += ['--nostress']
+  if options.crankshaft:
+    result += ['--crankshaft']
+  if options.shard_count != 1:
+    result += ['--shard-count=%s' % options.shard_count]
+  if options.shard_run != 1:
+    result += ['--shard-run=%s' % options.shard_run]
+  if options.noprof:
+    result += ['--noprof']
+  return result
+
+
+def Main():
+  parser = BuildOptions()
+  (options, args) = parser.parse_args()
+  if not ProcessOptions(options):
+    parser.print_help()
+    return 1
+
+  workspace = abspath(join(dirname(sys.argv[0]), '..'))
+
+  if not options.no_presubmit:
+    print ">>> running presubmit tests"
+    subprocess.call([workspace + '/tools/presubmit.py'])
+
+  args_for_children = [workspace + '/tools/test.py'] + PassOnOptions(options)
+  args_for_children += ['--no-build', '--build-system=gyp']
+  for arg in args:
+    args_for_children += [arg]
+  returncodes = 0
+  env = os.environ
+
+  for mode in options.mode:
+    for arch in options.arch:
+      print ">>> running tests for %s.%s" % (arch, mode)
+      shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
+      env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
+      shell = shellpath + "/d8"
+      child = subprocess.Popen(' '.join(args_for_children +
+                                        ['--arch=' + arch] +
+                                        ['--mode=' + mode] +
+                                        ['--shell=' + shell]),
+                               shell=True,
+                               cwd=workspace,
+                               env=env)
+      returncodes += child.wait()
+
+  return returncodes
+
+
+if __name__ == '__main__':
+  sys.exit(Main())
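
For each requested configuration, the wrapper above builds one tools/test.py
invocation: it forwards the translated options, appends --no-build and
--build-system=gyp, points --shell at the d8 binary under
<outdir>/<arch>.<mode>, and exports that directory's lib.target on
LD_LIBRARY_PATH. For illustration, the pieces assembled for one pair under the
default --outdir=out (the checkout path is an assumption):

    # Illustration only; "/path/to/v8" is an assumed checkout location.
    workspace = "/path/to/v8"
    arch, mode = "ia32", "release"

    shellpath = workspace + "/out/" + arch + "." + mode
    args_for_children = ([workspace + "/tools/test.py"]
                         + ["--no-build", "--build-system=gyp"]
                         + ["--arch=" + arch, "--mode=" + mode]
                         + ["--shell=" + shellpath + "/d8"])
    child_env = {"LD_LIBRARY_PATH": shellpath + "/lib.target"}

So a call like "tools/test-wrapper-gypbuild.py --arch-and-mode=x64.release -j8"
runs exactly one configuration with eight parallel tasks.
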
diff --git a/tools/test.py b/tools/test.py
index 707e725..ecc0062 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -117,6 +117,8 @@
         start = time.time()
         output = case.Run()
         case.duration = (time.time() - start)
+      except BreakNowException:
+        self.terminate = True
       except IOError, e:
         assert self.terminate
         return
@@ -318,6 +320,12 @@
 # --- F r a m e w o r k ---
 # -------------------------
 
+class BreakNowException(Exception):
+  def __init__(self, value):
+    self.value = value
+  def __str__(self):
+    return repr(self.value)
+
 
 class CommandOutput(object):
 
@@ -379,9 +387,12 @@
 
   def Run(self):
     self.BeforeRun()
-    result = "exception"
+    result = None
     try:
       result = self.RunCommand(self.GetCommand())
+    except:
+      self.terminate = True
+      raise BreakNowException("User pressed CTRL+C or IO went wrong")
     finally:
       self.AfterRun(result)
     return result
@@ -423,7 +434,7 @@
              self.output.exit_code != -signal.SIGABRT
 
   def HasTimedOut(self):
-    return self.output.timed_out;
+    return self.output.timed_out
 
   def HasFailed(self):
     execution_failed = self.test.DidFail(self.output)
@@ -451,7 +462,7 @@
   prev_error_mode = SEM_INVALID_VALUE
   try:
     import ctypes
-    prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode);
+    prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
   except ImportError:
     pass
   return prev_error_mode
@@ -459,16 +470,16 @@
 def RunProcess(context, timeout, args, **rest):
   if context.verbose: print "#", " ".join(args)
   popen_args = args
-  prev_error_mode = SEM_INVALID_VALUE;
+  prev_error_mode = SEM_INVALID_VALUE
   if utils.IsWindows():
     popen_args = '"' + subprocess.list2cmdline(args) + '"'
     if context.suppress_dialogs:
       # Try to change the error mode to avoid dialogs on fatal errors. Don't
       # touch any existing error mode flags by merging the existing error mode.
       # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
-      error_mode = SEM_NOGPFAULTERRORBOX;
-      prev_error_mode = Win32SetErrorMode(error_mode);
-      Win32SetErrorMode(error_mode | prev_error_mode);
+      error_mode = SEM_NOGPFAULTERRORBOX
+      prev_error_mode = Win32SetErrorMode(error_mode)
+      Win32SetErrorMode(error_mode | prev_error_mode)
   process = subprocess.Popen(
     shell = utils.IsWindows(),
     args = popen_args,
@@ -516,7 +527,7 @@
       os.unlink(name)
       return
     except OSError, e:
-      retry_count += 1;
+      retry_count += 1
       time.sleep(retry_count * 0.1)
   PrintError("os.unlink() " + str(e))
 
@@ -555,6 +566,13 @@
     return (path[0], path[1:])
 
 
+# Use this to run several variants of the tests, e.g.:
+# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
+VARIANT_FLAGS = [[],
+                 ['--stress-opt', '--always-opt'],
+                 ['--nocrankshaft']]
+
+
 class TestConfiguration(object):
 
   def __init__(self, context, root):
@@ -572,6 +590,11 @@
   def GetTestStatus(self, sections, defs):
     pass
 
+  def VariantFlags(self):
+    return VARIANT_FLAGS
+
+
+
 
 class TestSuite(object):
 
@@ -582,13 +605,6 @@
     return self.name
 
 
-# Use this to run several variants of the tests, e.g.:
-# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
-VARIANT_FLAGS = [[],
-                 ['--stress-opt', '--always-opt'],
-                 ['--nocrankshaft']]
-
-
 class TestRepository(TestSuite):
 
   def __init__(self, path):
@@ -616,12 +632,11 @@
     return self.GetConfiguration(context).GetBuildRequirements()
 
   def AddTestsToList(self, result, current_path, path, context, mode):
-    for v in VARIANT_FLAGS:
+    for v in self.GetConfiguration(context).VariantFlags():
       tests = self.GetConfiguration(context).ListTests(current_path, path, mode, v)
       for t in tests: t.variant_flags = v
       result += tests
 
-
   def GetTestStatus(self, context, sections, defs):
     self.GetConfiguration(context).GetTestStatus(sections, defs)
 
@@ -702,7 +717,12 @@
 
 def RunTestCases(cases_to_run, progress, tasks):
   progress = PROGRESS_INDICATORS[progress](cases_to_run)
-  return progress.Run(tasks)
+  result = 0
+  try:
+    result = progress.Run(tasks)
+  except Exception, e:
+    print "\n", e
+  return result
 
 
 def BuildRequirements(context, requirements, mode, scons_flags):
@@ -1144,6 +1164,7 @@
 
 
 ARCH_GUESS = utils.GuessArchitecture()
+TIMEOUT_DEFAULT = 60
 
 
 def BuildOptions():
@@ -1161,12 +1182,14 @@
       default=False, action="store_true")
   result.add_option("--build-only", help="Only build requirements, don't run the tests",
       default=False, action="store_true")
+  result.add_option("--build-system", help="Build system in use (scons or gyp)",
+      default='scons')
   result.add_option("--report", help="Print a summary of the tests to be run",
       default=False, action="store_true")
   result.add_option("-s", "--suite", help="A test suite",
       default=[], action="append")
   result.add_option("-t", "--timeout", help="Timeout in seconds",
-      default=60, type="int")
+      default=-1, type="int")
   result.add_option("--arch", help='The architecture to run tests for',
       default='none')
   result.add_option("--snapshot", help="Run the tests with snapshot turned on",
@@ -1188,7 +1211,7 @@
         dest="suppress_dialogs", default=True, action="store_true")
   result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
         dest="suppress_dialogs", action="store_false")
-  result.add_option("--shell", help="Path to V8 shell", default="shell")
+  result.add_option("--shell", help="Path to V8 shell", default="d8")
   result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
   result.add_option("--store-unexpected-output",
       help="Store the temporary JS files from tests that fails",
@@ -1240,6 +1263,12 @@
     if options.arch == 'none':
       options.arch = ARCH_GUESS
     options.scons_flags.append("arch=" + options.arch)
+  # Simulators are slow, therefore allow a longer default timeout.
+  if options.timeout == -1:
+    if options.arch == 'arm' or options.arch == 'mips':
+      options.timeout = 2 * TIMEOUT_DEFAULT
+    else:
+      options.timeout = TIMEOUT_DEFAULT
   if options.snapshot:
     options.scons_flags.append("snapshot=on")
   global VARIANT_FLAGS
@@ -1251,16 +1280,30 @@
     if options.special_command:
       options.special_command += " --crankshaft"
     else:
-      options.special_command = "@--crankshaft"
+      options.special_command = "@ --crankshaft"
+  if options.shell.endswith("d8"):
+    if options.special_command:
+      options.special_command += " --test"
+    else:
+      options.special_command = "@ --test"
   if options.noprof:
     options.scons_flags.append("prof=off")
     options.scons_flags.append("profilingsupport=off")
+  if options.build_system == 'gyp':
+    if options.build_only:
+      print "--build-only not supported for gyp, please build manually."
+      options.build_only = False
   return True
 
 
+def DoSkip(case):
+  return (SKIP in case.outcomes) or (SLOW in case.outcomes)
+
+
 REPORT_TEMPLATE = """\
 Total: %(total)i tests
  * %(skipped)4d tests will be skipped
+ * %(timeout)4d tests are expected to time out sometimes
  * %(nocrash)4d tests are expected to be flaky but not crash
  * %(pass)4d tests are expected to pass
  * %(fail_ok)4d tests are expected to fail that we won't fix
@@ -1272,10 +1315,11 @@
     return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
   def IsFailOk(o):
     return (len(o) == 2) and (FAIL in o) and (OKAY in o)
-  unskipped = [c for c in cases if not SKIP in c.outcomes]
+  unskipped = [c for c in cases if not DoSkip(c)]
   print REPORT_TEMPLATE % {
     'total': len(cases),
     'skipped': len(cases) - len(unskipped),
+    'timeout': len([t for t in unskipped if TIMEOUT in t.outcomes]),
     'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
     'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
     'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
@@ -1339,11 +1383,11 @@
     print "shard-run not a valid number, should be in [1:shard-count]"
     print "defaulting back to running all tests"
     return tests
-  count = 0;
+  count = 0
   shard = []
   for test in tests:
     if count % options.shard_count == options.shard_run - 1:
-      shard.append(test);
+      shard.append(test)
     count += 1
   return shard
 
@@ -1374,6 +1418,9 @@
     run_valgrind = join(workspace, "tools", "run-valgrind.py")
     options.special_command = "python -u " + run_valgrind + " @"
 
+  if options.build_system == 'gyp':
+    SUFFIX['debug'] = ''
+
   shell = abspath(options.shell)
   buildspace = dirname(shell)
 
@@ -1417,7 +1464,8 @@
         'system': utils.GuessOS(),
         'arch': options.arch,
         'simulator': options.simulator,
-        'crankshaft': options.crankshaft
+        'crankshaft': options.crankshaft,
+        'isolates': options.isolates
       }
       test_list = root.ListTests([], path, context, mode, [])
       unclassified_tests += test_list
@@ -1446,15 +1494,14 @@
     for rule in globally_unused_rules:
       print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
 
+  if not options.isolates:
+    all_cases = [c for c in all_cases if not c.TestsIsolates()]
+
   if options.report:
     PrintReport(all_cases)
 
   result = None
-  def DoSkip(case):
-    return SKIP in case.outcomes or SLOW in case.outcomes
   cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
-  if not options.isolates:
-    cases_to_run = [c for c in cases_to_run if not c.TestsIsolates()]
   if len(cases_to_run) == 0:
     print "No tests to run."
     return 0
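
Moving VARIANT_FLAGS behind TestConfiguration.VariantFlags() turns the flag
variants into a per-suite hook: AddTestsToList now asks
GetConfiguration(context).VariantFlags() instead of reading the module-level
list, so a configuration can trim or extend the variants its tests run under.
A hypothetical override; the class name and the reduced list are illustrative,
only the VariantFlags() hook itself comes from tools/test.py:

    class ExampleTestConfiguration(object):  # stands in for a TestConfiguration subclass
      def VariantFlags(self):
        # Run this suite's tests only under the default flags and
        # --nocrankshaft, skipping the --stress-opt/--always-opt variant.
        return [[], ['--nocrankshaft']]
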
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 9d6bfb6..5f57835 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -169,17 +169,12 @@
           processor: this.processHeapSampleBegin },
       'heap-sample-end': { parsers: [null, null],
           processor: this.processHeapSampleEnd },
-      'heap-js-prod-item': { parsers: [null, 'var-args'],
-          processor: this.processJSProducer },
       // Ignored events.
       'profiler': null,
       'function-creation': null,
       'function-move': null,
       'function-delete': null,
-      'heap-sample-stats': null,
       'heap-sample-item': null,
-      'heap-js-cons-item': null,
-      'heap-js-ret-item': null,
       // Obsolete row types.
       'code-allocate': null,
       'begin-code-region': null,
@@ -401,17 +396,6 @@
 };
 
 
-TickProcessor.prototype.processJSProducer = function(constructor, stack) {
-  if (!this.currentProducerProfile_) return;
-  if (stack.length == 0) return;
-  var first = stack.shift();
-  var processedStack =
-      this.profile_.resolveAndFilterFuncs_(this.processStack(first, 0, stack));
-  processedStack.unshift(constructor);
-  this.currentProducerProfile_.addPath(processedStack);
-};
-
-
 TickProcessor.prototype.printStatistics = function() {
   print('Statistical profiling result from ' + this.lastLogFileName_ +
         ', (' + this.ticks_.total +
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
deleted file mode 100644
index 77ac2e1..0000000
--- a/tools/v8.xcodeproj/project.pbxproj
+++ /dev/null
@@ -1,2743 +0,0 @@
-// !$*UTF8*$!
-{
-	archiveVersion = 1;
-	classes = {
-	};
-	objectVersion = 45;
-	objects = {
-
-/* Begin PBXAggregateTarget section */
-		7BF891930E73098D000BAF8A /* All */ = {
-			isa = PBXAggregateTarget;
-			buildConfigurationList = 7BF8919F0E7309BE000BAF8A /* Build configuration list for PBXAggregateTarget "All" */;
-			buildPhases = (
-			);
-			dependencies = (
-				89EED40D12B69A0A0075BE1C /* PBXTargetDependency */,
-				7BF891970E73099F000BAF8A /* PBXTargetDependency */,
-				7BF891990E73099F000BAF8A /* PBXTargetDependency */,
-				893988100F2A3647007D5254 /* PBXTargetDependency */,
-				896FD03E0E78D731003DFB6A /* PBXTargetDependency */,
-				896FD0400E78D735003DFB6A /* PBXTargetDependency */,
-				8938A29912D63A680080CDDE /* PBXTargetDependency */,
-				8938A29712D63A680080CDDE /* PBXTargetDependency */,
-				8938A29512D63A680080CDDE /* PBXTargetDependency */,
-			);
-			name = All;
-			productName = All;
-		};
-/* End PBXAggregateTarget section */
-
-/* Begin PBXBuildFile section */
-		8900116C0E71CA2300F91F35 /* libraries.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8900116B0E71CA2300F91F35 /* libraries.cc */; };
-		890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */; };
-		890A14010EE9C4B000E49346 /* regexp-macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */; };
-		890A14020EE9C4B400E49346 /* regexp-macro-assembler-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C750EE466D000B48DEB /* regexp-macro-assembler-irregexp.cc */; };
-		890A14030EE9C4B500E49346 /* regexp-macro-assembler-tracer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C770EE466D000B48DEB /* regexp-macro-assembler-tracer.cc */; };
-		890A14040EE9C4B700E49346 /* regexp-macro-assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C790EE466D000B48DEB /* regexp-macro-assembler.cc */; };
-		8924315C12F8539900906AB2 /* lithium-gap-resolver-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8924315A12F8539900906AB2 /* lithium-gap-resolver-x64.cc */; };
-		8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8938A2A212D63B630080CDDE /* lithium-x64.cc */; };
-		893988070F2A35FA007D5254 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
-		8939880D0F2A362A007D5254 /* d8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C920EE46A1700B48DEB /* d8.cc */; };
-		893988160F2A3688007D5254 /* d8-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988150F2A3686007D5254 /* d8-debug.cc */; };
-		893988330F2A3B8F007D5254 /* d8-js.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988320F2A3B8B007D5254 /* d8-js.cc */; };
-		893A72240F7B101400303DD2 /* platform-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893A72230F7B0FF200303DD2 /* platform-posix.cc */; };
-		893A72250F7B101B00303DD2 /* platform-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893A72230F7B0FF200303DD2 /* platform-posix.cc */; };
-		893CCE640E71D83700357A03 /* code-stubs.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1110E719B8F00D62E90 /* code-stubs.cc */; };
-		893E24A812B14B3D0083370F /* bignum-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248612B14B3D0083370F /* bignum-dtoa.cc */; };
-		893E24A912B14B3D0083370F /* bignum.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248812B14B3D0083370F /* bignum.cc */; };
-		893E24AA12B14B3D0083370F /* cached-powers.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248A12B14B3D0083370F /* cached-powers.cc */; };
-		893E24AB12B14B3D0083370F /* deoptimizer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248B12B14B3D0083370F /* deoptimizer.cc */; };
-		893E24AC12B14B3D0083370F /* hydrogen-instructions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248D12B14B3D0083370F /* hydrogen-instructions.cc */; };
-		893E24AD12B14B3D0083370F /* hydrogen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248F12B14B3D0083370F /* hydrogen.cc */; };
-		893E24AE12B14B3D0083370F /* lithium-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249312B14B3D0083370F /* lithium-allocator.cc */; };
-		893E24AF12B14B3D0083370F /* preparse-data.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249512B14B3D0083370F /* preparse-data.cc */; };
-		893E24B112B14B3D0083370F /* preparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249812B14B3D0083370F /* preparser.cc */; };
-		893E24B212B14B3D0083370F /* runtime-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249A12B14B3D0083370F /* runtime-profiler.cc */; };
-		893E24B312B14B3D0083370F /* safepoint-table.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249C12B14B3D0083370F /* safepoint-table.cc */; };
-		893E24B412B14B3D0083370F /* scanner-base.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249E12B14B3D0083370F /* scanner-base.cc */; };
-		893E24B512B14B3D0083370F /* string-search.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A112B14B3D0083370F /* string-search.cc */; };
-		893E24B612B14B3D0083370F /* strtod.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A312B14B3D0083370F /* strtod.cc */; };
-		893E24B712B14B3D0083370F /* bignum-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248612B14B3D0083370F /* bignum-dtoa.cc */; };
-		893E24B812B14B3D0083370F /* bignum.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248812B14B3D0083370F /* bignum.cc */; };
-		893E24B912B14B3D0083370F /* cached-powers.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248A12B14B3D0083370F /* cached-powers.cc */; };
-		893E24BA12B14B3D0083370F /* deoptimizer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248B12B14B3D0083370F /* deoptimizer.cc */; };
-		893E24BB12B14B3D0083370F /* hydrogen-instructions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248D12B14B3D0083370F /* hydrogen-instructions.cc */; };
-		893E24BC12B14B3D0083370F /* hydrogen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248F12B14B3D0083370F /* hydrogen.cc */; };
-		893E24BD12B14B3D0083370F /* lithium-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249312B14B3D0083370F /* lithium-allocator.cc */; };
-		893E24BE12B14B3D0083370F /* preparse-data.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249512B14B3D0083370F /* preparse-data.cc */; };
-		893E24C012B14B3D0083370F /* preparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249812B14B3D0083370F /* preparser.cc */; };
-		893E24C112B14B3D0083370F /* runtime-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249A12B14B3D0083370F /* runtime-profiler.cc */; };
-		893E24C212B14B3D0083370F /* safepoint-table.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249C12B14B3D0083370F /* safepoint-table.cc */; };
-		893E24C312B14B3D0083370F /* scanner-base.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249E12B14B3D0083370F /* scanner-base.cc */; };
-		893E24C412B14B3D0083370F /* string-search.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A112B14B3D0083370F /* string-search.cc */; };
-		893E24C512B14B3D0083370F /* strtod.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A312B14B3D0083370F /* strtod.cc */; };
-		893E24CC12B14B520083370F /* deoptimizer-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24C612B14B510083370F /* deoptimizer-arm.cc */; };
-		893E24CD12B14B520083370F /* lithium-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24C712B14B510083370F /* lithium-arm.cc */; };
-		893E24CE12B14B520083370F /* lithium-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24C912B14B520083370F /* lithium-codegen-arm.cc */; };
-		893E24D512B14B8A0083370F /* deoptimizer-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D012B14B8A0083370F /* deoptimizer-ia32.cc */; };
-		893E24D612B14B8A0083370F /* lithium-codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D112B14B8A0083370F /* lithium-codegen-ia32.cc */; };
-		893E24D712B14B8A0083370F /* lithium-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D312B14B8A0083370F /* lithium-ia32.cc */; };
-		893E24DC12B14B9F0083370F /* externalize-string-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D812B14B9F0083370F /* externalize-string-extension.cc */; };
-		893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24DA12B14B9F0083370F /* gc-extension.cc */; };
-		893E24DE12B14B9F0083370F /* externalize-string-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D812B14B9F0083370F /* externalize-string-extension.cc */; };
-		893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24DA12B14B9F0083370F /* gc-extension.cc */; };
-		8944AD100F1D4D500028D560 /* regexp-stack.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */; };
-		8944AD110F1D4D570028D560 /* regexp-stack.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */; };
-		894599A30F5D8729008DA8FB /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
-		8946827512C26EB700C914BC /* objects-printer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8946827412C26EB700C914BC /* objects-printer.cc */; };
-		8946827612C26EB700C914BC /* objects-printer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8946827412C26EB700C914BC /* objects-printer.cc */; };
-		89495E480E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
-		89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
-		894A59E912D777E80000766D /* lithium.cc in Sources */ = {isa = PBXBuildFile; fileRef = 894A59E712D777E80000766D /* lithium.cc */; };
-		894A59EA12D777E80000766D /* lithium.cc in Sources */ = {isa = PBXBuildFile; fileRef = 894A59E712D777E80000766D /* lithium.cc */; };
-		894A59EB12D777E80000766D /* lithium.cc in Sources */ = {isa = PBXBuildFile; fileRef = 894A59E712D777E80000766D /* lithium.cc */; };
-		8956922A12D4ED240072C313 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
-		8956922B12D4ED240072C313 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; };
-		8956922C12D4ED240072C313 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; };
-		8956922D12D4ED240072C313 /* api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FA0E719B8F00D62E90 /* api.cc */; };
-		8956922F12D4ED240072C313 /* assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1030E719B8F00D62E90 /* assembler.cc */; };
-		8956923012D4ED240072C313 /* ast.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1050E719B8F00D62E90 /* ast.cc */; };
-		8956923112D4ED240072C313 /* bootstrapper.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1070E719B8F00D62E90 /* bootstrapper.cc */; };
-		8956923312D4ED240072C313 /* builtins.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF10B0E719B8F00D62E90 /* builtins.cc */; };
-		8956923412D4ED240072C313 /* checks.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF10F0E719B8F00D62E90 /* checks.cc */; };
-		8956923512D4ED240072C313 /* circular-queue.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B370F114FF62D007CDAF4 /* circular-queue.cc */; };
-		8956923612D4ED240072C313 /* code-stubs.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1110E719B8F00D62E90 /* code-stubs.cc */; };
-		8956923812D4ED240072C313 /* codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1170E719B8F00D62E90 /* codegen.cc */; };
-		8956923912D4ED240072C313 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
-		8956923A12D4ED240072C313 /* compiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1190E719B8F00D62E90 /* compiler.cc */; };
-		8956923B12D4ED240072C313 /* contexts.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF11C0E719B8F00D62E90 /* contexts.cc */; };
-		8956923C12D4ED240072C313 /* conversions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF11F0E719B8F00D62E90 /* conversions.cc */; };
-		8956923D12D4ED240072C313 /* fixed-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */; };
-		8956923E12D4ED240072C313 /* counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1210E719B8F00D62E90 /* counters.cc */; };
-		8956924012D4ED240072C313 /* cpu-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */; };
-		8956924112D4ED240072C313 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
-		8956924212D4ED240072C313 /* dateparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1260E719B8F00D62E90 /* dateparser.cc */; };
-		8956924312D4ED240072C313 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
-		8956924512D4ED240072C313 /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
-		8956924612D4ED240072C313 /* debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1280E719B8F00D62E90 /* debug.cc */; };
-		8956924812D4ED240072C313 /* disassembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12D0E719B8F00D62E90 /* disassembler.cc */; };
-		8956924912D4ED240072C313 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
-		8956924A12D4ED240072C313 /* execution.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1300E719B8F00D62E90 /* execution.cc */; };
-		8956924B12D4ED240072C313 /* factory.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1320E719B8F00D62E90 /* factory.cc */; };
-		8956924C12D4ED240072C313 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
-		8956924D12D4ED240072C313 /* flags.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1350E719B8F00D62E90 /* flags.cc */; };
-		8956924E12D4ED240072C313 /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; };
-		8956925012D4ED240072C313 /* frames.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF13C0E719B8F00D62E90 /* frames.cc */; };
-		8956925212D4ED240072C313 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
-		8956925312D4ED240072C313 /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
-		8956925412D4ED240072C313 /* global-handles.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF13E0E719B8F00D62E90 /* global-handles.cc */; };
-		8956925512D4ED240072C313 /* handles.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1420E719B8F00D62E90 /* handles.cc */; };
-		8956925612D4ED240072C313 /* hashmap.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1440E719B8F00D62E90 /* hashmap.cc */; };
-		8956925712D4ED240072C313 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
-		8956925812D4ED240072C313 /* heap.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1470E719B8F00D62E90 /* heap.cc */; };
-		8956925912D4ED240072C313 /* ic-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14A0E719B8F00D62E90 /* ic-ia32.cc */; };
-		8956925A12D4ED240072C313 /* ic.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14C0E719B8F00D62E90 /* ic.cc */; };
-		8956925B12D4ED240072C313 /* interpreter-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */; };
-		8956925C12D4ED240072C313 /* jsregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14E0E719B8F00D62E90 /* jsregexp.cc */; };
-		8956926012D4ED240072C313 /* libraries.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8900116B0E71CA2300F91F35 /* libraries.cc */; };
-		8956926112D4ED240072C313 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
-		8956926212D4ED240072C313 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
-		8956926312D4ED240072C313 /* log.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1520E719B8F00D62E90 /* log.cc */; };
-		8956926512D4ED240072C313 /* mark-compact.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1590E719B8F00D62E90 /* mark-compact.cc */; };
-		8956926612D4ED240072C313 /* messages.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF15C0E719B8F00D62E90 /* messages.cc */; };
-		8956926712D4ED240072C313 /* objects-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1600E719B8F00D62E90 /* objects-debug.cc */; };
-		8956926812D4ED240072C313 /* objects.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1620E719B8F00D62E90 /* objects.cc */; };
-		8956926A12D4ED240072C313 /* parser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1640E719B8F00D62E90 /* parser.cc */; };
-		8956926B12D4ED240072C313 /* platform-macos.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1670E719B8F00D62E90 /* platform-macos.cc */; };
-		8956926C12D4ED240072C313 /* platform-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893A72230F7B0FF200303DD2 /* platform-posix.cc */; };
-		8956926D12D4ED240072C313 /* prettyprinter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16B0E719B8F00D62E90 /* prettyprinter.cc */; };
-		8956926E12D4ED240072C313 /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
-		8956926F12D4ED240072C313 /* property.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16D0E719B8F00D62E90 /* property.cc */; };
-		8956927112D4ED240072C313 /* regexp-macro-assembler-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C750EE466D000B48DEB /* regexp-macro-assembler-irregexp.cc */; };
-		8956927212D4ED240072C313 /* regexp-macro-assembler-tracer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C770EE466D000B48DEB /* regexp-macro-assembler-tracer.cc */; };
-		8956927312D4ED240072C313 /* regexp-macro-assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C790EE466D000B48DEB /* regexp-macro-assembler.cc */; };
-		8956927412D4ED240072C313 /* regexp-stack.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */; };
-		8956927712D4ED240072C313 /* rewriter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16F0E719B8F00D62E90 /* rewriter.cc */; };
-		8956927812D4ED240072C313 /* runtime.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1710E719B8F00D62E90 /* runtime.cc */; };
-		8956927912D4ED240072C313 /* scanner.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1730E719B8F00D62E90 /* scanner.cc */; };
-		8956927A12D4ED240072C313 /* scopeinfo.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1760E719B8F00D62E90 /* scopeinfo.cc */; };
-		8956927B12D4ED240072C313 /* scopes.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1780E719B8F00D62E90 /* scopes.cc */; };
-		8956927C12D4ED240072C313 /* serialize.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17A0E719B8F00D62E90 /* serialize.cc */; };
-		8956927D12D4ED240072C313 /* snapshot-common.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1820E719B8F00D62E90 /* snapshot-common.cc */; };
-		8956927E12D4ED240072C313 /* snapshot-empty.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1830E719B8F00D62E90 /* snapshot-empty.cc */; };
-		8956927F12D4ED240072C313 /* spaces.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1860E719B8F00D62E90 /* spaces.cc */; };
-		8956928012D4ED240072C313 /* string-stream.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-stream.cc */; };
-		8956928212D4ED240072C313 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
-		8956928312D4ED240072C313 /* token.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18E0E719B8F00D62E90 /* token.cc */; };
-		8956928412D4ED240072C313 /* top.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1900E719B8F00D62E90 /* top.cc */; };
-		8956928512D4ED240072C313 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
-		8956928612D4ED240072C313 /* unicode.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1930E719B8F00D62E90 /* unicode.cc */; };
-		8956928712D4ED240072C313 /* utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1970E719B8F00D62E90 /* utils.cc */; };
-		8956928812D4ED240072C313 /* v8-counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1990E719B8F00D62E90 /* v8-counters.cc */; };
-		8956928912D4ED240072C313 /* v8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19B0E719B8F00D62E90 /* v8.cc */; };
-		8956928A12D4ED240072C313 /* v8threads.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19D0E719B8F00D62E90 /* v8threads.cc */; };
-		8956928B12D4ED240072C313 /* variables.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19F0E719B8F00D62E90 /* variables.cc */; };
-		8956928C12D4ED240072C313 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
-		8956929012D4ED240072C313 /* zone.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1A20E719B8F00D62E90 /* zone.cc */; };
-		8956929212D4ED240072C313 /* bignum-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248612B14B3D0083370F /* bignum-dtoa.cc */; };
-		8956929312D4ED240072C313 /* bignum.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248812B14B3D0083370F /* bignum.cc */; };
-		8956929412D4ED240072C313 /* cached-powers.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248A12B14B3D0083370F /* cached-powers.cc */; };
-		8956929512D4ED240072C313 /* deoptimizer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248B12B14B3D0083370F /* deoptimizer.cc */; };
-		8956929612D4ED240072C313 /* hydrogen-instructions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248D12B14B3D0083370F /* hydrogen-instructions.cc */; };
-		8956929712D4ED240072C313 /* hydrogen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248F12B14B3D0083370F /* hydrogen.cc */; };
-		8956929812D4ED240072C313 /* lithium-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249312B14B3D0083370F /* lithium-allocator.cc */; };
-		8956929912D4ED240072C313 /* preparse-data.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249512B14B3D0083370F /* preparse-data.cc */; };
-		8956929A12D4ED240072C313 /* preparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249812B14B3D0083370F /* preparser.cc */; };
-		8956929B12D4ED240072C313 /* runtime-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249A12B14B3D0083370F /* runtime-profiler.cc */; };
-		8956929C12D4ED240072C313 /* safepoint-table.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249C12B14B3D0083370F /* safepoint-table.cc */; };
-		8956929D12D4ED240072C313 /* scanner-base.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249E12B14B3D0083370F /* scanner-base.cc */; };
-		8956929E12D4ED240072C313 /* string-search.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A112B14B3D0083370F /* string-search.cc */; };
-		8956929F12D4ED240072C313 /* strtod.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A312B14B3D0083370F /* strtod.cc */; };
-		895692A312D4ED240072C313 /* externalize-string-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D812B14B9F0083370F /* externalize-string-extension.cc */; };
-		895692A412D4ED240072C313 /* gc-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24DA12B14B9F0083370F /* gc-extension.cc */; };
-		895692A512D4ED240072C313 /* objects-printer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8946827412C26EB700C914BC /* objects-printer.cc */; };
-		8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
-		895D5B531334212D00254083 /* isolate.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895D5B521334212D00254083 /* isolate.cc */; };
-		895D5B541334212D00254083 /* isolate.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895D5B521334212D00254083 /* isolate.cc */; };
-		895D5B551334212D00254083 /* isolate.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895D5B521334212D00254083 /* isolate.cc */; };
-		895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895FA748107FFE73006F39D4 /* constants-arm.cc */; };
-		896FA1E5130F93D300042054 /* lithium-gap-resolver-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */; };
-		896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
-		897C77D012B68E3D000767A8 /* d8-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988150F2A3686007D5254 /* d8-debug.cc */; };
-		897C77D112B68E3D000767A8 /* d8-js.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988320F2A3B8B007D5254 /* d8-js.cc */; };
-		897C77D212B68E3D000767A8 /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
-		897C77D312B68E3D000767A8 /* d8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C920EE46A1700B48DEB /* d8.cc */; };
-		897C77DD12B68E6E000767A8 /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
-		897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; settings = {COMPILER_FLAGS = "-I../include"; }; };
-		897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
-		8981F6001010501900D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; };
-		8981F6011010502800D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; };
-		898BD20E0EF6CC930068B00A /* debug-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 898BD20D0EF6CC850068B00A /* debug-ia32.cc */; };
-		898BD20F0EF6CC9A0068B00A /* debug-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 898BD20C0EF6CC850068B00A /* debug-arm.cc */; };
-		89A15C7B0EE466EB00B48DEB /* regexp-macro-assembler-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C720EE466D000B48DEB /* regexp-macro-assembler-ia32.cc */; };
-		89A15C810EE4674900B48DEB /* regexp-macro-assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C790EE466D000B48DEB /* regexp-macro-assembler.cc */; };
-		89A15C830EE4675E00B48DEB /* regexp-macro-assembler-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C750EE466D000B48DEB /* regexp-macro-assembler-irregexp.cc */; };
-		89A15C850EE4678B00B48DEB /* interpreter-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */; };
-		89A15C8A0EE467D100B48DEB /* regexp-macro-assembler-tracer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C770EE466D000B48DEB /* regexp-macro-assembler-tracer.cc */; };
-		89A88DEC0E71A5FF0043BA31 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; };
-		89A88DED0E71A6000043BA31 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; };
-		89A88DEE0E71A6010043BA31 /* api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FA0E719B8F00D62E90 /* api.cc */; };
-		89A88DEF0E71A60A0043BA31 /* assembler-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1010E719B8F00D62E90 /* assembler-ia32.cc */; };
-		89A88DF00E71A60A0043BA31 /* assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1030E719B8F00D62E90 /* assembler.cc */; };
-		89A88DF10E71A60B0043BA31 /* ast.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1050E719B8F00D62E90 /* ast.cc */; };
-		89A88DF20E71A60C0043BA31 /* bootstrapper.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1070E719B8F00D62E90 /* bootstrapper.cc */; };
-		89A88DF40E71A6160043BA31 /* builtins-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF10A0E719B8F00D62E90 /* builtins-ia32.cc */; };
-		89A88DF50E71A6170043BA31 /* builtins.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF10B0E719B8F00D62E90 /* builtins.cc */; };
-		89A88DF60E71A61C0043BA31 /* checks.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF10F0E719B8F00D62E90 /* checks.cc */; };
-		89A88DF70E71A6240043BA31 /* codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1150E719B8F00D62E90 /* codegen-ia32.cc */; };
-		89A88DF80E71A6260043BA31 /* codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1170E719B8F00D62E90 /* codegen.cc */; };
-		89A88DF90E71A6430043BA31 /* compiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1190E719B8F00D62E90 /* compiler.cc */; };
-		89A88DFA0E71A6440043BA31 /* contexts.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF11C0E719B8F00D62E90 /* contexts.cc */; };
-		89A88DFB0E71A6440043BA31 /* conversions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF11F0E719B8F00D62E90 /* conversions.cc */; };
-		89A88DFC0E71A6460043BA31 /* counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1210E719B8F00D62E90 /* counters.cc */; };
-		89A88DFD0E71A6470043BA31 /* cpu-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1240E719B8F00D62E90 /* cpu-ia32.cc */; };
-		89A88DFE0E71A6480043BA31 /* dateparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1260E719B8F00D62E90 /* dateparser.cc */; };
-		89A88DFF0E71A6530043BA31 /* debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1280E719B8F00D62E90 /* debug.cc */; };
-		89A88E000E71A6540043BA31 /* disasm-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12B0E719B8F00D62E90 /* disasm-ia32.cc */; };
-		89A88E010E71A6550043BA31 /* disassembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12D0E719B8F00D62E90 /* disassembler.cc */; };
-		89A88E030E71A65B0043BA31 /* execution.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1300E719B8F00D62E90 /* execution.cc */; };
-		89A88E040E71A65D0043BA31 /* factory.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1320E719B8F00D62E90 /* factory.cc */; };
-		89A88E050E71A65D0043BA31 /* flags.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1350E719B8F00D62E90 /* flags.cc */; };
-		89A88E060E71A6600043BA31 /* frames-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1390E719B8F00D62E90 /* frames-ia32.cc */; };
-		89A88E070E71A6610043BA31 /* frames.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF13C0E719B8F00D62E90 /* frames.cc */; };
-		89A88E080E71A6620043BA31 /* global-handles.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF13E0E719B8F00D62E90 /* global-handles.cc */; };
-		89A88E090E71A6640043BA31 /* handles.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1420E719B8F00D62E90 /* handles.cc */; };
-		89A88E0A0E71A6650043BA31 /* hashmap.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1440E719B8F00D62E90 /* hashmap.cc */; };
-		89A88E0B0E71A66C0043BA31 /* heap.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1470E719B8F00D62E90 /* heap.cc */; };
-		89A88E0C0E71A66D0043BA31 /* ic-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14A0E719B8F00D62E90 /* ic-ia32.cc */; };
-		89A88E0D0E71A66E0043BA31 /* ic.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14C0E719B8F00D62E90 /* ic.cc */; };
-		89A88E0E0E71A66F0043BA31 /* jsregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14E0E719B8F00D62E90 /* jsregexp.cc */; };
-		89A88E0F0E71A6740043BA31 /* log.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1520E719B8F00D62E90 /* log.cc */; };
-		89A88E100E71A6770043BA31 /* macro-assembler-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1560E719B8F00D62E90 /* macro-assembler-ia32.cc */; };
-		89A88E110E71A6780043BA31 /* mark-compact.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1590E719B8F00D62E90 /* mark-compact.cc */; };
-		89A88E120E71A67A0043BA31 /* messages.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF15C0E719B8F00D62E90 /* messages.cc */; };
-		89A88E130E71A6860043BA31 /* objects-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1600E719B8F00D62E90 /* objects-debug.cc */; };
-		89A88E140E71A6870043BA31 /* objects.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1620E719B8F00D62E90 /* objects.cc */; };
-		89A88E150E71A68C0043BA31 /* parser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1640E719B8F00D62E90 /* parser.cc */; };
-		89A88E160E71A68E0043BA31 /* platform-macos.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1670E719B8F00D62E90 /* platform-macos.cc */; };
-		89A88E170E71A6950043BA31 /* prettyprinter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16B0E719B8F00D62E90 /* prettyprinter.cc */; };
-		89A88E180E71A6960043BA31 /* property.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16D0E719B8F00D62E90 /* property.cc */; };
-		89A88E190E71A6970043BA31 /* rewriter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16F0E719B8F00D62E90 /* rewriter.cc */; };
-		89A88E1A0E71A69B0043BA31 /* runtime.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1710E719B8F00D62E90 /* runtime.cc */; };
-		89A88E1B0E71A69D0043BA31 /* scanner.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1730E719B8F00D62E90 /* scanner.cc */; };
-		89A88E1C0E71A69E0043BA31 /* scopeinfo.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1760E719B8F00D62E90 /* scopeinfo.cc */; };
-		89A88E1D0E71A6A00043BA31 /* scopes.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1780E719B8F00D62E90 /* scopes.cc */; };
-		89A88E1E0E71A6A30043BA31 /* serialize.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17A0E719B8F00D62E90 /* serialize.cc */; };
-		89A88E1F0E71A6B40043BA31 /* snapshot-common.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1820E719B8F00D62E90 /* snapshot-common.cc */; };
-		89A88E200E71A6B60043BA31 /* snapshot-empty.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1830E719B8F00D62E90 /* snapshot-empty.cc */; };
-		89A88E210E71A6B70043BA31 /* spaces.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1860E719B8F00D62E90 /* spaces.cc */; };
-		89A88E220E71A6BC0043BA31 /* string-stream.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-stream.cc */; };
-		89A88E230E71A6BE0043BA31 /* stub-cache-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18B0E719B8F00D62E90 /* stub-cache-ia32.cc */; };
-		89A88E240E71A6BF0043BA31 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
-		89A88E250E71A6C20043BA31 /* token.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18E0E719B8F00D62E90 /* token.cc */; };
-		89A88E260E71A6C90043BA31 /* top.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1900E719B8F00D62E90 /* top.cc */; };
-		89A88E270E71A6CB0043BA31 /* unicode.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1930E719B8F00D62E90 /* unicode.cc */; };
-		89A88E290E71A6CE0043BA31 /* utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1970E719B8F00D62E90 /* utils.cc */; };
-		89A88E2A0E71A6D00043BA31 /* v8-counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1990E719B8F00D62E90 /* v8-counters.cc */; };
-		89A88E2B0E71A6D10043BA31 /* v8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19B0E719B8F00D62E90 /* v8.cc */; };
-		89A88E2C0E71A6D20043BA31 /* v8threads.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19D0E719B8F00D62E90 /* v8threads.cc */; };
-		89A88E2D0E71A6D50043BA31 /* variables.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19F0E719B8F00D62E90 /* variables.cc */; };
-		89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1A20E719B8F00D62E90 /* zone.cc */; };
-		89B91B9B12D4EF95002FF4BC /* assembler-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B7C12D4EF95002FF4BC /* assembler-x64.cc */; };
-		89B91B9C12D4EF95002FF4BC /* builtins-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B7E12D4EF95002FF4BC /* builtins-x64.cc */; };
-		89B91B9D12D4EF95002FF4BC /* code-stubs-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B7F12D4EF95002FF4BC /* code-stubs-x64.cc */; };
-		89B91B9E12D4EF95002FF4BC /* codegen-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8212D4EF95002FF4BC /* codegen-x64.cc */; };
-		89B91B9F12D4EF95002FF4BC /* cpu-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8412D4EF95002FF4BC /* cpu-x64.cc */; };
-		89B91BA012D4EF95002FF4BC /* debug-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8512D4EF95002FF4BC /* debug-x64.cc */; };
-		89B91BA112D4EF95002FF4BC /* deoptimizer-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8612D4EF95002FF4BC /* deoptimizer-x64.cc */; };
-		89B91BA212D4EF95002FF4BC /* disasm-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8712D4EF95002FF4BC /* disasm-x64.cc */; };
-		89B91BA312D4EF95002FF4BC /* frames-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8812D4EF95002FF4BC /* frames-x64.cc */; };
-		89B91BA412D4EF95002FF4BC /* full-codegen-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8A12D4EF95002FF4BC /* full-codegen-x64.cc */; };
-		89B91BA512D4EF95002FF4BC /* ic-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8B12D4EF95002FF4BC /* ic-x64.cc */; };
-		89B91BA712D4EF95002FF4BC /* macro-assembler-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B8F12D4EF95002FF4BC /* macro-assembler-x64.cc */; };
-		89B91BA812D4EF95002FF4BC /* regexp-macro-assembler-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B9112D4EF95002FF4BC /* regexp-macro-assembler-x64.cc */; };
-		89B91BAA12D4EF95002FF4BC /* simulator-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B9612D4EF95002FF4BC /* simulator-x64.cc */; };
-		89B91BAB12D4EF95002FF4BC /* stub-cache-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89B91B9812D4EF95002FF4BC /* stub-cache-x64.cc */; };
-		89B91BB812D4F02A002FF4BC /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; settings = {COMPILER_FLAGS = "-I../include"; }; };
-		89B91BC512D4F02A002FF4BC /* d8-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988150F2A3686007D5254 /* d8-debug.cc */; };
-		89B91BC612D4F02A002FF4BC /* d8-js.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988320F2A3B8B007D5254 /* d8-js.cc */; };
-		89B91BC712D4F02A002FF4BC /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
-		89B91BC812D4F02A002FF4BC /* d8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C920EE46A1700B48DEB /* d8.cc */; };
-		89B91BFA12D4F1AA002FF4BC /* libv8-x64.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 895692AA12D4ED240072C313 /* libv8-x64.a */; };
-		89B91BFB12D4F1BB002FF4BC /* libv8-x64.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 895692AA12D4ED240072C313 /* libv8-x64.a */; };
-		89B933AF0FAA0F9600201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
-		89B933B00FAA0F9D00201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
-		89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */; };
-		89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
-		89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
-		89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
-		89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
-		89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
-		89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
-		89F23C3F0E78D5B2006B2466 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; };
-		89F23C400E78D5B2006B2466 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; };
-		89F23C410E78D5B2006B2466 /* api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FA0E719B8F00D62E90 /* api.cc */; };
-		89F23C430E78D5B2006B2466 /* assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1030E719B8F00D62E90 /* assembler.cc */; };
-		89F23C440E78D5B2006B2466 /* ast.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1050E719B8F00D62E90 /* ast.cc */; };
-		89F23C450E78D5B2006B2466 /* bootstrapper.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1070E719B8F00D62E90 /* bootstrapper.cc */; };
-		89F23C470E78D5B2006B2466 /* builtins.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF10B0E719B8F00D62E90 /* builtins.cc */; };
-		89F23C480E78D5B2006B2466 /* checks.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF10F0E719B8F00D62E90 /* checks.cc */; };
-		89F23C490E78D5B2006B2466 /* code-stubs.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1110E719B8F00D62E90 /* code-stubs.cc */; };
-		89F23C4B0E78D5B2006B2466 /* codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1170E719B8F00D62E90 /* codegen.cc */; };
-		89F23C4C0E78D5B2006B2466 /* compiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1190E719B8F00D62E90 /* compiler.cc */; };
-		89F23C4D0E78D5B2006B2466 /* contexts.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF11C0E719B8F00D62E90 /* contexts.cc */; };
-		89F23C4E0E78D5B2006B2466 /* conversions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF11F0E719B8F00D62E90 /* conversions.cc */; };
-		89F23C4F0E78D5B2006B2466 /* counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1210E719B8F00D62E90 /* counters.cc */; };
-		89F23C510E78D5B2006B2466 /* dateparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1260E719B8F00D62E90 /* dateparser.cc */; };
-		89F23C520E78D5B2006B2466 /* debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1280E719B8F00D62E90 /* debug.cc */; };
-		89F23C540E78D5B2006B2466 /* disassembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12D0E719B8F00D62E90 /* disassembler.cc */; };
-		89F23C560E78D5B2006B2466 /* execution.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1300E719B8F00D62E90 /* execution.cc */; };
-		89F23C570E78D5B2006B2466 /* factory.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1320E719B8F00D62E90 /* factory.cc */; };
-		89F23C580E78D5B2006B2466 /* flags.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1350E719B8F00D62E90 /* flags.cc */; };
-		89F23C5A0E78D5B2006B2466 /* frames.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF13C0E719B8F00D62E90 /* frames.cc */; };
-		89F23C5B0E78D5B2006B2466 /* global-handles.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF13E0E719B8F00D62E90 /* global-handles.cc */; };
-		89F23C5C0E78D5B2006B2466 /* handles.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1420E719B8F00D62E90 /* handles.cc */; };
-		89F23C5D0E78D5B2006B2466 /* hashmap.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1440E719B8F00D62E90 /* hashmap.cc */; };
-		89F23C5E0E78D5B2006B2466 /* heap.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1470E719B8F00D62E90 /* heap.cc */; };
-		89F23C600E78D5B2006B2466 /* ic.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14C0E719B8F00D62E90 /* ic.cc */; };
-		89F23C610E78D5B2006B2466 /* jsregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF14E0E719B8F00D62E90 /* jsregexp.cc */; };
-		89F23C620E78D5B2006B2466 /* libraries.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8900116B0E71CA2300F91F35 /* libraries.cc */; };
-		89F23C630E78D5B2006B2466 /* log.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1520E719B8F00D62E90 /* log.cc */; };
-		89F23C650E78D5B2006B2466 /* mark-compact.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1590E719B8F00D62E90 /* mark-compact.cc */; };
-		89F23C660E78D5B2006B2466 /* messages.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF15C0E719B8F00D62E90 /* messages.cc */; };
-		89F23C670E78D5B2006B2466 /* objects-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1600E719B8F00D62E90 /* objects-debug.cc */; };
-		89F23C680E78D5B2006B2466 /* objects.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1620E719B8F00D62E90 /* objects.cc */; };
-		89F23C690E78D5B2006B2466 /* parser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1640E719B8F00D62E90 /* parser.cc */; };
-		89F23C6A0E78D5B2006B2466 /* platform-macos.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1670E719B8F00D62E90 /* platform-macos.cc */; };
-		89F23C6B0E78D5B2006B2466 /* prettyprinter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16B0E719B8F00D62E90 /* prettyprinter.cc */; };
-		89F23C6C0E78D5B2006B2466 /* property.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16D0E719B8F00D62E90 /* property.cc */; };
-		89F23C6D0E78D5B2006B2466 /* rewriter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF16F0E719B8F00D62E90 /* rewriter.cc */; };
-		89F23C6E0E78D5B2006B2466 /* runtime.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1710E719B8F00D62E90 /* runtime.cc */; };
-		89F23C6F0E78D5B2006B2466 /* scanner.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1730E719B8F00D62E90 /* scanner.cc */; };
-		89F23C700E78D5B2006B2466 /* scopeinfo.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1760E719B8F00D62E90 /* scopeinfo.cc */; };
-		89F23C710E78D5B2006B2466 /* scopes.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1780E719B8F00D62E90 /* scopes.cc */; };
-		89F23C720E78D5B2006B2466 /* serialize.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17A0E719B8F00D62E90 /* serialize.cc */; };
-		89F23C730E78D5B2006B2466 /* snapshot-common.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1820E719B8F00D62E90 /* snapshot-common.cc */; };
-		89F23C740E78D5B2006B2466 /* snapshot-empty.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1830E719B8F00D62E90 /* snapshot-empty.cc */; };
-		89F23C750E78D5B2006B2466 /* spaces.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1860E719B8F00D62E90 /* spaces.cc */; };
-		89F23C760E78D5B2006B2466 /* string-stream.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-stream.cc */; };
-		89F23C780E78D5B2006B2466 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
-		89F23C790E78D5B2006B2466 /* token.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18E0E719B8F00D62E90 /* token.cc */; };
-		89F23C7A0E78D5B2006B2466 /* top.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1900E719B8F00D62E90 /* top.cc */; };
-		89F23C7B0E78D5B2006B2466 /* unicode.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1930E719B8F00D62E90 /* unicode.cc */; };
-		89F23C7D0E78D5B2006B2466 /* utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1970E719B8F00D62E90 /* utils.cc */; };
-		89F23C7E0E78D5B2006B2466 /* v8-counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1990E719B8F00D62E90 /* v8-counters.cc */; };
-		89F23C7F0E78D5B2006B2466 /* v8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19B0E719B8F00D62E90 /* v8.cc */; };
-		89F23C800E78D5B2006B2466 /* v8threads.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19D0E719B8F00D62E90 /* v8threads.cc */; };
-		89F23C810E78D5B2006B2466 /* variables.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19F0E719B8F00D62E90 /* variables.cc */; };
-		89F23C820E78D5B2006B2466 /* zone.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1A20E719B8F00D62E90 /* zone.cc */; };
-		89F23C8E0E78D5B6006B2466 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; settings = {COMPILER_FLAGS = "-I../include"; }; };
-		89F23C970E78D5E3006B2466 /* assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FE0E719B8F00D62E90 /* assembler-arm.cc */; };
-		89F23C980E78D5E7006B2466 /* builtins-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1090E719B8F00D62E90 /* builtins-arm.cc */; };
-		89F23C990E78D5E9006B2466 /* codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1140E719B8F00D62E90 /* codegen-arm.cc */; };
-		89F23C9A0E78D5EC006B2466 /* cpu-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1230E719B8F00D62E90 /* cpu-arm.cc */; };
-		89F23C9B0E78D5EE006B2466 /* disasm-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12A0E719B8F00D62E90 /* disasm-arm.cc */; };
-		89F23C9C0E78D5F1006B2466 /* frames-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1370E719B8F00D62E90 /* frames-arm.cc */; };
-		89F23C9D0E78D5FB006B2466 /* ic-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1490E719B8F00D62E90 /* ic-arm.cc */; };
-		89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */; };
-		89F23C9F0E78D604006B2466 /* simulator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17D0E719B8F00D62E90 /* simulator-arm.cc */; };
-		89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */; };
-		89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */; };
-		89FB0E3A0F8E533F00B04B3C /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
-		9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
-		9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
-		9F2B3711114FF62D007CDAF4 /* circular-queue.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B370F114FF62D007CDAF4 /* circular-queue.cc */; };
-		9F2B3712114FF62D007CDAF4 /* circular-queue.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B370F114FF62D007CDAF4 /* circular-queue.cc */; };
-		9F2B37261152CEA0007CDAF4 /* cpu-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */; };
-		9F2B37271152CEA0007CDAF4 /* cpu-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */; };
-		9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
-		9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
-		9F73E3B1114E61A100F84A5A /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
-		9F73E3B2114E61A100F84A5A /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
-		9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
-		9F92FAAA0F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
-		9FA38BB31175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
-		9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
-		9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
-		9FA38BB71175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
-		9FA38BB81175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
-		9FA38BB91175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
-		9FA38BBA1175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
-		9FA38BBB1175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
-		9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
-		9FA38BBE1175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
-		9FA38BBF1175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
-		9FA38BC01175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
-		9FA38BC51175B2E500C4CD55 /* full-codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */; };
-		9FA38BCF1175B30400C4CD55 /* full-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */; };
-		C2BD4BD7120165460046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
-		C2BD4BDB120165A70046BF9F /* fixed-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */; };
-		C2BD4BE4120166180046BF9F /* fixed-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */; };
-		C2BD4BE51201661F0046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
-		C2D1E9731212F2BC00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
-		C2D1E9741212F2CF00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
-		C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */; };
-		C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081B012251239001EAFE4 /* code-stubs-ia32.cc */; };
-/* End PBXBuildFile section */
-
-/* Begin PBXContainerItemProxy section */
-		7BF891960E73099F000BAF8A /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 8970F2EF0E719FB2006AE7B5;
-			remoteInfo = v8;
-		};
-		7BF891980E73099F000BAF8A /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 897F76790E71B4CC007ACF34;
-			remoteInfo = v8_shell;
-		};
-		8938A29412D63A680080CDDE /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 89B91BC012D4F02A002FF4BC;
-			remoteInfo = "d8_shell-x64";
-		};
-		8938A29612D63A680080CDDE /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 89B91BB412D4F02A002FF4BC;
-			remoteInfo = "v8_shell-x64";
-		};
-		8938A29812D63A680080CDDE /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 8956922712D4ED240072C313;
-			remoteInfo = "v8-x64";
-		};
-		893988020F2A35FA007D5254 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 8970F2EF0E719FB2006AE7B5;
-			remoteInfo = v8;
-		};
-		8939880F0F2A3647007D5254 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 893987FE0F2A35FA007D5254;
-			remoteInfo = d8_shell;
-		};
-		896FD03B0E78D71F003DFB6A /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 89F23C3C0E78D5B2006B2466;
-			remoteInfo = "v8-arm";
-		};
-		896FD03D0E78D731003DFB6A /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 89F23C3C0E78D5B2006B2466;
-			remoteInfo = "v8-arm";
-		};
-		896FD03F0E78D735003DFB6A /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 89F23C880E78D5B6006B2466;
-			remoteInfo = "v8_shell-arm";
-		};
-		897C77DB12B68E5D000767A8 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 89F23C3C0E78D5B2006B2466;
-			remoteInfo = "v8-arm";
-		};
-		897F76820E71B6AC007ACF34 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 8970F2EF0E719FB2006AE7B5;
-			remoteInfo = v8;
-		};
-		89B91BD012D4F036002FF4BC /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 8956922712D4ED240072C313;
-			remoteInfo = "v8-x64";
-		};
-		89B91BFC12D4F1BF002FF4BC /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 8956922712D4ED240072C313;
-			remoteInfo = "v8-x64";
-		};
-		89EED40C12B69A0A0075BE1C /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 897C77CB12B68E3D000767A8;
-			remoteInfo = "d8_shell-arm";
-		};
-/* End PBXContainerItemProxy section */
-
-/* Begin PBXFileReference section */
-		22A76C900FF259E600FDC694 /* log-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-inl.h"; sourceTree = "<group>"; };
-		8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
-		891C92FD1334226000FF4757 /* lithium-allocator-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "lithium-allocator-inl.h"; sourceTree = "<group>"; };
-		891C92FE133422EB00FF4757 /* isolate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = isolate.h; sourceTree = "<group>"; };
-		8924315A12F8539900906AB2 /* lithium-gap-resolver-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-x64.cc"; path = "x64/lithium-gap-resolver-x64.cc"; sourceTree = "<group>"; };
-		8924315B12F8539900906AB2 /* lithium-gap-resolver-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-x64.h"; path = "x64/lithium-gap-resolver-x64.h"; sourceTree = "<group>"; };
-		8938A2A212D63B630080CDDE /* lithium-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-x64.cc"; path = "x64/lithium-x64.cc"; sourceTree = "<group>"; };
-		893986D40F29020C007D5254 /* apiutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apiutils.h; sourceTree = "<group>"; };
-		8939880B0F2A35FA007D5254 /* d8 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = d8; sourceTree = BUILT_PRODUCTS_DIR; };
-		893988150F2A3686007D5254 /* d8-debug.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-debug.cc"; path = "../src/d8-debug.cc"; sourceTree = "<group>"; };
-		893988320F2A3B8B007D5254 /* d8-js.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "d8-js.cc"; sourceTree = "<group>"; };
-		893A72230F7B0FF200303DD2 /* platform-posix.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-posix.cc"; sourceTree = "<group>"; };
-		893A722A0F7B4A3200303DD2 /* dateparser-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "dateparser-inl.h"; sourceTree = "<group>"; };
-		893A72320F7B4AD700303DD2 /* d8-debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "d8-debug.h"; path = "../src/d8-debug.h"; sourceTree = "<group>"; };
-		893E248112B14AD40083370F /* v8-preparser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-preparser.h"; sourceTree = "<group>"; };
-		893E248212B14AD40083370F /* v8-testing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-testing.h"; sourceTree = "<group>"; };
-		893E248312B14AD40083370F /* v8stdint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8stdint.h; sourceTree = "<group>"; };
-		893E248412B14B3D0083370F /* ast-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ast-inl.h"; sourceTree = "<group>"; };
-		893E248512B14B3D0083370F /* atomicops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomicops.h; sourceTree = "<group>"; };
-		893E248612B14B3D0083370F /* bignum-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "bignum-dtoa.cc"; sourceTree = "<group>"; };
-		893E248712B14B3D0083370F /* bignum-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "bignum-dtoa.h"; sourceTree = "<group>"; };
-		893E248812B14B3D0083370F /* bignum.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = bignum.cc; sourceTree = "<group>"; };
-		893E248912B14B3D0083370F /* bignum.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bignum.h; sourceTree = "<group>"; };
-		893E248A12B14B3D0083370F /* cached-powers.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "cached-powers.cc"; sourceTree = "<group>"; };
-		893E248B12B14B3D0083370F /* deoptimizer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = deoptimizer.cc; sourceTree = "<group>"; };
-		893E248C12B14B3D0083370F /* deoptimizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = deoptimizer.h; sourceTree = "<group>"; };
-		893E248D12B14B3D0083370F /* hydrogen-instructions.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "hydrogen-instructions.cc"; sourceTree = "<group>"; };
-		893E248E12B14B3D0083370F /* hydrogen-instructions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "hydrogen-instructions.h"; sourceTree = "<group>"; };
-		893E248F12B14B3D0083370F /* hydrogen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = hydrogen.cc; sourceTree = "<group>"; };
-		893E249012B14B3D0083370F /* hydrogen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hydrogen.h; sourceTree = "<group>"; };
-		893E249312B14B3D0083370F /* lithium-allocator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "lithium-allocator.cc"; sourceTree = "<group>"; };
-		893E249412B14B3D0083370F /* lithium-allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "lithium-allocator.h"; sourceTree = "<group>"; };
-		893E249512B14B3D0083370F /* preparse-data.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "preparse-data.cc"; sourceTree = "<group>"; };
-		893E249612B14B3D0083370F /* preparse-data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "preparse-data.h"; sourceTree = "<group>"; };
-		893E249812B14B3D0083370F /* preparser.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = preparser.cc; sourceTree = "<group>"; };
-		893E249912B14B3D0083370F /* preparser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = preparser.h; sourceTree = "<group>"; };
-		893E249A12B14B3D0083370F /* runtime-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "runtime-profiler.cc"; sourceTree = "<group>"; };
-		893E249B12B14B3D0083370F /* runtime-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "runtime-profiler.h"; sourceTree = "<group>"; };
-		893E249C12B14B3D0083370F /* safepoint-table.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "safepoint-table.cc"; sourceTree = "<group>"; };
-		893E249D12B14B3D0083370F /* safepoint-table.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "safepoint-table.h"; sourceTree = "<group>"; };
-		893E249E12B14B3D0083370F /* scanner-base.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "scanner-base.cc"; sourceTree = "<group>"; };
-		893E249F12B14B3D0083370F /* scanner-base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "scanner-base.h"; sourceTree = "<group>"; };
-		893E24A012B14B3D0083370F /* simulator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = simulator.h; sourceTree = "<group>"; };
-		893E24A112B14B3D0083370F /* string-search.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "string-search.cc"; sourceTree = "<group>"; };
-		893E24A212B14B3D0083370F /* string-search.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "string-search.h"; sourceTree = "<group>"; };
-		893E24A312B14B3D0083370F /* strtod.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = strtod.cc; sourceTree = "<group>"; };
-		893E24A412B14B3D0083370F /* strtod.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = strtod.h; sourceTree = "<group>"; };
-		893E24A512B14B3D0083370F /* v8checks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8checks.h; sourceTree = "<group>"; };
-		893E24A612B14B3D0083370F /* v8globals.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8globals.h; sourceTree = "<group>"; };
-		893E24A712B14B3D0083370F /* v8utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8utils.h; sourceTree = "<group>"; };
-		893E24C612B14B510083370F /* deoptimizer-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "deoptimizer-arm.cc"; path = "arm/deoptimizer-arm.cc"; sourceTree = "<group>"; };
-		893E24C712B14B510083370F /* lithium-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-arm.cc"; path = "arm/lithium-arm.cc"; sourceTree = "<group>"; };
-		893E24C812B14B510083370F /* lithium-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-arm.h"; path = "arm/lithium-arm.h"; sourceTree = "<group>"; };
-		893E24C912B14B520083370F /* lithium-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-arm.cc"; path = "arm/lithium-codegen-arm.cc"; sourceTree = "<group>"; };
-		893E24CA12B14B520083370F /* lithium-codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-codegen-arm.h"; path = "arm/lithium-codegen-arm.h"; sourceTree = "<group>"; };
-		893E24CF12B14B780083370F /* atomicops_internals_x86_macosx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomicops_internals_x86_macosx.h; sourceTree = "<group>"; };
-		893E24D012B14B8A0083370F /* deoptimizer-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "deoptimizer-ia32.cc"; path = "ia32/deoptimizer-ia32.cc"; sourceTree = "<group>"; };
-		893E24D112B14B8A0083370F /* lithium-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-ia32.cc"; path = "ia32/lithium-codegen-ia32.cc"; sourceTree = "<group>"; };
-		893E24D212B14B8A0083370F /* lithium-codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-codegen-ia32.h"; path = "ia32/lithium-codegen-ia32.h"; sourceTree = "<group>"; };
-		893E24D312B14B8A0083370F /* lithium-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-ia32.cc"; path = "ia32/lithium-ia32.cc"; sourceTree = "<group>"; };
-		893E24D412B14B8A0083370F /* lithium-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-ia32.h"; path = "ia32/lithium-ia32.h"; sourceTree = "<group>"; };
-		893E24D812B14B9F0083370F /* externalize-string-extension.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "externalize-string-extension.cc"; path = "extensions/externalize-string-extension.cc"; sourceTree = "<group>"; };
-		893E24D912B14B9F0083370F /* externalize-string-extension.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "externalize-string-extension.h"; path = "extensions/externalize-string-extension.h"; sourceTree = "<group>"; };
-		893E24DA12B14B9F0083370F /* gc-extension.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "gc-extension.cc"; path = "extensions/gc-extension.cc"; sourceTree = "<group>"; };
-		893E24DB12B14B9F0083370F /* gc-extension.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "gc-extension.h"; path = "extensions/gc-extension.h"; sourceTree = "<group>"; };
-		8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "regexp-stack.cc"; sourceTree = "<group>"; };
-		8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "regexp-stack.h"; sourceTree = "<group>"; };
-		8946827412C26EB700C914BC /* objects-printer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "objects-printer.cc"; sourceTree = "<group>"; };
-		89471C7F0EB23EE400B6874B /* flag-definitions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "flag-definitions.h"; sourceTree = "<group>"; };
-		89495E460E79FC23001F68C3 /* compilation-cache.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "compilation-cache.cc"; sourceTree = "<group>"; };
-		89495E470E79FC23001F68C3 /* compilation-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "compilation-cache.h"; sourceTree = "<group>"; };
-		894A59E712D777E80000766D /* lithium.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = lithium.cc; sourceTree = "<group>"; };
-		894A59E812D777E80000766D /* lithium.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lithium.h; sourceTree = "<group>"; };
-		895692AA12D4ED240072C313 /* libv8-x64.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-x64.a"; sourceTree = BUILT_PRODUCTS_DIR; };
-		8956B6CD0F5D86570033B5A2 /* debug-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "debug-agent.cc"; sourceTree = "<group>"; };
-		8956B6CE0F5D86570033B5A2 /* debug-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "debug-agent.h"; sourceTree = "<group>"; };
-		895D5B4C1334210400254083 /* allocation-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "allocation-inl.h"; sourceTree = "<group>"; };
-		895D5B521334212D00254083 /* isolate.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = isolate.cc; sourceTree = "<group>"; };
-		895FA748107FFE73006F39D4 /* constants-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "constants-arm.cc"; path = "arm/constants-arm.cc"; sourceTree = "<group>"; };
-		8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32.h"; path = "ia32/codegen-ia32.h"; sourceTree = "<group>"; };
-		896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm.h"; path = "arm/codegen-arm.h"; sourceTree = "<group>"; };
-		896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-arm.cc"; path = "arm/lithium-gap-resolver-arm.cc"; sourceTree = "<group>"; };
-		896FA1E4130F93D300042054 /* lithium-gap-resolver-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-arm.h"; path = "arm/lithium-gap-resolver-arm.h"; sourceTree = "<group>"; };
-		8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; };
-		897C77D912B68E3D000767A8 /* d8-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
-		897F767A0E71B4CC007ACF34 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; };
-		897FF0D40E719A8500D62E90 /* v8-debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-debug.h"; sourceTree = "<group>"; };
-		897FF0D50E719A8500D62E90 /* v8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8.h; sourceTree = "<group>"; };
-		897FF0F60E719B8F00D62E90 /* accessors.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = accessors.cc; sourceTree = "<group>"; };
-		897FF0F70E719B8F00D62E90 /* accessors.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = accessors.h; sourceTree = "<group>"; };
-		897FF0F80E719B8F00D62E90 /* allocation.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = allocation.cc; sourceTree = "<group>"; };
-		897FF0F90E719B8F00D62E90 /* allocation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocation.h; sourceTree = "<group>"; };
-		897FF0FA0E719B8F00D62E90 /* api.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = api.cc; sourceTree = "<group>"; };
-		897FF0FB0E719B8F00D62E90 /* api.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = api.h; sourceTree = "<group>"; };
-		897FF0FC0E719B8F00D62E90 /* arguments.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = arguments.h; sourceTree = "<group>"; };
-		897FF0FD0E719B8F00D62E90 /* assembler-arm-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-arm-inl.h"; path = "arm/assembler-arm-inl.h"; sourceTree = "<group>"; };
-		897FF0FE0E719B8F00D62E90 /* assembler-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "assembler-arm.cc"; path = "arm/assembler-arm.cc"; sourceTree = "<group>"; };
-		897FF0FF0E719B8F00D62E90 /* assembler-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-arm.h"; path = "arm/assembler-arm.h"; sourceTree = "<group>"; };
-		897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-ia32-inl.h"; path = "ia32/assembler-ia32-inl.h"; sourceTree = "<group>"; };
-		897FF1010E719B8F00D62E90 /* assembler-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "assembler-ia32.cc"; path = "ia32/assembler-ia32.cc"; sourceTree = "<group>"; };
-		897FF1020E719B8F00D62E90 /* assembler-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-ia32.h"; path = "ia32/assembler-ia32.h"; sourceTree = "<group>"; };
-		897FF1030E719B8F00D62E90 /* assembler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = assembler.cc; sourceTree = "<group>"; };
-		897FF1040E719B8F00D62E90 /* assembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = assembler.h; sourceTree = "<group>"; };
-		897FF1050E719B8F00D62E90 /* ast.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ast.cc; sourceTree = "<group>"; };
-		897FF1060E719B8F00D62E90 /* ast.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ast.h; sourceTree = "<group>"; };
-		897FF1070E719B8F00D62E90 /* bootstrapper.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = bootstrapper.cc; sourceTree = "<group>"; };
-		897FF1080E719B8F00D62E90 /* bootstrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bootstrapper.h; sourceTree = "<group>"; };
-		897FF1090E719B8F00D62E90 /* builtins-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "builtins-arm.cc"; path = "arm/builtins-arm.cc"; sourceTree = "<group>"; };
-		897FF10A0E719B8F00D62E90 /* builtins-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "builtins-ia32.cc"; path = "ia32/builtins-ia32.cc"; sourceTree = "<group>"; };
-		897FF10B0E719B8F00D62E90 /* builtins.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = builtins.cc; sourceTree = "<group>"; };
-		897FF10C0E719B8F00D62E90 /* builtins.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = builtins.h; sourceTree = "<group>"; };
-		897FF10D0E719B8F00D62E90 /* char-predicates-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "char-predicates-inl.h"; sourceTree = "<group>"; };
-		897FF10E0E719B8F00D62E90 /* char-predicates.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "char-predicates.h"; sourceTree = "<group>"; };
-		897FF10F0E719B8F00D62E90 /* checks.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = checks.cc; sourceTree = "<group>"; };
-		897FF1100E719B8F00D62E90 /* checks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = checks.h; sourceTree = "<group>"; };
-		897FF1110E719B8F00D62E90 /* code-stubs.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "code-stubs.cc"; sourceTree = "<group>"; };
-		897FF1120E719B8F00D62E90 /* code-stubs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "code-stubs.h"; sourceTree = "<group>"; };
-		897FF1130E719B8F00D62E90 /* code.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = code.h; sourceTree = "<group>"; };
-		897FF1140E719B8F00D62E90 /* codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "codegen-arm.cc"; path = "arm/codegen-arm.cc"; sourceTree = "<group>"; };
-		897FF1150E719B8F00D62E90 /* codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "codegen-ia32.cc"; path = "ia32/codegen-ia32.cc"; sourceTree = "<group>"; };
-		897FF1170E719B8F00D62E90 /* codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = codegen.cc; sourceTree = "<group>"; };
-		897FF1180E719B8F00D62E90 /* codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = codegen.h; sourceTree = "<group>"; };
-		897FF1190E719B8F00D62E90 /* compiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = compiler.cc; sourceTree = "<group>"; };
-		897FF11A0E719B8F00D62E90 /* compiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = compiler.h; sourceTree = "<group>"; };
-		897FF11B0E719B8F00D62E90 /* constants-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "constants-arm.h"; path = "arm/constants-arm.h"; sourceTree = "<group>"; };
-		897FF11C0E719B8F00D62E90 /* contexts.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = contexts.cc; sourceTree = "<group>"; };
-		897FF11D0E719B8F00D62E90 /* contexts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = contexts.h; sourceTree = "<group>"; };
-		897FF11E0E719B8F00D62E90 /* conversions-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "conversions-inl.h"; sourceTree = "<group>"; };
-		897FF11F0E719B8F00D62E90 /* conversions.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = conversions.cc; sourceTree = "<group>"; };
-		897FF1200E719B8F00D62E90 /* conversions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conversions.h; sourceTree = "<group>"; };
-		897FF1210E719B8F00D62E90 /* counters.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = counters.cc; sourceTree = "<group>"; };
-		897FF1220E719B8F00D62E90 /* counters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = counters.h; sourceTree = "<group>"; };
-		897FF1230E719B8F00D62E90 /* cpu-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "cpu-arm.cc"; path = "arm/cpu-arm.cc"; sourceTree = "<group>"; };
-		897FF1240E719B8F00D62E90 /* cpu-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "cpu-ia32.cc"; path = "ia32/cpu-ia32.cc"; sourceTree = "<group>"; };
-		897FF1250E719B8F00D62E90 /* cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = "<group>"; };
-		897FF1260E719B8F00D62E90 /* dateparser.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dateparser.cc; sourceTree = "<group>"; };
-		897FF1270E719B8F00D62E90 /* dateparser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dateparser.h; sourceTree = "<group>"; };
-		897FF1280E719B8F00D62E90 /* debug.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = debug.cc; sourceTree = "<group>"; };
-		897FF1290E719B8F00D62E90 /* debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = debug.h; sourceTree = "<group>"; };
-		897FF12A0E719B8F00D62E90 /* disasm-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "disasm-arm.cc"; path = "arm/disasm-arm.cc"; sourceTree = "<group>"; };
-		897FF12B0E719B8F00D62E90 /* disasm-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "disasm-ia32.cc"; path = "ia32/disasm-ia32.cc"; sourceTree = "<group>"; };
-		897FF12C0E719B8F00D62E90 /* disasm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = disasm.h; sourceTree = "<group>"; };
-		897FF12D0E719B8F00D62E90 /* disassembler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = disassembler.cc; sourceTree = "<group>"; };
-		897FF12E0E719B8F00D62E90 /* disassembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = disassembler.h; sourceTree = "<group>"; };
-		897FF1300E719B8F00D62E90 /* execution.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = execution.cc; sourceTree = "<group>"; };
-		897FF1310E719B8F00D62E90 /* execution.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = execution.h; sourceTree = "<group>"; };
-		897FF1320E719B8F00D62E90 /* factory.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = factory.cc; sourceTree = "<group>"; };
-		897FF1330E719B8F00D62E90 /* factory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = factory.h; sourceTree = "<group>"; };
-		897FF1350E719B8F00D62E90 /* flags.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = flags.cc; sourceTree = "<group>"; };
-		897FF1360E719B8F00D62E90 /* flags.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = flags.h; sourceTree = "<group>"; };
-		897FF1370E719B8F00D62E90 /* frames-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "frames-arm.cc"; path = "arm/frames-arm.cc"; sourceTree = "<group>"; };
-		897FF1380E719B8F00D62E90 /* frames-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "frames-arm.h"; path = "arm/frames-arm.h"; sourceTree = "<group>"; };
-		897FF1390E719B8F00D62E90 /* frames-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "frames-ia32.cc"; path = "ia32/frames-ia32.cc"; sourceTree = "<group>"; };
-		897FF13A0E719B8F00D62E90 /* frames-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "frames-ia32.h"; path = "ia32/frames-ia32.h"; sourceTree = "<group>"; };
-		897FF13B0E719B8F00D62E90 /* frames-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "frames-inl.h"; sourceTree = "<group>"; };
-		897FF13C0E719B8F00D62E90 /* frames.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = frames.cc; sourceTree = "<group>"; };
-		897FF13D0E719B8F00D62E90 /* frames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frames.h; sourceTree = "<group>"; };
-		897FF13E0E719B8F00D62E90 /* global-handles.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "global-handles.cc"; sourceTree = "<group>"; };
-		897FF13F0E719B8F00D62E90 /* global-handles.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "global-handles.h"; sourceTree = "<group>"; };
-		897FF1400E719B8F00D62E90 /* globals.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = globals.h; sourceTree = "<group>"; };
-		897FF1410E719B8F00D62E90 /* handles-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "handles-inl.h"; sourceTree = "<group>"; };
-		897FF1420E719B8F00D62E90 /* handles.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = handles.cc; sourceTree = "<group>"; };
-		897FF1430E719B8F00D62E90 /* handles.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = handles.h; sourceTree = "<group>"; };
-		897FF1440E719B8F00D62E90 /* hashmap.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = hashmap.cc; sourceTree = "<group>"; };
-		897FF1450E719B8F00D62E90 /* hashmap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hashmap.h; sourceTree = "<group>"; };
-		897FF1460E719B8F00D62E90 /* heap-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "heap-inl.h"; sourceTree = "<group>"; };
-		897FF1470E719B8F00D62E90 /* heap.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = heap.cc; sourceTree = "<group>"; };
-		897FF1480E719B8F00D62E90 /* heap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = heap.h; sourceTree = "<group>"; };
-		897FF1490E719B8F00D62E90 /* ic-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "ic-arm.cc"; path = "arm/ic-arm.cc"; sourceTree = "<group>"; };
-		897FF14A0E719B8F00D62E90 /* ic-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "ic-ia32.cc"; path = "ia32/ic-ia32.cc"; sourceTree = "<group>"; };
-		897FF14B0E719B8F00D62E90 /* ic-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ic-inl.h"; sourceTree = "<group>"; };
-		897FF14C0E719B8F00D62E90 /* ic.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ic.cc; sourceTree = "<group>"; };
-		897FF14D0E719B8F00D62E90 /* ic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ic.h; sourceTree = "<group>"; };
-		897FF14E0E719B8F00D62E90 /* jsregexp.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = jsregexp.cc; sourceTree = "<group>"; };
-		897FF14F0E719B8F00D62E90 /* jsregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = jsregexp.h; sourceTree = "<group>"; };
-		897FF1500E719B8F00D62E90 /* list-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "list-inl.h"; sourceTree = "<group>"; };
-		897FF1510E719B8F00D62E90 /* list.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = list.h; sourceTree = "<group>"; };
-		897FF1520E719B8F00D62E90 /* log.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = log.cc; sourceTree = "<group>"; };
-		897FF1530E719B8F00D62E90 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = "<group>"; };
-		897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "macro-assembler-arm.cc"; path = "arm/macro-assembler-arm.cc"; sourceTree = "<group>"; };
-		897FF1550E719B8F00D62E90 /* macro-assembler-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "macro-assembler-arm.h"; path = "arm/macro-assembler-arm.h"; sourceTree = "<group>"; };
-		897FF1560E719B8F00D62E90 /* macro-assembler-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "macro-assembler-ia32.cc"; path = "ia32/macro-assembler-ia32.cc"; sourceTree = "<group>"; };
-		897FF1570E719B8F00D62E90 /* macro-assembler-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "macro-assembler-ia32.h"; path = "ia32/macro-assembler-ia32.h"; sourceTree = "<group>"; };
-		897FF1580E719B8F00D62E90 /* macro-assembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "macro-assembler.h"; sourceTree = "<group>"; };
-		897FF1590E719B8F00D62E90 /* mark-compact.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "mark-compact.cc"; sourceTree = "<group>"; };
-		897FF15A0E719B8F00D62E90 /* mark-compact.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "mark-compact.h"; sourceTree = "<group>"; };
-		897FF15B0E719B8F00D62E90 /* v8memory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8memory.h; sourceTree = "<group>"; };
-		897FF15C0E719B8F00D62E90 /* messages.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = messages.cc; sourceTree = "<group>"; };
-		897FF15D0E719B8F00D62E90 /* messages.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = messages.h; sourceTree = "<group>"; };
-		897FF15E0E719B8F00D62E90 /* mksnapshot.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = mksnapshot.cc; sourceTree = "<group>"; };
-		897FF15F0E719B8F00D62E90 /* natives.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = natives.h; sourceTree = "<group>"; };
-		897FF1600E719B8F00D62E90 /* objects-debug.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "objects-debug.cc"; sourceTree = "<group>"; };
-		897FF1610E719B8F00D62E90 /* objects-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "objects-inl.h"; sourceTree = "<group>"; };
-		897FF1620E719B8F00D62E90 /* objects.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = objects.cc; sourceTree = "<group>"; };
-		897FF1630E719B8F00D62E90 /* objects.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = objects.h; sourceTree = "<group>"; };
-		897FF1640E719B8F00D62E90 /* parser.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = parser.cc; sourceTree = "<group>"; };
-		897FF1650E719B8F00D62E90 /* parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parser.h; sourceTree = "<group>"; };
-		897FF1660E719B8F00D62E90 /* platform-linux.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-linux.cc"; sourceTree = "<group>"; };
-		897FF1670E719B8F00D62E90 /* platform-macos.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-macos.cc"; sourceTree = "<group>"; };
-		897FF1680E719B8F00D62E90 /* platform-nullos.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-nullos.cc"; sourceTree = "<group>"; };
-		897FF1690E719B8F00D62E90 /* platform-win32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-win32.cc"; sourceTree = "<group>"; };
-		897FF16A0E719B8F00D62E90 /* platform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = platform.h; sourceTree = "<group>"; };
-		897FF16B0E719B8F00D62E90 /* prettyprinter.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = prettyprinter.cc; sourceTree = "<group>"; };
-		897FF16C0E719B8F00D62E90 /* prettyprinter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prettyprinter.h; sourceTree = "<group>"; };
-		897FF16D0E719B8F00D62E90 /* property.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = property.cc; sourceTree = "<group>"; };
-		897FF16E0E719B8F00D62E90 /* property.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = property.h; sourceTree = "<group>"; };
-		897FF16F0E719B8F00D62E90 /* rewriter.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = rewriter.cc; sourceTree = "<group>"; };
-		897FF1700E719B8F00D62E90 /* rewriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rewriter.h; sourceTree = "<group>"; };
-		897FF1710E719B8F00D62E90 /* runtime.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = runtime.cc; sourceTree = "<group>"; };
-		897FF1720E719B8F00D62E90 /* runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = runtime.h; sourceTree = "<group>"; };
-		897FF1730E719B8F00D62E90 /* scanner.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = scanner.cc; sourceTree = "<group>"; };
-		897FF1740E719B8F00D62E90 /* scanner.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scanner.h; sourceTree = "<group>"; };
-		897FF1750E719B8F00D62E90 /* SConscript */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SConscript; sourceTree = "<group>"; };
-		897FF1760E719B8F00D62E90 /* scopeinfo.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = scopeinfo.cc; sourceTree = "<group>"; };
-		897FF1770E719B8F00D62E90 /* scopeinfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scopeinfo.h; sourceTree = "<group>"; };
-		897FF1780E719B8F00D62E90 /* scopes.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = scopes.cc; sourceTree = "<group>"; };
-		897FF1790E719B8F00D62E90 /* scopes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scopes.h; sourceTree = "<group>"; };
-		897FF17A0E719B8F00D62E90 /* serialize.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = serialize.cc; sourceTree = "<group>"; };
-		897FF17B0E719B8F00D62E90 /* serialize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = serialize.h; sourceTree = "<group>"; };
-		897FF17C0E719B8F00D62E90 /* shell.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shell.h; sourceTree = "<group>"; };
-		897FF17D0E719B8F00D62E90 /* simulator-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "simulator-arm.cc"; path = "arm/simulator-arm.cc"; sourceTree = "<group>"; };
-		897FF17E0E719B8F00D62E90 /* simulator-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "simulator-arm.h"; path = "arm/simulator-arm.h"; sourceTree = "<group>"; };
-		897FF17F0E719B8F00D62E90 /* simulator-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "simulator-ia32.cc"; path = "ia32/simulator-ia32.cc"; sourceTree = "<group>"; };
-		897FF1800E719B8F00D62E90 /* simulator-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "simulator-ia32.h"; path = "ia32/simulator-ia32.h"; sourceTree = "<group>"; };
-		897FF1810E719B8F00D62E90 /* smart-pointer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "smart-pointer.h"; sourceTree = "<group>"; };
-		897FF1820E719B8F00D62E90 /* snapshot-common.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "snapshot-common.cc"; sourceTree = "<group>"; };
-		897FF1830E719B8F00D62E90 /* snapshot-empty.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "snapshot-empty.cc"; sourceTree = "<group>"; };
-		897FF1840E719B8F00D62E90 /* snapshot.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = snapshot.h; sourceTree = "<group>"; };
-		897FF1850E719B8F00D62E90 /* spaces-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "spaces-inl.h"; sourceTree = "<group>"; };
-		897FF1860E719B8F00D62E90 /* spaces.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = spaces.cc; sourceTree = "<group>"; };
-		897FF1870E719B8F00D62E90 /* spaces.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spaces.h; sourceTree = "<group>"; };
-		897FF1880E719B8F00D62E90 /* string-stream.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "string-stream.cc"; sourceTree = "<group>"; };
-		897FF1890E719B8F00D62E90 /* string-stream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "string-stream.h"; sourceTree = "<group>"; };
-		897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "stub-cache-arm.cc"; path = "arm/stub-cache-arm.cc"; sourceTree = "<group>"; };
-		897FF18B0E719B8F00D62E90 /* stub-cache-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "stub-cache-ia32.cc"; path = "ia32/stub-cache-ia32.cc"; sourceTree = "<group>"; };
-		897FF18C0E719B8F00D62E90 /* stub-cache.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "stub-cache.cc"; sourceTree = "<group>"; };
-		897FF18D0E719B8F00D62E90 /* stub-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "stub-cache.h"; sourceTree = "<group>"; };
-		897FF18E0E719B8F00D62E90 /* token.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = token.cc; sourceTree = "<group>"; };
-		897FF18F0E719B8F00D62E90 /* token.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = token.h; sourceTree = "<group>"; };
-		897FF1900E719B8F00D62E90 /* top.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = top.cc; sourceTree = "<group>"; };
-		897FF1920E719B8F00D62E90 /* unicode-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unicode-inl.h"; sourceTree = "<group>"; };
-		897FF1930E719B8F00D62E90 /* unicode.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = unicode.cc; sourceTree = "<group>"; };
-		897FF1940E719B8F00D62E90 /* unicode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unicode.h; sourceTree = "<group>"; };
-		897FF1970E719B8F00D62E90 /* utils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = utils.cc; sourceTree = "<group>"; };
-		897FF1980E719B8F00D62E90 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; };
-		897FF1990E719B8F00D62E90 /* v8-counters.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "v8-counters.cc"; sourceTree = "<group>"; };
-		897FF19A0E719B8F00D62E90 /* v8-counters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-counters.h"; sourceTree = "<group>"; };
-		897FF19B0E719B8F00D62E90 /* v8.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = v8.cc; sourceTree = "<group>"; };
-		897FF19C0E719B8F00D62E90 /* v8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8.h; sourceTree = "<group>"; };
-		897FF19D0E719B8F00D62E90 /* v8threads.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = v8threads.cc; sourceTree = "<group>"; };
-		897FF19E0E719B8F00D62E90 /* v8threads.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8threads.h; sourceTree = "<group>"; };
-		897FF19F0E719B8F00D62E90 /* variables.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = variables.cc; sourceTree = "<group>"; };
-		897FF1A00E719B8F00D62E90 /* variables.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = variables.h; sourceTree = "<group>"; };
-		897FF1A10E719B8F00D62E90 /* zone-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "zone-inl.h"; sourceTree = "<group>"; };
-		897FF1A20E719B8F00D62E90 /* zone.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = zone.cc; sourceTree = "<group>"; };
-		897FF1A30E719B8F00D62E90 /* zone.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = zone.h; sourceTree = "<group>"; };
-		897FF1A60E719BC100D62E90 /* apinatives.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = apinatives.js; sourceTree = "<group>"; };
-		897FF1A70E719BC100D62E90 /* array.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = array.js; sourceTree = "<group>"; };
-		897FF1AA0E719BC100D62E90 /* math.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = math.js; sourceTree = "<group>"; };
-		897FF1AB0E719BC100D62E90 /* messages.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = messages.js; sourceTree = "<group>"; };
-		897FF1AE0E719BC100D62E90 /* runtime.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = runtime.js; sourceTree = "<group>"; };
-		897FF1AF0E719BC100D62E90 /* string.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = string.js; sourceTree = "<group>"; };
-		897FF1B00E719BC100D62E90 /* uri.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = uri.js; sourceTree = "<group>"; };
-		897FF1B10E719BC100D62E90 /* v8natives.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = v8natives.js; sourceTree = "<group>"; };
-		897FF1B50E719C0900D62E90 /* shell.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = shell.cc; sourceTree = "<group>"; };
-		897FF1B60E719C2300D62E90 /* js2c.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = js2c.py; sourceTree = "<group>"; };
-		897FF1B70E719C2E00D62E90 /* macros.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; name = macros.py; path = ../src/macros.py; sourceTree = "<group>"; };
-		897FF32F0FAA0ED200136CF6 /* version.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = version.cc; sourceTree = "<group>"; };
-		897FF3300FAA0ED200136CF6 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = "<group>"; };
-		8981F5FE1010500F00D1520E /* frame-element.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "frame-element.cc"; sourceTree = "<group>"; };
-		8981F5FF1010500F00D1520E /* frame-element.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "frame-element.h"; sourceTree = "<group>"; };
-		898BD20C0EF6CC850068B00A /* debug-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "debug-arm.cc"; path = "arm/debug-arm.cc"; sourceTree = "<group>"; };
-		898BD20D0EF6CC850068B00A /* debug-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "debug-ia32.cc"; path = "ia32/debug-ia32.cc"; sourceTree = "<group>"; };
-		89A15C630EE4661A00B48DEB /* bytecodes-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "bytecodes-irregexp.h"; sourceTree = "<group>"; };
-		89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "interpreter-irregexp.cc"; sourceTree = "<group>"; };
-		89A15C670EE4665300B48DEB /* interpreter-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "interpreter-irregexp.h"; sourceTree = "<group>"; };
-		89A15C6D0EE466A900B48DEB /* platform-freebsd.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-freebsd.cc"; sourceTree = "<group>"; };
-		89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "regexp-macro-assembler-arm.cc"; path = "arm/regexp-macro-assembler-arm.cc"; sourceTree = "<group>"; };
-		89A15C710EE466D000B48DEB /* regexp-macro-assembler-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "regexp-macro-assembler-arm.h"; path = "arm/regexp-macro-assembler-arm.h"; sourceTree = "<group>"; };
-		89A15C720EE466D000B48DEB /* regexp-macro-assembler-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "regexp-macro-assembler-ia32.cc"; path = "ia32/regexp-macro-assembler-ia32.cc"; sourceTree = "<group>"; };
-		89A15C730EE466D000B48DEB /* regexp-macro-assembler-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "regexp-macro-assembler-ia32.h"; path = "ia32/regexp-macro-assembler-ia32.h"; sourceTree = "<group>"; };
-		89A15C740EE466D000B48DEB /* regexp-macro-assembler-irregexp-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "regexp-macro-assembler-irregexp-inl.h"; sourceTree = "<group>"; };
-		89A15C750EE466D000B48DEB /* regexp-macro-assembler-irregexp.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "regexp-macro-assembler-irregexp.cc"; sourceTree = "<group>"; };
-		89A15C760EE466D000B48DEB /* regexp-macro-assembler-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "regexp-macro-assembler-irregexp.h"; sourceTree = "<group>"; };
-		89A15C770EE466D000B48DEB /* regexp-macro-assembler-tracer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "regexp-macro-assembler-tracer.cc"; sourceTree = "<group>"; };
-		89A15C780EE466D000B48DEB /* regexp-macro-assembler-tracer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "regexp-macro-assembler-tracer.h"; sourceTree = "<group>"; };
-		89A15C790EE466D000B48DEB /* regexp-macro-assembler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "regexp-macro-assembler.cc"; sourceTree = "<group>"; };
-		89A15C7A0EE466D000B48DEB /* regexp-macro-assembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "regexp-macro-assembler.h"; sourceTree = "<group>"; };
-		89A15C910EE46A1700B48DEB /* d8-readline.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-readline.cc"; path = "../src/d8-readline.cc"; sourceTree = "<group>"; };
-		89A15C920EE46A1700B48DEB /* d8.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = d8.cc; path = ../src/d8.cc; sourceTree = "<group>"; };
-		89A15C930EE46A1700B48DEB /* d8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = d8.h; path = ../src/d8.h; sourceTree = "<group>"; };
-		89A15C940EE46A1700B48DEB /* d8.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; name = d8.js; path = ../src/d8.js; sourceTree = "<group>"; };
-		89B12E8D0E7FF2A40080BA62 /* presubmit.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = presubmit.py; sourceTree = "<group>"; };
-		89B91B7B12D4EF95002FF4BC /* assembler-x64-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-x64-inl.h"; path = "x64/assembler-x64-inl.h"; sourceTree = "<group>"; };
-		89B91B7C12D4EF95002FF4BC /* assembler-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "assembler-x64.cc"; path = "x64/assembler-x64.cc"; sourceTree = "<group>"; };
-		89B91B7D12D4EF95002FF4BC /* assembler-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-x64.h"; path = "x64/assembler-x64.h"; sourceTree = "<group>"; };
-		89B91B7E12D4EF95002FF4BC /* builtins-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "builtins-x64.cc"; path = "x64/builtins-x64.cc"; sourceTree = "<group>"; };
-		89B91B7F12D4EF95002FF4BC /* code-stubs-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-x64.cc"; path = "x64/code-stubs-x64.cc"; sourceTree = "<group>"; };
-		89B91B8012D4EF95002FF4BC /* code-stubs-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-x64.h"; path = "x64/code-stubs-x64.h"; sourceTree = "<group>"; };
-		89B91B8212D4EF95002FF4BC /* codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "codegen-x64.cc"; path = "x64/codegen-x64.cc"; sourceTree = "<group>"; };
-		89B91B8312D4EF95002FF4BC /* codegen-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-x64.h"; path = "x64/codegen-x64.h"; sourceTree = "<group>"; };
-		89B91B8412D4EF95002FF4BC /* cpu-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "cpu-x64.cc"; path = "x64/cpu-x64.cc"; sourceTree = "<group>"; };
-		89B91B8512D4EF95002FF4BC /* debug-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "debug-x64.cc"; path = "x64/debug-x64.cc"; sourceTree = "<group>"; };
-		89B91B8612D4EF95002FF4BC /* deoptimizer-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "deoptimizer-x64.cc"; path = "x64/deoptimizer-x64.cc"; sourceTree = "<group>"; };
-		89B91B8712D4EF95002FF4BC /* disasm-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "disasm-x64.cc"; path = "x64/disasm-x64.cc"; sourceTree = "<group>"; };
-		89B91B8812D4EF95002FF4BC /* frames-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "frames-x64.cc"; path = "x64/frames-x64.cc"; sourceTree = "<group>"; };
-		89B91B8912D4EF95002FF4BC /* frames-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "frames-x64.h"; path = "x64/frames-x64.h"; sourceTree = "<group>"; };
-		89B91B8A12D4EF95002FF4BC /* full-codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-x64.cc"; path = "x64/full-codegen-x64.cc"; sourceTree = "<group>"; };
-		89B91B8B12D4EF95002FF4BC /* ic-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "ic-x64.cc"; path = "x64/ic-x64.cc"; sourceTree = "<group>"; };
-		89B91B8D12D4EF95002FF4BC /* lithium-codegen-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-codegen-x64.h"; path = "x64/lithium-codegen-x64.h"; sourceTree = "<group>"; };
-		89B91B8E12D4EF95002FF4BC /* lithium-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-x64.h"; path = "x64/lithium-x64.h"; sourceTree = "<group>"; };
-		89B91B8F12D4EF95002FF4BC /* macro-assembler-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "macro-assembler-x64.cc"; path = "x64/macro-assembler-x64.cc"; sourceTree = "<group>"; };
-		89B91B9012D4EF95002FF4BC /* macro-assembler-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "macro-assembler-x64.h"; path = "x64/macro-assembler-x64.h"; sourceTree = "<group>"; };
-		89B91B9112D4EF95002FF4BC /* regexp-macro-assembler-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "regexp-macro-assembler-x64.cc"; path = "x64/regexp-macro-assembler-x64.cc"; sourceTree = "<group>"; };
-		89B91B9212D4EF95002FF4BC /* regexp-macro-assembler-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "regexp-macro-assembler-x64.h"; path = "x64/regexp-macro-assembler-x64.h"; sourceTree = "<group>"; };
-		89B91B9612D4EF95002FF4BC /* simulator-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "simulator-x64.cc"; path = "x64/simulator-x64.cc"; sourceTree = "<group>"; };
-		89B91B9712D4EF95002FF4BC /* simulator-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "simulator-x64.h"; path = "x64/simulator-x64.h"; sourceTree = "<group>"; };
-		89B91B9812D4EF95002FF4BC /* stub-cache-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "stub-cache-x64.cc"; path = "x64/stub-cache-x64.cc"; sourceTree = "<group>"; };
-		89B91BBE12D4F02A002FF4BC /* v8_shell-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
-		89B91BCE12D4F02A002FF4BC /* d8-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
-		89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-ia32.cc"; path = "ia32/lithium-gap-resolver-ia32.cc"; sourceTree = "<group>"; };
-		89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-ia32.h"; path = "ia32/lithium-gap-resolver-ia32.h"; sourceTree = "<group>"; };
-		89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "gdb-jit.cc"; sourceTree = "<group>"; };
-		89D7DDD712E8DE09001E2B82 /* gdb-jit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gdb-jit.h"; sourceTree = "<group>"; };
-		89D7DDD812E8DE09001E2B82 /* inspector.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = inspector.cc; sourceTree = "<group>"; };
-		89D7DDD912E8DE09001E2B82 /* inspector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inspector.h; sourceTree = "<group>"; };
-		89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; };
-		89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
-		89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-x64.cc"; path = "x64/lithium-codegen-x64.cc"; sourceTree = "<group>"; };
-		89FB0E360F8E531900B04B3C /* d8-posix.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-posix.cc"; path = "../src/d8-posix.cc"; sourceTree = "<group>"; };
-		89FB0E370F8E531900B04B3C /* d8-windows.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-windows.cc"; path = "../src/d8-windows.cc"; sourceTree = "<group>"; };
-		89FE7C0513532165008662BD /* date.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = date.js; sourceTree = "<group>"; };
-		89FE7C0613532165008662BD /* debug-debugger.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = "debug-debugger.js"; sourceTree = "<group>"; };
-		89FE7C0713532165008662BD /* liveedit-debugger.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = "liveedit-debugger.js"; sourceTree = "<group>"; };
-		89FE7C0813532165008662BD /* mirror-debugger.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = "mirror-debugger.js"; sourceTree = "<group>"; };
-		89FE7C0913532165008662BD /* regexp.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = regexp.js; sourceTree = "<group>"; };
-		9C1F8E1D133906180068B362 /* small-pointer-list.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "small-pointer-list.h"; sourceTree = "<group>"; };
-		9C76176D133FB7740057370B /* platform-tls-mac.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "platform-tls-mac.h"; sourceTree = "<group>"; };
-		9C8E8061133CF772004058A5 /* platform-tls.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "platform-tls.h"; sourceTree = "<group>"; };
-		9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "heap-profiler.cc"; sourceTree = "<group>"; };
-		9F11D99F105AF0A300EBE5B2 /* heap-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "heap-profiler.h"; sourceTree = "<group>"; };
-		9F2B370E114FF62D007CDAF4 /* circular-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "circular-queue-inl.h"; sourceTree = "<group>"; };
-		9F2B370F114FF62D007CDAF4 /* circular-queue.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "circular-queue.cc"; sourceTree = "<group>"; };
-		9F2B3710114FF62D007CDAF4 /* circular-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "circular-queue.h"; sourceTree = "<group>"; };
-		9F2B37231152CEA0007CDAF4 /* cpu-profiler-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "cpu-profiler-inl.h"; sourceTree = "<group>"; };
-		9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "cpu-profiler.cc"; sourceTree = "<group>"; };
-		9F2B37251152CEA0007CDAF4 /* cpu-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "cpu-profiler.h"; sourceTree = "<group>"; };
-		9F4B7B870FCC877A00DC4117 /* log-utils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "log-utils.cc"; sourceTree = "<group>"; };
-		9F4B7B880FCC877A00DC4117 /* log-utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-utils.h"; sourceTree = "<group>"; };
-		9F73E3AE114E61A100F84A5A /* profile-generator-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "profile-generator-inl.h"; sourceTree = "<group>"; };
-		9F73E3AF114E61A100F84A5A /* profile-generator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "profile-generator.cc"; sourceTree = "<group>"; };
-		9F73E3B0114E61A100F84A5A /* profile-generator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "profile-generator.h"; sourceTree = "<group>"; };
-		9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "func-name-inferrer.cc"; sourceTree = "<group>"; };
-		9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "func-name-inferrer.h"; sourceTree = "<group>"; };
-		9FA36F62116BA26500C4CD55 /* v8-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-profiler.h"; sourceTree = "<group>"; };
-		9FA37332116DD9F000C4CD55 /* vm-state-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "vm-state-inl.h"; sourceTree = "<group>"; };
-		9FA37334116DD9F000C4CD55 /* vm-state.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "vm-state.h"; sourceTree = "<group>"; };
-		9FA38B9B1175B2D200C4CD55 /* cached-powers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "cached-powers.h"; sourceTree = "<group>"; };
-		9FA38B9C1175B2D200C4CD55 /* data-flow.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "data-flow.cc"; sourceTree = "<group>"; };
-		9FA38B9D1175B2D200C4CD55 /* data-flow.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "data-flow.h"; sourceTree = "<group>"; };
-		9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "diy-fp.cc"; sourceTree = "<group>"; };
-		9FA38B9F1175B2D200C4CD55 /* diy-fp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "diy-fp.h"; sourceTree = "<group>"; };
-		9FA38BA01175B2D200C4CD55 /* double.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = double.h; sourceTree = "<group>"; };
-		9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fast-dtoa.cc"; sourceTree = "<group>"; };
-		9FA38BA21175B2D200C4CD55 /* fast-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fast-dtoa.h"; sourceTree = "<group>"; };
-		9FA38BA51175B2D200C4CD55 /* full-codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "full-codegen.cc"; sourceTree = "<group>"; };
-		9FA38BA61175B2D200C4CD55 /* full-codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "full-codegen.h"; sourceTree = "<group>"; };
-		9FA38BA91175B2D200C4CD55 /* liveedit.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = liveedit.cc; sourceTree = "<group>"; };
-		9FA38BAA1175B2D200C4CD55 /* liveedit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = liveedit.h; sourceTree = "<group>"; };
-		9FA38BAC1175B2D200C4CD55 /* splay-tree-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "splay-tree-inl.h"; sourceTree = "<group>"; };
-		9FA38BAD1175B2D200C4CD55 /* splay-tree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "splay-tree.h"; sourceTree = "<group>"; };
-		9FA38BAE1175B2D200C4CD55 /* type-info.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "type-info.cc"; sourceTree = "<group>"; };
-		9FA38BAF1175B2D200C4CD55 /* type-info.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "type-info.h"; sourceTree = "<group>"; };
-		9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-ia32.cc"; path = "ia32/full-codegen-ia32.cc"; sourceTree = "<group>"; };
-		9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-arm.cc"; path = "arm/full-codegen-arm.cc"; sourceTree = "<group>"; };
-		9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue-inl.h"; sourceTree = "<group>"; };
-		9FF7A28311A642EA0051B8F2 /* unbound-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue.h"; sourceTree = "<group>"; };
-		C2BD4BD5120165460046BF9F /* dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dtoa.cc; sourceTree = "<group>"; };
-		C2BD4BD6120165460046BF9F /* dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dtoa.h; sourceTree = "<group>"; };
-		C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fixed-dtoa.cc"; sourceTree = "<group>"; };
-		C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fixed-dtoa.h"; sourceTree = "<group>"; };
-		C2D1E9711212F27B00187A52 /* objects-visiting.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "objects-visiting.cc"; sourceTree = "<group>"; };
-		C2D1E9721212F27B00187A52 /* objects-visiting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "objects-visiting.h"; sourceTree = "<group>"; };
-		C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-arm.cc"; path = "arm/code-stubs-arm.cc"; sourceTree = "<group>"; };
-		C68081AC1225120B001EAFE4 /* code-stubs-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-arm.h"; path = "arm/code-stubs-arm.h"; sourceTree = "<group>"; };
-		C68081B012251239001EAFE4 /* code-stubs-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-ia32.cc"; path = "ia32/code-stubs-ia32.cc"; sourceTree = "<group>"; };
-		C68081B412251257001EAFE4 /* code-stubs-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-ia32.h"; path = "ia32/code-stubs-ia32.h"; sourceTree = "<group>"; };
-/* End PBXFileReference section */
-
-/* Begin PBXFrameworksBuildPhase section */
-		893988050F2A35FA007D5254 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				893988070F2A35FA007D5254 /* libv8.a in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		895692A612D4ED240072C313 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		8970F2EE0E719FB2006AE7B5 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		897C77D412B68E3D000767A8 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				897C77DD12B68E6E000767A8 /* libv8-arm.a in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		897F76780E71B4CC007ACF34 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89B91BB912D4F02A002FF4BC /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				89B91BFA12D4F1AA002FF4BC /* libv8-x64.a in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89B91BC912D4F02A002FF4BC /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				89B91BFB12D4F1BB002FF4BC /* libv8-x64.a in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89F23C830E78D5B2006B2466 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89F23C8F0E78D5B6006B2466 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXFrameworksBuildPhase section */
-
-/* Begin PBXGroup section */
-		8915B8660E719336009C4E19 = {
-			isa = PBXGroup;
-			children = (
-				897FF1C00E719CB600D62E90 /* Products */,
-				897FF0CF0E71996900D62E90 /* v8 */,
-			);
-			sourceTree = "<group>";
-		};
-		893E24E212B14BD20083370F /* C++ */ = {
-			isa = PBXGroup;
-			children = (
-				89B91C0C12D4F439002FF4BC /* arm */,
-				89B91C0312D4F275002FF4BC /* ia32 */,
-				89B91B7A12D4EF65002FF4BC /* x64 */,
-				897FF0F60E719B8F00D62E90 /* accessors.cc */,
-				897FF0F70E719B8F00D62E90 /* accessors.h */,
-				895D5B4C1334210400254083 /* allocation-inl.h */,
-				897FF0F80E719B8F00D62E90 /* allocation.cc */,
-				897FF0F90E719B8F00D62E90 /* allocation.h */,
-				897FF0FA0E719B8F00D62E90 /* api.cc */,
-				897FF0FB0E719B8F00D62E90 /* api.h */,
-				893986D40F29020C007D5254 /* apiutils.h */,
-				897FF0FC0E719B8F00D62E90 /* arguments.h */,
-				897FF1030E719B8F00D62E90 /* assembler.cc */,
-				897FF1040E719B8F00D62E90 /* assembler.h */,
-				893E248412B14B3D0083370F /* ast-inl.h */,
-				897FF1050E719B8F00D62E90 /* ast.cc */,
-				897FF1060E719B8F00D62E90 /* ast.h */,
-				893E248512B14B3D0083370F /* atomicops.h */,
-				893E24CF12B14B780083370F /* atomicops_internals_x86_macosx.h */,
-				893E248612B14B3D0083370F /* bignum-dtoa.cc */,
-				893E248712B14B3D0083370F /* bignum-dtoa.h */,
-				893E248812B14B3D0083370F /* bignum.cc */,
-				893E248912B14B3D0083370F /* bignum.h */,
-				897FF1070E719B8F00D62E90 /* bootstrapper.cc */,
-				897FF1080E719B8F00D62E90 /* bootstrapper.h */,
-				897FF10B0E719B8F00D62E90 /* builtins.cc */,
-				897FF10C0E719B8F00D62E90 /* builtins.h */,
-				89A15C630EE4661A00B48DEB /* bytecodes-irregexp.h */,
-				893E248A12B14B3D0083370F /* cached-powers.cc */,
-				9FA38B9B1175B2D200C4CD55 /* cached-powers.h */,
-				897FF10D0E719B8F00D62E90 /* char-predicates-inl.h */,
-				897FF10E0E719B8F00D62E90 /* char-predicates.h */,
-				897FF10F0E719B8F00D62E90 /* checks.cc */,
-				897FF1100E719B8F00D62E90 /* checks.h */,
-				9F2B370E114FF62D007CDAF4 /* circular-queue-inl.h */,
-				9F2B370F114FF62D007CDAF4 /* circular-queue.cc */,
-				9F2B3710114FF62D007CDAF4 /* circular-queue.h */,
-				897FF1110E719B8F00D62E90 /* code-stubs.cc */,
-				897FF1120E719B8F00D62E90 /* code-stubs.h */,
-				897FF1130E719B8F00D62E90 /* code.h */,
-				897FF1170E719B8F00D62E90 /* codegen.cc */,
-				897FF1180E719B8F00D62E90 /* codegen.h */,
-				89495E460E79FC23001F68C3 /* compilation-cache.cc */,
-				89495E470E79FC23001F68C3 /* compilation-cache.h */,
-				897FF1190E719B8F00D62E90 /* compiler.cc */,
-				897FF11A0E719B8F00D62E90 /* compiler.h */,
-				897FF11C0E719B8F00D62E90 /* contexts.cc */,
-				897FF11D0E719B8F00D62E90 /* contexts.h */,
-				897FF11E0E719B8F00D62E90 /* conversions-inl.h */,
-				897FF11F0E719B8F00D62E90 /* conversions.cc */,
-				897FF1200E719B8F00D62E90 /* conversions.h */,
-				897FF1210E719B8F00D62E90 /* counters.cc */,
-				897FF1220E719B8F00D62E90 /* counters.h */,
-				897FF1230E719B8F00D62E90 /* cpu-arm.cc */,
-				9F2B37231152CEA0007CDAF4 /* cpu-profiler-inl.h */,
-				9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */,
-				9F2B37251152CEA0007CDAF4 /* cpu-profiler.h */,
-				897FF1250E719B8F00D62E90 /* cpu.h */,
-				9FA38B9C1175B2D200C4CD55 /* data-flow.cc */,
-				9FA38B9D1175B2D200C4CD55 /* data-flow.h */,
-				893A722A0F7B4A3200303DD2 /* dateparser-inl.h */,
-				897FF1260E719B8F00D62E90 /* dateparser.cc */,
-				897FF1270E719B8F00D62E90 /* dateparser.h */,
-				8956B6CD0F5D86570033B5A2 /* debug-agent.cc */,
-				8956B6CE0F5D86570033B5A2 /* debug-agent.h */,
-				897FF1280E719B8F00D62E90 /* debug.cc */,
-				897FF1290E719B8F00D62E90 /* debug.h */,
-				893E248B12B14B3D0083370F /* deoptimizer.cc */,
-				893E248C12B14B3D0083370F /* deoptimizer.h */,
-				897FF12A0E719B8F00D62E90 /* disasm-arm.cc */,
-				897FF12C0E719B8F00D62E90 /* disasm.h */,
-				897FF12D0E719B8F00D62E90 /* disassembler.cc */,
-				897FF12E0E719B8F00D62E90 /* disassembler.h */,
-				9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */,
-				9FA38B9F1175B2D200C4CD55 /* diy-fp.h */,
-				9FA38BA01175B2D200C4CD55 /* double.h */,
-				C2BD4BD5120165460046BF9F /* dtoa.cc */,
-				C2BD4BD6120165460046BF9F /* dtoa.h */,
-				897FF1300E719B8F00D62E90 /* execution.cc */,
-				897FF1310E719B8F00D62E90 /* execution.h */,
-				893E24D812B14B9F0083370F /* externalize-string-extension.cc */,
-				893E24D912B14B9F0083370F /* externalize-string-extension.h */,
-				897FF1320E719B8F00D62E90 /* factory.cc */,
-				897FF1330E719B8F00D62E90 /* factory.h */,
-				9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */,
-				9FA38BA21175B2D200C4CD55 /* fast-dtoa.h */,
-				C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */,
-				C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */,
-				89471C7F0EB23EE400B6874B /* flag-definitions.h */,
-				897FF1350E719B8F00D62E90 /* flags.cc */,
-				897FF1360E719B8F00D62E90 /* flags.h */,
-				8981F5FE1010500F00D1520E /* frame-element.cc */,
-				8981F5FF1010500F00D1520E /* frame-element.h */,
-				897FF1370E719B8F00D62E90 /* frames-arm.cc */,
-				897FF1380E719B8F00D62E90 /* frames-arm.h */,
-				897FF13B0E719B8F00D62E90 /* frames-inl.h */,
-				897FF13C0E719B8F00D62E90 /* frames.cc */,
-				897FF13D0E719B8F00D62E90 /* frames.h */,
-				9FA38BA51175B2D200C4CD55 /* full-codegen.cc */,
-				9FA38BA61175B2D200C4CD55 /* full-codegen.h */,
-				9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */,
-				9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */,
-				893E24DA12B14B9F0083370F /* gc-extension.cc */,
-				893E24DB12B14B9F0083370F /* gc-extension.h */,
-				89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */,
-				89D7DDD712E8DE09001E2B82 /* gdb-jit.h */,
-				897FF13E0E719B8F00D62E90 /* global-handles.cc */,
-				897FF13F0E719B8F00D62E90 /* global-handles.h */,
-				897FF1400E719B8F00D62E90 /* globals.h */,
-				897FF1410E719B8F00D62E90 /* handles-inl.h */,
-				897FF1420E719B8F00D62E90 /* handles.cc */,
-				897FF1430E719B8F00D62E90 /* handles.h */,
-				897FF1440E719B8F00D62E90 /* hashmap.cc */,
-				897FF1450E719B8F00D62E90 /* hashmap.h */,
-				897FF1460E719B8F00D62E90 /* heap-inl.h */,
-				9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */,
-				9F11D99F105AF0A300EBE5B2 /* heap-profiler.h */,
-				897FF1470E719B8F00D62E90 /* heap.cc */,
-				897FF1480E719B8F00D62E90 /* heap.h */,
-				893E248D12B14B3D0083370F /* hydrogen-instructions.cc */,
-				893E248E12B14B3D0083370F /* hydrogen-instructions.h */,
-				893E248F12B14B3D0083370F /* hydrogen.cc */,
-				893E249012B14B3D0083370F /* hydrogen.h */,
-				897FF14B0E719B8F00D62E90 /* ic-inl.h */,
-				897FF14C0E719B8F00D62E90 /* ic.cc */,
-				897FF14D0E719B8F00D62E90 /* ic.h */,
-				89D7DDD812E8DE09001E2B82 /* inspector.cc */,
-				89D7DDD912E8DE09001E2B82 /* inspector.h */,
-				89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */,
-				89A15C670EE4665300B48DEB /* interpreter-irregexp.h */,
-				895D5B521334212D00254083 /* isolate.cc */,
-				891C92FE133422EB00FF4757 /* isolate.h */,
-				897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
-				897FF14F0E719B8F00D62E90 /* jsregexp.h */,
-				897FF1500E719B8F00D62E90 /* list-inl.h */,
-				897FF1510E719B8F00D62E90 /* list.h */,
-				891C92FD1334226000FF4757 /* lithium-allocator-inl.h */,
-				893E249312B14B3D0083370F /* lithium-allocator.cc */,
-				893E249412B14B3D0083370F /* lithium-allocator.h */,
-				894A59E712D777E80000766D /* lithium.cc */,
-				894A59E812D777E80000766D /* lithium.h */,
-				9FA38BA91175B2D200C4CD55 /* liveedit.cc */,
-				9FA38BAA1175B2D200C4CD55 /* liveedit.h */,
-				22A76C900FF259E600FDC694 /* log-inl.h */,
-				9F4B7B870FCC877A00DC4117 /* log-utils.cc */,
-				9F4B7B880FCC877A00DC4117 /* log-utils.h */,
-				897FF1520E719B8F00D62E90 /* log.cc */,
-				897FF1530E719B8F00D62E90 /* log.h */,
-				897FF1580E719B8F00D62E90 /* macro-assembler.h */,
-				897FF1590E719B8F00D62E90 /* mark-compact.cc */,
-				897FF15A0E719B8F00D62E90 /* mark-compact.h */,
-				897FF15B0E719B8F00D62E90 /* v8memory.h */,
-				897FF15C0E719B8F00D62E90 /* messages.cc */,
-				897FF15D0E719B8F00D62E90 /* messages.h */,
-				897FF15E0E719B8F00D62E90 /* mksnapshot.cc */,
-				897FF15F0E719B8F00D62E90 /* natives.h */,
-				897FF1600E719B8F00D62E90 /* objects-debug.cc */,
-				897FF1610E719B8F00D62E90 /* objects-inl.h */,
-				8946827412C26EB700C914BC /* objects-printer.cc */,
-				C2D1E9711212F27B00187A52 /* objects-visiting.cc */,
-				C2D1E9721212F27B00187A52 /* objects-visiting.h */,
-				897FF1620E719B8F00D62E90 /* objects.cc */,
-				897FF1630E719B8F00D62E90 /* objects.h */,
-				897FF1640E719B8F00D62E90 /* parser.cc */,
-				897FF1650E719B8F00D62E90 /* parser.h */,
-				89A15C6D0EE466A900B48DEB /* platform-freebsd.cc */,
-				897FF1660E719B8F00D62E90 /* platform-linux.cc */,
-				897FF1670E719B8F00D62E90 /* platform-macos.cc */,
-				897FF1680E719B8F00D62E90 /* platform-nullos.cc */,
-				893A72230F7B0FF200303DD2 /* platform-posix.cc */,
-				9C76176D133FB7740057370B /* platform-tls-mac.h */,
-				9C8E8061133CF772004058A5 /* platform-tls.h */,
-				897FF1690E719B8F00D62E90 /* platform-win32.cc */,
-				897FF16A0E719B8F00D62E90 /* platform.h */,
-				893E249512B14B3D0083370F /* preparse-data.cc */,
-				893E249612B14B3D0083370F /* preparse-data.h */,
-				893E249812B14B3D0083370F /* preparser.cc */,
-				893E249912B14B3D0083370F /* preparser.h */,
-				897FF16B0E719B8F00D62E90 /* prettyprinter.cc */,
-				897FF16C0E719B8F00D62E90 /* prettyprinter.h */,
-				9F73E3AE114E61A100F84A5A /* profile-generator-inl.h */,
-				9F73E3AF114E61A100F84A5A /* profile-generator.cc */,
-				9F73E3B0114E61A100F84A5A /* profile-generator.h */,
-				897FF16D0E719B8F00D62E90 /* property.cc */,
-				897FF16E0E719B8F00D62E90 /* property.h */,
-				89A15C740EE466D000B48DEB /* regexp-macro-assembler-irregexp-inl.h */,
-				89A15C750EE466D000B48DEB /* regexp-macro-assembler-irregexp.cc */,
-				89A15C760EE466D000B48DEB /* regexp-macro-assembler-irregexp.h */,
-				89A15C770EE466D000B48DEB /* regexp-macro-assembler-tracer.cc */,
-				89A15C780EE466D000B48DEB /* regexp-macro-assembler-tracer.h */,
-				89A15C790EE466D000B48DEB /* regexp-macro-assembler.cc */,
-				89A15C7A0EE466D000B48DEB /* regexp-macro-assembler.h */,
-				8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */,
-				8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */,
-				897FF16F0E719B8F00D62E90 /* rewriter.cc */,
-				897FF1700E719B8F00D62E90 /* rewriter.h */,
-				893E249A12B14B3D0083370F /* runtime-profiler.cc */,
-				893E249B12B14B3D0083370F /* runtime-profiler.h */,
-				897FF1710E719B8F00D62E90 /* runtime.cc */,
-				897FF1720E719B8F00D62E90 /* runtime.h */,
-				893E249C12B14B3D0083370F /* safepoint-table.cc */,
-				893E249D12B14B3D0083370F /* safepoint-table.h */,
-				893E249E12B14B3D0083370F /* scanner-base.cc */,
-				893E249F12B14B3D0083370F /* scanner-base.h */,
-				897FF1730E719B8F00D62E90 /* scanner.cc */,
-				897FF1740E719B8F00D62E90 /* scanner.h */,
-				897FF1750E719B8F00D62E90 /* SConscript */,
-				897FF1760E719B8F00D62E90 /* scopeinfo.cc */,
-				897FF1770E719B8F00D62E90 /* scopeinfo.h */,
-				897FF1780E719B8F00D62E90 /* scopes.cc */,
-				897FF1790E719B8F00D62E90 /* scopes.h */,
-				897FF17A0E719B8F00D62E90 /* serialize.cc */,
-				897FF17B0E719B8F00D62E90 /* serialize.h */,
-				897FF17C0E719B8F00D62E90 /* shell.h */,
-				893E24A012B14B3D0083370F /* simulator.h */,
-				9C1F8E1D133906180068B362 /* small-pointer-list.h */,
-				897FF1810E719B8F00D62E90 /* smart-pointer.h */,
-				897FF1820E719B8F00D62E90 /* snapshot-common.cc */,
-				897FF1830E719B8F00D62E90 /* snapshot-empty.cc */,
-				897FF1840E719B8F00D62E90 /* snapshot.h */,
-				897FF1850E719B8F00D62E90 /* spaces-inl.h */,
-				897FF1860E719B8F00D62E90 /* spaces.cc */,
-				897FF1870E719B8F00D62E90 /* spaces.h */,
-				9FA38BAC1175B2D200C4CD55 /* splay-tree-inl.h */,
-				9FA38BAD1175B2D200C4CD55 /* splay-tree.h */,
-				893E24A112B14B3D0083370F /* string-search.cc */,
-				893E24A212B14B3D0083370F /* string-search.h */,
-				897FF1880E719B8F00D62E90 /* string-stream.cc */,
-				897FF1890E719B8F00D62E90 /* string-stream.h */,
-				893E24A312B14B3D0083370F /* strtod.cc */,
-				893E24A412B14B3D0083370F /* strtod.h */,
-				897FF18C0E719B8F00D62E90 /* stub-cache.cc */,
-				897FF18D0E719B8F00D62E90 /* stub-cache.h */,
-				897FF18E0E719B8F00D62E90 /* token.cc */,
-				897FF18F0E719B8F00D62E90 /* token.h */,
-				897FF1900E719B8F00D62E90 /* top.cc */,
-				9FA38BAE1175B2D200C4CD55 /* type-info.cc */,
-				9FA38BAF1175B2D200C4CD55 /* type-info.h */,
-				9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */,
-				9FF7A28311A642EA0051B8F2 /* unbound-queue.h */,
-				897FF1920E719B8F00D62E90 /* unicode-inl.h */,
-				897FF1930E719B8F00D62E90 /* unicode.cc */,
-				897FF1940E719B8F00D62E90 /* unicode.h */,
-				897FF1970E719B8F00D62E90 /* utils.cc */,
-				897FF1980E719B8F00D62E90 /* utils.h */,
-				897FF1990E719B8F00D62E90 /* v8-counters.cc */,
-				897FF19A0E719B8F00D62E90 /* v8-counters.h */,
-				897FF19B0E719B8F00D62E90 /* v8.cc */,
-				897FF19C0E719B8F00D62E90 /* v8.h */,
-				893E24A512B14B3D0083370F /* v8checks.h */,
-				893E24A612B14B3D0083370F /* v8globals.h */,
-				897FF19D0E719B8F00D62E90 /* v8threads.cc */,
-				897FF19E0E719B8F00D62E90 /* v8threads.h */,
-				893E24A712B14B3D0083370F /* v8utils.h */,
-				897FF19F0E719B8F00D62E90 /* variables.cc */,
-				897FF1A00E719B8F00D62E90 /* variables.h */,
-				897FF32F0FAA0ED200136CF6 /* version.cc */,
-				897FF3300FAA0ED200136CF6 /* version.h */,
-				9FA37332116DD9F000C4CD55 /* vm-state-inl.h */,
-				9FA37334116DD9F000C4CD55 /* vm-state.h */,
-				897FF1A10E719B8F00D62E90 /* zone-inl.h */,
-				897FF1A20E719B8F00D62E90 /* zone.cc */,
-				897FF1A30E719B8F00D62E90 /* zone.h */,
-			);
-			name = "C++";
-			sourceTree = "<group>";
-		};
-		897FF0CF0E71996900D62E90 /* v8 */ = {
-			isa = PBXGroup;
-			children = (
-				897FF0D10E71999E00D62E90 /* include */,
-				897FF1B30E719BCE00D62E90 /* samples */,
-				897FF0D00E71999800D62E90 /* src */,
-				897FF1B40E719BE800D62E90 /* tools */,
-			);
-			name = v8;
-			path = ..;
-			sourceTree = "<group>";
-		};
-		897FF0D00E71999800D62E90 /* src */ = {
-			isa = PBXGroup;
-			children = (
-				893E24E212B14BD20083370F /* C++ */,
-				89A9C1630E71C8E300BE6CCA /* generated */,
-				897FF0D80E719ABA00D62E90 /* js */,
-			);
-			path = src;
-			sourceTree = "<group>";
-		};
-		897FF0D10E71999E00D62E90 /* include */ = {
-			isa = PBXGroup;
-			children = (
-				893E248112B14AD40083370F /* v8-preparser.h */,
-				893E248212B14AD40083370F /* v8-testing.h */,
-				893E248312B14AD40083370F /* v8stdint.h */,
-				897FF0D40E719A8500D62E90 /* v8-debug.h */,
-				9FA36F62116BA26500C4CD55 /* v8-profiler.h */,
-				897FF0D50E719A8500D62E90 /* v8.h */,
-			);
-			path = include;
-			sourceTree = "<group>";
-		};
-		897FF0D80E719ABA00D62E90 /* js */ = {
-			isa = PBXGroup;
-			children = (
-				89FE7C0513532165008662BD /* date.js */,
-				89FE7C0613532165008662BD /* debug-debugger.js */,
-				89FE7C0713532165008662BD /* liveedit-debugger.js */,
-				89FE7C0813532165008662BD /* mirror-debugger.js */,
-				89FE7C0913532165008662BD /* regexp.js */,
-				897FF1A60E719BC100D62E90 /* apinatives.js */,
-				897FF1A70E719BC100D62E90 /* array.js */,
-				897FF1AA0E719BC100D62E90 /* math.js */,
-				897FF1AB0E719BC100D62E90 /* messages.js */,
-				897FF1AE0E719BC100D62E90 /* runtime.js */,
-				897FF1AF0E719BC100D62E90 /* string.js */,
-				897FF1B00E719BC100D62E90 /* uri.js */,
-				897FF1B10E719BC100D62E90 /* v8natives.js */,
-			);
-			name = js;
-			sourceTree = "<group>";
-		};
-		897FF1B30E719BCE00D62E90 /* samples */ = {
-			isa = PBXGroup;
-			children = (
-				893988150F2A3686007D5254 /* d8-debug.cc */,
-				893A72320F7B4AD700303DD2 /* d8-debug.h */,
-				89FB0E360F8E531900B04B3C /* d8-posix.cc */,
-				89A15C910EE46A1700B48DEB /* d8-readline.cc */,
-				89FB0E370F8E531900B04B3C /* d8-windows.cc */,
-				89A15C920EE46A1700B48DEB /* d8.cc */,
-				89A15C930EE46A1700B48DEB /* d8.h */,
-				89A15C940EE46A1700B48DEB /* d8.js */,
-				897FF1B50E719C0900D62E90 /* shell.cc */,
-			);
-			path = samples;
-			sourceTree = "<group>";
-		};
-		897FF1B40E719BE800D62E90 /* tools */ = {
-			isa = PBXGroup;
-			children = (
-				897FF1B60E719C2300D62E90 /* js2c.py */,
-				897FF1B70E719C2E00D62E90 /* macros.py */,
-				89B12E8D0E7FF2A40080BA62 /* presubmit.py */,
-			);
-			path = tools;
-			sourceTree = "<group>";
-		};
-		897FF1C00E719CB600D62E90 /* Products */ = {
-			isa = PBXGroup;
-			children = (
-				897F767A0E71B4CC007ACF34 /* v8_shell */,
-				8939880B0F2A35FA007D5254 /* d8 */,
-				89F23C950E78D5B6006B2466 /* v8_shell-arm */,
-				89F23C870E78D5B2006B2466 /* libv8-arm.a */,
-				8970F2F00E719FB2006AE7B5 /* libv8.a */,
-				897C77D912B68E3D000767A8 /* d8-arm */,
-				895692AA12D4ED240072C313 /* libv8-x64.a */,
-				89B91BBE12D4F02A002FF4BC /* v8_shell-x64 */,
-				89B91BCE12D4F02A002FF4BC /* d8-x64 */,
-			);
-			name = Products;
-			sourceTree = "<group>";
-		};
-		89A9C1630E71C8E300BE6CCA /* generated */ = {
-			isa = PBXGroup;
-			children = (
-				893988320F2A3B8B007D5254 /* d8-js.cc */,
-				8900116B0E71CA2300F91F35 /* libraries.cc */,
-			);
-			path = generated;
-			sourceTree = CONFIGURATION_TEMP_DIR;
-		};
-		89B91B7A12D4EF65002FF4BC /* x64 */ = {
-			isa = PBXGroup;
-			children = (
-				89B91B7B12D4EF95002FF4BC /* assembler-x64-inl.h */,
-				89B91B7C12D4EF95002FF4BC /* assembler-x64.cc */,
-				89B91B7D12D4EF95002FF4BC /* assembler-x64.h */,
-				89B91B7E12D4EF95002FF4BC /* builtins-x64.cc */,
-				89B91B7F12D4EF95002FF4BC /* code-stubs-x64.cc */,
-				89B91B8012D4EF95002FF4BC /* code-stubs-x64.h */,
-				89B91B8212D4EF95002FF4BC /* codegen-x64.cc */,
-				89B91B8312D4EF95002FF4BC /* codegen-x64.h */,
-				89B91B8412D4EF95002FF4BC /* cpu-x64.cc */,
-				89B91B8512D4EF95002FF4BC /* debug-x64.cc */,
-				89B91B8612D4EF95002FF4BC /* deoptimizer-x64.cc */,
-				89B91B8712D4EF95002FF4BC /* disasm-x64.cc */,
-				89B91B8812D4EF95002FF4BC /* frames-x64.cc */,
-				89B91B8912D4EF95002FF4BC /* frames-x64.h */,
-				89B91B8A12D4EF95002FF4BC /* full-codegen-x64.cc */,
-				89B91B8B12D4EF95002FF4BC /* ic-x64.cc */,
-				89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */,
-				89B91B8D12D4EF95002FF4BC /* lithium-codegen-x64.h */,
-				8924315A12F8539900906AB2 /* lithium-gap-resolver-x64.cc */,
-				8924315B12F8539900906AB2 /* lithium-gap-resolver-x64.h */,
-				8938A2A212D63B630080CDDE /* lithium-x64.cc */,
-				89B91B8E12D4EF95002FF4BC /* lithium-x64.h */,
-				89B91B8F12D4EF95002FF4BC /* macro-assembler-x64.cc */,
-				89B91B9012D4EF95002FF4BC /* macro-assembler-x64.h */,
-				89B91B9112D4EF95002FF4BC /* regexp-macro-assembler-x64.cc */,
-				89B91B9212D4EF95002FF4BC /* regexp-macro-assembler-x64.h */,
-				89B91B9612D4EF95002FF4BC /* simulator-x64.cc */,
-				89B91B9712D4EF95002FF4BC /* simulator-x64.h */,
-				89B91B9812D4EF95002FF4BC /* stub-cache-x64.cc */,
-			);
-			name = x64;
-			sourceTree = "<group>";
-		};
-		89B91C0312D4F275002FF4BC /* ia32 */ = {
-			isa = PBXGroup;
-			children = (
-				89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */,
-				89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */,
-				897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */,
-				897FF1010E719B8F00D62E90 /* assembler-ia32.cc */,
-				897FF1020E719B8F00D62E90 /* assembler-ia32.h */,
-				897FF10A0E719B8F00D62E90 /* builtins-ia32.cc */,
-				C68081B012251239001EAFE4 /* code-stubs-ia32.cc */,
-				C68081B412251257001EAFE4 /* code-stubs-ia32.h */,
-				897FF1150E719B8F00D62E90 /* codegen-ia32.cc */,
-				8964482B0E9C00F700E7C516 /* codegen-ia32.h */,
-				897FF1240E719B8F00D62E90 /* cpu-ia32.cc */,
-				898BD20D0EF6CC850068B00A /* debug-ia32.cc */,
-				893E24D012B14B8A0083370F /* deoptimizer-ia32.cc */,
-				897FF12B0E719B8F00D62E90 /* disasm-ia32.cc */,
-				897FF1390E719B8F00D62E90 /* frames-ia32.cc */,
-				897FF13A0E719B8F00D62E90 /* frames-ia32.h */,
-				9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */,
-				897FF14A0E719B8F00D62E90 /* ic-ia32.cc */,
-				893E24D112B14B8A0083370F /* lithium-codegen-ia32.cc */,
-				893E24D212B14B8A0083370F /* lithium-codegen-ia32.h */,
-				893E24D312B14B8A0083370F /* lithium-ia32.cc */,
-				893E24D412B14B8A0083370F /* lithium-ia32.h */,
-				897FF1560E719B8F00D62E90 /* macro-assembler-ia32.cc */,
-				897FF1570E719B8F00D62E90 /* macro-assembler-ia32.h */,
-				89A15C720EE466D000B48DEB /* regexp-macro-assembler-ia32.cc */,
-				89A15C730EE466D000B48DEB /* regexp-macro-assembler-ia32.h */,
-				897FF17F0E719B8F00D62E90 /* simulator-ia32.cc */,
-				897FF1800E719B8F00D62E90 /* simulator-ia32.h */,
-				897FF18B0E719B8F00D62E90 /* stub-cache-ia32.cc */,
-			);
-			name = ia32;
-			sourceTree = "<group>";
-		};
-		89B91C0C12D4F439002FF4BC /* arm */ = {
-			isa = PBXGroup;
-			children = (
-				897FF0FD0E719B8F00D62E90 /* assembler-arm-inl.h */,
-				897FF0FE0E719B8F00D62E90 /* assembler-arm.cc */,
-				897FF0FF0E719B8F00D62E90 /* assembler-arm.h */,
-				897FF1090E719B8F00D62E90 /* builtins-arm.cc */,
-				C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */,
-				C68081AC1225120B001EAFE4 /* code-stubs-arm.h */,
-				897FF1140E719B8F00D62E90 /* codegen-arm.cc */,
-				896448BC0E9D530500E7C516 /* codegen-arm.h */,
-				895FA748107FFE73006F39D4 /* constants-arm.cc */,
-				897FF11B0E719B8F00D62E90 /* constants-arm.h */,
-				898BD20C0EF6CC850068B00A /* debug-arm.cc */,
-				893E24C612B14B510083370F /* deoptimizer-arm.cc */,
-				9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */,
-				897FF1490E719B8F00D62E90 /* ic-arm.cc */,
-				893E24C712B14B510083370F /* lithium-arm.cc */,
-				893E24C812B14B510083370F /* lithium-arm.h */,
-				893E24C912B14B520083370F /* lithium-codegen-arm.cc */,
-				893E24CA12B14B520083370F /* lithium-codegen-arm.h */,
-				896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */,
-				896FA1E4130F93D300042054 /* lithium-gap-resolver-arm.h */,
-				897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */,
-				897FF1550E719B8F00D62E90 /* macro-assembler-arm.h */,
-				89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */,
-				89A15C710EE466D000B48DEB /* regexp-macro-assembler-arm.h */,
-				897FF17D0E719B8F00D62E90 /* simulator-arm.cc */,
-				897FF17E0E719B8F00D62E90 /* simulator-arm.h */,
-				897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */,
-			);
-			name = arm;
-			sourceTree = "<group>";
-		};
-/* End PBXGroup section */
-
-/* Begin PBXNativeTarget section */
-		893987FE0F2A35FA007D5254 /* d8_shell */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 893988080F2A35FA007D5254 /* Build configuration list for PBXNativeTarget "d8_shell" */;
-			buildPhases = (
-				893988220F2A376C007D5254 /* ShellScript */,
-				893988030F2A35FA007D5254 /* Sources */,
-				893988050F2A35FA007D5254 /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				893988010F2A35FA007D5254 /* PBXTargetDependency */,
-			);
-			name = d8_shell;
-			productName = v8_shell;
-			productReference = 8939880B0F2A35FA007D5254 /* d8 */;
-			productType = "com.apple.product-type.tool";
-		};
-		8956922712D4ED240072C313 /* v8-x64 */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 895692A712D4ED240072C313 /* Build configuration list for PBXNativeTarget "v8-x64" */;
-			buildPhases = (
-				8956922812D4ED240072C313 /* ShellScript */,
-				8956922912D4ED240072C313 /* Sources */,
-				895692A612D4ED240072C313 /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = "v8-x64";
-			productName = v8;
-			productReference = 895692AA12D4ED240072C313 /* libv8-x64.a */;
-			productType = "com.apple.product-type.library.static";
-		};
-		8970F2EF0E719FB2006AE7B5 /* v8 */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 8970F2F70E719FC1006AE7B5 /* Build configuration list for PBXNativeTarget "v8" */;
-			buildPhases = (
-				89EA6FB50E71AA1F00F59E1B /* ShellScript */,
-				8970F2ED0E719FB2006AE7B5 /* Sources */,
-				8970F2EE0E719FB2006AE7B5 /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = v8;
-			productName = v8;
-			productReference = 8970F2F00E719FB2006AE7B5 /* libv8.a */;
-			productType = "com.apple.product-type.library.static";
-		};
-		897C77CB12B68E3D000767A8 /* d8_shell-arm */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 897C77D612B68E3D000767A8 /* Build configuration list for PBXNativeTarget "d8_shell-arm" */;
-			buildPhases = (
-				897C77CE12B68E3D000767A8 /* ShellScript */,
-				897C77CF12B68E3D000767A8 /* Sources */,
-				897C77D412B68E3D000767A8 /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				897C77DC12B68E5D000767A8 /* PBXTargetDependency */,
-			);
-			name = "d8_shell-arm";
-			productName = v8_shell;
-			productReference = 897C77D912B68E3D000767A8 /* d8-arm */;
-			productType = "com.apple.product-type.tool";
-		};
-		897F76790E71B4CC007ACF34 /* v8_shell */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 897F767E0E71B4EA007ACF34 /* Build configuration list for PBXNativeTarget "v8_shell" */;
-			buildPhases = (
-				897F76770E71B4CC007ACF34 /* Sources */,
-				897F76780E71B4CC007ACF34 /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				897F76830E71B6AC007ACF34 /* PBXTargetDependency */,
-			);
-			name = v8_shell;
-			productName = v8_shell;
-			productReference = 897F767A0E71B4CC007ACF34 /* v8_shell */;
-			productType = "com.apple.product-type.tool";
-		};
-		89B91BB412D4F02A002FF4BC /* v8_shell-x64 */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 89B91BBB12D4F02A002FF4BC /* Build configuration list for PBXNativeTarget "v8_shell-x64" */;
-			buildPhases = (
-				89B91BB712D4F02A002FF4BC /* Sources */,
-				89B91BB912D4F02A002FF4BC /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				89B91BD112D4F036002FF4BC /* PBXTargetDependency */,
-			);
-			name = "v8_shell-x64";
-			productName = "v8_shell-arm";
-			productReference = 89B91BBE12D4F02A002FF4BC /* v8_shell-x64 */;
-			productType = "com.apple.product-type.tool";
-		};
-		89B91BC012D4F02A002FF4BC /* d8_shell-x64 */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 89B91BCB12D4F02A002FF4BC /* Build configuration list for PBXNativeTarget "d8_shell-x64" */;
-			buildPhases = (
-				89B91BC312D4F02A002FF4BC /* ShellScript */,
-				89B91BC412D4F02A002FF4BC /* Sources */,
-				89B91BC912D4F02A002FF4BC /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				89B91BFD12D4F1BF002FF4BC /* PBXTargetDependency */,
-			);
-			name = "d8_shell-x64";
-			productName = v8_shell;
-			productReference = 89B91BCE12D4F02A002FF4BC /* d8-x64 */;
-			productType = "com.apple.product-type.tool";
-		};
-		89F23C3C0E78D5B2006B2466 /* v8-arm */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 89F23C840E78D5B2006B2466 /* Build configuration list for PBXNativeTarget "v8-arm" */;
-			buildPhases = (
-				89F23C3D0E78D5B2006B2466 /* ShellScript */,
-				89F23C3E0E78D5B2006B2466 /* Sources */,
-				89F23C830E78D5B2006B2466 /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = "v8-arm";
-			productName = "v8-arm";
-			productReference = 89F23C870E78D5B2006B2466 /* libv8-arm.a */;
-			productType = "com.apple.product-type.library.static";
-		};
-		89F23C880E78D5B6006B2466 /* v8_shell-arm */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 89F23C920E78D5B6006B2466 /* Build configuration list for PBXNativeTarget "v8_shell-arm" */;
-			buildPhases = (
-				89F23C8D0E78D5B6006B2466 /* Sources */,
-				89F23C8F0E78D5B6006B2466 /* Frameworks */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				896FD03C0E78D71F003DFB6A /* PBXTargetDependency */,
-			);
-			name = "v8_shell-arm";
-			productName = "v8_shell-arm";
-			productReference = 89F23C950E78D5B6006B2466 /* v8_shell-arm */;
-			productType = "com.apple.product-type.tool";
-		};
-/* End PBXNativeTarget section */
-
-/* Begin PBXProject section */
-		8915B8680E719336009C4E19 /* Project object */ = {
-			isa = PBXProject;
-			buildConfigurationList = 8915B86B0E719336009C4E19 /* Build configuration list for PBXProject "v8" */;
-			compatibilityVersion = "Xcode 3.1";
-			developmentRegion = English;
-			hasScannedForEncodings = 0;
-			knownRegions = (
-				English,
-				Japanese,
-				French,
-				German,
-			);
-			mainGroup = 8915B8660E719336009C4E19;
-			productRefGroup = 897FF1C00E719CB600D62E90 /* Products */;
-			projectDirPath = "";
-			projectRoot = ..;
-			targets = (
-				7BF891930E73098D000BAF8A /* All */,
-				8970F2EF0E719FB2006AE7B5 /* v8 */,
-				897F76790E71B4CC007ACF34 /* v8_shell */,
-				893987FE0F2A35FA007D5254 /* d8_shell */,
-				89F23C3C0E78D5B2006B2466 /* v8-arm */,
-				89F23C880E78D5B6006B2466 /* v8_shell-arm */,
-				897C77CB12B68E3D000767A8 /* d8_shell-arm */,
-				8956922712D4ED240072C313 /* v8-x64 */,
-				89B91BB412D4F02A002FF4BC /* v8_shell-x64 */,
-				89B91BC012D4F02A002FF4BC /* d8_shell-x64 */,
-			);
-		};
-/* End PBXProject section */
-
-/* Begin PBXShellScriptBuildPhase section */
-		893988220F2A376C007D5254 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "set -ex\nJS_FILES=\"d8.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nD8_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js.cc\"\nD8_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${D8_CC}.new\" \\\n  \"${D8_EMPTY_CC}.new\" \\\n  \"D8\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${D8_CC}.new\" \"${D8_CC}\" >& /dev/null ; then\n  mv \"${D8_CC}.new\" \"${D8_CC}\"\nelse\n  rm \"${D8_CC}.new\"\nfi\n\nif ! diff -q \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\"\nelse\n  rm \"${D8_EMPTY_CC}.new\"\nfi\n";
-		};
-		8956922812D4ED240072C313 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "set -ex\nJS_FILES=\"runtime.js\"\\\n\" v8natives.js\"\\\n\" array.js\"\\\n\" string.js\"\\\n\" uri.js\"\\\n\" math.js\"\\\n\" messages.js\"\\\n\" apinatives.js\"\\\n\" debug-debugger.js\"\\\n\" liveedit-debugger.js\"\\\n\" mirror-debugger.js\"\\\n\" date.js\"\\\n\" json.js\"\\\n\" regexp.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nLIBRARIES_CC=\"${V8_GENERATED_SOURCES_DIR}/libraries.cc\"\nLIBRARIES_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/libraries-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${LIBRARIES_CC}.new\" \\\n  \"${LIBRARIES_EMPTY_CC}.new\" \\\n  \"CORE\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${LIBRARIES_CC}.new\" \"${LIBRARIES_CC}\" >& /dev/null ; then\n  mv \"${LIBRARIES_CC}.new\" \"${LIBRARIES_CC}\"\nelse\n  rm \"${LIBRARIES_CC}.new\"\nfi\n\nif ! diff -q \"${LIBRARIES_EMPTY_CC}.new\" \"${LIBRARIES_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${LIBRARIES_EMPTY_CC}.new\" \"${LIBRARIES_EMPTY_CC}\"\nelse\n  rm \"${LIBRARIES_EMPTY_CC}.new\"\nfi\n";
-		};
-		897C77CE12B68E3D000767A8 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "set -ex\nJS_FILES=\"d8.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nD8_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js.cc\"\nD8_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${D8_CC}.new\" \\\n  \"${D8_EMPTY_CC}.new\" \\\n  \"D8\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${D8_CC}.new\" \"${D8_CC}\" >& /dev/null ; then\n  mv \"${D8_CC}.new\" \"${D8_CC}\"\nelse\n  rm \"${D8_CC}.new\"\nfi\n\nif ! diff -q \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\"\nelse\n  rm \"${D8_EMPTY_CC}.new\"\nfi\n";
-		};
-		89B91BC312D4F02A002FF4BC /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "set -ex\nJS_FILES=\"d8.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nD8_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js.cc\"\nD8_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${D8_CC}.new\" \\\n  \"${D8_EMPTY_CC}.new\" \\\n  \"D8\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${D8_CC}.new\" \"${D8_CC}\" >& /dev/null ; then\n  mv \"${D8_CC}.new\" \"${D8_CC}\"\nelse\n  rm \"${D8_CC}.new\"\nfi\n\nif ! diff -q \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\"\nelse\n  rm \"${D8_EMPTY_CC}.new\"\nfi\n";
-		};
-		89EA6FB50E71AA1F00F59E1B /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "set -ex\nJS_FILES=\"runtime.js\"\\\n\" v8natives.js\"\\\n\" array.js\"\\\n\" string.js\"\\\n\" uri.js\"\\\n\" math.js\"\\\n\" messages.js\"\\\n\" apinatives.js\"\\\n\" debug-debugger.js\"\\\n\" liveedit-debugger.js\"\\\n\" mirror-debugger.js\"\\\n\" date.js\"\\\n\" json.js\"\\\n\" regexp.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nLIBRARIES_CC=\"${V8_GENERATED_SOURCES_DIR}/libraries.cc\"\nLIBRARIES_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/libraries-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${LIBRARIES_CC}.new\" \\\n  \"${LIBRARIES_EMPTY_CC}.new\" \\\n  \"CORE\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${LIBRARIES_CC}.new\" \"${LIBRARIES_CC}\" >& /dev/null ; then\n  mv \"${LIBRARIES_CC}.new\" \"${LIBRARIES_CC}\"\nelse\n  rm \"${LIBRARIES_CC}.new\"\nfi\n\nif ! diff -q \"${LIBRARIES_EMPTY_CC}.new\" \"${LIBRARIES_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${LIBRARIES_EMPTY_CC}.new\" \"${LIBRARIES_EMPTY_CC}\"\nelse\n  rm \"${LIBRARIES_EMPTY_CC}.new\"\nfi\n";
-		};
-		89F23C3D0E78D5B2006B2466 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "set -ex\nJS_FILES=\"runtime.js\"\\\n\" v8natives.js\"\\\n\" array.js\"\\\n\" string.js\"\\\n\" uri.js\"\\\n\" math.js\"\\\n\" messages.js\"\\\n\" apinatives.js\"\\\n\" debug-debugger.js\"\\\n\" liveedit-debugger.js\"\\\n\" mirror-debugger.js\"\\\n\" date.js\"\\\n\" json.js\"\\\n\" regexp.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nLIBRARIES_CC=\"${V8_GENERATED_SOURCES_DIR}/libraries.cc\"\nLIBRARIES_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/libraries-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${LIBRARIES_CC}.new\" \\\n  \"${LIBRARIES_EMPTY_CC}.new\" \\\n  \"CORE\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${LIBRARIES_CC}.new\" \"${LIBRARIES_CC}\" >& /dev/null ; then\n  mv \"${LIBRARIES_CC}.new\" \"${LIBRARIES_CC}\"\nelse\n  rm \"${LIBRARIES_CC}.new\"\nfi\n\nif ! diff -q \"${LIBRARIES_EMPTY_CC}.new\" \"${LIBRARIES_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${LIBRARIES_EMPTY_CC}.new\" \"${LIBRARIES_EMPTY_CC}\"\nelse\n  rm \"${LIBRARIES_EMPTY_CC}.new\"\nfi\n";
-		};
-/* End PBXShellScriptBuildPhase section */
-
-/* Begin PBXSourcesBuildPhase section */
-		893988030F2A35FA007D5254 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				893988160F2A3688007D5254 /* d8-debug.cc in Sources */,
-				893988330F2A3B8F007D5254 /* d8-js.cc in Sources */,
-				89FB0E3A0F8E533F00B04B3C /* d8-posix.cc in Sources */,
-				8939880D0F2A362A007D5254 /* d8.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		8956922912D4ED240072C313 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				8956922A12D4ED240072C313 /* objects-visiting.cc in Sources */,
-				8956922B12D4ED240072C313 /* accessors.cc in Sources */,
-				8956922C12D4ED240072C313 /* allocation.cc in Sources */,
-				8956922D12D4ED240072C313 /* api.cc in Sources */,
-				8956922F12D4ED240072C313 /* assembler.cc in Sources */,
-				8956923012D4ED240072C313 /* ast.cc in Sources */,
-				8956923112D4ED240072C313 /* bootstrapper.cc in Sources */,
-				8956923312D4ED240072C313 /* builtins.cc in Sources */,
-				8956923412D4ED240072C313 /* checks.cc in Sources */,
-				8956923512D4ED240072C313 /* circular-queue.cc in Sources */,
-				8956923612D4ED240072C313 /* code-stubs.cc in Sources */,
-				8956923812D4ED240072C313 /* codegen.cc in Sources */,
-				8956923912D4ED240072C313 /* compilation-cache.cc in Sources */,
-				8956923A12D4ED240072C313 /* compiler.cc in Sources */,
-				8956923B12D4ED240072C313 /* contexts.cc in Sources */,
-				8956923C12D4ED240072C313 /* conversions.cc in Sources */,
-				8956923D12D4ED240072C313 /* fixed-dtoa.cc in Sources */,
-				8956923E12D4ED240072C313 /* counters.cc in Sources */,
-				8956924012D4ED240072C313 /* cpu-profiler.cc in Sources */,
-				8956924112D4ED240072C313 /* data-flow.cc in Sources */,
-				8956924212D4ED240072C313 /* dateparser.cc in Sources */,
-				8956924312D4ED240072C313 /* debug-agent.cc in Sources */,
-				8956924512D4ED240072C313 /* dtoa.cc in Sources */,
-				8956924612D4ED240072C313 /* debug.cc in Sources */,
-				8956924812D4ED240072C313 /* disassembler.cc in Sources */,
-				8956924912D4ED240072C313 /* diy-fp.cc in Sources */,
-				8956924A12D4ED240072C313 /* execution.cc in Sources */,
-				8956924B12D4ED240072C313 /* factory.cc in Sources */,
-				8956924C12D4ED240072C313 /* fast-dtoa.cc in Sources */,
-				8956924D12D4ED240072C313 /* flags.cc in Sources */,
-				8956924E12D4ED240072C313 /* frame-element.cc in Sources */,
-				8956925012D4ED240072C313 /* frames.cc in Sources */,
-				8956925212D4ED240072C313 /* full-codegen.cc in Sources */,
-				8956925312D4ED240072C313 /* func-name-inferrer.cc in Sources */,
-				8956925412D4ED240072C313 /* global-handles.cc in Sources */,
-				8956925512D4ED240072C313 /* handles.cc in Sources */,
-				8956925612D4ED240072C313 /* hashmap.cc in Sources */,
-				8956925712D4ED240072C313 /* heap-profiler.cc in Sources */,
-				8956925812D4ED240072C313 /* heap.cc in Sources */,
-				8956925912D4ED240072C313 /* ic-ia32.cc in Sources */,
-				8956925A12D4ED240072C313 /* ic.cc in Sources */,
-				8956925B12D4ED240072C313 /* interpreter-irregexp.cc in Sources */,
-				8956925C12D4ED240072C313 /* jsregexp.cc in Sources */,
-				8956926012D4ED240072C313 /* libraries.cc in Sources */,
-				8956926112D4ED240072C313 /* liveedit.cc in Sources */,
-				8956926212D4ED240072C313 /* log-utils.cc in Sources */,
-				8956926312D4ED240072C313 /* log.cc in Sources */,
-				8956926512D4ED240072C313 /* mark-compact.cc in Sources */,
-				8956926612D4ED240072C313 /* messages.cc in Sources */,
-				8956926712D4ED240072C313 /* objects-debug.cc in Sources */,
-				8956926812D4ED240072C313 /* objects.cc in Sources */,
-				8956926A12D4ED240072C313 /* parser.cc in Sources */,
-				8956926B12D4ED240072C313 /* platform-macos.cc in Sources */,
-				8956926C12D4ED240072C313 /* platform-posix.cc in Sources */,
-				8956926D12D4ED240072C313 /* prettyprinter.cc in Sources */,
-				8956926E12D4ED240072C313 /* profile-generator.cc in Sources */,
-				8956926F12D4ED240072C313 /* property.cc in Sources */,
-				8956927112D4ED240072C313 /* regexp-macro-assembler-irregexp.cc in Sources */,
-				8956927212D4ED240072C313 /* regexp-macro-assembler-tracer.cc in Sources */,
-				8956927312D4ED240072C313 /* regexp-macro-assembler.cc in Sources */,
-				8956927412D4ED240072C313 /* regexp-stack.cc in Sources */,
-				8956927712D4ED240072C313 /* rewriter.cc in Sources */,
-				8956927812D4ED240072C313 /* runtime.cc in Sources */,
-				8956927912D4ED240072C313 /* scanner.cc in Sources */,
-				8956927A12D4ED240072C313 /* scopeinfo.cc in Sources */,
-				8956927B12D4ED240072C313 /* scopes.cc in Sources */,
-				8956927C12D4ED240072C313 /* serialize.cc in Sources */,
-				8956927D12D4ED240072C313 /* snapshot-common.cc in Sources */,
-				8956927E12D4ED240072C313 /* snapshot-empty.cc in Sources */,
-				8956927F12D4ED240072C313 /* spaces.cc in Sources */,
-				8956928012D4ED240072C313 /* string-stream.cc in Sources */,
-				8956928212D4ED240072C313 /* stub-cache.cc in Sources */,
-				8956928312D4ED240072C313 /* token.cc in Sources */,
-				8956928412D4ED240072C313 /* top.cc in Sources */,
-				8956928512D4ED240072C313 /* type-info.cc in Sources */,
-				8956928612D4ED240072C313 /* unicode.cc in Sources */,
-				8956928712D4ED240072C313 /* utils.cc in Sources */,
-				8956928812D4ED240072C313 /* v8-counters.cc in Sources */,
-				8956928912D4ED240072C313 /* v8.cc in Sources */,
-				8956928A12D4ED240072C313 /* v8threads.cc in Sources */,
-				8956928B12D4ED240072C313 /* variables.cc in Sources */,
-				8956928C12D4ED240072C313 /* version.cc in Sources */,
-				8956929012D4ED240072C313 /* zone.cc in Sources */,
-				8956929212D4ED240072C313 /* bignum-dtoa.cc in Sources */,
-				8956929312D4ED240072C313 /* bignum.cc in Sources */,
-				8956929412D4ED240072C313 /* cached-powers.cc in Sources */,
-				8956929512D4ED240072C313 /* deoptimizer.cc in Sources */,
-				8956929612D4ED240072C313 /* hydrogen-instructions.cc in Sources */,
-				8956929712D4ED240072C313 /* hydrogen.cc in Sources */,
-				8956929812D4ED240072C313 /* lithium-allocator.cc in Sources */,
-				8956929912D4ED240072C313 /* preparse-data.cc in Sources */,
-				8956929A12D4ED240072C313 /* preparser.cc in Sources */,
-				8956929B12D4ED240072C313 /* runtime-profiler.cc in Sources */,
-				8956929C12D4ED240072C313 /* safepoint-table.cc in Sources */,
-				8956929D12D4ED240072C313 /* scanner-base.cc in Sources */,
-				8956929E12D4ED240072C313 /* string-search.cc in Sources */,
-				8956929F12D4ED240072C313 /* strtod.cc in Sources */,
-				895692A312D4ED240072C313 /* externalize-string-extension.cc in Sources */,
-				895692A412D4ED240072C313 /* gc-extension.cc in Sources */,
-				895692A512D4ED240072C313 /* objects-printer.cc in Sources */,
-				89B91B9B12D4EF95002FF4BC /* assembler-x64.cc in Sources */,
-				89B91B9C12D4EF95002FF4BC /* builtins-x64.cc in Sources */,
-				89B91B9D12D4EF95002FF4BC /* code-stubs-x64.cc in Sources */,
-				89B91B9E12D4EF95002FF4BC /* codegen-x64.cc in Sources */,
-				89B91B9F12D4EF95002FF4BC /* cpu-x64.cc in Sources */,
-				89B91BA012D4EF95002FF4BC /* debug-x64.cc in Sources */,
-				89B91BA112D4EF95002FF4BC /* deoptimizer-x64.cc in Sources */,
-				89B91BA212D4EF95002FF4BC /* disasm-x64.cc in Sources */,
-				89B91BA312D4EF95002FF4BC /* frames-x64.cc in Sources */,
-				89B91BA412D4EF95002FF4BC /* full-codegen-x64.cc in Sources */,
-				89B91BA512D4EF95002FF4BC /* ic-x64.cc in Sources */,
-				89B91BA712D4EF95002FF4BC /* macro-assembler-x64.cc in Sources */,
-				89B91BA812D4EF95002FF4BC /* regexp-macro-assembler-x64.cc in Sources */,
-				89B91BAA12D4EF95002FF4BC /* simulator-x64.cc in Sources */,
-				89B91BAB12D4EF95002FF4BC /* stub-cache-x64.cc in Sources */,
-				8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */,
-				894A59E912D777E80000766D /* lithium.cc in Sources */,
-				89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */,
-				89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
-				89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */,
-				8924315C12F8539900906AB2 /* lithium-gap-resolver-x64.cc in Sources */,
-				895D5B551334212D00254083 /* isolate.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		8970F2ED0E719FB2006AE7B5 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				C2D1E9731212F2BC00187A52 /* objects-visiting.cc in Sources */,
-				89A88DEC0E71A5FF0043BA31 /* accessors.cc in Sources */,
-				89A88DED0E71A6000043BA31 /* allocation.cc in Sources */,
-				89A88DEE0E71A6010043BA31 /* api.cc in Sources */,
-				89A88DEF0E71A60A0043BA31 /* assembler-ia32.cc in Sources */,
-				89A88DF00E71A60A0043BA31 /* assembler.cc in Sources */,
-				89A88DF10E71A60B0043BA31 /* ast.cc in Sources */,
-				89A88DF20E71A60C0043BA31 /* bootstrapper.cc in Sources */,
-				89A88DF40E71A6160043BA31 /* builtins-ia32.cc in Sources */,
-				89A88DF50E71A6170043BA31 /* builtins.cc in Sources */,
-				89A88DF60E71A61C0043BA31 /* checks.cc in Sources */,
-				9F2B3712114FF62D007CDAF4 /* circular-queue.cc in Sources */,
-				893CCE640E71D83700357A03 /* code-stubs.cc in Sources */,
-				89A88DF70E71A6240043BA31 /* codegen-ia32.cc in Sources */,
-				89A88DF80E71A6260043BA31 /* codegen.cc in Sources */,
-				89495E480E79FC23001F68C3 /* compilation-cache.cc in Sources */,
-				89A88DF90E71A6430043BA31 /* compiler.cc in Sources */,
-				89A88DFA0E71A6440043BA31 /* contexts.cc in Sources */,
-				89A88DFB0E71A6440043BA31 /* conversions.cc in Sources */,
-				C2BD4BDB120165A70046BF9F /* fixed-dtoa.cc in Sources */,
-				89A88DFC0E71A6460043BA31 /* counters.cc in Sources */,
-				89A88DFD0E71A6470043BA31 /* cpu-ia32.cc in Sources */,
-				9F2B37271152CEA0007CDAF4 /* cpu-profiler.cc in Sources */,
-				9FA38BBA1175B2D200C4CD55 /* data-flow.cc in Sources */,
-				89A88DFE0E71A6480043BA31 /* dateparser.cc in Sources */,
-				8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */,
-				898BD20E0EF6CC930068B00A /* debug-ia32.cc in Sources */,
-				C2BD4BD7120165460046BF9F /* dtoa.cc in Sources */,
-				89A88DFF0E71A6530043BA31 /* debug.cc in Sources */,
-				89A88E000E71A6540043BA31 /* disasm-ia32.cc in Sources */,
-				89A88E010E71A6550043BA31 /* disassembler.cc in Sources */,
-				9FA38BBB1175B2D200C4CD55 /* diy-fp.cc in Sources */,
-				89A88E030E71A65B0043BA31 /* execution.cc in Sources */,
-				89A88E040E71A65D0043BA31 /* factory.cc in Sources */,
-				9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
-				89A88E050E71A65D0043BA31 /* flags.cc in Sources */,
-				8981F6001010501900D1520E /* frame-element.cc in Sources */,
-				89A88E060E71A6600043BA31 /* frames-ia32.cc in Sources */,
-				89A88E070E71A6610043BA31 /* frames.cc in Sources */,
-				9FA38BC51175B2E500C4CD55 /* full-codegen-ia32.cc in Sources */,
-				9FA38BBE1175B2D200C4CD55 /* full-codegen.cc in Sources */,
-				9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */,
-				89A88E080E71A6620043BA31 /* global-handles.cc in Sources */,
-				89A88E090E71A6640043BA31 /* handles.cc in Sources */,
-				89A88E0A0E71A6650043BA31 /* hashmap.cc in Sources */,
-				9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
-				89A88E0B0E71A66C0043BA31 /* heap.cc in Sources */,
-				89A88E0C0E71A66D0043BA31 /* ic-ia32.cc in Sources */,
-				89A88E0D0E71A66E0043BA31 /* ic.cc in Sources */,
-				89A15C850EE4678B00B48DEB /* interpreter-irregexp.cc in Sources */,
-				89A88E0E0E71A66F0043BA31 /* jsregexp.cc in Sources */,
-				8900116C0E71CA2300F91F35 /* libraries.cc in Sources */,
-				9FA38BBF1175B2D200C4CD55 /* liveedit.cc in Sources */,
-				9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */,
-				89A88E0F0E71A6740043BA31 /* log.cc in Sources */,
-				89A88E100E71A6770043BA31 /* macro-assembler-ia32.cc in Sources */,
-				89A88E110E71A6780043BA31 /* mark-compact.cc in Sources */,
-				89A88E120E71A67A0043BA31 /* messages.cc in Sources */,
-				89A88E130E71A6860043BA31 /* objects-debug.cc in Sources */,
-				89A88E140E71A6870043BA31 /* objects.cc in Sources */,
-				89A88E150E71A68C0043BA31 /* parser.cc in Sources */,
-				89A88E160E71A68E0043BA31 /* platform-macos.cc in Sources */,
-				893A72240F7B101400303DD2 /* platform-posix.cc in Sources */,
-				89A88E170E71A6950043BA31 /* prettyprinter.cc in Sources */,
-				9F73E3B2114E61A100F84A5A /* profile-generator.cc in Sources */,
-				89A88E180E71A6960043BA31 /* property.cc in Sources */,
-				89A15C7B0EE466EB00B48DEB /* regexp-macro-assembler-ia32.cc in Sources */,
-				89A15C830EE4675E00B48DEB /* regexp-macro-assembler-irregexp.cc in Sources */,
-				89A15C8A0EE467D100B48DEB /* regexp-macro-assembler-tracer.cc in Sources */,
-				89A15C810EE4674900B48DEB /* regexp-macro-assembler.cc in Sources */,
-				8944AD100F1D4D500028D560 /* regexp-stack.cc in Sources */,
-				89A88E190E71A6970043BA31 /* rewriter.cc in Sources */,
-				89A88E1A0E71A69B0043BA31 /* runtime.cc in Sources */,
-				89A88E1B0E71A69D0043BA31 /* scanner.cc in Sources */,
-				89A88E1C0E71A69E0043BA31 /* scopeinfo.cc in Sources */,
-				89A88E1D0E71A6A00043BA31 /* scopes.cc in Sources */,
-				89A88E1E0E71A6A30043BA31 /* serialize.cc in Sources */,
-				89A88E1F0E71A6B40043BA31 /* snapshot-common.cc in Sources */,
-				89A88E200E71A6B60043BA31 /* snapshot-empty.cc in Sources */,
-				89A88E210E71A6B70043BA31 /* spaces.cc in Sources */,
-				89A88E220E71A6BC0043BA31 /* string-stream.cc in Sources */,
-				89A88E230E71A6BE0043BA31 /* stub-cache-ia32.cc in Sources */,
-				89A88E240E71A6BF0043BA31 /* stub-cache.cc in Sources */,
-				89A88E250E71A6C20043BA31 /* token.cc in Sources */,
-				89A88E260E71A6C90043BA31 /* top.cc in Sources */,
-				9FA38BC01175B2D200C4CD55 /* type-info.cc in Sources */,
-				89A88E270E71A6CB0043BA31 /* unicode.cc in Sources */,
-				89A88E290E71A6CE0043BA31 /* utils.cc in Sources */,
-				89A88E2A0E71A6D00043BA31 /* v8-counters.cc in Sources */,
-				89A88E2B0E71A6D10043BA31 /* v8.cc in Sources */,
-				89A88E2C0E71A6D20043BA31 /* v8threads.cc in Sources */,
-				89A88E2D0E71A6D50043BA31 /* variables.cc in Sources */,
-				89B933AF0FAA0F9600201304 /* version.cc in Sources */,
-				89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
-				C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */,
-				893E24B712B14B3D0083370F /* bignum-dtoa.cc in Sources */,
-				893E24B812B14B3D0083370F /* bignum.cc in Sources */,
-				893E24B912B14B3D0083370F /* cached-powers.cc in Sources */,
-				893E24BA12B14B3D0083370F /* deoptimizer.cc in Sources */,
-				893E24BB12B14B3D0083370F /* hydrogen-instructions.cc in Sources */,
-				893E24BC12B14B3D0083370F /* hydrogen.cc in Sources */,
-				893E24BD12B14B3D0083370F /* lithium-allocator.cc in Sources */,
-				893E24BE12B14B3D0083370F /* preparse-data.cc in Sources */,
-				893E24C012B14B3D0083370F /* preparser.cc in Sources */,
-				893E24C112B14B3D0083370F /* runtime-profiler.cc in Sources */,
-				893E24C212B14B3D0083370F /* safepoint-table.cc in Sources */,
-				893E24C312B14B3D0083370F /* scanner-base.cc in Sources */,
-				893E24C412B14B3D0083370F /* string-search.cc in Sources */,
-				893E24C512B14B3D0083370F /* strtod.cc in Sources */,
-				893E24D512B14B8A0083370F /* deoptimizer-ia32.cc in Sources */,
-				893E24D612B14B8A0083370F /* lithium-codegen-ia32.cc in Sources */,
-				893E24D712B14B8A0083370F /* lithium-ia32.cc in Sources */,
-				893E24DC12B14B9F0083370F /* externalize-string-extension.cc in Sources */,
-				893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */,
-				8946827512C26EB700C914BC /* objects-printer.cc in Sources */,
-				894A59EB12D777E80000766D /* lithium.cc in Sources */,
-				89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */,
-				89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
-				89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */,
-				895D5B531334212D00254083 /* isolate.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		897C77CF12B68E3D000767A8 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				897C77D012B68E3D000767A8 /* d8-debug.cc in Sources */,
-				897C77D112B68E3D000767A8 /* d8-js.cc in Sources */,
-				897C77D212B68E3D000767A8 /* d8-posix.cc in Sources */,
-				897C77D312B68E3D000767A8 /* d8.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		897F76770E71B4CC007ACF34 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				897F767F0E71B690007ACF34 /* shell.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89B91BB712D4F02A002FF4BC /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				89B91BB812D4F02A002FF4BC /* shell.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89B91BC412D4F02A002FF4BC /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				89B91BC512D4F02A002FF4BC /* d8-debug.cc in Sources */,
-				89B91BC612D4F02A002FF4BC /* d8-js.cc in Sources */,
-				89B91BC712D4F02A002FF4BC /* d8-posix.cc in Sources */,
-				89B91BC812D4F02A002FF4BC /* d8.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89F23C3E0E78D5B2006B2466 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				C2D1E9741212F2CF00187A52 /* objects-visiting.cc in Sources */,
-				89F23C3F0E78D5B2006B2466 /* accessors.cc in Sources */,
-				89F23C400E78D5B2006B2466 /* allocation.cc in Sources */,
-				89F23C410E78D5B2006B2466 /* api.cc in Sources */,
-				89F23C970E78D5E3006B2466 /* assembler-arm.cc in Sources */,
-				89F23C430E78D5B2006B2466 /* assembler.cc in Sources */,
-				89F23C440E78D5B2006B2466 /* ast.cc in Sources */,
-				89F23C450E78D5B2006B2466 /* bootstrapper.cc in Sources */,
-				89F23C980E78D5E7006B2466 /* builtins-arm.cc in Sources */,
-				89F23C470E78D5B2006B2466 /* builtins.cc in Sources */,
-				89F23C480E78D5B2006B2466 /* checks.cc in Sources */,
-				9F2B3711114FF62D007CDAF4 /* circular-queue.cc in Sources */,
-				89F23C490E78D5B2006B2466 /* code-stubs.cc in Sources */,
-				89F23C990E78D5E9006B2466 /* codegen-arm.cc in Sources */,
-				89F23C4B0E78D5B2006B2466 /* codegen.cc in Sources */,
-				89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */,
-				89F23C4C0E78D5B2006B2466 /* compiler.cc in Sources */,
-				895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */,
-				89F23C4D0E78D5B2006B2466 /* contexts.cc in Sources */,
-				89F23C4E0E78D5B2006B2466 /* conversions.cc in Sources */,
-				89F23C4F0E78D5B2006B2466 /* counters.cc in Sources */,
-				89F23C9A0E78D5EC006B2466 /* cpu-arm.cc in Sources */,
-				9F2B37261152CEA0007CDAF4 /* cpu-profiler.cc in Sources */,
-				C2BD4BE4120166180046BF9F /* fixed-dtoa.cc in Sources */,
-				C2BD4BE51201661F0046BF9F /* dtoa.cc in Sources */,
-				9FA38BB31175B2D200C4CD55 /* data-flow.cc in Sources */,
-				89F23C510E78D5B2006B2466 /* dateparser.cc in Sources */,
-				894599A30F5D8729008DA8FB /* debug-agent.cc in Sources */,
-				898BD20F0EF6CC9A0068B00A /* debug-arm.cc in Sources */,
-				89F23C520E78D5B2006B2466 /* debug.cc in Sources */,
-				89F23C9B0E78D5EE006B2466 /* disasm-arm.cc in Sources */,
-				89F23C540E78D5B2006B2466 /* disassembler.cc in Sources */,
-				9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */,
-				89F23C560E78D5B2006B2466 /* execution.cc in Sources */,
-				89F23C570E78D5B2006B2466 /* factory.cc in Sources */,
-				9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
-				89F23C580E78D5B2006B2466 /* flags.cc in Sources */,
-				8981F6011010502800D1520E /* frame-element.cc in Sources */,
-				89F23C9C0E78D5F1006B2466 /* frames-arm.cc in Sources */,
-				89F23C5A0E78D5B2006B2466 /* frames.cc in Sources */,
-				9FA38BCF1175B30400C4CD55 /* full-codegen-arm.cc in Sources */,
-				9FA38BB71175B2D200C4CD55 /* full-codegen.cc in Sources */,
-				9F92FAAA0F8F28AD0089F02C /* func-name-inferrer.cc in Sources */,
-				89F23C5B0E78D5B2006B2466 /* global-handles.cc in Sources */,
-				89F23C5C0E78D5B2006B2466 /* handles.cc in Sources */,
-				89F23C5D0E78D5B2006B2466 /* hashmap.cc in Sources */,
-				9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
-				89F23C5E0E78D5B2006B2466 /* heap.cc in Sources */,
-				89F23C9D0E78D5FB006B2466 /* ic-arm.cc in Sources */,
-				89F23C600E78D5B2006B2466 /* ic.cc in Sources */,
-				890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */,
-				89F23C610E78D5B2006B2466 /* jsregexp.cc in Sources */,
-				89F23C620E78D5B2006B2466 /* libraries.cc in Sources */,
-				9FA38BB81175B2D200C4CD55 /* liveedit.cc in Sources */,
-				9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */,
-				89F23C630E78D5B2006B2466 /* log.cc in Sources */,
-				89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */,
-				89F23C650E78D5B2006B2466 /* mark-compact.cc in Sources */,
-				89F23C660E78D5B2006B2466 /* messages.cc in Sources */,
-				89F23C670E78D5B2006B2466 /* objects-debug.cc in Sources */,
-				89F23C680E78D5B2006B2466 /* objects.cc in Sources */,
-				89F23C690E78D5B2006B2466 /* parser.cc in Sources */,
-				89F23C6A0E78D5B2006B2466 /* platform-macos.cc in Sources */,
-				893A72250F7B101B00303DD2 /* platform-posix.cc in Sources */,
-				89F23C6B0E78D5B2006B2466 /* prettyprinter.cc in Sources */,
-				9F73E3B1114E61A100F84A5A /* profile-generator.cc in Sources */,
-				89F23C6C0E78D5B2006B2466 /* property.cc in Sources */,
-				890A14010EE9C4B000E49346 /* regexp-macro-assembler-arm.cc in Sources */,
-				890A14020EE9C4B400E49346 /* regexp-macro-assembler-irregexp.cc in Sources */,
-				890A14030EE9C4B500E49346 /* regexp-macro-assembler-tracer.cc in Sources */,
-				890A14040EE9C4B700E49346 /* regexp-macro-assembler.cc in Sources */,
-				8944AD110F1D4D570028D560 /* regexp-stack.cc in Sources */,
-				89F23C6D0E78D5B2006B2466 /* rewriter.cc in Sources */,
-				89F23C6E0E78D5B2006B2466 /* runtime.cc in Sources */,
-				89F23C6F0E78D5B2006B2466 /* scanner.cc in Sources */,
-				89F23C700E78D5B2006B2466 /* scopeinfo.cc in Sources */,
-				89F23C710E78D5B2006B2466 /* scopes.cc in Sources */,
-				89F23C720E78D5B2006B2466 /* serialize.cc in Sources */,
-				89F23C9F0E78D604006B2466 /* simulator-arm.cc in Sources */,
-				89F23C730E78D5B2006B2466 /* snapshot-common.cc in Sources */,
-				89F23C740E78D5B2006B2466 /* snapshot-empty.cc in Sources */,
-				89F23C750E78D5B2006B2466 /* spaces.cc in Sources */,
-				89F23C760E78D5B2006B2466 /* string-stream.cc in Sources */,
-				89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */,
-				89F23C780E78D5B2006B2466 /* stub-cache.cc in Sources */,
-				89F23C790E78D5B2006B2466 /* token.cc in Sources */,
-				89F23C7A0E78D5B2006B2466 /* top.cc in Sources */,
-				9FA38BB91175B2D200C4CD55 /* type-info.cc in Sources */,
-				89F23C7B0E78D5B2006B2466 /* unicode.cc in Sources */,
-				89F23C7D0E78D5B2006B2466 /* utils.cc in Sources */,
-				89F23C7E0E78D5B2006B2466 /* v8-counters.cc in Sources */,
-				89F23C7F0E78D5B2006B2466 /* v8.cc in Sources */,
-				89F23C800E78D5B2006B2466 /* v8threads.cc in Sources */,
-				89F23C810E78D5B2006B2466 /* variables.cc in Sources */,
-				89B933B00FAA0F9D00201304 /* version.cc in Sources */,
-				89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
-				C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */,
-				893E24A812B14B3D0083370F /* bignum-dtoa.cc in Sources */,
-				893E24A912B14B3D0083370F /* bignum.cc in Sources */,
-				893E24AA12B14B3D0083370F /* cached-powers.cc in Sources */,
-				893E24AB12B14B3D0083370F /* deoptimizer.cc in Sources */,
-				893E24AC12B14B3D0083370F /* hydrogen-instructions.cc in Sources */,
-				893E24AD12B14B3D0083370F /* hydrogen.cc in Sources */,
-				893E24AE12B14B3D0083370F /* lithium-allocator.cc in Sources */,
-				893E24AF12B14B3D0083370F /* preparse-data.cc in Sources */,
-				893E24B112B14B3D0083370F /* preparser.cc in Sources */,
-				893E24B212B14B3D0083370F /* runtime-profiler.cc in Sources */,
-				893E24B312B14B3D0083370F /* safepoint-table.cc in Sources */,
-				893E24B412B14B3D0083370F /* scanner-base.cc in Sources */,
-				893E24B512B14B3D0083370F /* string-search.cc in Sources */,
-				893E24B612B14B3D0083370F /* strtod.cc in Sources */,
-				893E24CC12B14B520083370F /* deoptimizer-arm.cc in Sources */,
-				893E24CD12B14B520083370F /* lithium-arm.cc in Sources */,
-				893E24CE12B14B520083370F /* lithium-codegen-arm.cc in Sources */,
-				893E24DE12B14B9F0083370F /* externalize-string-extension.cc in Sources */,
-				893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */,
-				8946827612C26EB700C914BC /* objects-printer.cc in Sources */,
-				894A59EA12D777E80000766D /* lithium.cc in Sources */,
-				89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
-				89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */,
-				896FA1E5130F93D300042054 /* lithium-gap-resolver-arm.cc in Sources */,
-				895D5B541334212D00254083 /* isolate.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		89F23C8D0E78D5B6006B2466 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				89F23C8E0E78D5B6006B2466 /* shell.cc in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXSourcesBuildPhase section */
-
-/* Begin PBXTargetDependency section */
-		7BF891970E73099F000BAF8A /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 8970F2EF0E719FB2006AE7B5 /* v8 */;
-			targetProxy = 7BF891960E73099F000BAF8A /* PBXContainerItemProxy */;
-		};
-		7BF891990E73099F000BAF8A /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 897F76790E71B4CC007ACF34 /* v8_shell */;
-			targetProxy = 7BF891980E73099F000BAF8A /* PBXContainerItemProxy */;
-		};
-		8938A29512D63A680080CDDE /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 89B91BC012D4F02A002FF4BC /* d8_shell-x64 */;
-			targetProxy = 8938A29412D63A680080CDDE /* PBXContainerItemProxy */;
-		};
-		8938A29712D63A680080CDDE /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 89B91BB412D4F02A002FF4BC /* v8_shell-x64 */;
-			targetProxy = 8938A29612D63A680080CDDE /* PBXContainerItemProxy */;
-		};
-		8938A29912D63A680080CDDE /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 8956922712D4ED240072C313 /* v8-x64 */;
-			targetProxy = 8938A29812D63A680080CDDE /* PBXContainerItemProxy */;
-		};
-		893988010F2A35FA007D5254 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 8970F2EF0E719FB2006AE7B5 /* v8 */;
-			targetProxy = 893988020F2A35FA007D5254 /* PBXContainerItemProxy */;
-		};
-		893988100F2A3647007D5254 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 893987FE0F2A35FA007D5254 /* d8_shell */;
-			targetProxy = 8939880F0F2A3647007D5254 /* PBXContainerItemProxy */;
-		};
-		896FD03C0E78D71F003DFB6A /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 89F23C3C0E78D5B2006B2466 /* v8-arm */;
-			targetProxy = 896FD03B0E78D71F003DFB6A /* PBXContainerItemProxy */;
-		};
-		896FD03E0E78D731003DFB6A /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 89F23C3C0E78D5B2006B2466 /* v8-arm */;
-			targetProxy = 896FD03D0E78D731003DFB6A /* PBXContainerItemProxy */;
-		};
-		896FD0400E78D735003DFB6A /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 89F23C880E78D5B6006B2466 /* v8_shell-arm */;
-			targetProxy = 896FD03F0E78D735003DFB6A /* PBXContainerItemProxy */;
-		};
-		897C77DC12B68E5D000767A8 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 89F23C3C0E78D5B2006B2466 /* v8-arm */;
-			targetProxy = 897C77DB12B68E5D000767A8 /* PBXContainerItemProxy */;
-		};
-		897F76830E71B6AC007ACF34 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 8970F2EF0E719FB2006AE7B5 /* v8 */;
-			targetProxy = 897F76820E71B6AC007ACF34 /* PBXContainerItemProxy */;
-		};
-		89B91BD112D4F036002FF4BC /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 8956922712D4ED240072C313 /* v8-x64 */;
-			targetProxy = 89B91BD012D4F036002FF4BC /* PBXContainerItemProxy */;
-		};
-		89B91BFD12D4F1BF002FF4BC /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 8956922712D4ED240072C313 /* v8-x64 */;
-			targetProxy = 89B91BFC12D4F1BF002FF4BC /* PBXContainerItemProxy */;
-		};
-		89EED40D12B69A0A0075BE1C /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 897C77CB12B68E3D000767A8 /* d8_shell-arm */;
-			targetProxy = 89EED40C12B69A0A0075BE1C /* PBXContainerItemProxy */;
-		};
-/* End PBXTargetDependency section */
-
-/* Begin XCBuildConfiguration section */
-		7BF891940E73098D000BAF8A /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = All;
-			};
-			name = Debug;
-		};
-		7BF891950E73098D000BAF8A /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = All;
-			};
-			name = Release;
-		};
-		8915B8690E719336009C4E19 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				GCC_CW_ASM_SYNTAX = NO;
-				GCC_C_LANGUAGE_STANDARD = ansi;
-				GCC_DYNAMIC_NO_PIC = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = NO;
-				GCC_ENABLE_CPP_RTTI = NO;
-				GCC_ENABLE_PASCAL_STRINGS = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					DEBUG,
-					ENABLE_LOGGING_AND_PROFILING,
-					V8_ENABLE_CHECKS,
-					OBJECT_PRINT,
-					ENABLE_VMSTATE_TRACKING,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_TREAT_WARNINGS_AS_ERRORS = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
-				OTHER_CFLAGS = (
-					"$(OTHER_CFLAGS)",
-					"-fstack-protector",
-					"-fstack-protector-all",
-				);
-				PREBINDING = NO;
-				SYMROOT = ../xcodebuild;
-				USE_HEADERMAP = NO;
-				WARNING_CFLAGS = (
-					"$(WARNING_CFLAGS)",
-					"-Wall",
-					"-Wendif-labels",
-				);
-			};
-			name = Debug;
-		};
-		8915B86A0E719336009C4E19 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				DEAD_CODE_STRIPPING = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				DEPLOYMENT_POSTPROCESSING = YES;
-				GCC_CW_ASM_SYNTAX = NO;
-				GCC_C_LANGUAGE_STANDARD = ansi;
-				GCC_DYNAMIC_NO_PIC = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = NO;
-				GCC_ENABLE_CPP_RTTI = NO;
-				GCC_ENABLE_PASCAL_STRINGS = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_OPTIMIZATION_LEVEL = 2;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					NDEBUG,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
-				PREBINDING = NO;
-				STRIP_STYLE = all;
-				SYMROOT = ../xcodebuild;
-				USE_HEADERMAP = NO;
-				WARNING_CFLAGS = (
-					"$(WARNING_CFLAGS)",
-					"-Wall",
-					"-Wendif-labels",
-				);
-			};
-			name = Release;
-		};
-		893988090F2A35FA007D5254 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_IA32,
-					DEBUG,
-					V8_ENABLE_CHECKS,
-					OBJECT_PRINT,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = d8;
-			};
-			name = Debug;
-		};
-		8939880A0F2A35FA007D5254 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_IA32,
-					NDEBUG,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = d8;
-			};
-			name = Release;
-		};
-		895692A812D4ED240072C313 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
-				DEPLOYMENT_POSTPROCESSING = NO;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					ENABLE_DISASSEMBLER,
-					V8_TARGET_ARCH_X64,
-					ENABLE_LOGGING_AND_PROFILING,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8-x64";
-				STRIP_STYLE = debugging;
-			};
-			name = Debug;
-		};
-		895692A912D4ED240072C313 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
-				DEPLOYMENT_POSTPROCESSING = NO;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_X64,
-					NDEBUG,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8-x64";
-				STRIP_STYLE = debugging;
-			};
-			name = Release;
-		};
-		8970F2F10E719FB2006AE7B5 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				DEPLOYMENT_POSTPROCESSING = NO;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					ENABLE_DISASSEMBLER,
-					V8_TARGET_ARCH_IA32,
-					ENABLE_LOGGING_AND_PROFILING,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = v8;
-				STRIP_STYLE = debugging;
-			};
-			name = Debug;
-		};
-		8970F2F20E719FB2006AE7B5 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				DEPLOYMENT_POSTPROCESSING = NO;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_IA32,
-					NDEBUG,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = v8;
-				STRIP_STYLE = debugging;
-			};
-			name = Release;
-		};
-		897C77D712B68E3D000767A8 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_IA32,
-					DEBUG,
-					V8_ENABLE_CHECKS,
-					OBJECT_PRINT,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "d8-arm";
-			};
-			name = Debug;
-		};
-		897C77D812B68E3D000767A8 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_IA32,
-					NDEBUG,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "d8-arm";
-			};
-			name = Release;
-		};
-		897F767C0E71B4CC007ACF34 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_IA32,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = v8_shell;
-			};
-			name = Debug;
-		};
-		897F767D0E71B4CC007ACF34 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_IA32,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = v8_shell;
-			};
-			name = Release;
-		};
-		89B91BBC12D4F02A002FF4BC /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_X64,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8_shell-x64";
-			};
-			name = Debug;
-		};
-		89B91BBD12D4F02A002FF4BC /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_X64,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8_shell-x64";
-			};
-			name = Release;
-		};
-		89B91BCC12D4F02A002FF4BC /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_X64,
-					DEBUG,
-					V8_ENABLE_CHECKS,
-					OBJECT_PRINT,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "d8-x64";
-			};
-			name = Debug;
-		};
-		89B91BCD12D4F02A002FF4BC /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_X64,
-					NDEBUG,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "d8-x64";
-			};
-			name = Release;
-		};
-		89F23C850E78D5B2006B2466 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				DEPLOYMENT_POSTPROCESSING = NO;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_ARM,
-					ENABLE_DISASSEMBLER,
-					ENABLE_LOGGING_AND_PROFILING,
-					ENABLE_DEBUGGER_SUPPORT,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8-arm";
-				STRIP_STYLE = debugging;
-			};
-			name = Debug;
-		};
-		89F23C860E78D5B2006B2466 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				DEPLOYMENT_POSTPROCESSING = NO;
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_ARM,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8-arm";
-				STRIP_STYLE = debugging;
-			};
-			name = Release;
-		};
-		89F23C930E78D5B6006B2466 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_ARM,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8_shell-arm";
-			};
-			name = Debug;
-		};
-		89F23C940E78D5B6006B2466 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				GCC_PREPROCESSOR_DEFINITIONS = (
-					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_TARGET_ARCH_ARM,
-				);
-				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = "v8_shell-arm";
-			};
-			name = Release;
-		};
-/* End XCBuildConfiguration section */
-
-/* Begin XCConfigurationList section */
-		7BF8919F0E7309BE000BAF8A /* Build configuration list for PBXAggregateTarget "All" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				7BF891940E73098D000BAF8A /* Debug */,
-				7BF891950E73098D000BAF8A /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		8915B86B0E719336009C4E19 /* Build configuration list for PBXProject "v8" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				8915B8690E719336009C4E19 /* Debug */,
-				8915B86A0E719336009C4E19 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		893988080F2A35FA007D5254 /* Build configuration list for PBXNativeTarget "d8_shell" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				893988090F2A35FA007D5254 /* Debug */,
-				8939880A0F2A35FA007D5254 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		895692A712D4ED240072C313 /* Build configuration list for PBXNativeTarget "v8-x64" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				895692A812D4ED240072C313 /* Debug */,
-				895692A912D4ED240072C313 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		8970F2F70E719FC1006AE7B5 /* Build configuration list for PBXNativeTarget "v8" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				8970F2F10E719FB2006AE7B5 /* Debug */,
-				8970F2F20E719FB2006AE7B5 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		897C77D612B68E3D000767A8 /* Build configuration list for PBXNativeTarget "d8_shell-arm" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				897C77D712B68E3D000767A8 /* Debug */,
-				897C77D812B68E3D000767A8 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		897F767E0E71B4EA007ACF34 /* Build configuration list for PBXNativeTarget "v8_shell" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				897F767C0E71B4CC007ACF34 /* Debug */,
-				897F767D0E71B4CC007ACF34 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		89B91BBB12D4F02A002FF4BC /* Build configuration list for PBXNativeTarget "v8_shell-x64" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				89B91BBC12D4F02A002FF4BC /* Debug */,
-				89B91BBD12D4F02A002FF4BC /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		89B91BCB12D4F02A002FF4BC /* Build configuration list for PBXNativeTarget "d8_shell-x64" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				89B91BCC12D4F02A002FF4BC /* Debug */,
-				89B91BCD12D4F02A002FF4BC /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		89F23C840E78D5B2006B2466 /* Build configuration list for PBXNativeTarget "v8-arm" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				89F23C850E78D5B2006B2466 /* Debug */,
-				89F23C860E78D5B2006B2466 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		89F23C920E78D5B6006B2466 /* Build configuration list for PBXNativeTarget "v8_shell-arm" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				89F23C930E78D5B6006B2466 /* Debug */,
-				89F23C940E78D5B6006B2466 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-/* End XCConfigurationList section */
-	};
-	rootObject = 8915B8680E719336009C4E19 /* Project object */;
-}
diff --git a/tools/visual_studio/README.txt b/tools/visual_studio/README.txt
index c46aa37..b199e18 100644
--- a/tools/visual_studio/README.txt
+++ b/tools/visual_studio/README.txt
@@ -1,70 +1,12 @@
-This directory contains Microsoft Visual Studio project files for including v8
-in a Visual Studio/Visual C++ Express solution. All these project files have
-been created for use with Microsoft Visual Studio 2005. They can however also
-be used in both Visual Studio 2008 and Visual C++ 2008 Express Edition. When
-using the project files in the 2008 editions minor upgrades to the files will
-be performed by Visual Studio.
+The Microsoft Visual Studio project files for including V8 in a Visual
+Studio/Visual C++ Express solution have been retired. If a Visual
+Studio project/solution is needed, it can be generated with GYP.
+Please look in the build directory in the root of the V8 project. It
+contains the required infrastructure and a README.txt file explaining
+how to get started.
 
-v8_base.vcproj
---------------
-Base V8 library containing all the V8 code but no JavaScript library code.
+Generating Visual Studio projects using GYP is how the Chromium
+project integrated V8 into the Windows build.
 
-v8.vcproj
----------
-V8 library containing all the V8 and JavaScript library code embedded as source
-which is compiled as V8 is running.
-
-v8_mksnapshot.vcproj
---------------------
-Executable v8_mksnapshot.exe for building a heap snapshot from a running V8.
-
-v8_snapshot_cc.vcproj
----------------------
-Uses v8_mksnapshot.exe to generate snapshot.cc, which is used in
-v8_snapshot.vcproj.
-
-v8_snapshot.vcproj
-------------------
-V8 library containing all the V8 and JavaScript library code embedded as a heap
-snapshot instead of source to be compiled as V8 is running. Using this library
-provides significantly faster startup time than v8.vcproj.
-
-The property sheets common.vsprops, debug.vsprops and release.vsprops contains
-most of the configuration options and are inhireted by the project files
-described above. The location of the output directory used are defined in
-common.vsprops.
-
-With regard to Platform SDK version V8 has no specific requriments and builds
-with either what is supplied with Visual Studio 2005 or the latest Platform SDK
-from Microsoft.
-
-When adding these projects to a solution the following dependencies needs to be
-in place:
-
-  v8.vcproj depends on v8_base.vcproj
-  v8_mksnapshot.vcproj depends on v8.vcproj
-  v8_snapshot_cc.vcproj depends on v8_mksnapshot.vcproj
-  v8_snapshot.vcproj depends on v8_snapshot_cc.vcproj and v8_base.vcproj
-
-A project which uses V8 should then depend on v8_snapshot.vcproj.
-
-If V8 without snapshot if preferred only v8_base.vcproj and v8.vcproj are
-required and a project which uses V8 should depend on v8.vcproj.
-
-Two sample project files are available as well. These are v8_shell_sample.vcproj
-for building the sample in samples\shell.cc and v8_process_sample.vcproj for
-building the sample in samples\process.cc. Add either of these (or both) to a
-solution with v8_base, v8, v8_mksnapshot and v8_snapshot set up as described
-solution with v8_base, v8, v8_mksnapshot and v8_snapshot set up as described
-above and have them depend on v8_snapshot.
-
-Finally a sample Visual Studio solution file for is provided. This solution file
-includes the two sample projects together with the V8 projects and with the
-dependencies configured as described above.
-
-Python requirements
--------------------
-When using the Microsoft Visual Studio project files Python version 2.4 or later
-is required. Make sure that python.exe is on the path before running Visual
-Studio. The use of Python is in the command script js2c.cmd which is used in the
-Custom Build Step for v8natives.js in the v8.vcproj project.
+The main build system for V8 is still SCons; see the V8 wiki page
+http://code.google.com/p/v8/wiki/BuildingOnWindows for details.
diff --git a/tools/visual_studio/arm.vsprops b/tools/visual_studio/arm.vsprops
deleted file mode 100644
index 98d0f70..0000000
--- a/tools/visual_studio/arm.vsprops
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
-	ProjectType="Visual C++"
-	Version="8.00"
-	OutputDirectory="$(SolutionDir)$(ConfigurationName)Arm"
-	IntermediateDirectory="$(SolutionDir)$(ConfigurationName)Arm\obj\$(ProjectName)"
-	Name="arm"
-	>
-	<Tool
-		Name="VCCLCompilerTool"
-		PreprocessorDefinitions="_USE_32BIT_TIME_T;V8_TARGET_ARCH_ARM"
-		DisableSpecificWarnings="4996"
-	/>
-</VisualStudioPropertySheet>
diff --git a/tools/visual_studio/common.vsprops b/tools/visual_studio/common.vsprops
deleted file mode 100644
index fa78cdc..0000000
--- a/tools/visual_studio/common.vsprops
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="essential"
-	CharacterSet="1"
-	>
-	<Tool
-		Name="VCCLCompilerTool"
-		AdditionalIncludeDirectories="$(ProjectDir)\..\..\src;$(IntDir)\DerivedSources"
-		PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_VMSTATE_TRACKING;ENABLE_LOGGING_AND_PROFILING;ENABLE_DEBUGGER_SUPPORT"
-		MinimalRebuild="false"
-		ExceptionHandling="0"
-		RuntimeTypeInfo="false"
-		WarningLevel="3"
-		WarnAsError="true"
-		Detect64BitPortabilityProblems="false"
-		DebugInformationFormat="3"
-		DisableSpecificWarnings="4351;4355;4800"
-		EnableFunctionLevelLinking="true"
-	/>
-	<Tool
-		Name="VCLibrarianTool"
-		OutputFile="$(OutDir)\lib\$(ProjectName).lib"
-	/>
-	<Tool
-		Name="VCLinkerTool"
-		GenerateDebugInformation="true"
-		MapFileName="$(OutDir)\$(TargetName).map"
-		ImportLibrary="$(OutDir)\lib\$(TargetName).lib"
-		FixedBaseAddress="1"
-		AdditionalOptions="/IGNORE:4221 /NXCOMPAT"
-	/>
-</VisualStudioPropertySheet>
diff --git a/tools/visual_studio/d8.vcproj b/tools/visual_studio/d8.vcproj
deleted file mode 100644
index 8372c67..0000000
--- a/tools/visual_studio/d8.vcproj
+++ /dev/null
@@ -1,193 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="d8"
-	ProjectGUID="{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
-	RootNamespace="d8"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\src\d8.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8.h"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-debug.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-debug.h"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-windows.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8.js"
-			>
-				<FileConfiguration
-					Name="Debug|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-												Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-				<FileConfiguration
-					Name="Release|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-		</File>
-		<Filter
-			Name="generated files"
-			>
-			<File
-				RelativePath="$(IntDir)\DerivedSources\natives.cc"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/d8_arm.vcproj b/tools/visual_studio/d8_arm.vcproj
deleted file mode 100644
index 66adcec..0000000
--- a/tools/visual_studio/d8_arm.vcproj
+++ /dev/null
@@ -1,193 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="d8"
-	ProjectGUID="{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
-	RootNamespace="d8"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\src\d8.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8.h"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-debug.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-debug.h"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-windows.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8.js"
-			>
-				<FileConfiguration
-					Name="Debug|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-												Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-				<FileConfiguration
-					Name="Release|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-		</File>
-		<Filter
-			Name="generated files"
-			>
-			<File
-				RelativePath="$(IntDir)\DerivedSources\natives.cc"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/d8_x64.vcproj b/tools/visual_studio/d8_x64.vcproj
deleted file mode 100644
index b534a92..0000000
--- a/tools/visual_studio/d8_x64.vcproj
+++ /dev/null
@@ -1,209 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="d8"
-	ProjectGUID="{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
-	RootNamespace="d8"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\src\d8.cc"
-			>
-			<FileConfiguration
-				Name="Debug|x64"
-				>
-				<Tool
-					Name="VCCLCompilerTool"
-					DisableSpecificWarnings="4267"
-				/>
-			</FileConfiguration>
-			<FileConfiguration
-				Name="Release|x64"
-				>
-				<Tool
-					Name="VCCLCompilerTool"
-					DisableSpecificWarnings="4267"
-				/>
-			</FileConfiguration>
-		</File>
-		<File
-			RelativePath="..\..\src\d8.h"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-debug.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-debug.h"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8-windows.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\src\d8.js"
-			>
-				<FileConfiguration
-					Name="Debug|x64"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-												Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-				<FileConfiguration
-					Name="Release|x64"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-		</File>
-		<Filter
-			Name="generated files"
-			>
-			<File
-				RelativePath="$(IntDir)\DerivedSources\natives.cc"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/d8js2c.cmd b/tools/visual_studio/d8js2c.cmd
deleted file mode 100644
index 04d8e26..0000000
--- a/tools/visual_studio/d8js2c.cmd
+++ /dev/null
@@ -1,6 +0,0 @@
-@echo off
-set SOURCE_DIR=%1
-set TARGET_DIR=%2
-set PYTHON="..\..\..\third_party\python_24\python.exe"
-if not exist %PYTHON% set PYTHON=python.exe
-%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc D8 %SOURCE_DIR%\macros.py %SOURCE_DIR%\d8.js
diff --git a/tools/visual_studio/debug.vsprops b/tools/visual_studio/debug.vsprops
deleted file mode 100644
index 60b79fe..0000000
--- a/tools/visual_studio/debug.vsprops
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="debug"
-	>
-	<Tool
-		Name="VCCLCompilerTool"
-		Optimization="0"
-		PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER;V8_ENABLE_CHECKS,OBJECT_PRINT"
-		RuntimeLibrary="1"
-	/>
-	<Tool
-		Name="VCLinkerTool"
-		LinkIncremental="2"
-	/>
-</VisualStudioPropertySheet>
diff --git a/tools/visual_studio/ia32.vsprops b/tools/visual_studio/ia32.vsprops
deleted file mode 100644
index b574660..0000000
--- a/tools/visual_studio/ia32.vsprops
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
-	ProjectType="Visual C++"
-	Version="8.00"
-	OutputDirectory="$(SolutionDir)$(ConfigurationName)"
-	IntermediateDirectory="$(SolutionDir)$(ConfigurationName)\obj\$(ProjectName)"
-	Name="ia32"
-	>
-	<Tool
-		Name="VCCLCompilerTool"
-		PreprocessorDefinitions="_USE_32BIT_TIME_T;V8_TARGET_ARCH_IA32"
-	/>
-	<Tool
-		Name="VCLinkerTool"
-		TargetMachine="1"
-	/>
-</VisualStudioPropertySheet>
diff --git a/tools/visual_studio/js2c.cmd b/tools/visual_studio/js2c.cmd
deleted file mode 100644
index 82722ff..0000000
--- a/tools/visual_studio/js2c.cmd
+++ /dev/null
@@ -1,6 +0,0 @@
-@echo off
-set SOURCE_DIR=%1
-set TARGET_DIR=%2
-set PYTHON="..\..\..\third_party\python_24\python.exe"
-if not exist %PYTHON% set PYTHON=python.exe
-%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-debugger.js %SOURCE_DIR%\liveedit-debugger.js %SOURCE_DIR%\mirror-debugger.js %SOURCE_DIR%\date.js %SOURCE_DIR%\regexp.js %SOURCE_DIR%\json.js
diff --git a/tools/visual_studio/release.vsprops b/tools/visual_studio/release.vsprops
deleted file mode 100644
index d7b26bc..0000000
--- a/tools/visual_studio/release.vsprops
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="release"
-	>
-	<Tool
-		Name="VCCLCompilerTool"
-		RuntimeLibrary="0"
-		Optimization="2"
-		InlineFunctionExpansion="2"
-		EnableIntrinsicFunctions="true"
-		FavorSizeOrSpeed="0"
-		OmitFramePointers="true"
-		StringPooling="true"
-	/>
-	<Tool
-		Name="VCLinkerTool"
-		LinkIncremental="1"
-		OptimizeReferences="2"
-		OptimizeForWindows98="1"
-		EnableCOMDATFolding="2"
-	/>
-</VisualStudioPropertySheet>
diff --git a/tools/visual_studio/v8.sln b/tools/visual_studio/v8.sln
deleted file mode 100644
index db84858..0000000
--- a/tools/visual_studio/v8.sln
+++ /dev/null
@@ -1,101 +0,0 @@
-Microsoft Visual Studio Solution File, Format Version 9.00
-# Visual Studio 2005
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_base", "v8_base.vcproj", "{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8", "v8.vcproj", "{21E22961-22BF-4493-BD3A-868F93DA5179}"
-	ProjectSection(ProjectDependencies) = postProject
-		{EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_mksnapshot", "v8_mksnapshot.vcproj", "{865575D0-37E2-405E-8CBA-5F6C485B5A26}"
-	ProjectSection(ProjectDependencies) = postProject
-		{21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_snapshot", "v8_snapshot.vcproj", "{C0334F9A-1168-4101-9DD8-C30FB252D435}"
-	ProjectSection(ProjectDependencies) = postProject
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F} = {0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}
-		{EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_shell_sample", "v8_shell_sample.vcproj", "{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "samples", "samples", "{E131F77D-B713-48F3-B86D-097ECDCC4C3A}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_process_sample", "v8_process_sample.vcproj", "{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_cctest", "v8_cctest.vcproj", "{97ECC711-7430-4FC4-90FD-004DA880E72A}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{AD933CE2-1303-448E-89C8-60B1FDD18EC3}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "d8", "d8.vcproj", "{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_snapshot_cc", "v8_snapshot_cc.vcproj", "{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}"
-	ProjectSection(ProjectDependencies) = postProject
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26} = {865575D0-37E2-405E-8CBA-5F6C485B5A26}
-	EndProjectSection
-EndProject
-Global
-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
-		Debug|Win32 = Debug|Win32
-		Release|Win32 = Release|Win32
-	EndGlobalSection
-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Debug|Win32.ActiveCfg = Debug|Win32
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Debug|Win32.Build.0 = Debug|Win32
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Release|Win32.ActiveCfg = Release|Win32
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Release|Win32.Build.0 = Release|Win32
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Debug|Win32.ActiveCfg = Debug|Win32
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Debug|Win32.Build.0 = Debug|Win32
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Release|Win32.ActiveCfg = Release|Win32
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Release|Win32.Build.0 = Release|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Debug|Win32.ActiveCfg = Debug|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Debug|Win32.Build.0 = Debug|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Release|Win32.ActiveCfg = Release|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Release|Win32.Build.0 = Release|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Debug|Win32.ActiveCfg = Debug|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Debug|Win32.Build.0 = Debug|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Release|Win32.ActiveCfg = Release|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Release|Win32.Build.0 = Release|Win32
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Debug|Win32.ActiveCfg = Debug|Win32
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Debug|Win32.Build.0 = Debug|Win32
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Release|Win32.ActiveCfg = Release|Win32
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Release|Win32.Build.0 = Release|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Debug|Win32.ActiveCfg = Debug|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Debug|Win32.Build.0 = Debug|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Release|Win32.ActiveCfg = Release|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Release|Win32.Build.0 = Release|Win32
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Debug|Win32.ActiveCfg = Debug|Win32
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Debug|Win32.Build.0 = Debug|Win32
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Release|Win32.ActiveCfg = Release|Win32
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Release|Win32.Build.0 = Release|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Debug|Win32.ActiveCfg = Debug|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Debug|Win32.Build.0 = Debug|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Release|Win32.ActiveCfg = Release|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Release|Win32.Build.0 = Release|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Debug|Win32.ActiveCfg = Debug|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Debug|Win32.Build.0 = Debug|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Release|Win32.ActiveCfg = Release|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Release|Win32.Build.0 = Release|Win32
-	EndGlobalSection
-	GlobalSection(SolutionProperties) = preSolution
-		HideSolutionNode = FALSE
-	EndGlobalSection
-	GlobalSection(NestedProjects) = preSolution
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119} = {E131F77D-B713-48F3-B86D-097ECDCC4C3A}
-		{97ECC711-7430-4FC4-90FD-004DA880E72A} = {AD933CE2-1303-448E-89C8-60B1FDD18EC3}
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C} = {E131F77D-B713-48F3-B86D-097ECDCC4C3A}
-	EndGlobalSection
-EndGlobal
diff --git a/tools/visual_studio/v8.vcproj b/tools/visual_studio/v8.vcproj
deleted file mode 100644
index 30b488f..0000000
--- a/tools/visual_studio/v8.vcproj
+++ /dev/null
@@ -1,227 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8"
-	ProjectGUID="{21E22961-22BF-4493-BD3A-868F93DA5179}"
-	RootNamespace="v8"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="js"
-			>
-			<File
-				RelativePath="..\..\src\apinatives.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\array.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\date.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\macros.py"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\math.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mirror-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\json.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\uri.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8natives.js"
-				>
-				<FileConfiguration
-					Name="Debug|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-				<FileConfiguration
-					Name="Release|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-			</File>
-		</Filter>
-		<Filter
-			Name="generated files"
-			>
-			<File
-				RelativePath="$(IntDir)\DerivedSources\natives.cc"
-				>
-			</File>
-		</Filter>
-		<File
-			RelativePath="..\..\src\snapshot-empty.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_arm.sln b/tools/visual_studio/v8_arm.sln
deleted file mode 100644
index 069ff32..0000000
--- a/tools/visual_studio/v8_arm.sln
+++ /dev/null
@@ -1,74 +0,0 @@
-Microsoft Visual Studio Solution File, Format Version 9.00
-# Visual Studio 2005
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8", "v8_arm.vcproj", "{21E22961-22BF-4493-BD3A-868F93DA5179}"
-	ProjectSection(ProjectDependencies) = postProject
-		{EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_shell_sample", "v8_shell_sample_arm.vcproj", "{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
-	ProjectSection(ProjectDependencies) = postProject
-		{EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142}
-		{21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179}
-	EndProjectSection
-EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "samples", "samples", "{E131F77D-B713-48F3-B86D-097ECDCC4C3A}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_process_sample", "v8_process_sample_arm.vcproj", "{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
-	ProjectSection(ProjectDependencies) = postProject
-		{21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179}
-	EndProjectSection
-EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{AD933CE2-1303-448E-89C8-60B1FDD18EC3}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "d8", "d8_arm.vcproj", "{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
-	ProjectSection(ProjectDependencies) = postProject
-		{21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_base", "v8_base_arm.vcproj", "{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_cctest", "v8_cctest_arm.vcproj", "{97ECC711-7430-4FC4-90FD-004DA880E72A}"
-	ProjectSection(ProjectDependencies) = postProject
-		{21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179}
-	EndProjectSection
-EndProject
-Global
-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
-		Debug|Win32 = Debug|Win32
-		Release|Win32 = Release|Win32
-	EndGlobalSection
-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Debug|Win32.ActiveCfg = Debug|Win32
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Debug|Win32.Build.0 = Debug|Win32
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Release|Win32.ActiveCfg = Release|Win32
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Release|Win32.Build.0 = Release|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Debug|Win32.ActiveCfg = Debug|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Debug|Win32.Build.0 = Debug|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Release|Win32.ActiveCfg = Release|Win32
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Release|Win32.Build.0 = Release|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Debug|Win32.ActiveCfg = Debug|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Debug|Win32.Build.0 = Debug|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Release|Win32.ActiveCfg = Release|Win32
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Release|Win32.Build.0 = Release|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Debug|Win32.ActiveCfg = Debug|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Debug|Win32.Build.0 = Debug|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Release|Win32.ActiveCfg = Release|Win32
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Release|Win32.Build.0 = Release|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Debug|Win32.ActiveCfg = Debug|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Debug|Win32.Build.0 = Debug|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Release|Win32.ActiveCfg = Release|Win32
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Release|Win32.Build.0 = Release|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Debug|Win32.ActiveCfg = Debug|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Debug|Win32.Build.0 = Debug|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Release|Win32.ActiveCfg = Release|Win32
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Release|Win32.Build.0 = Release|Win32
-	EndGlobalSection
-	GlobalSection(SolutionProperties) = preSolution
-		HideSolutionNode = FALSE
-	EndGlobalSection
-	GlobalSection(NestedProjects) = preSolution
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119} = {E131F77D-B713-48F3-B86D-097ECDCC4C3A}
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C} = {E131F77D-B713-48F3-B86D-097ECDCC4C3A}
-		{97ECC711-7430-4FC4-90FD-004DA880E72A} = {AD933CE2-1303-448E-89C8-60B1FDD18EC3}
-	EndGlobalSection
-EndGlobal
diff --git a/tools/visual_studio/v8_arm.vcproj b/tools/visual_studio/v8_arm.vcproj
deleted file mode 100644
index cdba58e..0000000
--- a/tools/visual_studio/v8_arm.vcproj
+++ /dev/null
@@ -1,227 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8"
-	ProjectGUID="{21E22961-22BF-4493-BD3A-868F93DA5179}"
-	RootNamespace="v8"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="js"
-			>
-			<File
-				RelativePath="..\..\src\apinatives.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\array.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\date.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\macros.py"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\math.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mirror-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\json.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\uri.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8natives.js"
-				>
-				<FileConfiguration
-					Name="Debug|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-				<FileConfiguration
-					Name="Release|Win32"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-			</File>
-		</Filter>
-		<Filter
-			Name="generated files"
-			>
-			<File
-				RelativePath="$(IntDir)\DerivedSources\natives.cc"
-				>
-			</File>
-		</Filter>
-		<File
-			RelativePath="..\..\src\snapshot-empty.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
deleted file mode 100644
index a005f7a..0000000
--- a/tools/visual_studio/v8_base.vcproj
+++ /dev/null
@@ -1,1308 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-  ProjectType="Visual C++"
-  Version="8.00"
-  Name="v8_base"
-  ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-  RootNamespace="v8_base"
-  Keyword="Win32Proj"
-  >
-  <Platforms>
-    <Platform
-      Name="Win32"
-    />
-  </Platforms>
-  <ToolFiles>
-  </ToolFiles>
-  <Configurations>
-    <Configuration
-      Name="Debug|Win32"
-      ConfigurationType="4"
-      InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-      >
-      <Tool
-        Name="VCPreBuildEventTool"
-      />
-      <Tool
-        Name="VCCustomBuildTool"
-      />
-      <Tool
-        Name="VCXMLDataGeneratorTool"
-      />
-      <Tool
-        Name="VCWebServiceProxyGeneratorTool"
-      />
-      <Tool
-        Name="VCMIDLTool"
-      />
-      <Tool
-        Name="VCCLCompilerTool"
-      />
-      <Tool
-        Name="VCManagedResourceCompilerTool"
-      />
-      <Tool
-        Name="VCResourceCompilerTool"
-      />
-      <Tool
-        Name="VCPreLinkEventTool"
-      />
-      <Tool
-        Name="VCLibrarianTool"
-      />
-      <Tool
-        Name="VCALinkTool"
-      />
-      <Tool
-        Name="VCXDCMakeTool"
-      />
-      <Tool
-        Name="VCBscMakeTool"
-      />
-      <Tool
-        Name="VCFxCopTool"
-      />
-      <Tool
-        Name="VCPostBuildEventTool"
-      />
-    </Configuration>
-    <Configuration
-      Name="Release|Win32"
-      ConfigurationType="4"
-      InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-      >
-      <Tool
-        Name="VCPreBuildEventTool"
-      />
-      <Tool
-        Name="VCCustomBuildTool"
-      />
-      <Tool
-        Name="VCXMLDataGeneratorTool"
-      />
-      <Tool
-        Name="VCWebServiceProxyGeneratorTool"
-      />
-      <Tool
-        Name="VCMIDLTool"
-      />
-      <Tool
-        Name="VCCLCompilerTool"
-      />
-      <Tool
-        Name="VCManagedResourceCompilerTool"
-      />
-      <Tool
-        Name="VCResourceCompilerTool"
-      />
-      <Tool
-        Name="VCPreLinkEventTool"
-      />
-      <Tool
-        Name="VCLibrarianTool"
-      />
-      <Tool
-        Name="VCALinkTool"
-      />
-      <Tool
-        Name="VCXDCMakeTool"
-      />
-      <Tool
-        Name="VCBscMakeTool"
-      />
-      <Tool
-        Name="VCFxCopTool"
-      />
-      <Tool
-        Name="VCPostBuildEventTool"
-      />
-    </Configuration>
-  </Configurations>
-  <References>
-  </References>
-  <Files>
-    <Filter
-      Name="src"
-      >
-      <File
-        RelativePath="..\..\src\accessors.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\accessors.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\allocation.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\allocation.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\api.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\api.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\atomicops_internals_x86_msvc.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arguments.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\assembler-ia32-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\assembler-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\assembler-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-stack.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\assembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bootstrapper.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bootstrapper.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\builtins-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\builtins.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\builtins.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bytecodes-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cached-powers.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cached-powers.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\char-predicates-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\char-predicates.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\checks.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\checks.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code-stubs.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code-stubs.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\code-stubs-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\code-stubs-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\codegen-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\codegen-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compilation-cache.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compilation-cache.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\contexts.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\contexts.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\counters.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\counters.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\cpu-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\data-flow.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\data-flow.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dateparser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dateparser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug-agent.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug-agent.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\debug-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\deoptimizer-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\deoptimizer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\deoptimizer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\disassembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\disassembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\diy-fp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\diy-fp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\double.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\execution.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\execution.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\factory.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\factory.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fast-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fast-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fixed-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fixed-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flags.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flags.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frame-element.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frame-element.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\frames-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\frames-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\full-codegen-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\full-codegen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\full-codegen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\func-name-inferrer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\func-name-inferrer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\global-handles.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\global-handles.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\globals.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hashmap.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hashmap.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen-instructions.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen-instructions.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\ic-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interceptors.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interpreter-irregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interpreter-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-heavy-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-heavy.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\jump-target-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-heavy.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jsregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jsregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\list-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\list.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium.h"
-        >
-      </File>
-       <File
-        RelativePath="..\..\src\lithium-allocator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium-allocator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium-allocator-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\lithium-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\lithium-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\lithium-codegen-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\lithium-codegen-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\liveedit.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\liveedit.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-utils.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\macro-assembler-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\macro-assembler-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\macro-assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\mark-compact.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\mark-compact.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8memory.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\messages.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\messages.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\natives.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-debug.cc"
-        >
-        <FileConfiguration
-          Name="Release|Win32"
-          ExcludedFromBuild="true"
-          >
-          <Tool
-            Name="VCCLCompilerTool"
-          />
-        </FileConfiguration>
-      </File>
-      <File
-        RelativePath="..\..\src\objects-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-printer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-visiting.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-visiting.h"
-        >
-      </File>
-
-      <File
-        RelativePath="..\..\src\objects.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\parser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\parser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparse-data.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparse-data.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform-tls-win32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform-tls.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform-win32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\prettyprinter.cc"
-        >
-        <FileConfiguration
-          Name="Release|Win32"
-          ExcludedFromBuild="true"
-          >
-          <Tool
-            Name="VCCLCompilerTool"
-          />
-        </FileConfiguration>
-      </File>
-      <File
-        RelativePath="..\..\src\prettyprinter.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\property.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\property.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\regexp-macro-assembler-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\regexp-macro-assembler-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-stack.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\register-allocator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\register-allocator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\register-allocator-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\rewriter.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\rewriter.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\safepoint-table.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\safepoint-table.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner-base.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner-base.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopeinfo.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopeinfo.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopes.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopes.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\serialize.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\serialize.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\shell.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\small-pointer-list.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\snapshot-common.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\snapshot.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-search.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-search.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-stream.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-stream.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\strtod.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\strtod.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\stub-cache-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\stub-cache.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\stub-cache.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\token.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\token.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\top.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\top.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\type-info.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\type-info.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unbound-queue-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unbound-queue.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unicode-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unicode.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\utils.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8-counters.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8-counters.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8checks.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8globals.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8threads.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8threads.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\variables.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\variables.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\version.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\version.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-heavy-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\virtual-frame-ia32.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ia32\virtual-frame-ia32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-heavy.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\vm-state-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\vm-state.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\externalize-string-extension.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\externalize-string-extension.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\gc-extension.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\gc-extension.h"
-        >
-      </File>
-      <Filter
-        Name="third party"
-        >
-        <File
-          RelativePath="..\..\src\ia32\disasm-ia32.cc"
-          >
-        </File>
-        <File
-          RelativePath="..\..\src\disasm.h"
-          >
-        </File>
-      </Filter>
-      <Filter
-        Name="generated files"
-        >
-        <File
-          RelativePath="..\..\src\unicode.cc"
-          >
-        </File>
-      </Filter>
-    </Filter>
-    <Filter
-      Name="include"
-      >
-      <File
-        RelativePath="..\..\include\v8-debug.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8-testing.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8stdint.h"
-        >
-      </File>
-    </Filter>
-  </Files>
-  <Globals>
-  </Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
deleted file mode 100644
index 87c178a..0000000
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ /dev/null
@@ -1,1238 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-  ProjectType="Visual C++"
-  Version="8.00"
-  Name="v8_base"
-  ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-  RootNamespace="v8_base"
-  Keyword="Win32Proj"
-  >
-  <Platforms>
-    <Platform
-      Name="Win32"
-    />
-  </Platforms>
-  <ToolFiles>
-  </ToolFiles>
-  <Configurations>
-    <Configuration
-      Name="Debug|Win32"
-      ConfigurationType="4"
-      InheritedPropertySheets=".\common.vsprops;.\debug.vsprops;.\arm.vsprops"
-      >
-      <Tool
-        Name="VCPreBuildEventTool"
-      />
-      <Tool
-        Name="VCCustomBuildTool"
-      />
-      <Tool
-        Name="VCXMLDataGeneratorTool"
-      />
-      <Tool
-        Name="VCWebServiceProxyGeneratorTool"
-      />
-      <Tool
-        Name="VCMIDLTool"
-      />
-      <Tool
-        Name="VCCLCompilerTool"
-      />
-      <Tool
-        Name="VCManagedResourceCompilerTool"
-      />
-      <Tool
-        Name="VCResourceCompilerTool"
-      />
-      <Tool
-        Name="VCPreLinkEventTool"
-      />
-      <Tool
-        Name="VCLibrarianTool"
-      />
-      <Tool
-        Name="VCALinkTool"
-      />
-      <Tool
-        Name="VCXDCMakeTool"
-      />
-      <Tool
-        Name="VCBscMakeTool"
-      />
-      <Tool
-        Name="VCFxCopTool"
-      />
-      <Tool
-        Name="VCPostBuildEventTool"
-      />
-    </Configuration>
-    <Configuration
-      Name="Release|Win32"
-      ConfigurationType="4"
-      InheritedPropertySheets=".\common.vsprops;.\release.vsprops;.\arm.vsprops"
-      >
-      <Tool
-        Name="VCPreBuildEventTool"
-      />
-      <Tool
-        Name="VCCustomBuildTool"
-      />
-      <Tool
-        Name="VCXMLDataGeneratorTool"
-      />
-      <Tool
-        Name="VCWebServiceProxyGeneratorTool"
-      />
-      <Tool
-        Name="VCMIDLTool"
-      />
-      <Tool
-        Name="VCCLCompilerTool"
-      />
-      <Tool
-        Name="VCManagedResourceCompilerTool"
-      />
-      <Tool
-        Name="VCResourceCompilerTool"
-      />
-      <Tool
-        Name="VCPreLinkEventTool"
-      />
-      <Tool
-        Name="VCLibrarianTool"
-      />
-      <Tool
-        Name="VCALinkTool"
-      />
-      <Tool
-        Name="VCXDCMakeTool"
-      />
-      <Tool
-        Name="VCBscMakeTool"
-      />
-      <Tool
-        Name="VCFxCopTool"
-      />
-      <Tool
-        Name="VCPostBuildEventTool"
-      />
-    </Configuration>
-  </Configurations>
-  <References>
-  </References>
-  <Files>
-    <Filter
-      Name="src"
-      >
-      <File
-        RelativePath="..\..\src\accessors.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\accessors.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\allocation.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\allocation.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\api.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\api.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\atomicops_internals_x86_msvc.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arguments.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\assembler-arm-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\assembler-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\assembler-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-stack.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\assembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bootstrapper.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bootstrapper.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\builtins-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\builtins.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\builtins.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bytecodes-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cached-powers.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cached-powers.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\char-predicates-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\char-predicates.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\checks.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\checks.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code-stubs.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code-stubs.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\code-stubs-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\code-stubs-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\codegen-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\codegen-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compilation-cache.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compilation-cache.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\constants-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\constants-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\contexts.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\contexts.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\counters.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\counters.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\cpu-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\data-flow.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\data-flow.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dateparser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dateparser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug-agent.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug-agent.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\debug-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\deoptimizer-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\deoptimizer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\deoptimizer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\disassembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\disassembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\diy-fp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\diy-fp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\double.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\execution.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\execution.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\factory.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\factory.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fast-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fast-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fixed-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fixed-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flags.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flags.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flow-graph.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flow-graph.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frame-element.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frame-element.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\frames-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\frames-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\full-codegen-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\full-codegen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\full-codegen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\func-name-inferrer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\func-name-inferrer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\global-handles.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\global-handles.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\globals.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hashmap.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hashmap.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\ic-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interceptors.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interpreter-irregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interpreter-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-light-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-light.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\jump-target-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-light.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jsregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jsregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\list-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\list.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium-allocator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium-allocator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium-allocator-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\lithium-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\lithium-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\lithium-codegen-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\lithium-codegen-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\liveedit.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\liveedit.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-utils.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\macro-assembler-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\macro-assembler-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\macro-assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\mark-compact.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\mark-compact.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8memory.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\messages.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\messages.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\natives.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-debug.cc"
-        >
-        <FileConfiguration
-          Name="Release|Win32"
-          ExcludedFromBuild="true"
-          >
-          <Tool
-            Name="VCCLCompilerTool"
-          />
-        </FileConfiguration>
-      </File>
-      <File
-        RelativePath="..\..\src\objects-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-printer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-visiting.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-visiting.h"
-        >
-      <File
-        RelativePath="..\..\src\objects.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\parser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\parser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform-win32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\prettyprinter.cc"
-        >
-        <FileConfiguration
-          Name="Release|Win32"
-          ExcludedFromBuild="true"
-          >
-          <Tool
-            Name="VCCLCompilerTool"
-          />
-        </FileConfiguration>
-      </File>
-      <File
-        RelativePath="..\..\src\prettyprinter.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\property.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\property.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\regexp-macro-assembler-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\regexp-macro-assembler-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-stack.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\register-allocator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\register-allocator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\register-allocator-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\rewriter.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\rewriter.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\safepoint-table.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\safepoint-table.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopeinfo.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopeinfo.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopes.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopes.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\serialize.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\serialize.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\shell.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\small-pointer-list.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\snapshot-common.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\snapshot.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\simulator-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\simulator-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-search.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-search.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-stream.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-stream.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\strtod.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\strtod.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\stub-cache-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\stub-cache.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\stub-cache.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\token.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\token.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\top.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\top.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\type-info.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\type-info.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unbound-queue-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unbound-queue.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unicode-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unicode.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\utils.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8-counters.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8-counters.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8threads.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8threads.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\variables.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\variables.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\version.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\version.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-light-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\virtual-frame-arm-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\virtual-frame-arm.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arm\virtual-frame-arm.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-light.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\vm-state-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\vm-state.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone.h"
-        >
-      </File>
-      <Filter
-        Name="third party"
-        >
-        <File
-          RelativePath="..\..\src\arm\disasm-arm.cc"
-          >
-        </File>
-        <File
-          RelativePath="..\..\src\disasm.h"
-          >
-        </File>
-      </Filter>
-      <Filter
-        Name="generated files"
-        >
-        <File
-          RelativePath="..\..\src\unicode.cc"
-          >
-        </File>
-      </Filter>
-    </Filter>
-    <Filter
-      Name="include"
-      >
-      <File
-        RelativePath="..\..\include\v8-debug.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8-testing.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8.h"
-        >
-      </File>
-    </Filter>
-  </Files>
-  <Globals>
-  </Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
deleted file mode 100644
index de921bc..0000000
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ /dev/null
@@ -1,1300 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-  ProjectType="Visual C++"
-  Version="8.00"
-  Name="v8_base"
-  ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-  RootNamespace="v8_base"
-  Keyword="Win32Proj"
-  >
-  <Platforms>
-    <Platform
-      Name="x64"
-    />
-  </Platforms>
-  <ToolFiles>
-  </ToolFiles>
-  <Configurations>
-    <Configuration
-      Name="Debug|x64"
-      ConfigurationType="4"
-      InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-      >
-      <Tool
-        Name="VCPreBuildEventTool"
-      />
-      <Tool
-        Name="VCCustomBuildTool"
-      />
-      <Tool
-        Name="VCXMLDataGeneratorTool"
-      />
-      <Tool
-        Name="VCWebServiceProxyGeneratorTool"
-      />
-      <Tool
-        Name="VCMIDLTool"
-      />
-      <Tool
-        Name="VCCLCompilerTool"
-      />
-      <Tool
-        Name="VCManagedResourceCompilerTool"
-      />
-      <Tool
-        Name="VCResourceCompilerTool"
-      />
-      <Tool
-        Name="VCPreLinkEventTool"
-      />
-      <Tool
-        Name="VCLibrarianTool"
-      />
-      <Tool
-        Name="VCALinkTool"
-      />
-      <Tool
-        Name="VCXDCMakeTool"
-      />
-      <Tool
-        Name="VCBscMakeTool"
-      />
-      <Tool
-        Name="VCFxCopTool"
-      />
-      <Tool
-        Name="VCPostBuildEventTool"
-      />
-    </Configuration>
-    <Configuration
-      Name="Release|x64"
-      ConfigurationType="4"
-      InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-      >
-      <Tool
-        Name="VCPreBuildEventTool"
-      />
-      <Tool
-        Name="VCCustomBuildTool"
-      />
-      <Tool
-        Name="VCXMLDataGeneratorTool"
-      />
-      <Tool
-        Name="VCWebServiceProxyGeneratorTool"
-      />
-      <Tool
-        Name="VCMIDLTool"
-      />
-      <Tool
-        Name="VCCLCompilerTool"
-      />
-      <Tool
-        Name="VCManagedResourceCompilerTool"
-      />
-      <Tool
-        Name="VCResourceCompilerTool"
-      />
-      <Tool
-        Name="VCPreLinkEventTool"
-      />
-      <Tool
-        Name="VCLibrarianTool"
-      />
-      <Tool
-        Name="VCALinkTool"
-      />
-      <Tool
-        Name="VCXDCMakeTool"
-      />
-      <Tool
-        Name="VCBscMakeTool"
-      />
-      <Tool
-        Name="VCFxCopTool"
-      />
-      <Tool
-        Name="VCPostBuildEventTool"
-      />
-    </Configuration>
-  </Configurations>
-  <References>
-  </References>
-  <Files>
-    <Filter
-      Name="src"
-      >
-      <File
-        RelativePath="..\..\src\accessors.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\accessors.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\allocation.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\allocation.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\api.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\api.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\arguments.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\assembler-x64-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\assembler-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\assembler-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-stack.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\assembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ast.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\atomicops_internals_x86_msvc.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bignum-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bootstrapper.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bootstrapper.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\builtins-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\builtins.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\builtins.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\bytecodes-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cached-powers.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cached-powers.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\char-predicates-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\char-predicates.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\checks.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\checks.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\circular-queue.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code-stubs.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code-stubs.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\code-stubs-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\code-stubs-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\code.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\codegen-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\codegen-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\codegen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compilation-cache.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compilation-cache.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\compiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\contexts.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\contexts.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\conversions.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\counters.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\counters.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\cpu-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\cpu-profiler-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\data-flow.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\data-flow.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dateparser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dateparser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug-agent.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug-agent.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\debug-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\debug.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\deoptimizer-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\deoptimizer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\deoptimizer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\disassembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\disassembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\diy-fp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\diy-fp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\double.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\execution.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\execution.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\factory.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\factory.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fast-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fast-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fixed-dtoa.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\fixed-dtoa.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flags.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flags.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frame-element.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frame-element.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\frames-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\frames-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\frames.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\full-codegen-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\full-codegen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\full-codegen.h"
-        >
-      </File>
-      <File
-
-        RelativePath="..\..\src\func-name-inferrer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\func-name-inferrer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\global-handles.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\global-handles.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\globals.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\handles.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hashmap.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hashmap.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\heap-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen-instructions.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\hydrogen-instructions.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\ic-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\ic.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interceptors.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interpreter-irregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\interpreter-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-heavy-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-heavy.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\jump-target-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jump-target-heavy.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jsregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\jsregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\list-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\list.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium.h"
-        >
-      </File>
-       <File
-        RelativePath="..\..\src\lithium-allocator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium-allocator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\lithium-allocator-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\lithium-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\lithium-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\lithium-codegen-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\lithium-codegen-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\lithium-gap-resolver-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\lithium-gap-resolver-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\liveedit.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\liveedit.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-utils.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\log-utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\macro-assembler-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\macro-assembler-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\macro-assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\mark-compact.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\mark-compact.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8memory.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\messages.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\messages.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\natives.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-debug.cc"
-        >
-        <FileConfiguration
-          Name="Release|x64"
-          ExcludedFromBuild="true"
-          >
-          <Tool
-            Name="VCCLCompilerTool"
-          />
-        </FileConfiguration>
-      </File>
-      <File
-        RelativePath="..\..\src\objects-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-printer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-visiting.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects-visiting.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\objects.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\parser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\parser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparser.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparser.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparse-data.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\preparse-data.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\profile-generator-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform-win32.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\platform.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\prettyprinter.cc"
-        >
-        <FileConfiguration
-          Name="Release|x64"
-          ExcludedFromBuild="true"
-          >
-          <Tool
-            Name="VCCLCompilerTool"
-          />
-        </FileConfiguration>
-      </File>
-      <File
-        RelativePath="..\..\src\prettyprinter.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\property.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\property.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\regexp-macro-assembler-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\regexp-macro-assembler-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\regexp-stack.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\register-allocator.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\register-allocator.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\register-allocator-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\rewriter.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\rewriter.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime-profiler.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\runtime-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\safepoint-table.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\safepoint-table.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner-base.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner-base.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scanner.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopeinfo.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopeinfo.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopes.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\scopes.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\serialize.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\serialize.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\shell.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\small-pointer-list.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\snapshot-common.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\snapshot.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\spaces.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-search.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-search.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-stream.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\string-stream.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\strtod.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\strtod.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\stub-cache-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\stub-cache.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\stub-cache.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\token.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\token.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\top.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\top.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\type-info.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\type-info.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unbound-queue-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unbound-queue.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unicode-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\unicode.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\utils.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8-counters.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8-counters.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8checks.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8globals.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8threads.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8threads.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\v8utils.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\variables.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\variables.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\version.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\version.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-heavy-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\virtual-frame-x64.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\x64\virtual-frame-x64.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\virtual-frame-heavy.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\vm-state-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\vm-state.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone-inl.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\zone.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\externalize-string-extension.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\externalize-string-extension.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\gc-extension.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\extensions\gc-extension.h"
-        >
-      </File>
-      <Filter
-        Name="third party"
-        >
-        <File
-          RelativePath="..\..\src\x64\disasm-x64.cc"
-          >
-        </File>
-        <File
-          RelativePath="..\..\src\disasm.h"
-          >
-        </File>
-      </Filter>
-      <Filter
-        Name="generated files"
-        >
-        <File
-          RelativePath="..\..\src\unicode.cc"
-          >
-        </File>
-      </Filter>
-    </Filter>
-    <Filter
-      Name="include"
-      >
-      <File
-        RelativePath="..\..\include\v8-debug.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8-profiler.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8-testing.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8.h"
-        >
-      </File>
-      <File
-        RelativePath="..\..\include\v8stdint.h"
-        >
-      </File>
-    </Filter>
-  </Files>
-  <Globals>
-  </Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_cctest.vcproj b/tools/visual_studio/v8_cctest.vcproj
deleted file mode 100644
index cca6eba..0000000
--- a/tools/visual_studio/v8_cctest.vcproj
+++ /dev/null
@@ -1,265 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_cctest"
-	ProjectGUID="{97ECC711-7430-4FC4-90FD-004DA880E72A}"
-	RootNamespace="v8_cctest"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\test\cctest\cctest.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-alloc.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-api.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-assembler-ia32.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-ast.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-circular-queue.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-compiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-conversions.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-cpu-profiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-debug.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-decls.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-disasm-ia32.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-flags.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-func-name-inference.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-hashmap.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-heap.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-heap-profiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-lock.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log-utils.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log-stack-tracer.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-mark-compact.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-platform-win32.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-profile-generator.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-serialize.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-sockets.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-spaces.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-strings.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-unbound-queue.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-utils.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-version.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_cctest_arm.vcproj b/tools/visual_studio/v8_cctest_arm.vcproj
deleted file mode 100644
index 92f7fc3..0000000
--- a/tools/visual_studio/v8_cctest_arm.vcproj
+++ /dev/null
@@ -1,249 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_cctest"
-	ProjectGUID="{97ECC711-7430-4FC4-90FD-004DA880E72A}"
-	RootNamespace="v8_cctest"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\debug.vsprops;.\arm.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\release.vsprops;.\arm.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\test\cctest\cctest.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-alloc.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-api.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-assembler-arm.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-ast.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-circular-queue.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-compiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-conversions.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-cpu-profiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-debug.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-decls.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-disasm-arm.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-flags.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-hashmap.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-heap.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-heap-profiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-lock.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log-utils.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-mark-compact.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-platform-win32.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-profile-generator.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-serialize.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-spaces.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-strings.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-utils.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-version.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_cctest_x64.vcproj b/tools/visual_studio/v8_cctest_x64.vcproj
deleted file mode 100644
index dea4d52..0000000
--- a/tools/visual_studio/v8_cctest_x64.vcproj
+++ /dev/null
@@ -1,257 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_cctest"
-	ProjectGUID="{97ECC711-7430-4FC4-90FD-004DA880E72A}"
-	RootNamespace="v8_cctest"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\test\cctest\cctest.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-alloc.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-api.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-assembler-x64.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-ast.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-circular-queue.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-compiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-conversions.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-cpu-profiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-debug.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-decls.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-flags.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-func-name-inference.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-hashmap.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-heap.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-heap-profiler.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-lock.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log-utils.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-log-stack-tracer.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-mark-compact.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-platform-win32.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-profile-generator.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-serialize.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-sockets.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-spaces.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-strings.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-utils.cc"
-			>
-		</File>
-		<File
-			RelativePath="..\..\test\cctest\test-version.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_mksnapshot.vcproj b/tools/visual_studio/v8_mksnapshot.vcproj
deleted file mode 100644
index cb9e048..0000000
--- a/tools/visual_studio/v8_mksnapshot.vcproj
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_mksnapshot"
-	ProjectGUID="{865575D0-37E2-405E-8CBA-5F6C485B5A26}"
-	RootNamespace="v8_mksnapshot"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\src\mksnapshot.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_mksnapshot_x64.vcproj b/tools/visual_studio/v8_mksnapshot_x64.vcproj
deleted file mode 100644
index e684af0..0000000
--- a/tools/visual_studio/v8_mksnapshot_x64.vcproj
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_mksnapshot"
-	ProjectGUID="{865575D0-37E2-405E-8CBA-5F6C485B5A26}"
-	RootNamespace="v8_mksnapshot"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\src\mksnapshot.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_process_sample.vcproj b/tools/visual_studio/v8_process_sample.vcproj
deleted file mode 100644
index dc3fb3a..0000000
--- a/tools/visual_studio/v8_process_sample.vcproj
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_process_sample"
-	ProjectGUID="{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
-	RootNamespace="v8_process_sample"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\samples\process.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_process_sample_arm.vcproj b/tools/visual_studio/v8_process_sample_arm.vcproj
deleted file mode 100644
index 2d63f69..0000000
--- a/tools/visual_studio/v8_process_sample_arm.vcproj
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_process_sample"
-	ProjectGUID="{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
-	RootNamespace="v8_process_sample"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\samples\process.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_process_sample_x64.vcproj b/tools/visual_studio/v8_process_sample_x64.vcproj
deleted file mode 100644
index 1d7f01a..0000000
--- a/tools/visual_studio/v8_process_sample_x64.vcproj
+++ /dev/null
@@ -1,161 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_process_sample"
-	ProjectGUID="{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
-	RootNamespace="v8_process_sample"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\samples\process.cc"
-			>
-			<FileConfiguration
-				Name="Debug|x64"
-				>
-				<Tool
-					Name="VCCLCompilerTool"
-					DisableSpecificWarnings="4267"
-				/>
-			</FileConfiguration>
-			<FileConfiguration
-				Name="Release|x64"
-				>
-				<Tool
-					Name="VCCLCompilerTool"
-					DisableSpecificWarnings="4267"
-				/>
-			</FileConfiguration>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_shell_sample.vcproj b/tools/visual_studio/v8_shell_sample.vcproj
deleted file mode 100644
index 4eb38bf..0000000
--- a/tools/visual_studio/v8_shell_sample.vcproj
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_shell_sample"
-	ProjectGUID="{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
-	RootNamespace="v8_shell_sample"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\samples\shell.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_shell_sample_arm.vcproj b/tools/visual_studio/v8_shell_sample_arm.vcproj
deleted file mode 100644
index b4260e0..0000000
--- a/tools/visual_studio/v8_shell_sample_arm.vcproj
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_shell_sample"
-	ProjectGUID="{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
-	RootNamespace="v8_shell_sample"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\samples\shell.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_shell_sample_x64.vcproj b/tools/visual_studio/v8_shell_sample_x64.vcproj
deleted file mode 100644
index 9ba6703..0000000
--- a/tools/visual_studio/v8_shell_sample_x64.vcproj
+++ /dev/null
@@ -1,163 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_shell_sample"
-	ProjectGUID="{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
-	RootNamespace="v8_shell_sample"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
-        />
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="1"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="winmm.lib Ws2_32.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\samples\shell.cc"
-			>
-			<FileConfiguration
-				Name="Debug|x64"
-				>
-				<Tool
-					Name="VCCLCompilerTool"
-					DisableSpecificWarnings="4267"
-				/>
-			</FileConfiguration>
-			<FileConfiguration
-				Name="Release|x64"
-				>
-				<Tool
-					Name="VCCLCompilerTool"
-					DisableSpecificWarnings="4267"
-				/>
-			</FileConfiguration>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_snapshot.vcproj b/tools/visual_studio/v8_snapshot.vcproj
deleted file mode 100644
index 29db4f8..0000000
--- a/tools/visual_studio/v8_snapshot.vcproj
+++ /dev/null
@@ -1,142 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_snapshot"
-	ProjectGUID="{C0334F9A-1168-4101-9DD8-C30FB252D435}"
-	RootNamespace="v8_snapshot"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="generated files"
-			SourceControlFiles="false"
-			>
-			<File
-				RelativePath="$(IntDir)\..\v8\DerivedSources\natives-empty.cc"
-				>
-			</File>
-			<File
-				RelativePath="$(IntDir)\..\v8_snapshot_cc\DerivedSources\snapshot.cc"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_snapshot_cc.vcproj b/tools/visual_studio/v8_snapshot_cc.vcproj
deleted file mode 100644
index 7c4799a..0000000
--- a/tools/visual_studio/v8_snapshot_cc.vcproj
+++ /dev/null
@@ -1,92 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_snapshot_cc"
-	ProjectGUID="{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}"
-	RootNamespace="v8_snapshot_cc"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="10"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="10"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="generated files"
-			SourceControlFiles="false"
-			>
-      <File
-        RelativePath="$(OutDir)\v8_mksnapshot.exe"
-        >
-        <FileConfiguration
-          Name="Debug|Win32"
-          >
-          <Tool
-            Name="VCCustomBuildTool"
-            Description="Building snapshot..."
-            CommandLine="&quot;$(OutDir)\v8_mksnapshot.exe&quot; &quot;$(IntDir)\DerivedSources\snapshot.cc&quot;&#x0D;&#x0A;"
-            AdditionalDependencies="$(OutDir)\v8_mksnapshot.exe"
-            Outputs="$(IntDir)\DerivedSources\snapshot.cc"
-          />
-        </FileConfiguration>
-        <FileConfiguration
-          Name="Release|Win32"
-          >
-          <Tool
-            Name="VCCustomBuildTool"
-            Description="Building snapshot..."
-            CommandLine="&quot;$(OutDir)\v8_mksnapshot.exe&quot; &quot;$(IntDir)\DerivedSources\snapshot.cc&quot;&#x0D;&#x0A;"
-            AdditionalDependencies="$(OutDir)\v8_mksnapshot.exe"
-            Outputs="$(IntDir)\DerivedSources\snapshot.cc"
-          />
-        </FileConfiguration>
-      </File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_snapshot_cc_x64.vcproj b/tools/visual_studio/v8_snapshot_cc_x64.vcproj
deleted file mode 100644
index 9c6f9d2..0000000
--- a/tools/visual_studio/v8_snapshot_cc_x64.vcproj
+++ /dev/null
@@ -1,92 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_snapshot_cc"
-	ProjectGUID="{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}"
-	RootNamespace="v8_snapshot_cc"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="10"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="10"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="generated files"
-			SourceControlFiles="false"
-			>
-      <File
-        RelativePath="$(OutDir)\v8_mksnapshot.exe"
-        >
-        <FileConfiguration
-          Name="Debug|x64"
-          >
-          <Tool
-            Name="VCCustomBuildTool"
-            Description="Building snapshot..."
-            CommandLine="&quot;$(OutDir)\v8_mksnapshot.exe&quot; &quot;$(IntDir)\DerivedSources\snapshot.cc&quot;&#x0D;&#x0A;"
-            AdditionalDependencies="$(OutDir)\v8_mksnapshot.exe"
-            Outputs="$(IntDir)\DerivedSources\snapshot.cc"
-          />
-        </FileConfiguration>
-        <FileConfiguration
-          Name="Release|x64"
-          >
-          <Tool
-            Name="VCCustomBuildTool"
-            Description="Building snapshot..."
-            CommandLine="&quot;$(OutDir)\v8_mksnapshot.exe&quot; &quot;$(IntDir)\DerivedSources\snapshot.cc&quot;&#x0D;&#x0A;"
-            AdditionalDependencies="$(OutDir)\v8_mksnapshot.exe"
-            Outputs="$(IntDir)\DerivedSources\snapshot.cc"
-          />
-        </FileConfiguration>
-      </File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_snapshot_x64.vcproj b/tools/visual_studio/v8_snapshot_x64.vcproj
deleted file mode 100644
index 0f6c70f..0000000
--- a/tools/visual_studio/v8_snapshot_x64.vcproj
+++ /dev/null
@@ -1,142 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_snapshot"
-	ProjectGUID="{C0334F9A-1168-4101-9DD8-C30FB252D435}"
-	RootNamespace="v8_snapshot"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="generated files"
-			SourceControlFiles="false"
-			>
-			<File
-				RelativePath="$(IntDir)\..\v8\DerivedSources\natives-empty.cc"
-				>
-			</File>
-			<File
-				RelativePath="$(IntDir)\..\v8_snapshot_cc\DerivedSources\snapshot.cc"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/v8_x64.sln b/tools/visual_studio/v8_x64.sln
deleted file mode 100644
index 1fa2f16..0000000
--- a/tools/visual_studio/v8_x64.sln
+++ /dev/null
@@ -1,101 +0,0 @@
-Microsoft Visual Studio Solution File, Format Version 9.00
-# Visual Studio 2005
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_base", "v8_base_x64.vcproj", "{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8", "v8_x64.vcproj", "{21E22961-22BF-4493-BD3A-868F93DA5179}"
-	ProjectSection(ProjectDependencies) = postProject
-		{EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_mksnapshot", "v8_mksnapshot_x64.vcproj", "{865575D0-37E2-405E-8CBA-5F6C485B5A26}"
-	ProjectSection(ProjectDependencies) = postProject
-		{21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_snapshot", "v8_snapshot_x64.vcproj", "{C0334F9A-1168-4101-9DD8-C30FB252D435}"
-	ProjectSection(ProjectDependencies) = postProject
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F} = {0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}
-		{EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_shell_sample", "v8_shell_sample_x64.vcproj", "{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "samples", "samples", "{E131F77D-B713-48F3-B86D-097ECDCC4C3A}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_process_sample", "v8_process_sample_x64.vcproj", "{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_cctest", "v8_cctest_x64.vcproj", "{97ECC711-7430-4FC4-90FD-004DA880E72A}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{AD933CE2-1303-448E-89C8-60B1FDD18EC3}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "d8", "d8_x64.vcproj", "{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
-	ProjectSection(ProjectDependencies) = postProject
-		{C0334F9A-1168-4101-9DD8-C30FB252D435} = {C0334F9A-1168-4101-9DD8-C30FB252D435}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_snapshot_cc", "v8_snapshot_cc_x64.vcproj", "{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}"
-	ProjectSection(ProjectDependencies) = postProject
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26} = {865575D0-37E2-405E-8CBA-5F6C485B5A26}
-	EndProjectSection
-EndProject
-Global
-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
-		Debug|x64 = Debug|x64
-		Release|x64 = Release|x64
-	EndGlobalSection
-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Debug|x64.ActiveCfg = Debug|x64
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Debug|x64.Build.0 = Debug|x64
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Release|x64.ActiveCfg = Release|x64
-		{0DDBDA8B-A49F-4CC7-A1D5-5BB8297C8A3F}.Release|x64.Build.0 = Release|x64
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Debug|x64.ActiveCfg = Debug|x64
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Debug|x64.Build.0 = Debug|x64
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Release|x64.ActiveCfg = Release|x64
-		{21E22961-22BF-4493-BD3A-868F93DA5179}.Release|x64.Build.0 = Release|x64
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Debug|x64.ActiveCfg = Debug|x64
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Debug|x64.Build.0 = Debug|x64
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Release|x64.ActiveCfg = Release|x64
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}.Release|x64.Build.0 = Release|x64
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Debug|x64.ActiveCfg = Debug|x64
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Debug|x64.Build.0 = Debug|x64
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Release|x64.ActiveCfg = Release|x64
-		{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}.Release|x64.Build.0 = Release|x64
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Debug|x64.ActiveCfg = Debug|x64
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Debug|x64.Build.0 = Debug|x64
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Release|x64.ActiveCfg = Release|x64
-		{865575D0-37E2-405E-8CBA-5F6C485B5A26}.Release|x64.Build.0 = Release|x64
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Debug|x64.ActiveCfg = Debug|x64
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Debug|x64.Build.0 = Debug|x64
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Release|x64.ActiveCfg = Release|x64
-		{97ECC711-7430-4FC4-90FD-004DA880E72A}.Release|x64.Build.0 = Release|x64
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Debug|x64.ActiveCfg = Debug|x64
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Debug|x64.Build.0 = Debug|x64
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Release|x64.ActiveCfg = Release|x64
-		{C0334F9A-1168-4101-9DD8-C30FB252D435}.Release|x64.Build.0 = Release|x64
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Debug|x64.ActiveCfg = Debug|x64
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Debug|x64.Build.0 = Debug|x64
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Release|x64.ActiveCfg = Release|x64
-		{EC8B7909-62AF-470D-A75D-E1D89C837142}.Release|x64.Build.0 = Release|x64
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Debug|x64.ActiveCfg = Debug|x64
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Debug|x64.Build.0 = Debug|x64
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Release|x64.ActiveCfg = Release|x64
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C}.Release|x64.Build.0 = Release|x64
-	EndGlobalSection
-	GlobalSection(SolutionProperties) = preSolution
-		HideSolutionNode = FALSE
-	EndGlobalSection
-	GlobalSection(NestedProjects) = preSolution
-		{2DE20FFA-6F5E-48D9-84D8-09B044A5B119} = {E131F77D-B713-48F3-B86D-097ECDCC4C3A}
-		{97ECC711-7430-4FC4-90FD-004DA880E72A} = {AD933CE2-1303-448E-89C8-60B1FDD18EC3}
-		{EF019874-D38A-40E3-B17C-DB5923F0A79C} = {E131F77D-B713-48F3-B86D-097ECDCC4C3A}
-	EndGlobalSection
-EndGlobal
diff --git a/tools/visual_studio/v8_x64.vcproj b/tools/visual_studio/v8_x64.vcproj
deleted file mode 100644
index 5ffd291..0000000
--- a/tools/visual_studio/v8_x64.vcproj
+++ /dev/null
@@ -1,227 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8"
-	ProjectGUID="{21E22961-22BF-4493-BD3A-868F93DA5179}"
-	RootNamespace="v8"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				LinkLibraryDependencies="true"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="js"
-			>
-			<File
-				RelativePath="..\..\src\apinatives.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\array.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\date.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\macros.py"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\math.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mirror-debugger.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\json.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\uri.js"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8natives.js"
-				>
-				<FileConfiguration
-					Name="Debug|x64"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-				<FileConfiguration
-					Name="Release|x64"
-					>
-					<Tool
-						Name="VCCustomBuildTool"
-						Description="Processing js files..."
-						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
-						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
-					/>
-				</FileConfiguration>
-			</File>
-		</Filter>
-		<Filter
-			Name="generated files"
-			>
-			<File
-				RelativePath="$(IntDir)\DerivedSources\natives.cc"
-				>
-			</File>
-		</Filter>
-		<File
-			RelativePath="..\..\src\snapshot-empty.cc"
-			>
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/tools/visual_studio/x64.vsprops b/tools/visual_studio/x64.vsprops
deleted file mode 100644
index 04d9c65..0000000
--- a/tools/visual_studio/x64.vsprops
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioPropertySheet
-	ProjectType="Visual C++"
-	Version="8.00"
-	OutputDirectory="$(SolutionDir)$(ConfigurationName)64"
-	IntermediateDirectory="$(SolutionDir)$(ConfigurationName)64\obj\$(ProjectName)"
-	Name="x64"
-	>
-	<Tool
-		Name="VCCLCompilerTool"
-		PreprocessorDefinitions="V8_TARGET_ARCH_X64"
-	/>
-	<Tool
-		Name="VCLinkerTool"
-                StackReserveSize="2091752"
-		TargetMachine="17"
-	/>
-</VisualStudioPropertySheet>